// SPDX-License-Identifier: GPL-2.0-or-later
/*
* RDMA Network Block Driver
*
* Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
* Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
* Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
*/
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/scatterlist.h>
#include <linux/idr.h>
#include "rnbd-clt.h"
MODULE_DESCRIPTION("RDMA Network Block Device Client");
MODULE_LICENSE("GPL");
static int rnbd_client_major;
static DEFINE_IDA(index_ida);
static DEFINE_MUTEX(sess_lock);
static LIST_HEAD(sess_list);
static struct workqueue_struct *rnbd_clt_wq;
/*
* Maximum number of partitions an instance can have.
* 6 bits = 64 minors = 63 partitions (one minor is used for the device itself)
*/
#define RNBD_PART_BITS 6
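/*
* For example, the disk with index 2 gets first_minor 2 << 6 = 128 and
* owns minors 128..191: minor 128 for the whole device and 129..191 for
* up to 63 partitions (see rnbd_clt_setup_gen_disk()).
*/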
static inline bool rnbd_clt_get_sess(struct rnbd_clt_session *sess)
{
return refcount_inc_not_zero(&sess->refcount);
}
static void free_sess(struct rnbd_clt_session *sess);
static void rnbd_clt_put_sess(struct rnbd_clt_session *sess)
{
might_sleep();
if (refcount_dec_and_test(&sess->refcount))
free_sess(sess);
}
static void rnbd_clt_put_dev(struct rnbd_clt_dev *dev)
{
might_sleep();
if (!refcount_dec_and_test(&dev->refcount))
return;
ida_free(&index_ida, dev->clt_device_id);
kfree(dev->hw_queues);
kfree(dev->pathname);
rnbd_clt_put_sess(dev->sess);
mutex_destroy(&dev->lock);
kfree(dev);
}
static inline bool rnbd_clt_get_dev(struct rnbd_clt_dev *dev)
{
return refcount_inc_not_zero(&dev->refcount);
}
static void rnbd_clt_change_capacity(struct rnbd_clt_dev *dev,
sector_t new_nsectors)
{
if (get_capacity(dev->gd) == new_nsectors)
return;
/*
* If the size changed, we need to revalidate it
*/
rnbd_clt_info(dev, "Device size changed from %llu to %llu sectors\n",
get_capacity(dev->gd), new_nsectors);
set_capacity_and_notify(dev->gd, new_nsectors);
}
static int process_msg_open_rsp(struct rnbd_clt_dev *dev,
struct rnbd_msg_open_rsp *rsp)
{
struct kobject *gd_kobj;
int err = 0;
mutex_lock(&dev->lock);
if (dev->dev_state == DEV_STATE_UNMAPPED) {
rnbd_clt_info(dev,
"Ignoring Open-Response message from server for unmapped device\n");
err = -ENOENT;
goto out;
}
if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED) {
u64 nsectors = le64_to_cpu(rsp->nsectors);
rnbd_clt_change_capacity(dev, nsectors);
gd_kobj = &disk_to_dev(dev->gd)->kobj;
kobject_uevent(gd_kobj, KOBJ_ONLINE);
rnbd_clt_info(dev, "Device online, device remapped successfully\n");
}
if (!rsp->logical_block_size) {
err = -EINVAL;
goto out;
}
dev->device_id = le32_to_cpu(rsp->device_id);
dev->dev_state = DEV_STATE_MAPPED;
out:
mutex_unlock(&dev->lock);
return err;
}
int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, sector_t newsize)
{
int ret = 0;
mutex_lock(&dev->lock);
if (dev->dev_state != DEV_STATE_MAPPED) {
pr_err("Failed to set new size of the device, device is not opened\n");
ret = -ENOENT;
goto out;
}
rnbd_clt_change_capacity(dev, newsize);
out:
mutex_unlock(&dev->lock);
return ret;
}
static inline void rnbd_clt_dev_requeue(struct rnbd_queue *q)
{
if (WARN_ON(!q->hctx))
return;
/* We can come here from interrupt, thus async=true */
blk_mq_run_hw_queue(q->hctx, true);
}
enum {
RNBD_DELAY_IFBUSY = -1,
};
/**
* rnbd_get_cpu_qlist() - finds a list with HW queues to be rerun
* @sess: Session to find a queue for
* @cpu: Cpu to start the search from
*
* Description:
* Each CPU has a list of HW queues which need to be rerun. If a list
* is not empty, it is marked with a bit. This function finds the first
* set bit in the bitmap and returns the corresponding CPU list.
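*
* For example, with nr_cpu_ids == 4 and bits set for CPUs 1 and 3, a
* search starting at @cpu == 2 returns the list of CPU 3, while a search
* starting at @cpu == 0 returns the list of CPU 1. If no bit is set,
* NULL is returned.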
*/
static struct rnbd_cpu_qlist *
rnbd_get_cpu_qlist(struct rnbd_clt_session *sess, int cpu)
{
int bit;
/* Search from cpu to nr_cpu_ids */
bit = find_next_bit(sess->cpu_queues_bm, nr_cpu_ids, cpu);
if (bit < nr_cpu_ids) {
return per_cpu_ptr(sess->cpu_queues, bit);
} else if (cpu != 0) {
/* Search from 0 to cpu */
bit = find_first_bit(sess->cpu_queues_bm, cpu);
if (bit < cpu)
return per_cpu_ptr(sess->cpu_queues, bit);
}
return NULL;
}
static inline int nxt_cpu(int cpu)
{
return (cpu + 1) % nr_cpu_ids;
}
/**
* rnbd_rerun_if_needed() - rerun next queue marked as stopped
* @sess: Session to rerun a queue on
*
* Description:
* Each CPU has its own list of HW queues which should be rerun.
* The function finds such a list, takes the list lock, picks up
* the first HW queue from the list and requeues it.
*
* Return:
* True if the queue was requeued, false otherwise.
*
* Context:
* Does not matter.
*/
static bool rnbd_rerun_if_needed(struct rnbd_clt_session *sess)
{
struct rnbd_queue *q = NULL;
struct rnbd_cpu_qlist *cpu_q;
unsigned long flags;
int *cpup;
/*
* To keep fairness and not let other queues starve, we always
* try to wake up someone else in a round-robin manner. That of course
* increases latency, but every queue always gets a chance to be executed.
*/
cpup = get_cpu_ptr(sess->cpu_rr);
for (cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(*cpup)); cpu_q;
cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(cpu_q->cpu))) {
if (!spin_trylock_irqsave(&cpu_q->requeue_lock, flags))
continue;
if (!test_bit(cpu_q->cpu, sess->cpu_queues_bm))
goto unlock;
q = list_first_entry_or_null(&cpu_q->requeue_list,
typeof(*q), requeue_list);
if (WARN_ON(!q))
goto clear_bit;
list_del_init(&q->requeue_list);
clear_bit_unlock(0, &q->in_list);
if (list_empty(&cpu_q->requeue_list)) {
/* Clear bit if nothing is left */
clear_bit:
clear_bit(cpu_q->cpu, sess->cpu_queues_bm);
}
unlock:
spin_unlock_irqrestore(&cpu_q->requeue_lock, flags);
if (q)
break;
}
/*
* Save the CPU on which a queue has just been requeued in the per-cpu
* variable. Just incrementing it doesn't work, because
* rnbd_get_cpu_qlist() would always return the first CPU with something
* on the queue list whenever the value stored in the variable is greater
* than the last CPU with something on the list.
*/
if (cpu_q)
*cpup = cpu_q->cpu;
put_cpu_ptr(sess->cpu_rr);
if (q)
rnbd_clt_dev_requeue(q);
return q;
}
/**
* rnbd_rerun_all_if_idle() - rerun all queues left in the list if
* session is idling (there are no requests
* in-flight).
* @sess: Session to rerun the queues on
*
* Description:
* This function tries to rerun all stopped queues if there are no
* requests in-flight anymore. It solves an obvious problem that arises
* when the number of tags is smaller than the number of queues (hctxs)
* which are stopped and put to sleep: if the last permit, which has just
* been put, does not wake up all remaining queues (hctxs), IO requests
* hang forever.
*
* That can happen when all permits, say N, have been exhausted
* from one CPU, and we have many block devices per session, say M.
* Each block device has its own queue (hctx) for each CPU, so eventually
* we can put M x nr_cpu_ids queues (hctxs) to sleep.
* If the number of permits N < M x nr_cpu_ids, we end up with an IO hang.
*
* To avoid this hang last caller of rnbd_put_permit() (last caller is the
* one who observes sess->busy == 0) must wake up all remaining queues.
*
* Context:
* Does not matter.
*/
static void rnbd_rerun_all_if_idle(struct rnbd_clt_session *sess)
{
bool requeued;
do {
requeued = rnbd_rerun_if_needed(sess);
} while (atomic_read(&sess->busy) == 0 && requeued);
}
static struct rtrs_permit *rnbd_get_permit(struct rnbd_clt_session *sess,
enum rtrs_clt_con_type con_type,
enum wait_type wait)
{
struct rtrs_permit *permit;
permit = rtrs_clt_get_permit(sess->rtrs, con_type, wait);
if (permit)
/* We have a subtle rare case here, when all permits can be
* consumed before the busy counter is increased. This is safe,
* because the loser will get NULL as a permit, observe a busy
* counter of 0 and immediately restart the queue itself.
*/
atomic_inc(&sess->busy);
return permit;
}
static void rnbd_put_permit(struct rnbd_clt_session *sess,
struct rtrs_permit *permit)
{
rtrs_clt_put_permit(sess->rtrs, permit);
atomic_dec(&sess->busy);
/* Paired with rnbd_clt_dev_add_to_requeue(). Decrement first
* and then check queue bits.
*/
smp_mb__after_atomic();
rnbd_rerun_all_if_idle(sess);
}
static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess,
enum rtrs_clt_con_type con_type,
enum wait_type wait)
{
struct rnbd_iu *iu;
struct rtrs_permit *permit;
iu = kzalloc(sizeof(*iu), GFP_KERNEL);
if (!iu)
return NULL;
permit = rnbd_get_permit(sess, con_type, wait);
if (!permit) {
kfree(iu);
return NULL;
}
iu->permit = permit;
/*
* The 1st reference is dropped after finishing sending a "user" message,
* the 2nd reference is dropped after the confirmation with the response
* has been returned.
* 1st and 2nd can happen in any order, so the rnbd_iu should be
* released (the rtrs_permit returned to rtrs) only after both
* are finished.
*/
atomic_set(&iu->refcount, 2);
init_waitqueue_head(&iu->comp.wait);
iu->comp.errno = INT_MAX;
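/* INT_MAX means "no completion yet": send_usr_msg() waits on comp.wait
* until the confirmation callback stores a real errno here.
*/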
if (sg_alloc_table(&iu->sgt, 1, GFP_KERNEL)) {
rnbd_put_permit(sess, permit);
kfree(iu);
return NULL;
}
return iu;
}
static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
{
if (atomic_dec_and_test(&iu->refcount)) {
sg_free_table(&iu->sgt);
rnbd_put_permit(sess, iu->permit);
kfree(iu);
}
}
static void rnbd_softirq_done_fn(struct request *rq)
{
struct rnbd_clt_dev *dev = rq->q->disk->private_data;
struct rnbd_clt_session *sess = dev->sess;
struct rnbd_iu *iu;
iu = blk_mq_rq_to_pdu(rq);
sg_free_table_chained(&iu->sgt, RNBD_INLINE_SG_CNT);
rnbd_put_permit(sess, iu->permit);
blk_mq_end_request(rq, errno_to_blk_status(iu->errno));
}
static void msg_io_conf(void *priv, int errno)
{
struct rnbd_iu *iu = priv;
struct rnbd_clt_dev *dev = iu->dev;
struct request *rq = iu->rq;
int rw = rq_data_dir(rq);
iu->errno = errno;
blk_mq_complete_request(rq);
if (errno)
rnbd_clt_info_rl(dev, "%s I/O failed with err: %d\n",
rw == READ ? "read" : "write", errno);
}
static void wake_up_iu_comp(struct rnbd_iu *iu, int errno)
{
iu->comp.errno = errno;
wake_up(&iu->comp.wait);
}
static void msg_conf(void *priv, int errno)
{
struct rnbd_iu *iu = priv;
iu->errno = errno;
schedule_work(&iu->work);
}
static int send_usr_msg(struct rtrs_clt_sess *rtrs, int dir,
struct rnbd_iu *iu, struct kvec *vec,
size_t len, struct scatterlist *sg, unsigned int sg_len,
void (*conf)(struct work_struct *work),
int *errno, int wait)
{
int err;
struct rtrs_clt_req_ops req_ops;
INIT_WORK(&iu->work, conf);
req_ops = (struct rtrs_clt_req_ops) {
.priv = iu,
.conf_fn = msg_conf,
};
err = rtrs_clt_request(dir, &req_ops, rtrs, iu->permit,
vec, 1, len, sg, sg_len);
if (!err && wait) {
wait_event(iu->comp.wait, iu->comp.errno != INT_MAX);
*errno = iu->comp.errno;
} else {
*errno = 0;
}
return err;
}
static void msg_close_conf(struct work_struct *work)
{
struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
struct rnbd_clt_dev *dev = iu->dev;
wake_up_iu_comp(iu, iu->errno);
rnbd_put_iu(dev->sess, iu);
rnbd_clt_put_dev(dev);
}
static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id,
enum wait_type wait)
{
struct rnbd_clt_session *sess = dev->sess;
struct rnbd_msg_close msg;
struct rnbd_iu *iu;
struct kvec vec = {
.iov_base = &msg,
.iov_len = sizeof(msg)
};
int err, errno;
iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
if (!iu)
return -ENOMEM;
iu->buf = NULL;
iu->dev = dev;
msg.hdr.type = cpu_to_le16(RNBD_MSG_CLOSE);
msg.device_id = cpu_to_le32(device_id);
WARN_ON(!rnbd_clt_get_dev(dev));
err = send_usr_msg(sess->rtrs, WRITE, iu, &vec, 0, NULL, 0,
msg_close_conf, &errno, wait);
if (err) {
rnbd_clt_put_dev(dev);
rnbd_put_iu(sess, iu);
} else {
err = errno;
}
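/* Drop one of the two iu references here; on success msg_close_conf()
* drops the other one, on the error path it was already dropped above.
*/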
rnbd_put_iu(sess, iu);
return err;
}
static void msg_open_conf(struct work_struct *work)
{
struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
struct rnbd_msg_open_rsp *rsp = iu->buf;
struct rnbd_clt_dev *dev = iu->dev;
int errno = iu->errno;
bool from_map = false;
/* INIT state is only triggered from rnbd_clt_map_device */
if (dev->dev_state == DEV_STATE_INIT)
from_map = true;
if (errno) {
rnbd_clt_err(dev,
"Opening failed, server responded: %d\n",
errno);
} else {
errno = process_msg_open_rsp(dev, rsp);
if (errno) {
u32 device_id = le32_to_cpu(rsp->device_id);
/*
* If the server thinks it's fine, but we fail to process the
* response, be nice and send a close to the server.
*/
send_msg_close(dev, device_id, RTRS_PERMIT_NOWAIT);
}
}
/* We free rsp in rnbd_clt_map_device for map scenario */
if (!from_map)
kfree(rsp);
wake_up_iu_comp(iu, errno);
rnbd_put_iu(dev->sess, iu);
rnbd_clt_put_dev(dev);
}
static void msg_sess_info_conf(struct work_struct *work)
{
struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
struct rnbd_msg_sess_info_rsp *rsp = iu->buf;
struct rnbd_clt_session *sess = iu->sess;
if (!iu->errno)
sess->ver = min_t(u8, rsp->ver, RNBD_PROTO_VER_MAJOR);
kfree(rsp);
wake_up_iu_comp(iu, iu->errno);
rnbd_put_iu(sess, iu);
rnbd_clt_put_sess(sess);
}
static int send_msg_open(struct rnbd_clt_dev *dev, enum wait_type wait)
{
struct rnbd_clt_session *sess = dev->sess;
struct rnbd_msg_open_rsp *rsp;
struct rnbd_msg_open msg;
struct rnbd_iu *iu;
struct kvec vec = {
.iov_base = &msg,
.iov_len = sizeof(msg)
};
int err, errno;
rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
if (!rsp)
return -ENOMEM;
iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
if (!iu) {
kfree(rsp);
return -ENOMEM;
}
iu->buf = rsp;
iu->dev = dev;
sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN);
msg.access_mode = dev->access_mode;
strscpy(msg.dev_name, dev->pathname, sizeof(msg.dev_name));
WARN_ON(!rnbd_clt_get_dev(dev));
err = send_usr_msg(sess->rtrs, READ, iu,
&vec, sizeof(*rsp), iu->sgt.sgl, 1,
msg_open_conf, &errno, wait);
if (err) {
rnbd_clt_put_dev(dev);
rnbd_put_iu(sess, iu);
kfree(rsp);
} else {
err = errno;
}
rnbd_put_iu(sess, iu);
return err;
}
static int send_msg_sess_info(struct rnbd_clt_session *sess, enum wait_type wait)
{
struct rnbd_msg_sess_info_rsp *rsp;
struct rnbd_msg_sess_info msg;
struct rnbd_iu *iu;
struct kvec vec = {
.iov_base = &msg,
.iov_len = sizeof(msg)
};
int err, errno;
rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
if (!rsp)
return -ENOMEM;
iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
if (!iu) {
kfree(rsp);
return -ENOMEM;
}
iu->buf = rsp;
iu->sess = sess;
sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
msg.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO);
msg.ver = RNBD_PROTO_VER_MAJOR;
if (!rnbd_clt_get_sess(sess)) {
/*
* That can happen only in one case, when RTRS has re-established
* the connection and link_ev() is called, but the session is almost
* dead, the last reference on the session has been put and the caller
* is waiting for RTRS to close everything.
*/
err = -ENODEV;
goto put_iu;
}
err = send_usr_msg(sess->rtrs, READ, iu,
&vec, sizeof(*rsp), iu->sgt.sgl, 1,
msg_sess_info_conf, &errno, wait);
if (err) {
rnbd_clt_put_sess(sess);
put_iu:
rnbd_put_iu(sess, iu);
kfree(rsp);
} else {
err = errno;
}
rnbd_put_iu(sess, iu);
return err;
}
static void set_dev_states_to_disconnected(struct rnbd_clt_session *sess)
{
struct rnbd_clt_dev *dev;
struct kobject *gd_kobj;
mutex_lock(&sess->lock);
list_for_each_entry(dev, &sess->devs_list, list) {
rnbd_clt_err(dev, "Device disconnected.\n");
mutex_lock(&dev->lock);
if (dev->dev_state == DEV_STATE_MAPPED) {
dev->dev_state = DEV_STATE_MAPPED_DISCONNECTED;
gd_kobj = &disk_to_dev(dev->gd)->kobj;
kobject_uevent(gd_kobj, KOBJ_OFFLINE);
}
mutex_unlock(&dev->lock);
}
mutex_unlock(&sess->lock);
}
static void remap_devs(struct rnbd_clt_session *sess)
{
struct rnbd_clt_dev *dev;
struct rtrs_attrs attrs;
int err;
/*
* Careful here: we are called from an RTRS link event directly,
* thus we can't send any RTRS request and wait for the response,
* or RTRS will not be able to complete the request with a failure
* if something goes wrong (failing of outstanding requests
* happens exactly from the context where we are blocking now).
*
* So to avoid deadlocks each user message sent from here must
* be asynchronous.
*/
err = send_msg_sess_info(sess, RTRS_PERMIT_NOWAIT);
if (err) {
pr_err("send_msg_sess_info(\"%s\"): %d\n", sess->sessname, err);
return;
}
err = rtrs_clt_query(sess->rtrs, &attrs);
if (err) {
pr_err("rtrs_clt_query(\"%s\"): %d\n", sess->sessname, err);
return;
}
mutex_lock(&sess->lock);
sess->max_io_size = attrs.max_io_size;
list_for_each_entry(dev, &sess->devs_list, list) {
bool skip;
mutex_lock(&dev->lock);
skip = (dev->dev_state == DEV_STATE_INIT);
mutex_unlock(&dev->lock);
if (skip)
/*
* When a device is establishing a connection for the first
* time, do not remap; it will be closed soon.
*/
continue;
rnbd_clt_info(dev, "session reconnected, remapping device\n");
err = send_msg_open(dev, RTRS_PERMIT_NOWAIT);
if (err) {
rnbd_clt_err(dev, "send_msg_open(): %d\n", err);
break;
}
}
mutex_unlock(&sess->lock);
}
static void rnbd_clt_link_ev(void *priv, enum rtrs_clt_link_ev ev)
{
struct rnbd_clt_session *sess = priv;
switch (ev) {
case RTRS_CLT_LINK_EV_DISCONNECTED:
set_dev_states_to_disconnected(sess);
break;
case RTRS_CLT_LINK_EV_RECONNECTED:
remap_devs(sess);
break;
default:
pr_err("Unknown session event received (%d), session: %s\n",
ev, sess->sessname);
}
}
static void rnbd_init_cpu_qlists(struct rnbd_cpu_qlist __percpu *cpu_queues)
{
unsigned int cpu;
struct rnbd_cpu_qlist *cpu_q;
for_each_possible_cpu(cpu) {
cpu_q = per_cpu_ptr(cpu_queues, cpu);
cpu_q->cpu = cpu;
INIT_LIST_HEAD(&cpu_q->requeue_list);
spin_lock_init(&cpu_q->requeue_lock);
}
}
static void destroy_mq_tags(struct rnbd_clt_session *sess)
{
if (sess->tag_set.tags)
blk_mq_free_tag_set(&sess->tag_set);
}
static inline void wake_up_rtrs_waiters(struct rnbd_clt_session *sess)
{
sess->rtrs_ready = true;
wake_up_all(&sess->rtrs_waitq);
}
static void close_rtrs(struct rnbd_clt_session *sess)
{
might_sleep();
if (!IS_ERR_OR_NULL(sess->rtrs)) {
rtrs_clt_close(sess->rtrs);
sess->rtrs = NULL;
wake_up_rtrs_waiters(sess);
}
}
static void free_sess(struct rnbd_clt_session *sess)
{
WARN_ON(!list_empty(&sess->devs_list));
might_sleep();
close_rtrs(sess);
destroy_mq_tags(sess);
if (!list_empty(&sess->list)) {
mutex_lock(&sess_lock);
list_del(&sess->list);
mutex_unlock(&sess_lock);
}
free_percpu(sess->cpu_queues);
free_percpu(sess->cpu_rr);
mutex_destroy(&sess->lock);
kfree(sess);
}
static struct rnbd_clt_session *alloc_sess(const char *sessname)
{
struct rnbd_clt_session *sess;
int err, cpu;
sess = kzalloc_node(sizeof(*sess), GFP_KERNEL, NUMA_NO_NODE);
if (!sess)
return ERR_PTR(-ENOMEM);
strscpy(sess->sessname, sessname, sizeof(sess->sessname));
atomic_set(&sess->busy, 0);
mutex_init(&sess->lock);
INIT_LIST_HEAD(&sess->devs_list);
INIT_LIST_HEAD(&sess->list);
bitmap_zero(sess->cpu_queues_bm, num_possible_cpus());
init_waitqueue_head(&sess->rtrs_waitq);
refcount_set(&sess->refcount, 1);
sess->cpu_queues = alloc_percpu(struct rnbd_cpu_qlist);
if (!sess->cpu_queues) {
err = -ENOMEM;
goto err;
}
rnbd_init_cpu_qlists(sess->cpu_queues);
/*
* This is a simple percpu variable which stores CPU indices; they are
* updated on each access. We need it for the sake of fairness,
* to wake up queues in a round-robin manner.
*/
sess->cpu_rr = alloc_percpu(int);
if (!sess->cpu_rr) {
err = -ENOMEM;
goto err;
}
for_each_possible_cpu(cpu)
* per_cpu_ptr(sess->cpu_rr, cpu) = cpu;
return sess;
err:
free_sess(sess);
return ERR_PTR(err);
}
static int wait_for_rtrs_connection(struct rnbd_clt_session *sess)
{
wait_event(sess->rtrs_waitq, sess->rtrs_ready);
if (IS_ERR_OR_NULL(sess->rtrs))
return -ECONNRESET;
return 0;
}
static void wait_for_rtrs_disconnection(struct rnbd_clt_session *sess)
__releases(&sess_lock)
__acquires(&sess_lock)
{
DEFINE_WAIT(wait);
prepare_to_wait(&sess->rtrs_waitq, &wait, TASK_UNINTERRUPTIBLE);
if (IS_ERR_OR_NULL(sess->rtrs)) {
finish_wait(&sess->rtrs_waitq, &wait);
return;
}
mutex_unlock(&sess_lock);
/* We loop in the caller, see __find_and_get_sess().
* You can't leave the mutex locked and call schedule(): you would
* deadlock with a caller of free_sess(), which has just put the last
* reference and is about to take the sess_lock in order to delete
* the session from the list.
*/
schedule();
mutex_lock(&sess_lock);
}
static struct rnbd_clt_session *__find_and_get_sess(const char *sessname)
__releases(&sess_lock)
__acquires(&sess_lock)
{
struct rnbd_clt_session *sess, *sn;
int err;
again:
list_for_each_entry_safe(sess, sn, &sess_list, list) {
if (strcmp(sessname, sess->sessname))
continue;
if (sess->rtrs_ready && IS_ERR_OR_NULL(sess->rtrs))
/*
* No RTRS connection, session is dying.
*/
continue;
if (rnbd_clt_get_sess(sess)) {
/*
* Alive session is found, wait for RTRS connection.
*/
mutex_unlock(&sess_lock);
err = wait_for_rtrs_connection(sess);
if (err)
rnbd_clt_put_sess(sess);
mutex_lock(&sess_lock);
if (err)
/* Session is dying, repeat the loop */
goto again;
return sess;
}
/*
* Ref is 0, session is dying, wait for RTRS disconnect
* in order to avoid session names clashes.
*/
wait_for_rtrs_disconnection(sess);
/*
* RTRS is disconnected and soon session will be freed,
* so repeat a loop.
*/
goto again;
}
return NULL;
}
/* caller is responsible for initializing 'first' to false */
static struct
rnbd_clt_session *find_or_create_sess(const char *sessname, bool *first)
{
struct rnbd_clt_session *sess = NULL;
mutex_lock(&sess_lock);
sess = __find_and_get_sess(sessname);
if (!sess) {
sess = alloc_sess(sessname);
if (IS_ERR(sess)) {
mutex_unlock(&sess_lock);
return sess;
}
list_add(&sess->list, &sess_list);
*first = true;
}
mutex_unlock(&sess_lock);
return sess;
}
static int rnbd_client_open(struct gendisk *disk, blk_mode_t mode)
{
struct rnbd_clt_dev *dev = disk->private_data;
if (get_disk_ro(dev->gd) && (mode & BLK_OPEN_WRITE))
return -EPERM;
if (dev->dev_state == DEV_STATE_UNMAPPED ||
!rnbd_clt_get_dev(dev))
return -EIO;
return 0;
}
static void rnbd_client_release(struct gendisk *gen)
{
struct rnbd_clt_dev *dev = gen->private_data;
rnbd_clt_put_dev(dev);
}
static int rnbd_client_getgeo(struct block_device *block_device,
struct hd_geometry *geo)
{
u64 size;
struct rnbd_clt_dev *dev = block_device->bd_disk->private_data;
struct queue_limits *limit = &dev->queue->limits;
size = dev->size * (limit->logical_block_size / SECTOR_SIZE);
geo->cylinders = size >> 6; /* size/64 */
geo->heads = 4;
geo->sectors = 16;
geo->start = 0;
return 0;
}
static const struct block_device_operations rnbd_client_ops = {
.owner = THIS_MODULE,
.open = rnbd_client_open,
.release = rnbd_client_release,
.getgeo = rnbd_client_getgeo
};
/* The amount of data that belongs to an I/O and the amount of data that
* should be read or written to the disk (bi_size) can differ.
*
* E.g. when WRITE_SAME is used, only a small amount of data is
* transferred that is then written repeatedly over a lot of sectors.
*
* Get the size of data to be transferred via RTRS by summing up the sizes
* of the scatter-gather list entries.
*/
static size_t rnbd_clt_get_sg_size(struct scatterlist *sglist, u32 len)
{
struct scatterlist *sg;
size_t tsize = 0;
int i;
for_each_sg(sglist, sg, len, i)
tsize += sg->length;
return tsize;
}
static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
struct request *rq,
struct rnbd_iu *iu)
{
struct rtrs_clt_sess *rtrs = dev->sess->rtrs;
struct rtrs_permit *permit = iu->permit;
struct rnbd_msg_io msg;
struct rtrs_clt_req_ops req_ops;
unsigned int sg_cnt = 0;
struct kvec vec;
size_t size;
int err;
iu->rq = rq;
iu->dev = dev;
msg.sector = cpu_to_le64(blk_rq_pos(rq));
msg.bi_size = cpu_to_le32(blk_rq_bytes(rq));
msg.rw = cpu_to_le32(rq_to_rnbd_flags(rq));
msg.prio = cpu_to_le16(req_get_ioprio(rq));
/*
* We only support discards with a single segment for now.
* See queue limits.
*/
if (req_op(rq) != REQ_OP_DISCARD)
sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sgt.sgl);
if (sg_cnt == 0)
sg_mark_end(&iu->sgt.sgl[0]);
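/* With no data segments (e.g. a discard) nothing is transferred via the
* sg list: the affected range is fully described by msg.sector and
* msg.bi_size.
*/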
msg.hdr.type = cpu_to_le16(RNBD_MSG_IO);
msg.device_id = cpu_to_le32(dev->device_id);
vec = (struct kvec) {
.iov_base = &msg,
.iov_len = sizeof(msg)
};
size = rnbd_clt_get_sg_size(iu->sgt.sgl, sg_cnt);
req_ops = (struct rtrs_clt_req_ops) {
.priv = iu,
.conf_fn = msg_io_conf,
};
err = rtrs_clt_request(rq_data_dir(rq), &req_ops, rtrs, permit,
&vec, 1, size, iu->sgt.sgl, sg_cnt);
if (err) {
rnbd_clt_err_rl(dev, "RTRS failed to transfer IO, err: %d\n",
err);
return err;
}
return 0;
}
/**
* rnbd_clt_dev_add_to_requeue() - add device to requeue if session is busy
* @dev: Device to be checked
* @q: Queue to be added to the requeue list if required
*
* Description:
* If the session is busy, someone will requeue us when resources
* are freed. If the session is not doing anything, the device is not
* added to the list and %false is returned.
*/
static bool rnbd_clt_dev_add_to_requeue(struct rnbd_clt_dev *dev,
struct rnbd_queue *q)
{
struct rnbd_clt_session *sess = dev->sess;
struct rnbd_cpu_qlist *cpu_q;
unsigned long flags;
bool added = true;
bool need_set;
cpu_q = get_cpu_ptr(sess->cpu_queues);
spin_lock_irqsave(&cpu_q->requeue_lock, flags);
if (!test_and_set_bit_lock(0, &q->in_list)) {
if (WARN_ON(!list_empty(&q->requeue_list)))
goto unlock;
need_set = !test_bit(cpu_q->cpu, sess->cpu_queues_bm);
if (need_set) {
set_bit(cpu_q->cpu, sess->cpu_queues_bm);
/* Paired with rnbd_put_permit(). Set a bit first
* and then observe the busy counter.
*/
smp_mb__before_atomic();
}
if (atomic_read(&sess->busy)) {
list_add_tail(&q->requeue_list, &cpu_q->requeue_list);
} else {
/* Very unlikely, but possible: busy counter was
* observed as zero. Drop all bits and return
* false to restart the queue by ourselves.
*/
if (need_set)
clear_bit(cpu_q->cpu, sess->cpu_queues_bm);
clear_bit_unlock(0, &q->in_list);
added = false;
}
}
unlock:
spin_unlock_irqrestore(&cpu_q->requeue_lock, flags);
put_cpu_ptr(sess->cpu_queues);
return added;
}
static void rnbd_clt_dev_kick_mq_queue(struct rnbd_clt_dev *dev,
struct blk_mq_hw_ctx *hctx,
int delay)
{
struct rnbd_queue *q = hctx->driver_data;
if (delay != RNBD_DELAY_IFBUSY)
blk_mq_delay_run_hw_queue(hctx, delay);
else if (!rnbd_clt_dev_add_to_requeue(dev, q))
/*
* If session is not busy we have to restart
* the queue ourselves.
*/
blk_mq_delay_run_hw_queue(hctx, 10/*ms*/);
}
static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request *rq = bd->rq;
struct rnbd_clt_dev *dev = rq->q->disk->private_data;
struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
int err;
blk_status_t ret = BLK_STS_IOERR;
if (dev->dev_state != DEV_STATE_MAPPED)
return BLK_STS_IOERR;
iu->permit = rnbd_get_permit(dev->sess, RTRS_IO_CON,
RTRS_PERMIT_NOWAIT);
if (!iu->permit) {
rnbd_clt_dev_kick_mq_queue(dev, hctx, RNBD_DELAY_IFBUSY);
return BLK_STS_RESOURCE;
}
iu->sgt.sgl = iu->first_sgl;
err = sg_alloc_table_chained(&iu->sgt,
/* Even if the request has no segments,
* the sglist must have at least one entry.
*/
blk_rq_nr_phys_segments(rq) ? : 1,
iu->sgt.sgl,
RNBD_INLINE_SG_CNT);
if (err) {
rnbd_clt_err_rl(dev, "sg_alloc_table_chained ret=%d\n", err);
rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
rnbd_put_permit(dev->sess, iu->permit);
return BLK_STS_RESOURCE;
}
blk_mq_start_request(rq);
err = rnbd_client_xfer_request(dev, rq, iu);
if (err == 0)
return BLK_STS_OK;
if (err == -EAGAIN || err == -ENOMEM) {
rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
ret = BLK_STS_RESOURCE;
}
sg_free_table_chained(&iu->sgt, RNBD_INLINE_SG_CNT);
rnbd_put_permit(dev->sess, iu->permit);
return ret;
}
static int rnbd_rdma_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct rnbd_queue *q = hctx->driver_data;
struct rnbd_clt_dev *dev = q->dev;
return rtrs_clt_rdma_cq_direct(dev->sess->rtrs, hctx->queue_num);
}
static void rnbd_rdma_map_queues(struct blk_mq_tag_set *set)
{
struct rnbd_clt_session *sess = set->driver_data;
/* shared read/write queues */
set->map[HCTX_TYPE_DEFAULT].nr_queues = num_online_cpus();
set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
set->map[HCTX_TYPE_READ].nr_queues = num_online_cpus();
set->map[HCTX_TYPE_READ].queue_offset = 0;
blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
if (sess->nr_poll_queues) {
/* dedicated queue for poll */
set->map[HCTX_TYPE_POLL].nr_queues = sess->nr_poll_queues;
set->map[HCTX_TYPE_POLL].queue_offset = set->map[HCTX_TYPE_READ].queue_offset +
set->map[HCTX_TYPE_READ].nr_queues;
blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
pr_info("[session=%s] mapped %d/%d/%d default/read/poll queues.\n",
sess->sessname,
set->map[HCTX_TYPE_DEFAULT].nr_queues,
set->map[HCTX_TYPE_READ].nr_queues,
set->map[HCTX_TYPE_POLL].nr_queues);
} else {
pr_info("[session=%s] mapped %d/%d default/read queues.\n",
sess->sessname,
set->map[HCTX_TYPE_DEFAULT].nr_queues,
set->map[HCTX_TYPE_READ].nr_queues);
}
}
static struct blk_mq_ops rnbd_mq_ops = {
.queue_rq = rnbd_queue_rq,
.complete = rnbd_softirq_done_fn,
.map_queues = rnbd_rdma_map_queues,
.poll = rnbd_rdma_poll,
};
static int setup_mq_tags(struct rnbd_clt_session *sess)
{
struct blk_mq_tag_set *tag_set = &sess->tag_set;
memset(tag_set, 0, sizeof(*tag_set));
tag_set->ops = &rnbd_mq_ops;
tag_set->queue_depth = sess->queue_depth;
tag_set->numa_node = NUMA_NO_NODE;
tag_set->flags = BLK_MQ_F_SHOULD_MERGE |
BLK_MQ_F_TAG_QUEUE_SHARED;
tag_set->cmd_size = sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE;
/* for HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL */
tag_set->nr_maps = sess->nr_poll_queues ? HCTX_MAX_TYPES : 2;
/*
* HCTX_TYPE_DEFAULT and HCTX_TYPE_READ share one set of queues;
* the others are for HCTX_TYPE_POLL.
*/
tag_set->nr_hw_queues = num_online_cpus() + sess->nr_poll_queues;
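/* Hardware queues [0, num_online_cpus()) serve HCTX_TYPE_DEFAULT and
* HCTX_TYPE_READ, the following nr_poll_queues serve HCTX_TYPE_POLL,
* matching the offsets set up in rnbd_rdma_map_queues().
*/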
tag_set->driver_data = sess;
return blk_mq_alloc_tag_set(tag_set);
}
static struct rnbd_clt_session *
find_and_get_or_create_sess(const char *sessname,
const struct rtrs_addr *paths,
size_t path_cnt, u16 port_nr, u32 nr_poll_queues)
{
struct rnbd_clt_session *sess;
struct rtrs_attrs attrs;
int err;
bool first = false;
struct rtrs_clt_ops rtrs_ops;
sess = find_or_create_sess(sessname, &first);
if (sess == ERR_PTR(-ENOMEM)) {
return ERR_PTR(-ENOMEM);
} else if ((nr_poll_queues && !first) || (!nr_poll_queues && sess->nr_poll_queues)) {
/*
* A device MUST have its own session to use polling mode.
* Mapping a new device onto an existing session must therefore fail.
*/
err = -EINVAL;
goto put_sess;
}
if (!first)
return sess;
if (!path_cnt) {
pr_err("Session %s not found, and path parameter not given", sessname);
err = -ENXIO;
goto put_sess;
}
rtrs_ops = (struct rtrs_clt_ops) {
.priv = sess,
.link_ev = rnbd_clt_link_ev,
};
/*
* Nothing was found, establish rtrs connection and proceed further.
*/
sess->rtrs = rtrs_clt_open(&rtrs_ops, sessname,
paths, path_cnt, port_nr,
0, /* Do not use pdu of rtrs */
RECONNECT_DELAY,
MAX_RECONNECTS, nr_poll_queues);
if (IS_ERR(sess->rtrs)) {
err = PTR_ERR(sess->rtrs);
goto wake_up_and_put;
}
err = rtrs_clt_query(sess->rtrs, &attrs);
if (err)
goto close_rtrs;
sess->max_io_size = attrs.max_io_size;
sess->queue_depth = attrs.queue_depth;
sess->nr_poll_queues = nr_poll_queues;
sess->max_segments = attrs.max_segments;
err = setup_mq_tags(sess);
if (err)
goto close_rtrs;
err = send_msg_sess_info(sess, RTRS_PERMIT_WAIT);
if (err)
goto close_rtrs;
wake_up_rtrs_waiters(sess);
return sess;
close_rtrs:
close_rtrs(sess);
put_sess:
rnbd_clt_put_sess(sess);
return ERR_PTR(err);
wake_up_and_put:
wake_up_rtrs_waiters(sess);
goto put_sess;
}
static inline void rnbd_init_hw_queue(struct rnbd_clt_dev *dev,
struct rnbd_queue *q,
struct blk_mq_hw_ctx *hctx)
{
INIT_LIST_HEAD(&q->requeue_list);
q->dev = dev;
q->hctx = hctx;
}
static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev)
{
unsigned long i;
struct blk_mq_hw_ctx *hctx;
struct rnbd_queue *q;
queue_for_each_hw_ctx(dev->queue, hctx, i) {
q = &dev->hw_queues[i];
rnbd_init_hw_queue(dev, q, hctx);
hctx->driver_data = q;
}
}
static void setup_request_queue(struct rnbd_clt_dev *dev,
struct rnbd_msg_open_rsp *rsp)
{
blk_queue_logical_block_size(dev->queue,
le16_to_cpu(rsp->logical_block_size));
blk_queue_physical_block_size(dev->queue,
le16_to_cpu(rsp->physical_block_size));
blk_queue_max_hw_sectors(dev->queue,
dev->sess->max_io_size / SECTOR_SIZE);
/*
* we don't support discards to "discontiguous" segments
* in one request
*/
blk_queue_max_discard_segments(dev->queue, 1);
blk_queue_max_discard_sectors(dev->queue,
le32_to_cpu(rsp->max_discard_sectors));
dev->queue->limits.discard_granularity =
le32_to_cpu(rsp->discard_granularity);
dev->queue->limits.discard_alignment =
le32_to_cpu(rsp->discard_alignment);
if (le16_to_cpu(rsp->secure_discard))
blk_queue_max_secure_erase_sectors(dev->queue,
le32_to_cpu(rsp->max_discard_sectors));
blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
blk_queue_max_segments(dev->queue, dev->sess->max_segments);
blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
blk_queue_write_cache(dev->queue,
!!(rsp->cache_policy & RNBD_WRITEBACK),
!!(rsp->cache_policy & RNBD_FUA));
}
static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev,
struct rnbd_msg_open_rsp *rsp, int idx)
{
int err;
dev->gd->major = rnbd_client_major;
dev->gd->first_minor = idx << RNBD_PART_BITS;
dev->gd->minors = 1 << RNBD_PART_BITS;
dev->gd->fops = &rnbd_client_ops;
dev->gd->queue = dev->queue;
dev->gd->private_data = dev;
snprintf(dev->gd->disk_name, sizeof(dev->gd->disk_name), "rnbd%d",
idx);
pr_debug("disk_name=%s, capacity=%llu\n",
dev->gd->disk_name,
le64_to_cpu(rsp->nsectors) *
(le16_to_cpu(rsp->logical_block_size) / SECTOR_SIZE));
set_capacity(dev->gd, le64_to_cpu(rsp->nsectors));
if (dev->access_mode == RNBD_ACCESS_RO)
set_disk_ro(dev->gd, true);
/*
* A network block device is not rotational media
*/
blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
err = add_disk(dev->gd);
if (err)
put_disk(dev->gd);
return err;
}
static int rnbd_client_setup_device(struct rnbd_clt_dev *dev,
struct rnbd_msg_open_rsp *rsp)
{
int idx = dev->clt_device_id;
dev->size = le64_to_cpu(rsp->nsectors) *
le16_to_cpu(rsp->logical_block_size);
dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, dev);
if (IS_ERR(dev->gd))
return PTR_ERR(dev->gd);
dev->queue = dev->gd->queue;
rnbd_init_mq_hw_queues(dev);
setup_request_queue(dev, rsp);
return rnbd_clt_setup_gen_disk(dev, rsp, idx);
}
static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
enum rnbd_access_mode access_mode,
const char *pathname,
u32 nr_poll_queues)
{
struct rnbd_clt_dev *dev;
int ret;
dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, NUMA_NO_NODE);
if (!dev)
return ERR_PTR(-ENOMEM);
/*
* nr_cpu_ids: the number of softirq queues
* nr_poll_queues: the number of polling queues
*/
dev->hw_queues = kcalloc(nr_cpu_ids + nr_poll_queues,
sizeof(*dev->hw_queues),
GFP_KERNEL);
if (!dev->hw_queues) {
ret = -ENOMEM;
goto out_alloc;
}
ret = ida_alloc_max(&index_ida, (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
GFP_KERNEL);
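/* The index is capped so that (index << RNBD_PART_BITS) still fits
* into the MINORBITS-wide minor number space.
*/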
if (ret < 0) {
pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
pathname, sess->sessname, ret);
goto out_queues;
}
dev->pathname = kstrdup(pathname, GFP_KERNEL);
if (!dev->pathname) {
ret = -ENOMEM;
goto out_queues;
}
dev->clt_device_id = ret;
dev->sess = sess;
dev->access_mode = access_mode;
dev->nr_poll_queues = nr_poll_queues;
mutex_init(&dev->lock);
refcount_set(&dev->refcount, 1);
dev->dev_state = DEV_STATE_INIT;
/*
* We are called from a sysfs entry here, thus clt-sysfs is
* responsible for making sure the session does not disappear.
*/
WARN_ON(!rnbd_clt_get_sess(sess));
return dev;
out_queues:
kfree(dev->hw_queues);
out_alloc:
kfree(dev);
return ERR_PTR(ret);
}
static bool __exists_dev(const char *pathname, const char *sessname)
{
struct rnbd_clt_session *sess;
struct rnbd_clt_dev *dev;
bool found = false;
list_for_each_entry(sess, &sess_list, list) {
if (sessname && strncmp(sess->sessname, sessname,
sizeof(sess->sessname)))
continue;
mutex_lock(&sess->lock);
list_for_each_entry(dev, &sess->devs_list, list) {
if (strlen(dev->pathname) == strlen(pathname) &&
!strcmp(dev->pathname, pathname)) {
found = true;
break;
}
}
mutex_unlock(&sess->lock);
if (found)
break;
}
return found;
}
static bool exists_devpath(const char *pathname, const char *sessname)
{
bool found;
mutex_lock(&sess_lock);
found = __exists_dev(pathname, sessname);
mutex_unlock(&sess_lock);
return found;
}
static bool insert_dev_if_not_exists_devpath(struct rnbd_clt_dev *dev)
{
bool found;
struct rnbd_clt_session *sess = dev->sess;
mutex_lock(&sess_lock);
found = __exists_dev(dev->pathname, sess->sessname);
if (!found) {
mutex_lock(&sess->lock);
list_add_tail(&dev->list, &sess->devs_list);
mutex_unlock(&sess->lock);
}
mutex_unlock(&sess_lock);
return found;
}
static void delete_dev(struct rnbd_clt_dev *dev)
{
struct rnbd_clt_session *sess = dev->sess;
mutex_lock(&sess->lock);
list_del(&dev->list);
mutex_unlock(&sess->lock);
}
struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
struct rtrs_addr *paths,
size_t path_cnt, u16 port_nr,
const char *pathname,
enum rnbd_access_mode access_mode,
u32 nr_poll_queues)
{
struct rnbd_clt_session *sess;
struct rnbd_clt_dev *dev;
int ret, errno;
struct rnbd_msg_open_rsp *rsp;
struct rnbd_msg_open msg;
struct rnbd_iu *iu;
struct kvec vec = {
.iov_base = &msg,
.iov_len = sizeof(msg)
};
if (exists_devpath(pathname, sessname))
return ERR_PTR(-EEXIST);
sess = find_and_get_or_create_sess(sessname, paths, path_cnt, port_nr, nr_poll_queues);
if (IS_ERR(sess))
return ERR_CAST(sess);
dev = init_dev(sess, access_mode, pathname, nr_poll_queues);
if (IS_ERR(dev)) {
pr_err("map_device: failed to map device '%s' from session %s, can't initialize device, err: %ld\n",
pathname, sess->sessname, PTR_ERR(dev));
ret = PTR_ERR(dev);
goto put_sess;
}
if (insert_dev_if_not_exists_devpath(dev)) {
ret = -EEXIST;
goto put_dev;
}
rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
if (!rsp) {
ret = -ENOMEM;
goto del_dev;
}
iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
if (!iu) {
ret = -ENOMEM;
kfree(rsp);
goto del_dev;
}
iu->buf = rsp;
iu->dev = dev;
sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN);
msg.access_mode = dev->access_mode;
strscpy(msg.dev_name, dev->pathname, sizeof(msg.dev_name));
WARN_ON(!rnbd_clt_get_dev(dev));
ret = send_usr_msg(sess->rtrs, READ, iu,
&vec, sizeof(*rsp), iu->sgt.sgl, 1,
msg_open_conf, &errno, RTRS_PERMIT_WAIT);
if (ret) {
rnbd_clt_put_dev(dev);
rnbd_put_iu(sess, iu);
} else {
ret = errno;
}
if (ret) {
rnbd_clt_err(dev,
"map_device: failed, can't open remote device, err: %d\n",
ret);
goto put_iu;
}
mutex_lock(&dev->lock);
pr_debug("Opened remote device: session=%s, path='%s'\n",
sess->sessname, pathname);
ret = rnbd_client_setup_device(dev, rsp);
if (ret) {
rnbd_clt_err(dev,
"map_device: Failed to configure device, err: %d\n",
ret);
mutex_unlock(&dev->lock);
goto send_close;
}
rnbd_clt_info(dev,
"map_device: Device mapped as %s (nsectors: %llu, logical_block_size: %d, physical_block_size: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, wc: %d, fua: %d)\n",
dev->gd->disk_name, le64_to_cpu(rsp->nsectors),
le16_to_cpu(rsp->logical_block_size),
le16_to_cpu(rsp->physical_block_size),
le32_to_cpu(rsp->max_discard_sectors),
le32_to_cpu(rsp->discard_granularity),
le32_to_cpu(rsp->discard_alignment),
le16_to_cpu(rsp->secure_discard),
sess->max_segments, sess->max_io_size / SECTOR_SIZE,
!!(rsp->cache_policy & RNBD_WRITEBACK),
!!(rsp->cache_policy & RNBD_FUA));
mutex_unlock(&dev->lock);
kfree(rsp);
rnbd_put_iu(sess, iu);
rnbd_clt_put_sess(sess);
return dev;
send_close:
send_msg_close(dev, dev->device_id, RTRS_PERMIT_WAIT);
put_iu:
kfree(rsp);
rnbd_put_iu(sess, iu);
del_dev:
delete_dev(dev);
put_dev:
rnbd_clt_put_dev(dev);
put_sess:
rnbd_clt_put_sess(sess);
return ERR_PTR(ret);
}
static void destroy_gen_disk(struct rnbd_clt_dev *dev)
{
del_gendisk(dev->gd);
put_disk(dev->gd);
}
static void destroy_sysfs(struct rnbd_clt_dev *dev,
const struct attribute *sysfs_self)
{
rnbd_clt_remove_dev_symlink(dev);
if (dev->kobj.state_initialized) {
if (sysfs_self)
/* To avoid a deadlock, first remove the sysfs file itself */
sysfs_remove_file_self(&dev->kobj, sysfs_self);
kobject_del(&dev->kobj);
kobject_put(&dev->kobj);
}
}
int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force,
const struct attribute *sysfs_self)
{
struct rnbd_clt_session *sess = dev->sess;
int refcount, ret = 0;
bool was_mapped;
mutex_lock(&dev->lock);
if (dev->dev_state == DEV_STATE_UNMAPPED) {
rnbd_clt_info(dev, "Device is already being unmapped\n");
ret = -EALREADY;
goto err;
}
refcount = refcount_read(&dev->refcount);
if (!force && refcount > 1) {
rnbd_clt_err(dev,
"Closing device failed, device is in use, (%d device users)\n",
refcount - 1);
ret = -EBUSY;
goto err;
}
was_mapped = (dev->dev_state == DEV_STATE_MAPPED);
dev->dev_state = DEV_STATE_UNMAPPED;
mutex_unlock(&dev->lock);
delete_dev(dev);
destroy_sysfs(dev, sysfs_self);
destroy_gen_disk(dev);
if (was_mapped && sess->rtrs)
send_msg_close(dev, dev->device_id, RTRS_PERMIT_WAIT);
rnbd_clt_info(dev, "Device is unmapped\n");
/* Likely last reference put */
rnbd_clt_put_dev(dev);
/*
* At this point the device and the session may have vanished!
*/
return 0;
err:
mutex_unlock(&dev->lock);
return ret;
}
int rnbd_clt_remap_device(struct rnbd_clt_dev *dev)
{
int err;
mutex_lock(&dev->lock);
if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED)
err = 0;
else if (dev->dev_state == DEV_STATE_UNMAPPED)
err = -ENODEV;
else if (dev->dev_state == DEV_STATE_MAPPED)
err = -EALREADY;
else
err = -EBUSY;
mutex_unlock(&dev->lock);
if (!err) {
rnbd_clt_info(dev, "Remapping device.\n");
err = send_msg_open(dev, RTRS_PERMIT_WAIT);
if (err)
rnbd_clt_err(dev, "remap_device: %d\n", err);
}
return err;
}
static void unmap_device_work(struct work_struct *work)
{
struct rnbd_clt_dev *dev;
dev = container_of(work, typeof(*dev), unmap_on_rmmod_work);
rnbd_clt_unmap_device(dev, true, NULL);
}
static void rnbd_destroy_sessions(void)
{
struct rnbd_clt_session *sess, *sn;
struct rnbd_clt_dev *dev, *tn;
/* Firstly forbid access through sysfs interface */
rnbd_clt_destroy_sysfs_files();
/*
* At this point there is no concurrent access to the sessions
* list and devices list:
* 1. A new session or device can't be created - the session sysfs files
* have been removed.
* 2. A device or session can't be removed - the module reference is
* taken into account in the unmap device sysfs callback.
* 3. No IO requests are in flight - each file open of the block device
* increases the module reference in get_disk().
*
* But there can still be user requests in flight, which are sent by the
* asynchronous send_msg_*() functions, thus before unmapping devices the
* RTRS session must be explicitly closed.
*/
list_for_each_entry_safe(sess, sn, &sess_list, list) {
if (!rnbd_clt_get_sess(sess))
continue;
close_rtrs(sess);
list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
/*
* Unmapping happens in parallel here for only one reason:
* del_gendisk() takes around half a second, so
* with a huge number of devices the whole module unload
* procedure would otherwise take minutes.
*/
INIT_WORK(&dev->unmap_on_rmmod_work, unmap_device_work);
queue_work(rnbd_clt_wq, &dev->unmap_on_rmmod_work);
}
rnbd_clt_put_sess(sess);
}
/* Wait for all scheduled unmap works */
flush_workqueue(rnbd_clt_wq);
WARN_ON(!list_empty(&sess_list));
}
static int __init rnbd_client_init(void)
{
int err = 0;
BUILD_BUG_ON(sizeof(struct rnbd_msg_hdr) != 4);
BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info) != 36);
BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info_rsp) != 36);
BUILD_BUG_ON(sizeof(struct rnbd_msg_open) != 264);
BUILD_BUG_ON(sizeof(struct rnbd_msg_close) != 8);
BUILD_BUG_ON(sizeof(struct rnbd_msg_open_rsp) != 56);
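/* The sizes checked above are part of the wire protocol; changing any of
* the message structs would break compatibility, so fail the build instead.
*/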
rnbd_client_major = register_blkdev(rnbd_client_major, "rnbd");
if (rnbd_client_major <= 0) {
pr_err("Failed to load module, block device registration failed\n");
return -EBUSY;
}
err = rnbd_clt_create_sysfs_files();
if (err) {
pr_err("Failed to load module, creating sysfs device files failed, err: %d\n",
err);
unregister_blkdev(rnbd_client_major, "rnbd");
return err;
}
rnbd_clt_wq = alloc_workqueue("rnbd_clt_wq", 0, 0);
if (!rnbd_clt_wq) {
pr_err("Failed to load module, alloc_workqueue failed.\n");
rnbd_clt_destroy_sysfs_files();
unregister_blkdev(rnbd_client_major, "rnbd");
err = -ENOMEM;
}
return err;
}
static void __exit rnbd_client_exit(void)
{
rnbd_destroy_sessions();
unregister_blkdev(rnbd_client_major, "rnbd");
ida_destroy(&index_ida);
destroy_workqueue(rnbd_clt_wq);
}
module_init(rnbd_client_init);
module_exit(rnbd_client_exit);
/* End of file: drivers/block/rnbd/rnbd-clt.c (repo: linux-master) */
// SPDX-License-Identifier: GPL-2.0-only
/*
drbd_state.c
This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
Copyright (C) 1999-2008, Philipp Reisner <[email protected]>.
Copyright (C) 2002-2008, Lars Ellenberg <[email protected]>.
Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
from Logicworks, Inc. for making SDP replication support possible.
*/
#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_state_change.h"
struct after_state_chg_work {
struct drbd_work w;
struct drbd_device *device;
union drbd_state os;
union drbd_state ns;
enum chg_state_flags flags;
struct completion *done;
struct drbd_state_change *state_change;
};
enum sanitize_state_warnings {
NO_WARNING,
ABORTED_ONLINE_VERIFY,
ABORTED_RESYNC,
CONNECTION_LOST_NEGOTIATING,
IMPLICITLY_UPGRADED_DISK,
IMPLICITLY_UPGRADED_PDSK,
};
static void count_objects(struct drbd_resource *resource,
unsigned int *n_devices,
unsigned int *n_connections)
{
struct drbd_device *device;
struct drbd_connection *connection;
int vnr;
*n_devices = 0;
*n_connections = 0;
idr_for_each_entry(&resource->devices, device, vnr)
(*n_devices)++;
for_each_connection(connection, resource)
(*n_connections)++;
}
static struct drbd_state_change *alloc_state_change(unsigned int n_devices, unsigned int n_connections, gfp_t gfp)
{
struct drbd_state_change *state_change;
unsigned int size, n;
size = sizeof(struct drbd_state_change) +
n_devices * sizeof(struct drbd_device_state_change) +
n_connections * sizeof(struct drbd_connection_state_change) +
n_devices * n_connections * sizeof(struct drbd_peer_device_state_change);
state_change = kmalloc(size, gfp);
if (!state_change)
return NULL;
state_change->n_devices = n_devices;
state_change->n_connections = n_connections;
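/* Everything lives in the single allocation made above: the
* drbd_state_change struct is followed by the devices, connections and
* peer_devices arrays, which the pointers below are carved out of.
*/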
state_change->devices = (void *)(state_change + 1);
state_change->connections = (void *)&state_change->devices[n_devices];
state_change->peer_devices = (void *)&state_change->connections[n_connections];
state_change->resource->resource = NULL;
for (n = 0; n < n_devices; n++)
state_change->devices[n].device = NULL;
for (n = 0; n < n_connections; n++)
state_change->connections[n].connection = NULL;
return state_change;
}
struct drbd_state_change *remember_old_state(struct drbd_resource *resource, gfp_t gfp)
{
struct drbd_state_change *state_change;
struct drbd_device *device;
unsigned int n_devices;
struct drbd_connection *connection;
unsigned int n_connections;
int vnr;
struct drbd_device_state_change *device_state_change;
struct drbd_peer_device_state_change *peer_device_state_change;
struct drbd_connection_state_change *connection_state_change;
/* Caller holds req_lock spinlock.
* No state, no device IDR, no connection lists can change. */
count_objects(resource, &n_devices, &n_connections);
state_change = alloc_state_change(n_devices, n_connections, gfp);
if (!state_change)
return NULL;
kref_get(&resource->kref);
state_change->resource->resource = resource;
state_change->resource->role[OLD] =
conn_highest_role(first_connection(resource));
state_change->resource->susp[OLD] = resource->susp;
state_change->resource->susp_nod[OLD] = resource->susp_nod;
state_change->resource->susp_fen[OLD] = resource->susp_fen;
connection_state_change = state_change->connections;
for_each_connection(connection, resource) {
kref_get(&connection->kref);
connection_state_change->connection = connection;
connection_state_change->cstate[OLD] =
connection->cstate;
connection_state_change->peer_role[OLD] =
conn_highest_peer(connection);
connection_state_change++;
}
device_state_change = state_change->devices;
peer_device_state_change = state_change->peer_devices;
idr_for_each_entry(&resource->devices, device, vnr) {
kref_get(&device->kref);
device_state_change->device = device;
device_state_change->disk_state[OLD] = device->state.disk;
/* The peer_devices for each device have to be enumerated in
the order of the connections. We may not use for_each_peer_device() here. */
for_each_connection(connection, resource) {
struct drbd_peer_device *peer_device;
peer_device = conn_peer_device(connection, device->vnr);
peer_device_state_change->peer_device = peer_device;
peer_device_state_change->disk_state[OLD] =
device->state.pdsk;
peer_device_state_change->repl_state[OLD] =
max_t(enum drbd_conns,
C_WF_REPORT_PARAMS, device->state.conn);
peer_device_state_change->resync_susp_user[OLD] =
device->state.user_isp;
peer_device_state_change->resync_susp_peer[OLD] =
device->state.peer_isp;
peer_device_state_change->resync_susp_dependency[OLD] =
device->state.aftr_isp;
peer_device_state_change++;
}
device_state_change++;
}
return state_change;
}
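/* remember_old_state() records the [OLD] values before a state change;
* remember_new_state() fills in the matching [NEW] values afterwards.
*/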
static void remember_new_state(struct drbd_state_change *state_change)
{
struct drbd_resource_state_change *resource_state_change;
struct drbd_resource *resource;
unsigned int n;
if (!state_change)
return;
resource_state_change = &state_change->resource[0];
resource = resource_state_change->resource;
resource_state_change->role[NEW] =
conn_highest_role(first_connection(resource));
resource_state_change->susp[NEW] = resource->susp;
resource_state_change->susp_nod[NEW] = resource->susp_nod;
resource_state_change->susp_fen[NEW] = resource->susp_fen;
for (n = 0; n < state_change->n_devices; n++) {
struct drbd_device_state_change *device_state_change =
&state_change->devices[n];
struct drbd_device *device = device_state_change->device;
device_state_change->disk_state[NEW] = device->state.disk;
}
for (n = 0; n < state_change->n_connections; n++) {
struct drbd_connection_state_change *connection_state_change =
&state_change->connections[n];
struct drbd_connection *connection =
connection_state_change->connection;
connection_state_change->cstate[NEW] = connection->cstate;
connection_state_change->peer_role[NEW] =
conn_highest_peer(connection);
}
for (n = 0; n < state_change->n_devices * state_change->n_connections; n++) {
struct drbd_peer_device_state_change *peer_device_state_change =
&state_change->peer_devices[n];
struct drbd_device *device =
peer_device_state_change->peer_device->device;
union drbd_dev_state state = device->state;
peer_device_state_change->disk_state[NEW] = state.pdsk;
peer_device_state_change->repl_state[NEW] =
max_t(enum drbd_conns, C_WF_REPORT_PARAMS, state.conn);
peer_device_state_change->resync_susp_user[NEW] =
state.user_isp;
peer_device_state_change->resync_susp_peer[NEW] =
state.peer_isp;
peer_device_state_change->resync_susp_dependency[NEW] =
state.aftr_isp;
}
}
void copy_old_to_new_state_change(struct drbd_state_change *state_change)
{
struct drbd_resource_state_change *resource_state_change = &state_change->resource[0];
unsigned int n_device, n_connection, n_peer_device, n_peer_devices;
#define OLD_TO_NEW(x) \
(x[NEW] = x[OLD])
OLD_TO_NEW(resource_state_change->role);
OLD_TO_NEW(resource_state_change->susp);
OLD_TO_NEW(resource_state_change->susp_nod);
OLD_TO_NEW(resource_state_change->susp_fen);
for (n_connection = 0; n_connection < state_change->n_connections; n_connection++) {
struct drbd_connection_state_change *connection_state_change =
&state_change->connections[n_connection];
OLD_TO_NEW(connection_state_change->peer_role);
OLD_TO_NEW(connection_state_change->cstate);
}
for (n_device = 0; n_device < state_change->n_devices; n_device++) {
struct drbd_device_state_change *device_state_change =
&state_change->devices[n_device];
OLD_TO_NEW(device_state_change->disk_state);
}
n_peer_devices = state_change->n_devices * state_change->n_connections;
for (n_peer_device = 0; n_peer_device < n_peer_devices; n_peer_device++) {
struct drbd_peer_device_state_change *p =
&state_change->peer_devices[n_peer_device];
OLD_TO_NEW(p->disk_state);
OLD_TO_NEW(p->repl_state);
OLD_TO_NEW(p->resync_susp_user);
OLD_TO_NEW(p->resync_susp_peer);
OLD_TO_NEW(p->resync_susp_dependency);
}
#undef OLD_TO_NEW
}
void forget_state_change(struct drbd_state_change *state_change)
{
unsigned int n;
if (!state_change)
return;
if (state_change->resource->resource)
kref_put(&state_change->resource->resource->kref, drbd_destroy_resource);
for (n = 0; n < state_change->n_devices; n++) {
struct drbd_device *device = state_change->devices[n].device;
if (device)
kref_put(&device->kref, drbd_destroy_device);
}
for (n = 0; n < state_change->n_connections; n++) {
struct drbd_connection *connection =
state_change->connections[n].connection;
if (connection)
kref_put(&connection->kref, drbd_destroy_connection);
}
kfree(state_change);
}
static int w_after_state_ch(struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_device *device, union drbd_state os,
union drbd_state ns, enum chg_state_flags flags,
struct drbd_state_change *);
static enum drbd_state_rv is_valid_state(struct drbd_device *, union drbd_state);
static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_connection *);
static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state os,
union drbd_state ns, enum sanitize_state_warnings *warn);
static inline bool is_susp(union drbd_state s)
{
return s.susp || s.susp_nod || s.susp_fen;
}
bool conn_all_vols_unconf(struct drbd_connection *connection)
{
struct drbd_peer_device *peer_device;
bool rv = true;
int vnr;
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
if (device->state.disk != D_DISKLESS ||
device->state.conn != C_STANDALONE ||
device->state.role != R_SECONDARY) {
rv = false;
break;
}
}
rcu_read_unlock();
return rv;
}
/* Unfortunately the states were not correctly ordered when
they were defined, therefore we can not use max_t() here. */
static enum drbd_role max_role(enum drbd_role role1, enum drbd_role role2)
{
if (role1 == R_PRIMARY || role2 == R_PRIMARY)
return R_PRIMARY;
if (role1 == R_SECONDARY || role2 == R_SECONDARY)
return R_SECONDARY;
return R_UNKNOWN;
}
static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
{
if (role1 == R_UNKNOWN || role2 == R_UNKNOWN)
return R_UNKNOWN;
if (role1 == R_SECONDARY || role2 == R_SECONDARY)
return R_SECONDARY;
return R_PRIMARY;
}
enum drbd_role conn_highest_role(struct drbd_connection *connection)
{
enum drbd_role role = R_SECONDARY;
struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
role = max_role(role, device->state.role);
}
rcu_read_unlock();
return role;
}
enum drbd_role conn_highest_peer(struct drbd_connection *connection)
{
enum drbd_role peer = R_UNKNOWN;
struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
peer = max_role(peer, device->state.peer);
}
rcu_read_unlock();
return peer;
}
enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection)
{
enum drbd_disk_state disk_state = D_DISKLESS;
struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
disk_state = max_t(enum drbd_disk_state, disk_state, device->state.disk);
}
rcu_read_unlock();
return disk_state;
}
enum drbd_disk_state conn_lowest_disk(struct drbd_connection *connection)
{
enum drbd_disk_state disk_state = D_MASK;
struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
disk_state = min_t(enum drbd_disk_state, disk_state, device->state.disk);
}
rcu_read_unlock();
return disk_state;
}
enum drbd_disk_state conn_highest_pdsk(struct drbd_connection *connection)
{
enum drbd_disk_state disk_state = D_DISKLESS;
struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
disk_state = max_t(enum drbd_disk_state, disk_state, device->state.pdsk);
}
rcu_read_unlock();
return disk_state;
}
enum drbd_conns conn_lowest_conn(struct drbd_connection *connection)
{
enum drbd_conns conn = C_MASK;
struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
conn = min_t(enum drbd_conns, conn, device->state.conn);
}
rcu_read_unlock();
return conn;
}
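/* Returns true if no volume of this connection is (still) in C_WF_REPORT_PARAMS. */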
static bool no_peer_wf_report_params(struct drbd_connection *connection)
{
struct drbd_peer_device *peer_device;
int vnr;
bool rv = true;
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
if (peer_device->device->state.conn == C_WF_REPORT_PARAMS) {
rv = false;
break;
}
rcu_read_unlock();
return rv;
}
static void wake_up_all_devices(struct drbd_connection *connection)
{
struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
wake_up(&peer_device->device->state_wait);
rcu_read_unlock();
}
/**
* cl_wide_st_chg() - true if the state change is a cluster wide one
* @device: DRBD device.
* @os: old (current) state.
* @ns: new (wanted) state.
*/
static int cl_wide_st_chg(struct drbd_device *device,
union drbd_state os, union drbd_state ns)
{
return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
(os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
(os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
(os.disk != D_FAILED && ns.disk == D_FAILED))) ||
(os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
(os.conn == C_CONNECTED && ns.conn == C_VERIFY_S) ||
(os.conn == C_CONNECTED && ns.conn == C_WF_REPORT_PARAMS);
}
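/* Overlay the bits selected by @mask with the corresponding bits of @val
 * on top of the old state @os. */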
static union drbd_state
apply_mask_val(union drbd_state os, union drbd_state mask, union drbd_state val)
{
union drbd_state ns;
ns.i = (os.i & ~mask.i) | val.i;
return ns;
}
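/* Apply @mask/@val to the current device state under req_lock and commit the
 * result via _drbd_set_state(); no cluster-wide negotiation happens here. */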
enum drbd_state_rv
drbd_change_state(struct drbd_device *device, enum chg_state_flags f,
union drbd_state mask, union drbd_state val)
{
unsigned long flags;
union drbd_state ns;
enum drbd_state_rv rv;
spin_lock_irqsave(&device->resource->req_lock, flags);
ns = apply_mask_val(drbd_read_state(device), mask, val);
rv = _drbd_set_state(device, ns, f, NULL);
spin_unlock_irqrestore(&device->resource->req_lock, flags);
return rv;
}
/**
* drbd_force_state() - Impose a change which happens outside our control on our state
* @device: DRBD device.
* @mask: mask of state bits to change.
* @val: value of new state bits.
*/
void drbd_force_state(struct drbd_device *device,
union drbd_state mask, union drbd_state val)
{
drbd_change_state(device, CS_HARD, mask, val);
}
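/* Wait condition used by drbd_req_state() during a cluster-wide state change:
 * returns SS_CW_SUCCESS or SS_CW_FAILED_BY_PEER once the peer's reply arrived,
 * SS_CW_NO_NEED if the change turns out not to be cluster-wide, an SS_ error
 * code to abort, or SS_UNKNOWN_ERROR to keep waiting. */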
static enum drbd_state_rv
_req_st_cond(struct drbd_device *device, union drbd_state mask,
union drbd_state val)
{
union drbd_state os, ns;
unsigned long flags;
enum drbd_state_rv rv;
if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &device->flags))
return SS_CW_SUCCESS;
if (test_and_clear_bit(CL_ST_CHG_FAIL, &device->flags))
return SS_CW_FAILED_BY_PEER;
spin_lock_irqsave(&device->resource->req_lock, flags);
os = drbd_read_state(device);
ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL);
rv = is_valid_transition(os, ns);
if (rv >= SS_SUCCESS)
rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
if (!cl_wide_st_chg(device, os, ns))
rv = SS_CW_NO_NEED;
if (rv == SS_UNKNOWN_ERROR) {
rv = is_valid_state(device, ns);
if (rv >= SS_SUCCESS) {
rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
if (rv >= SS_SUCCESS)
rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
}
}
spin_unlock_irqrestore(&device->resource->req_lock, flags);
return rv;
}
/**
* drbd_req_state() - Perform a possibly cluster-wide state change
* @device: DRBD device.
* @mask: mask of state bits to change.
* @val: value of new state bits.
* @f: flags
*
* Should not be called directly, use drbd_request_state() or
* _drbd_request_state().
*/
static enum drbd_state_rv
drbd_req_state(struct drbd_device *device, union drbd_state mask,
union drbd_state val, enum chg_state_flags f)
{
struct completion done;
unsigned long flags;
union drbd_state os, ns;
enum drbd_state_rv rv;
void *buffer = NULL;
init_completion(&done);
if (f & CS_SERIALIZE)
mutex_lock(device->state_mutex);
if (f & CS_INHIBIT_MD_IO)
buffer = drbd_md_get_buffer(device, __func__);
spin_lock_irqsave(&device->resource->req_lock, flags);
os = drbd_read_state(device);
ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL);
rv = is_valid_transition(os, ns);
if (rv < SS_SUCCESS) {
spin_unlock_irqrestore(&device->resource->req_lock, flags);
goto abort;
}
if (cl_wide_st_chg(device, os, ns)) {
rv = is_valid_state(device, ns);
if (rv == SS_SUCCESS)
rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
spin_unlock_irqrestore(&device->resource->req_lock, flags);
if (rv < SS_SUCCESS) {
if (f & CS_VERBOSE)
print_st_err(device, os, ns, rv);
goto abort;
}
if (drbd_send_state_req(first_peer_device(device), mask, val)) {
rv = SS_CW_FAILED_BY_PEER;
if (f & CS_VERBOSE)
print_st_err(device, os, ns, rv);
goto abort;
}
wait_event(device->state_wait,
(rv = _req_st_cond(device, mask, val)));
if (rv < SS_SUCCESS) {
if (f & CS_VERBOSE)
print_st_err(device, os, ns, rv);
goto abort;
}
spin_lock_irqsave(&device->resource->req_lock, flags);
ns = apply_mask_val(drbd_read_state(device), mask, val);
rv = _drbd_set_state(device, ns, f, &done);
} else {
rv = _drbd_set_state(device, ns, f, &done);
}
spin_unlock_irqrestore(&device->resource->req_lock, flags);
if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
wait_for_completion(&done);
}
abort:
if (buffer)
drbd_md_put_buffer(device);
if (f & CS_SERIALIZE)
mutex_unlock(device->state_mutex);
return rv;
}
/**
* _drbd_request_state() - Request a state change (with flags)
* @device: DRBD device.
* @mask: mask of state bits to change.
* @val: value of new state bits.
* @f: flags
*
* Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
* flag, or when logging of failed state change requests is not desired.
*/
enum drbd_state_rv
_drbd_request_state(struct drbd_device *device, union drbd_state mask,
union drbd_state val, enum chg_state_flags f)
{
enum drbd_state_rv rv;
wait_event(device->state_wait,
(rv = drbd_req_state(device, mask, val, f)) != SS_IN_TRANSIENT_STATE);
return rv;
}
/*
* We grab drbd_md_get_buffer(), because we don't want to "fail" the disk while
* there is IO in-flight: the transition into D_FAILED for detach purposes
* may get misinterpreted as an actual IO error in a confused endio function.
*
* We wrap it all into wait_event(), to retry in case the drbd_req_state()
* returns SS_IN_TRANSIENT_STATE.
*
* To avoid potential deadlock with e.g. the receiver thread trying to grab
* drbd_md_get_buffer() while trying to get out of the "transient state", we
* need to grab and release the meta data buffer inside of that wait_event loop.
*/
static enum drbd_state_rv
request_detach(struct drbd_device *device)
{
return drbd_req_state(device, NS(disk, D_FAILED),
CS_VERBOSE | CS_ORDERED | CS_INHIBIT_MD_IO);
}
int drbd_request_detach_interruptible(struct drbd_device *device)
{
int ret, rv;
drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */
wait_event_interruptible(device->state_wait,
(rv = request_detach(device)) != SS_IN_TRANSIENT_STATE);
drbd_resume_io(device);
ret = wait_event_interruptible(device->misc_wait,
device->state.disk != D_FAILED);
if (rv == SS_IS_DISKLESS)
rv = SS_NOTHING_TO_DO;
if (ret)
rv = ERR_INTR;
return rv;
}
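/* Like _drbd_request_state(), but for callers that already hold
 * device->state_mutex: the mutex is dropped and re-taken around each
 * retry of the wait. */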
enum drbd_state_rv
_drbd_request_state_holding_state_mutex(struct drbd_device *device, union drbd_state mask,
union drbd_state val, enum chg_state_flags f)
{
enum drbd_state_rv rv;
BUG_ON(f & CS_SERIALIZE);
wait_event_cmd(device->state_wait,
(rv = drbd_req_state(device, mask, val, f)) != SS_IN_TRANSIENT_STATE,
mutex_unlock(device->state_mutex),
mutex_lock(device->state_mutex));
return rv;
}
static void print_st(struct drbd_device *device, const char *name, union drbd_state ns)
{
drbd_err(device, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
name,
drbd_conn_str(ns.conn),
drbd_role_str(ns.role),
drbd_role_str(ns.peer),
drbd_disk_str(ns.disk),
drbd_disk_str(ns.pdsk),
is_susp(ns) ? 's' : 'r',
ns.aftr_isp ? 'a' : '-',
ns.peer_isp ? 'p' : '-',
ns.user_isp ? 'u' : '-',
ns.susp_fen ? 'F' : '-',
ns.susp_nod ? 'N' : '-'
);
}
void print_st_err(struct drbd_device *device, union drbd_state os,
union drbd_state ns, enum drbd_state_rv err)
{
if (err == SS_IN_TRANSIENT_STATE)
return;
drbd_err(device, "State change failed: %s\n", drbd_set_st_err_str(err));
print_st(device, " state", os);
print_st(device, "wanted", ns);
}
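/* Render the state fields that differ between @os and @ns (and are selected
 * by @flags) into the buffer @pb; returns the number of characters written. */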
static long print_state_change(char *pb, union drbd_state os, union drbd_state ns,
enum chg_state_flags flags)
{
char *pbp;
pbp = pb;
*pbp = 0;
if (ns.role != os.role && flags & CS_DC_ROLE)
pbp += sprintf(pbp, "role( %s -> %s ) ",
drbd_role_str(os.role),
drbd_role_str(ns.role));
if (ns.peer != os.peer && flags & CS_DC_PEER)
pbp += sprintf(pbp, "peer( %s -> %s ) ",
drbd_role_str(os.peer),
drbd_role_str(ns.peer));
if (ns.conn != os.conn && flags & CS_DC_CONN)
pbp += sprintf(pbp, "conn( %s -> %s ) ",
drbd_conn_str(os.conn),
drbd_conn_str(ns.conn));
if (ns.disk != os.disk && flags & CS_DC_DISK)
pbp += sprintf(pbp, "disk( %s -> %s ) ",
drbd_disk_str(os.disk),
drbd_disk_str(ns.disk));
if (ns.pdsk != os.pdsk && flags & CS_DC_PDSK)
pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
drbd_disk_str(os.pdsk),
drbd_disk_str(ns.pdsk));
return pbp - pb;
}
static void drbd_pr_state_change(struct drbd_device *device, union drbd_state os, union drbd_state ns,
enum chg_state_flags flags)
{
char pb[300];
char *pbp = pb;
pbp += print_state_change(pbp, os, ns, flags ^ CS_DC_MASK);
if (ns.aftr_isp != os.aftr_isp)
pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
os.aftr_isp,
ns.aftr_isp);
if (ns.peer_isp != os.peer_isp)
pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
os.peer_isp,
ns.peer_isp);
if (ns.user_isp != os.user_isp)
pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
os.user_isp,
ns.user_isp);
if (pbp != pb)
drbd_info(device, "%s\n", pb);
}
static void conn_pr_state_change(struct drbd_connection *connection, union drbd_state os, union drbd_state ns,
enum chg_state_flags flags)
{
char pb[300];
char *pbp = pb;
pbp += print_state_change(pbp, os, ns, flags);
if (is_susp(ns) != is_susp(os) && flags & CS_DC_SUSP)
pbp += sprintf(pbp, "susp( %d -> %d ) ",
is_susp(os),
is_susp(ns));
if (pbp != pb)
drbd_info(connection, "%s\n", pb);
}
/**
* is_valid_state() - Returns an SS_ error code if ns is not valid
* @device: DRBD device.
* @ns: State to consider.
*/
static enum drbd_state_rv
is_valid_state(struct drbd_device *device, union drbd_state ns)
{
/* See drbd_state_sw_errors in drbd_strings.c */
enum drbd_fencing_p fp;
enum drbd_state_rv rv = SS_SUCCESS;
struct net_conf *nc;
rcu_read_lock();
fp = FP_DONT_CARE;
if (get_ldev(device)) {
fp = rcu_dereference(device->ldev->disk_conf)->fencing;
put_ldev(device);
}
nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
if (nc) {
if (!nc->two_primaries && ns.role == R_PRIMARY) {
if (ns.peer == R_PRIMARY)
rv = SS_TWO_PRIMARIES;
else if (conn_highest_peer(first_peer_device(device)->connection) == R_PRIMARY)
rv = SS_O_VOL_PEER_PRI;
}
}
if (rv <= 0)
goto out; /* already found a reason to abort */
else if (ns.role == R_SECONDARY && device->open_cnt)
rv = SS_DEVICE_IN_USE;
else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
rv = SS_NO_UP_TO_DATE_DISK;
else if (fp >= FP_RESOURCE &&
ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
rv = SS_PRIMARY_NOP;
else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
rv = SS_NO_UP_TO_DATE_DISK;
else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
rv = SS_NO_LOCAL_DISK;
else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
rv = SS_NO_REMOTE_DISK;
else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
rv = SS_NO_UP_TO_DATE_DISK;
else if ((ns.conn == C_CONNECTED ||
ns.conn == C_WF_BITMAP_S ||
ns.conn == C_SYNC_SOURCE ||
ns.conn == C_PAUSED_SYNC_S) &&
ns.disk == D_OUTDATED)
rv = SS_CONNECTED_OUTDATES;
else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
(nc->verify_alg[0] == 0))
rv = SS_NO_VERIFY_ALG;
else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
first_peer_device(device)->connection->agreed_pro_version < 88)
rv = SS_NOT_SUPPORTED;
else if (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
rv = SS_NO_UP_TO_DATE_DISK;
else if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
ns.pdsk == D_UNKNOWN)
rv = SS_NEED_CONNECTION;
else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
rv = SS_CONNECTED_OUTDATES;
out:
rcu_read_unlock();
return rv;
}
/**
* is_valid_soft_transition() - Returns an SS_ error code if the state transition is not possible
* @os: old state.
* @ns: new state.
* @connection: DRBD connection.
*
* This function limits state transitions that may be declined by DRBD, i.e.
* user requests (aka soft transitions).
*/
static enum drbd_state_rv
is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_connection *connection)
{
enum drbd_state_rv rv = SS_SUCCESS;
if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
os.conn > C_CONNECTED)
rv = SS_RESYNC_RUNNING;
if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
rv = SS_ALREADY_STANDALONE;
if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
rv = SS_IS_DISKLESS;
if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
rv = SS_NO_NET_CONFIG;
if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
rv = SS_LOWER_THAN_OUTDATED;
if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
rv = SS_IN_TRANSIENT_STATE;
/* While establishing a connection, only allow cstate to change.
Delay/refuse role changes, detach, attach, etc. (they do not touch cstate) */
if (test_bit(STATE_SENT, &connection->flags) &&
!((ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION) ||
(ns.conn >= C_CONNECTED && os.conn == C_WF_REPORT_PARAMS)))
rv = SS_IN_TRANSIENT_STATE;
/* Do not promote during resync handshake triggered by "force primary".
* This is a hack. It should really be rejected by the peer during the
* cluster wide state change request. */
if (os.role != R_PRIMARY && ns.role == R_PRIMARY
&& ns.pdsk == D_UP_TO_DATE
&& ns.disk != D_UP_TO_DATE && ns.disk != D_DISKLESS
&& (ns.conn <= C_WF_SYNC_UUID || ns.conn != os.conn))
rv = SS_IN_TRANSIENT_STATE;
if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
rv = SS_NEED_CONNECTION;
if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
ns.conn != os.conn && os.conn > C_CONNECTED)
rv = SS_RESYNC_RUNNING;
if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
os.conn < C_CONNECTED)
rv = SS_NEED_CONNECTION;
if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
&& os.conn < C_WF_REPORT_PARAMS)
rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
if (ns.conn == C_DISCONNECTING && ns.pdsk == D_OUTDATED &&
os.conn < C_CONNECTED && os.pdsk > D_OUTDATED)
rv = SS_OUTDATE_WO_CONN;
return rv;
}
static enum drbd_state_rv
is_valid_conn_transition(enum drbd_conns oc, enum drbd_conns nc)
{
/* no change -> nothing to do, at least for the connection part */
if (oc == nc)
return SS_NOTHING_TO_DO;
/* disconnect of an unconfigured connection does not make sense */
if (oc == C_STANDALONE && nc == C_DISCONNECTING)
return SS_ALREADY_STANDALONE;
/* from C_STANDALONE, we start with C_UNCONNECTED */
if (oc == C_STANDALONE && nc != C_UNCONNECTED)
return SS_NEED_CONNECTION;
/* When establishing a connection we need to go through WF_REPORT_PARAMS!
Necessary to do the right thing upon invalidate-remote on a disconnected resource */
if (oc < C_WF_REPORT_PARAMS && nc >= C_CONNECTED)
return SS_NEED_CONNECTION;
/* After a network error only C_UNCONNECTED or C_DISCONNECTING may follow. */
if (oc >= C_TIMEOUT && oc <= C_TEAR_DOWN && nc != C_UNCONNECTED && nc != C_DISCONNECTING)
return SS_IN_TRANSIENT_STATE;
/* After C_DISCONNECTING only C_STANDALONE may follow */
if (oc == C_DISCONNECTING && nc != C_STANDALONE)
return SS_IN_TRANSIENT_STATE;
return SS_SUCCESS;
}
/**
* is_valid_transition() - Returns an SS_ error code if the state transition is not possible
* @os: old state.
* @ns: new state.
*
* This limits hard state transitions. Hard state transitions are facts that are
* imposed on DRBD by the environment, e.g. the disk broke or the network broke down.
* But even those hard state transitions are not allowed to do everything.
*/
static enum drbd_state_rv
is_valid_transition(union drbd_state os, union drbd_state ns)
{
enum drbd_state_rv rv;
rv = is_valid_conn_transition(os.conn, ns.conn);
/* we cannot fail (again) if we already detached */
if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
rv = SS_IS_DISKLESS;
return rv;
}
static void print_sanitize_warnings(struct drbd_device *device, enum sanitize_state_warnings warn)
{
static const char *msg_table[] = {
[NO_WARNING] = "",
[ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
[ABORTED_RESYNC] = "Resync aborted.",
[CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
[IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
[IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
};
if (warn != NO_WARNING)
drbd_warn(device, "%s\n", msg_table[warn]);
}
/**
* sanitize_state() - Resolves implicitly necessary additional changes to a state transition
* @device: DRBD device.
* @os: old state.
* @ns: new state.
* @warn: placeholder for returned state warning.
*
* When we lose the connection, we have to set the state of the peer's disk
* (pdsk) to D_UNKNOWN. This rule and many more along those lines are in this
* function.
*/
static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state os,
union drbd_state ns, enum sanitize_state_warnings *warn)
{
enum drbd_fencing_p fp;
enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
if (warn)
*warn = NO_WARNING;
fp = FP_DONT_CARE;
if (get_ldev(device)) {
rcu_read_lock();
fp = rcu_dereference(device->ldev->disk_conf)->fencing;
rcu_read_unlock();
put_ldev(device);
}
/* Implications from connection to peer and peer_isp */
if (ns.conn < C_CONNECTED) {
ns.peer_isp = 0;
ns.peer = R_UNKNOWN;
if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
ns.pdsk = D_UNKNOWN;
}
/* Clear the aftr_isp when becoming unconfigured */
if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
ns.aftr_isp = 0;
/* An implication of the disk states onto the connection state */
/* Abort resync if a disk fails/detaches */
if (ns.conn > C_CONNECTED && (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
if (warn)
*warn = ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T ?
ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
ns.conn = C_CONNECTED;
}
/* Connection breaks down before we finished "Negotiating" */
if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
get_ldev_if_state(device, D_NEGOTIATING)) {
if (device->ed_uuid == device->ldev->md.uuid[UI_CURRENT]) {
ns.disk = device->new_state_tmp.disk;
ns.pdsk = device->new_state_tmp.pdsk;
} else {
if (warn)
*warn = CONNECTION_LOST_NEGOTIATING;
ns.disk = D_DISKLESS;
ns.pdsk = D_UNKNOWN;
}
put_ldev(device);
}
/* D_CONSISTENT and D_OUTDATED vanish when we get connected */
if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
ns.disk = D_UP_TO_DATE;
if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
ns.pdsk = D_UP_TO_DATE;
}
/* Implications of the connection state on the disk states */
disk_min = D_DISKLESS;
disk_max = D_UP_TO_DATE;
pdsk_min = D_INCONSISTENT;
pdsk_max = D_UNKNOWN;
switch ((enum drbd_conns)ns.conn) {
case C_WF_BITMAP_T:
case C_PAUSED_SYNC_T:
case C_STARTING_SYNC_T:
case C_WF_SYNC_UUID:
case C_BEHIND:
disk_min = D_INCONSISTENT;
disk_max = D_OUTDATED;
pdsk_min = D_UP_TO_DATE;
pdsk_max = D_UP_TO_DATE;
break;
case C_VERIFY_S:
case C_VERIFY_T:
disk_min = D_UP_TO_DATE;
disk_max = D_UP_TO_DATE;
pdsk_min = D_UP_TO_DATE;
pdsk_max = D_UP_TO_DATE;
break;
case C_CONNECTED:
disk_min = D_DISKLESS;
disk_max = D_UP_TO_DATE;
pdsk_min = D_DISKLESS;
pdsk_max = D_UP_TO_DATE;
break;
case C_WF_BITMAP_S:
case C_PAUSED_SYNC_S:
case C_STARTING_SYNC_S:
case C_AHEAD:
disk_min = D_UP_TO_DATE;
disk_max = D_UP_TO_DATE;
pdsk_min = D_INCONSISTENT;
pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
break;
case C_SYNC_TARGET:
disk_min = D_INCONSISTENT;
disk_max = D_INCONSISTENT;
pdsk_min = D_UP_TO_DATE;
pdsk_max = D_UP_TO_DATE;
break;
case C_SYNC_SOURCE:
disk_min = D_UP_TO_DATE;
disk_max = D_UP_TO_DATE;
pdsk_min = D_INCONSISTENT;
pdsk_max = D_INCONSISTENT;
break;
case C_STANDALONE:
case C_DISCONNECTING:
case C_UNCONNECTED:
case C_TIMEOUT:
case C_BROKEN_PIPE:
case C_NETWORK_FAILURE:
case C_PROTOCOL_ERROR:
case C_TEAR_DOWN:
case C_WF_CONNECTION:
case C_WF_REPORT_PARAMS:
case C_MASK:
break;
}
if (ns.disk > disk_max)
ns.disk = disk_max;
if (ns.disk < disk_min) {
if (warn)
*warn = IMPLICITLY_UPGRADED_DISK;
ns.disk = disk_min;
}
if (ns.pdsk > pdsk_max)
ns.pdsk = pdsk_max;
if (ns.pdsk < pdsk_min) {
if (warn)
*warn = IMPLICITLY_UPGRADED_PDSK;
ns.pdsk = pdsk_min;
}
if (fp == FP_STONITH &&
(ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
!(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
if (device->resource->res_opts.on_no_data == OND_SUSPEND_IO &&
(ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
ns.susp_nod = 1; /* Suspend IO while no up-to-date data is accessible */
if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
if (ns.conn == C_SYNC_SOURCE)
ns.conn = C_PAUSED_SYNC_S;
if (ns.conn == C_SYNC_TARGET)
ns.conn = C_PAUSED_SYNC_T;
} else {
if (ns.conn == C_PAUSED_SYNC_S)
ns.conn = C_SYNC_SOURCE;
if (ns.conn == C_PAUSED_SYNC_T)
ns.conn = C_SYNC_TARGET;
}
return ns;
}
void drbd_resume_al(struct drbd_device *device)
{
if (test_and_clear_bit(AL_SUSPENDED, &device->flags))
drbd_info(device, "Resumed AL updates\n");
}
/* helper for _drbd_set_state */
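/* Compute start sector and amount of work (rs_total, ov_left) for an online
 * verify run; on C_VERIFY_T the start sector is determined later from the
 * peer's first P_OV_REQUEST. */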
static void set_ov_position(struct drbd_peer_device *peer_device, enum drbd_conns cs)
{
struct drbd_device *device = peer_device->device;
if (peer_device->connection->agreed_pro_version < 90)
device->ov_start_sector = 0;
device->rs_total = drbd_bm_bits(device);
device->ov_position = 0;
if (cs == C_VERIFY_T) {
/* starting online verify from an arbitrary position
* does not fit well into the existing protocol.
* on C_VERIFY_T, we initialize ov_left and friends
* implicitly in receive_DataRequest once the
* first P_OV_REQUEST is received */
device->ov_start_sector = ~(sector_t)0;
} else {
unsigned long bit = BM_SECT_TO_BIT(device->ov_start_sector);
if (bit >= device->rs_total) {
device->ov_start_sector =
BM_BIT_TO_SECT(device->rs_total - 1);
device->rs_total = 1;
} else
device->rs_total -= bit;
device->ov_position = device->ov_start_sector;
}
device->ov_left = device->rs_total;
}
/**
* _drbd_set_state() - Set a new DRBD state
* @device: DRBD device.
* @ns: new state.
* @flags: Flags
* @done: Optional completion that will be completed after after_state_ch() has finished
*
* Caller needs to hold req_lock. Do not call directly.
*/
enum drbd_state_rv
_drbd_set_state(struct drbd_device *device, union drbd_state ns,
enum chg_state_flags flags, struct completion *done)
{
struct drbd_peer_device *peer_device = first_peer_device(device);
struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
union drbd_state os;
enum drbd_state_rv rv = SS_SUCCESS;
enum sanitize_state_warnings ssw;
struct after_state_chg_work *ascw;
struct drbd_state_change *state_change;
os = drbd_read_state(device);
ns = sanitize_state(device, os, ns, &ssw);
if (ns.i == os.i)
return SS_NOTHING_TO_DO;
rv = is_valid_transition(os, ns);
if (rv < SS_SUCCESS)
return rv;
if (!(flags & CS_HARD)) {
/* pre-state-change checks ; only look at ns */
/* See drbd_state_sw_errors in drbd_strings.c */
rv = is_valid_state(device, ns);
if (rv < SS_SUCCESS) {
/* If the old state was illegal as well, then let
this happen...*/
if (is_valid_state(device, os) == rv)
rv = is_valid_soft_transition(os, ns, connection);
} else
rv = is_valid_soft_transition(os, ns, connection);
}
if (rv < SS_SUCCESS) {
if (flags & CS_VERBOSE)
print_st_err(device, os, ns, rv);
return rv;
}
print_sanitize_warnings(device, ssw);
drbd_pr_state_change(device, os, ns, flags);
/* Display changes to the susp* flags that were caused by the call to
sanitize_state(). Only display them here if we were not called from
_conn_request_state() */
if (!(flags & CS_DC_SUSP))
conn_pr_state_change(connection, os, ns,
(flags & ~CS_DC_MASK) | CS_DC_SUSP);
/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
* on the ldev here, to be sure the transition -> D_DISKLESS, and with it
* drbd_ldev_destroy(), won't happen before our corresponding
* after_state_ch work has run, where we put_ldev again. */
if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
(os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
atomic_inc(&device->local_cnt);
if (!is_sync_state(os.conn) && is_sync_state(ns.conn))
clear_bit(RS_DONE, &device->flags);
/* FIXME: Have any flags been set earlier in this function already? */
state_change = remember_old_state(device->resource, GFP_ATOMIC);
/* changes to local_cnt and device flags should be visible before
* changes to state, which again should be visible before anything else
* depending on that change happens. */
smp_wmb();
device->state.i = ns.i;
device->resource->susp = ns.susp;
device->resource->susp_nod = ns.susp_nod;
device->resource->susp_fen = ns.susp_fen;
smp_wmb();
remember_new_state(state_change);
/* put replicated vs not-replicated requests in separate epochs */
if (drbd_should_do_remote((union drbd_dev_state)os.i) !=
drbd_should_do_remote((union drbd_dev_state)ns.i))
start_new_tl_epoch(connection);
if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
drbd_print_uuids(device, "attached to UUIDs");
/* Wake up role changes, that were delayed because of connection establishing */
if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS &&
no_peer_wf_report_params(connection)) {
clear_bit(STATE_SENT, &connection->flags);
wake_up_all_devices(connection);
}
wake_up(&device->misc_wait);
wake_up(&device->state_wait);
wake_up(&connection->ping_wait);
/* Aborted verify run, or we reached the stop sector.
* Log the last position, unless end-of-device. */
if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
ns.conn <= C_CONNECTED) {
device->ov_start_sector =
BM_BIT_TO_SECT(drbd_bm_bits(device) - device->ov_left);
if (device->ov_left)
drbd_info(device, "Online Verify reached sector %llu\n",
(unsigned long long)device->ov_start_sector);
}
if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
(ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
drbd_info(device, "Syncer continues.\n");
device->rs_paused += (long)jiffies
-(long)device->rs_mark_time[device->rs_last_mark];
if (ns.conn == C_SYNC_TARGET)
mod_timer(&device->resync_timer, jiffies);
}
if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
(ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
drbd_info(device, "Resync suspended\n");
device->rs_mark_time[device->rs_last_mark] = jiffies;
}
if (os.conn == C_CONNECTED &&
(ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
unsigned long now = jiffies;
int i;
set_ov_position(peer_device, ns.conn);
device->rs_start = now;
device->rs_last_sect_ev = 0;
device->ov_last_oos_size = 0;
device->ov_last_oos_start = 0;
for (i = 0; i < DRBD_SYNC_MARKS; i++) {
device->rs_mark_left[i] = device->ov_left;
device->rs_mark_time[i] = now;
}
drbd_rs_controller_reset(peer_device);
if (ns.conn == C_VERIFY_S) {
drbd_info(device, "Starting Online Verify from sector %llu\n",
(unsigned long long)device->ov_position);
mod_timer(&device->resync_timer, jiffies);
}
}
if (get_ldev(device)) {
u32 mdf = device->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
mdf &= ~MDF_AL_CLEAN;
if (test_bit(CRASHED_PRIMARY, &device->flags))
mdf |= MDF_CRASHED_PRIMARY;
if (device->state.role == R_PRIMARY ||
(device->state.pdsk < D_INCONSISTENT && device->state.peer == R_PRIMARY))
mdf |= MDF_PRIMARY_IND;
if (device->state.conn > C_WF_REPORT_PARAMS)
mdf |= MDF_CONNECTED_IND;
if (device->state.disk > D_INCONSISTENT)
mdf |= MDF_CONSISTENT;
if (device->state.disk > D_OUTDATED)
mdf |= MDF_WAS_UP_TO_DATE;
if (device->state.pdsk <= D_OUTDATED && device->state.pdsk >= D_INCONSISTENT)
mdf |= MDF_PEER_OUT_DATED;
if (mdf != device->ldev->md.flags) {
device->ldev->md.flags = mdf;
drbd_md_mark_dirty(device);
}
if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
drbd_set_ed_uuid(device, device->ldev->md.uuid[UI_CURRENT]);
put_ldev(device);
}
/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider resyncing */
if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
set_bit(CONSIDER_RESYNC, &device->flags);
/* Receiver should clean up itself */
if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
drbd_thread_stop_nowait(&connection->receiver);
/* Now that the receiver has finished cleaning up after itself, it should die */
if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
drbd_thread_stop_nowait(&connection->receiver);
/* Upon network failure, we need to restart the receiver. */
if (os.conn > C_WF_CONNECTION &&
ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
drbd_thread_restart_nowait(&connection->receiver);
/* Resume AL writing if we get a connection */
if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
drbd_resume_al(device);
connection->connect_cnt++;
}
/* remember last attach time so request_timer_fn() won't
* kill newly established sessions while we are still trying to thaw
* previously frozen IO */
if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
ns.disk > D_NEGOTIATING)
device->last_reattach_jif = jiffies;
ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
if (ascw) {
ascw->os = os;
ascw->ns = ns;
ascw->flags = flags;
ascw->w.cb = w_after_state_ch;
ascw->device = device;
ascw->done = done;
ascw->state_change = state_change;
drbd_queue_work(&connection->sender_work,
&ascw->w);
} else {
drbd_err(device, "Could not kmalloc an ascw\n");
}
return rv;
}
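/* Worker callback: run the sleeping after-state-change actions queued by
 * _drbd_set_state(), then signal completion (if requested) and free the
 * work item. */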
static int w_after_state_ch(struct drbd_work *w, int unused)
{
struct after_state_chg_work *ascw =
container_of(w, struct after_state_chg_work, w);
struct drbd_device *device = ascw->device;
after_state_ch(device, ascw->os, ascw->ns, ascw->flags, ascw->state_change);
forget_state_change(ascw->state_change);
if (ascw->flags & CS_WAIT_COMPLETE)
complete(ascw->done);
kfree(ascw);
return 0;
}
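/* Completion callback of the "set_n_write from StartingSync" bitmap writeout:
 * on failure fall back to C_CONNECTED, otherwise continue the full-sync
 * handshake depending on whether we are sync source or sync target. */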
static void abw_start_sync(struct drbd_device *device, int rv)
{
if (rv) {
drbd_err(device, "Writing the bitmap failed not starting resync.\n");
_drbd_request_state(device, NS(conn, C_CONNECTED), CS_VERBOSE);
return;
}
switch (device->state.conn) {
case C_STARTING_SYNC_T:
_drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
break;
case C_STARTING_SYNC_S:
drbd_start_resync(device, C_SYNC_SOURCE);
break;
}
}
int drbd_bitmap_io_from_worker(struct drbd_device *device,
int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
char *why, enum bm_flag flags,
struct drbd_peer_device *peer_device)
{
int rv;
D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
/* open coded non-blocking drbd_suspend_io(device); */
atomic_inc(&device->suspend_cnt);
drbd_bm_lock(device, why, flags);
rv = io_fn(device, peer_device);
drbd_bm_unlock(device);
drbd_resume_io(device);
return rv;
}
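/* The notify_*_state_change() helpers below translate entries of a
 * drbd_state_change snapshot into the corresponding netlink notifications. */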
int notify_resource_state_change(struct sk_buff *skb,
unsigned int seq,
struct drbd_resource_state_change *resource_state_change,
enum drbd_notification_type type)
{
struct drbd_resource *resource = resource_state_change->resource;
struct resource_info resource_info = {
.res_role = resource_state_change->role[NEW],
.res_susp = resource_state_change->susp[NEW],
.res_susp_nod = resource_state_change->susp_nod[NEW],
.res_susp_fen = resource_state_change->susp_fen[NEW],
};
return notify_resource_state(skb, seq, resource, &resource_info, type);
}
int notify_connection_state_change(struct sk_buff *skb,
unsigned int seq,
struct drbd_connection_state_change *connection_state_change,
enum drbd_notification_type type)
{
struct drbd_connection *connection = connection_state_change->connection;
struct connection_info connection_info = {
.conn_connection_state = connection_state_change->cstate[NEW],
.conn_role = connection_state_change->peer_role[NEW],
};
return notify_connection_state(skb, seq, connection, &connection_info, type);
}
int notify_device_state_change(struct sk_buff *skb,
unsigned int seq,
struct drbd_device_state_change *device_state_change,
enum drbd_notification_type type)
{
struct drbd_device *device = device_state_change->device;
struct device_info device_info = {
.dev_disk_state = device_state_change->disk_state[NEW],
};
return notify_device_state(skb, seq, device, &device_info, type);
}
int notify_peer_device_state_change(struct sk_buff *skb,
unsigned int seq,
struct drbd_peer_device_state_change *p,
enum drbd_notification_type type)
{
struct drbd_peer_device *peer_device = p->peer_device;
struct peer_device_info peer_device_info = {
.peer_repl_state = p->repl_state[NEW],
.peer_disk_state = p->disk_state[NEW],
.peer_resync_susp_user = p->resync_susp_user[NEW],
.peer_resync_susp_peer = p->resync_susp_peer[NEW],
.peer_resync_susp_dependency = p->resync_susp_dependency[NEW],
};
return notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
}
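/* Walk a remembered state change and emit a netlink notification for every
 * object (resource, connection, device, peer device) whose state actually
 * changed; intermediate notifications are sent with NOTIFY_CONTINUES set,
 * the last one as a plain NOTIFY_CHANGE. */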
static void broadcast_state_change(struct drbd_state_change *state_change)
{
struct drbd_resource_state_change *resource_state_change = &state_change->resource[0];
bool resource_state_has_changed;
unsigned int n_device, n_connection, n_peer_device, n_peer_devices;
int (*last_func)(struct sk_buff *, unsigned int, void *,
enum drbd_notification_type) = NULL;
void *last_arg = NULL;
#define HAS_CHANGED(state) ((state)[OLD] != (state)[NEW])
#define FINAL_STATE_CHANGE(type) \
({ if (last_func) \
last_func(NULL, 0, last_arg, type); \
})
#define REMEMBER_STATE_CHANGE(func, arg, type) \
({ FINAL_STATE_CHANGE(type | NOTIFY_CONTINUES); \
last_func = (typeof(last_func))func; \
last_arg = arg; \
})
mutex_lock(¬ification_mutex);
resource_state_has_changed =
HAS_CHANGED(resource_state_change->role) ||
HAS_CHANGED(resource_state_change->susp) ||
HAS_CHANGED(resource_state_change->susp_nod) ||
HAS_CHANGED(resource_state_change->susp_fen);
if (resource_state_has_changed)
REMEMBER_STATE_CHANGE(notify_resource_state_change,
resource_state_change, NOTIFY_CHANGE);
for (n_connection = 0; n_connection < state_change->n_connections; n_connection++) {
struct drbd_connection_state_change *connection_state_change =
&state_change->connections[n_connection];
if (HAS_CHANGED(connection_state_change->peer_role) ||
HAS_CHANGED(connection_state_change->cstate))
REMEMBER_STATE_CHANGE(notify_connection_state_change,
connection_state_change, NOTIFY_CHANGE);
}
for (n_device = 0; n_device < state_change->n_devices; n_device++) {
struct drbd_device_state_change *device_state_change =
&state_change->devices[n_device];
if (HAS_CHANGED(device_state_change->disk_state))
REMEMBER_STATE_CHANGE(notify_device_state_change,
device_state_change, NOTIFY_CHANGE);
}
n_peer_devices = state_change->n_devices * state_change->n_connections;
for (n_peer_device = 0; n_peer_device < n_peer_devices; n_peer_device++) {
struct drbd_peer_device_state_change *p =
&state_change->peer_devices[n_peer_device];
if (HAS_CHANGED(p->disk_state) ||
HAS_CHANGED(p->repl_state) ||
HAS_CHANGED(p->resync_susp_user) ||
HAS_CHANGED(p->resync_susp_peer) ||
HAS_CHANGED(p->resync_susp_dependency))
REMEMBER_STATE_CHANGE(notify_peer_device_state_change,
p, NOTIFY_CHANGE);
}
FINAL_STATE_CHANGE(NOTIFY_CHANGE);
mutex_unlock(¬ification_mutex);
#undef HAS_CHANGED
#undef FINAL_STATE_CHANGE
#undef REMEMBER_STATE_CHANGE
}
/* takes old and new peer disk state */
static bool lost_contact_to_peer_data(enum drbd_disk_state os, enum drbd_disk_state ns)
{
if ((os >= D_INCONSISTENT && os != D_UNKNOWN && os != D_OUTDATED)
&& (ns < D_INCONSISTENT || ns == D_UNKNOWN || ns == D_OUTDATED))
return true;
/* Scenario, starting with normal operation
* Connected Primary/Secondary UpToDate/UpToDate
* NetworkFailure Primary/Unknown UpToDate/DUnknown (frozen)
* ...
* Connected Primary/Secondary UpToDate/Diskless (resumed; needs to bump uuid!)
*/
if (os == D_UNKNOWN
&& (ns == D_DISKLESS || ns == D_FAILED || ns == D_OUTDATED))
return true;
return false;
}
/**
* after_state_ch() - Perform after state change actions that may sleep
* @device: DRBD device.
* @os: old state.
* @ns: new state.
* @flags: Flags
* @state_change: state change to broadcast
*/
static void after_state_ch(struct drbd_device *device, union drbd_state os,
union drbd_state ns, enum chg_state_flags flags,
struct drbd_state_change *state_change)
{
struct drbd_resource *resource = device->resource;
struct drbd_peer_device *peer_device = first_peer_device(device);
struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
struct sib_info sib;
broadcast_state_change(state_change);
sib.sib_reason = SIB_STATE_CHANGE;
sib.os = os;
sib.ns = ns;
if ((os.disk != D_UP_TO_DATE || os.pdsk != D_UP_TO_DATE)
&& (ns.disk == D_UP_TO_DATE && ns.pdsk == D_UP_TO_DATE)) {
clear_bit(CRASHED_PRIMARY, &device->flags);
if (device->p_uuid)
device->p_uuid[UI_FLAGS] &= ~((u64)2);
}
/* Inform userspace about the change... */
drbd_bcast_event(device, &sib);
if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
(ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
drbd_khelper(device, "pri-on-incon-degr");
/* Here we have the actions that are performed after a
state change. This function might sleep */
if (ns.susp_nod) {
enum drbd_req_event what = NOTHING;
spin_lock_irq(&device->resource->req_lock);
if (os.conn < C_CONNECTED && conn_lowest_conn(connection) >= C_CONNECTED)
what = RESEND;
if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
conn_lowest_disk(connection) == D_UP_TO_DATE)
what = RESTART_FROZEN_DISK_IO;
if (resource->susp_nod && what != NOTHING) {
_tl_restart(connection, what);
_conn_request_state(connection,
(union drbd_state) { { .susp_nod = 1 } },
(union drbd_state) { { .susp_nod = 0 } },
CS_VERBOSE);
}
spin_unlock_irq(&device->resource->req_lock);
}
if (ns.susp_fen) {
spin_lock_irq(&device->resource->req_lock);
if (resource->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) {
/* case2: The connection was established again: */
struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
clear_bit(NEW_CUR_UUID, &peer_device->device->flags);
rcu_read_unlock();
/* We should actively create a new uuid, _before_
* we resume/resend, if the peer is diskless
* (recovery from a multiple error scenario).
* Currently, this happens with a slight delay
* below when checking lost_contact_to_peer_data() ...
*/
_tl_restart(connection, RESEND);
_conn_request_state(connection,
(union drbd_state) { { .susp_fen = 1 } },
(union drbd_state) { { .susp_fen = 0 } },
CS_VERBOSE);
}
spin_unlock_irq(&device->resource->req_lock);
}
/* Became sync source. With protocol >= 96, we still need to send out
* the sync uuid now. Need to do that before any drbd_send_state, or
* the other side may go "paused sync" before receiving the sync uuids,
* which is unexpected. */
if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
(ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
connection->agreed_pro_version >= 96 && get_ldev(device)) {
drbd_gen_and_send_sync_uuid(peer_device);
put_ldev(device);
}
/* Do not change the order of the if above and the two below... */
if (os.pdsk == D_DISKLESS &&
ns.pdsk > D_DISKLESS && ns.pdsk != D_UNKNOWN) { /* attach on the peer */
/* we probably will start a resync soon.
* make sure those things are properly reset. */
device->rs_total = 0;
device->rs_failed = 0;
atomic_set(&device->rs_pending_cnt, 0);
drbd_rs_cancel_all(device);
drbd_send_uuids(peer_device);
drbd_send_state(peer_device, ns);
}
/* No point in queuing send_bitmap if we don't have a connection
* anymore, so check also the _current_ state, not only the new state
* at the time this work was queued. */
if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
device->state.conn == C_WF_BITMAP_S)
drbd_queue_bitmap_io(device, &drbd_send_bitmap, NULL,
"send_bitmap (WFBitMapS)",
BM_LOCKED_TEST_ALLOWED, peer_device);
/* Lost contact to peer's copy of the data */
if (lost_contact_to_peer_data(os.pdsk, ns.pdsk)) {
if (get_ldev(device)) {
if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
if (drbd_suspended(device)) {
set_bit(NEW_CUR_UUID, &device->flags);
} else {
drbd_uuid_new_current(device);
drbd_send_uuids(peer_device);
}
}
put_ldev(device);
}
}
if (ns.pdsk < D_INCONSISTENT && get_ldev(device)) {
if (os.peer != R_PRIMARY && ns.peer == R_PRIMARY &&
device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
drbd_uuid_new_current(device);
drbd_send_uuids(peer_device);
}
/* D_DISKLESS Peer becomes secondary */
if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
/* We may still be Primary ourselves.
* No harm done if the bitmap still changes,
* redirtied pages will follow later. */
drbd_bitmap_io_from_worker(device, &drbd_bm_write,
"demote diskless peer", BM_LOCKED_SET_ALLOWED, peer_device);
put_ldev(device);
}
/* Write out all changed bits on demote.
* Though, no need to do that just yet
* if there is still a resync going on. */
if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
device->state.conn <= C_CONNECTED && get_ldev(device)) {
/* No changes to the bitmap expected this time, so assert that,
* even though no harm was done if it did change. */
drbd_bitmap_io_from_worker(device, &drbd_bm_write,
"demote", BM_LOCKED_TEST_ALLOWED, peer_device);
put_ldev(device);
}
/* Last part of the attaching process ... */
if (ns.conn >= C_CONNECTED &&
os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
drbd_send_sizes(peer_device, 0, 0); /* to start sync... */
drbd_send_uuids(peer_device);
drbd_send_state(peer_device, ns);
}
/* We want to pause/continue resync, tell peer. */
if (ns.conn >= C_CONNECTED &&
((os.aftr_isp != ns.aftr_isp) ||
(os.user_isp != ns.user_isp)))
drbd_send_state(peer_device, ns);
/* In case one of the isp bits got set, suspend other devices. */
if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
(ns.aftr_isp || ns.peer_isp || ns.user_isp))
suspend_other_sg(device);
/* Make sure the peer gets informed about possible state
changes (ISP bits) that happened while we were in WFReportParams. */
if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
drbd_send_state(peer_device, ns);
if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
drbd_send_state(peer_device, ns);
/* We are in the process of starting a full sync... */
if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
(os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
/* no other bitmap changes expected during this phase */
drbd_queue_bitmap_io(device,
&drbd_bmio_set_n_write, &abw_start_sync,
"set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED,
peer_device);
/* first half of local IO error, failure to attach,
* or administrative detach */
if (os.disk != D_FAILED && ns.disk == D_FAILED) {
enum drbd_io_error_p eh = EP_PASS_ON;
int was_io_error = 0;
/* corresponding get_ldev was in _drbd_set_state, to serialize
* our cleanup here with the transition to D_DISKLESS.
* But it is still not safe to dereference ldev here, since
* we might come from a failed Attach before ldev was set. */
if (device->ldev) {
rcu_read_lock();
eh = rcu_dereference(device->ldev->disk_conf)->on_io_error;
rcu_read_unlock();
was_io_error = test_and_clear_bit(WAS_IO_ERROR, &device->flags);
/* Intentionally call this handler first, before drbd_send_state().
* See: 2932204 drbd: call local-io-error handler early
* People may choose to hard-reset the box from this handler.
* It is useful if this looks like a "regular node crash". */
if (was_io_error && eh == EP_CALL_HELPER)
drbd_khelper(device, "local-io-error");
/* Immediately allow completion of all application IO,
* that waits for completion from the local disk,
* if this was a force-detach due to disk_timeout
* or administrator request (drbdsetup detach --force).
* Do NOT abort otherwise.
* Aborting local requests may cause serious problems,
* if requests are completed to upper layers already,
* and then later the already submitted local bio completes.
* This can cause DMA into former bio pages that meanwhile
* have been re-used for other things.
* So aborting local requests may cause crashes,
* or even worse, silent data corruption.
*/
if (test_and_clear_bit(FORCE_DETACH, &device->flags))
tl_abort_disk_io(device);
/* current state still has to be D_FAILED,
* there is only one way out: to D_DISKLESS,
* and that may only happen after our put_ldev below. */
if (device->state.disk != D_FAILED)
drbd_err(device,
"ASSERT FAILED: disk is %s during detach\n",
drbd_disk_str(device->state.disk));
if (ns.conn >= C_CONNECTED)
drbd_send_state(peer_device, ns);
drbd_rs_cancel_all(device);
/* In case we want to get something to stable storage still,
* this may be the last chance.
* Following put_ldev may transition to D_DISKLESS. */
drbd_md_sync(device);
}
put_ldev(device);
}
/* second half of local IO error, failure to attach,
* or administrative detach,
* after local_cnt references have reached zero again */
if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
/* We must still be diskless,
* re-attach has to be serialized with this! */
if (device->state.disk != D_DISKLESS)
drbd_err(device,
"ASSERT FAILED: disk is %s while going diskless\n",
drbd_disk_str(device->state.disk));
if (ns.conn >= C_CONNECTED)
drbd_send_state(peer_device, ns);
/* corresponding get_ldev in _drbd_set_state
* this may finally trigger drbd_ldev_destroy. */
put_ldev(device);
}
/* Notify the peer that I had a local IO error and did not detach. */
if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
drbd_send_state(peer_device, ns);
/* Disks got bigger while they were detached */
if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
test_and_clear_bit(RESYNC_AFTER_NEG, &device->flags)) {
if (ns.conn == C_CONNECTED)
resync_after_online_grow(device);
}
/* A resync finished or aborted, wake paused devices... */
if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
(os.peer_isp && !ns.peer_isp) ||
(os.user_isp && !ns.user_isp))
resume_next_sg(device);
/* sync target done with resync. Explicitly notify peer, even though
* it should (at least for non-empty resyncs) already know itself. */
if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
drbd_send_state(peer_device, ns);
/* Verify finished, or reached stop sector. Peer did not know about
* the stop sector, and we may even have changed the stop sector during
* verify to interrupt/stop early. Send the new state. */
if (os.conn == C_VERIFY_S && ns.conn == C_CONNECTED
&& verify_can_do_stop_sector(device))
drbd_send_state(peer_device, ns);
/* This triggers bitmap writeout of potentially still unwritten pages
* if the resync finished cleanly, or aborted because of peer disk
* failure, or on transition from resync back to AHEAD/BEHIND.
*
* Connection loss is handled in drbd_disconnected() by the receiver.
*
* For resync aborted because of local disk failure, we cannot do
* any bitmap writeout anymore.
*
* No harm done if some bits change during this phase.
*/
if ((os.conn > C_CONNECTED && os.conn < C_AHEAD) &&
(ns.conn == C_CONNECTED || ns.conn >= C_AHEAD) && get_ldev(device)) {
drbd_queue_bitmap_io(device, &drbd_bm_write_copy_pages, NULL,
"write from resync_finished", BM_LOCKED_CHANGE_ALLOWED,
peer_device);
put_ldev(device);
}
if (ns.disk == D_DISKLESS &&
ns.conn == C_STANDALONE &&
ns.role == R_SECONDARY) {
if (os.aftr_isp != ns.aftr_isp)
resume_next_sg(device);
}
drbd_md_sync(device);
}
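/* Deferred work for the sleeping part of a connection-wide state change,
 * queued by _conn_request_state() and executed by w_after_conn_state_ch(). */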
struct after_conn_state_chg_work {
struct drbd_work w;
enum drbd_conns oc;
union drbd_state ns_min;
union drbd_state ns_max; /* new, max state, over all devices */
enum chg_state_flags flags;
struct drbd_connection *connection;
struct drbd_state_change *state_change;
};
static int w_after_conn_state_ch(struct drbd_work *w, int unused)
{
struct after_conn_state_chg_work *acscw =
container_of(w, struct after_conn_state_chg_work, w);
struct drbd_connection *connection = acscw->connection;
enum drbd_conns oc = acscw->oc;
union drbd_state ns_max = acscw->ns_max;
struct drbd_peer_device *peer_device;
int vnr;
broadcast_state_change(acscw->state_change);
forget_state_change(acscw->state_change);
kfree(acscw);
/* Upon network configuration, we need to start the receiver */
if (oc == C_STANDALONE && ns_max.conn == C_UNCONNECTED)
drbd_thread_start(&connection->receiver);
if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) {
struct net_conf *old_conf;
mutex_lock(¬ification_mutex);
idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
notify_peer_device_state(NULL, 0, peer_device, NULL,
NOTIFY_DESTROY | NOTIFY_CONTINUES);
notify_connection_state(NULL, 0, connection, NULL, NOTIFY_DESTROY);
mutex_unlock(¬ification_mutex);
mutex_lock(&connection->resource->conf_update);
old_conf = connection->net_conf;
connection->my_addr_len = 0;
connection->peer_addr_len = 0;
RCU_INIT_POINTER(connection->net_conf, NULL);
conn_free_crypto(connection);
mutex_unlock(&connection->resource->conf_update);
kvfree_rcu_mightsleep(old_conf);
}
if (ns_max.susp_fen) {
/* case1: The outdate peer handler is successful: */
if (ns_max.pdsk <= D_OUTDATED) {
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
if (test_bit(NEW_CUR_UUID, &device->flags)) {
drbd_uuid_new_current(device);
clear_bit(NEW_CUR_UUID, &device->flags);
}
}
rcu_read_unlock();
spin_lock_irq(&connection->resource->req_lock);
_tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
_conn_request_state(connection,
(union drbd_state) { { .susp_fen = 1 } },
(union drbd_state) { { .susp_fen = 0 } },
CS_VERBOSE);
spin_unlock_irq(&connection->resource->req_lock);
}
}
conn_md_sync(connection);
kref_put(&connection->kref, drbd_destroy_connection);
return 0;
}
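/* Determine which parts of the old state are identical across all volumes of
 * the connection: for every field that differs, clear the corresponding
 * CS_DC_* flag in *pf; *pcs receives the common old state. */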
static void conn_old_common_state(struct drbd_connection *connection, union drbd_state *pcs, enum chg_state_flags *pf)
{
enum chg_state_flags flags = ~0;
struct drbd_peer_device *peer_device;
int vnr, first_vol = 1;
union drbd_dev_state os, cs = {
{ .role = R_SECONDARY,
.peer = R_UNKNOWN,
.conn = connection->cstate,
.disk = D_DISKLESS,
.pdsk = D_UNKNOWN,
} };
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
os = device->state;
if (first_vol) {
cs = os;
first_vol = 0;
continue;
}
if (cs.role != os.role)
flags &= ~CS_DC_ROLE;
if (cs.peer != os.peer)
flags &= ~CS_DC_PEER;
if (cs.conn != os.conn)
flags &= ~CS_DC_CONN;
if (cs.disk != os.disk)
flags &= ~CS_DC_DISK;
if (cs.pdsk != os.pdsk)
flags &= ~CS_DC_PDSK;
}
rcu_read_unlock();
*pf |= CS_DC_MASK;
*pf &= flags;
(*pcs).i = cs.i;
}
static enum drbd_state_rv
conn_is_valid_transition(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
enum chg_state_flags flags)
{
enum drbd_state_rv rv = SS_SUCCESS;
union drbd_state ns, os;
struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
os = drbd_read_state(device);
ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL);
if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
ns.disk = os.disk;
if (ns.i == os.i)
continue;
rv = is_valid_transition(os, ns);
if (rv >= SS_SUCCESS && !(flags & CS_HARD)) {
rv = is_valid_state(device, ns);
if (rv < SS_SUCCESS) {
if (is_valid_state(device, os) == rv)
rv = is_valid_soft_transition(os, ns, connection);
} else
rv = is_valid_soft_transition(os, ns, connection);
}
if (rv < SS_SUCCESS) {
if (flags & CS_VERBOSE)
print_st_err(device, os, ns, rv);
break;
}
}
rcu_read_unlock();
return rv;
}
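/* Apply @mask/@val to every volume of the connection (committing each via
 * _drbd_set_state()) and return the element-wise minimum and maximum of the
 * resulting per-volume states in *pns_min and *pns_max. */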
static void
conn_set_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
union drbd_state *pns_min, union drbd_state *pns_max, enum chg_state_flags flags)
{
union drbd_state ns, os, ns_max = { };
union drbd_state ns_min = {
{ .role = R_MASK,
.peer = R_MASK,
.conn = val.conn,
.disk = D_MASK,
.pdsk = D_MASK
} };
struct drbd_peer_device *peer_device;
enum drbd_state_rv rv;
int vnr, number_of_volumes = 0;
if (mask.conn == C_MASK) {
/* remember last connect time so request_timer_fn() won't
* kill newly established sessions while we are still trying to thaw
* previously frozen IO */
if (connection->cstate != C_WF_REPORT_PARAMS && val.conn == C_WF_REPORT_PARAMS)
connection->last_reconnect_jif = jiffies;
connection->cstate = val.conn;
}
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
number_of_volumes++;
os = drbd_read_state(device);
ns = apply_mask_val(os, mask, val);
ns = sanitize_state(device, os, ns, NULL);
if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
ns.disk = os.disk;
rv = _drbd_set_state(device, ns, flags, NULL);
BUG_ON(rv < SS_SUCCESS);
ns.i = device->state.i;
ns_max.role = max_role(ns.role, ns_max.role);
ns_max.peer = max_role(ns.peer, ns_max.peer);
ns_max.conn = max_t(enum drbd_conns, ns.conn, ns_max.conn);
ns_max.disk = max_t(enum drbd_disk_state, ns.disk, ns_max.disk);
ns_max.pdsk = max_t(enum drbd_disk_state, ns.pdsk, ns_max.pdsk);
ns_min.role = min_role(ns.role, ns_min.role);
ns_min.peer = min_role(ns.peer, ns_min.peer);
ns_min.conn = min_t(enum drbd_conns, ns.conn, ns_min.conn);
ns_min.disk = min_t(enum drbd_disk_state, ns.disk, ns_min.disk);
ns_min.pdsk = min_t(enum drbd_disk_state, ns.pdsk, ns_min.pdsk);
}
rcu_read_unlock();
if (number_of_volumes == 0) {
ns_min = ns_max = (union drbd_state) { {
.role = R_SECONDARY,
.peer = R_UNKNOWN,
.conn = val.conn,
.disk = D_DISKLESS,
.pdsk = D_UNKNOWN
} };
}
ns_min.susp = ns_max.susp = connection->resource->susp;
ns_min.susp_nod = ns_max.susp_nod = connection->resource->susp_nod;
ns_min.susp_fen = ns_max.susp_fen = connection->resource->susp_fen;
*pns_min = ns_min;
*pns_max = ns_max;
}
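/* Wait condition for a cluster-wide connection state change: returns
 * SS_CW_SUCCESS or SS_CW_FAILED_BY_PEER once the peer has answered, and
 * SS_UNKNOWN_ERROR to keep waiting as long as the requested transition is
 * still valid and we are still in C_WF_REPORT_PARAMS; otherwise the result
 * of re-validating the transition is returned. */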
static enum drbd_state_rv
_conn_rq_cond(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{
enum drbd_state_rv err, rv = SS_UNKNOWN_ERROR; /* continue waiting */
if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &connection->flags))
rv = SS_CW_SUCCESS;
if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &connection->flags))
rv = SS_CW_FAILED_BY_PEER;
err = conn_is_valid_transition(connection, mask, val, 0);
if (err == SS_SUCCESS && connection->cstate == C_WF_REPORT_PARAMS)
return rv;
return err;
}
enum drbd_state_rv
_conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
enum chg_state_flags flags)
{
enum drbd_state_rv rv = SS_SUCCESS;
struct after_conn_state_chg_work *acscw;
enum drbd_conns oc = connection->cstate;
union drbd_state ns_max, ns_min, os;
bool have_mutex = false;
struct drbd_state_change *state_change;
if (mask.conn) {
rv = is_valid_conn_transition(oc, val.conn);
if (rv < SS_SUCCESS)
goto abort;
}
rv = conn_is_valid_transition(connection, mask, val, flags);
if (rv < SS_SUCCESS)
goto abort;
if (oc == C_WF_REPORT_PARAMS && val.conn == C_DISCONNECTING &&
!(flags & (CS_LOCAL_ONLY | CS_HARD))) {
/* This will be a cluster-wide state change.
* Need to give up the spinlock, grab the mutex,
* then send the state change request, ... */
spin_unlock_irq(&connection->resource->req_lock);
mutex_lock(&connection->cstate_mutex);
have_mutex = true;
set_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
if (conn_send_state_req(connection, mask, val)) {
/* sending failed. */
clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
rv = SS_CW_FAILED_BY_PEER;
/* need to re-acquire the spin lock, though */
goto abort_unlocked;
}
if (val.conn == C_DISCONNECTING)
set_bit(DISCONNECT_SENT, &connection->flags);
/* ... and re-acquire the spinlock.
* If _conn_rq_cond() returned >= SS_SUCCESS, we must call
* conn_set_state() within the same spinlock. */
spin_lock_irq(&connection->resource->req_lock);
wait_event_lock_irq(connection->ping_wait,
(rv = _conn_rq_cond(connection, mask, val)),
connection->resource->req_lock);
clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
if (rv < SS_SUCCESS)
goto abort;
}
state_change = remember_old_state(connection->resource, GFP_ATOMIC);
conn_old_common_state(connection, &os, &flags);
flags |= CS_DC_SUSP;
conn_set_state(connection, mask, val, &ns_min, &ns_max, flags);
conn_pr_state_change(connection, os, ns_max, flags);
remember_new_state(state_change);
acscw = kmalloc(sizeof(*acscw), GFP_ATOMIC);
if (acscw) {
acscw->oc = os.conn;
acscw->ns_min = ns_min;
acscw->ns_max = ns_max;
acscw->flags = flags;
acscw->w.cb = w_after_conn_state_ch;
kref_get(&connection->kref);
acscw->connection = connection;
acscw->state_change = state_change;
drbd_queue_work(&connection->sender_work, &acscw->w);
} else {
drbd_err(connection, "Could not kmalloc an acscw\n");
}
abort:
if (have_mutex) {
/* mutex_unlock() "... must not be used in interrupt context.",
* so give up the spinlock, then re-acquire it */
spin_unlock_irq(&connection->resource->req_lock);
abort_unlocked:
mutex_unlock(&connection->cstate_mutex);
spin_lock_irq(&connection->resource->req_lock);
}
if (rv < SS_SUCCESS && flags & CS_VERBOSE) {
drbd_err(connection, "State change failed: %s\n", drbd_set_st_err_str(rv));
drbd_err(connection, " mask = 0x%x val = 0x%x\n", mask.i, val.i);
drbd_err(connection, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc), drbd_conn_str(val.conn));
}
return rv;
}
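/*
 * conn_request_state() - convenience wrapper around _conn_request_state()
 * that takes and releases resource->req_lock itself, e.g. (as used further
 * down in this file):
 *	conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
 */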
enum drbd_state_rv
conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
enum chg_state_flags flags)
{
enum drbd_state_rv rv;
spin_lock_irq(&connection->resource->req_lock);
rv = _conn_request_state(connection, mask, val, flags);
spin_unlock_irq(&connection->resource->req_lock);
return rv;
}
| linux-master | drivers/block/drbd/drbd_state.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
drbd.c
This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
Copyright (C) 1999-2008, Philipp Reisner <[email protected]>.
Copyright (C) 2002-2008, Lars Ellenberg <[email protected]>.
Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
from Logicworks, Inc. for making SDP replication support possible.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/drbd.h>
#include <linux/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
#include "drbd_vli.h"
#include "drbd_debugfs.h"
static DEFINE_MUTEX(drbd_main_mutex);
static int drbd_open(struct gendisk *disk, blk_mode_t mode);
static void drbd_release(struct gendisk *gd);
static void md_sync_timer_fn(struct timer_list *t);
static int w_bitmap_io(struct drbd_work *w, int unused);
MODULE_AUTHOR("Philipp Reisner <[email protected]>, "
"Lars Ellenberg <[email protected]>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
__stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
#include <linux/moduleparam.h>
/* thanks to these macros, if compiled into the kernel (not as a module),
* these become boot parameters (e.g., drbd.minor_count) */
#ifdef CONFIG_DRBD_FAULT_INJECTION
int drbd_enable_faults;
int drbd_fault_rate;
static int drbd_fault_count;
static int drbd_fault_devs;
/* bitmap of enabled faults */
module_param_named(enable_faults, drbd_enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param_named(fault_rate, drbd_fault_rate, int, 0664);
/* count of faults inserted */
module_param_named(fault_count, drbd_fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param_named(fault_devs, drbd_fault_devs, int, 0644);
#endif
/* module parameters we can keep static */
static bool drbd_allow_oos; /* allow_open_on_secondary */
static bool drbd_disable_sendpage;
MODULE_PARM_DESC(allow_oos, "DONT USE!");
module_param_named(allow_oos, drbd_allow_oos, bool, 0);
module_param_named(disable_sendpage, drbd_disable_sendpage, bool, 0644);
/* module parameters we share */
int drbd_proc_details; /* Detail level in proc drbd*/
module_param_named(proc_details, drbd_proc_details, int, 0644);
/* module parameters shared with defaults */
unsigned int drbd_minor_count = DRBD_MINOR_COUNT_DEF;
/* Module parameter for setting the user mode helper program
* to run. Default is /sbin/drbdadm */
char drbd_usermode_helper[80] = "/sbin/drbdadm";
module_param_named(minor_count, drbd_minor_count, uint, 0444);
module_param_string(usermode_helper, drbd_usermode_helper, sizeof(drbd_usermode_helper), 0644);
/* in 2.6.x, our device mapping and config info contains our virtual gendisks
* as member "struct gendisk *vdisk;"
*/
struct idr drbd_devices;
struct list_head drbd_resources;
struct mutex resources_mutex;
struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache; /* peer requests */
struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
mempool_t drbd_request_mempool;
mempool_t drbd_ee_mempool;
mempool_t drbd_md_io_page_pool;
struct bio_set drbd_md_io_bio_set;
struct bio_set drbd_io_bio_set;
/* I do not use a standard mempool, because:
1) I want to hand out the pre-allocated objects first.
2) I want to be able to interrupt sleeping allocation with a signal.
Note: This is a singly linked list; the next pointer is the private
member of struct page.
*/
struct page *drbd_pp_pool;
DEFINE_SPINLOCK(drbd_pp_lock);
int drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;
DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
static const struct block_device_operations drbd_ops = {
.owner = THIS_MODULE,
.submit_bio = drbd_submit_bio,
.open = drbd_open,
.release = drbd_release,
};
#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
give tons of false positives. When this is a real function, sparse works.
*/
int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
int io_allowed;
atomic_inc(&device->local_cnt);
io_allowed = (device->state.disk >= mins);
if (!io_allowed) {
if (atomic_dec_and_test(&device->local_cnt))
wake_up(&device->misc_wait);
}
return io_allowed;
}
#endif
/**
* tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
* @connection: DRBD connection.
* @barrier_nr: Expected identifier of the DRBD write barrier packet.
* @set_size: Expected number of requests before that barrier.
*
* In case the passed barrier_nr or set_size does not match the oldest
* epoch of not yet barrier-acked requests, this function will cause a
* termination of the connection.
*/
void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
unsigned int set_size)
{
struct drbd_request *r;
struct drbd_request *req = NULL, *tmp = NULL;
int expect_epoch = 0;
int expect_size = 0;
spin_lock_irq(&connection->resource->req_lock);
/* find oldest not yet barrier-acked write request,
* count writes in its epoch. */
list_for_each_entry(r, &connection->transfer_log, tl_requests) {
const unsigned s = r->rq_state;
if (!req) {
if (!(s & RQ_WRITE))
continue;
if (!(s & RQ_NET_MASK))
continue;
if (s & RQ_NET_DONE)
continue;
req = r;
expect_epoch = req->epoch;
expect_size++;
} else {
if (r->epoch != expect_epoch)
break;
if (!(s & RQ_WRITE))
continue;
/* if (s & RQ_DONE): not expected */
/* if (!(s & RQ_NET_MASK)): not expected */
expect_size++;
}
}
/* first some paranoia code */
if (req == NULL) {
drbd_err(connection, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
barrier_nr);
goto bail;
}
if (expect_epoch != barrier_nr) {
drbd_err(connection, "BAD! BarrierAck #%u received, expected #%u!\n",
barrier_nr, expect_epoch);
goto bail;
}
if (expect_size != set_size) {
drbd_err(connection, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
barrier_nr, set_size, expect_size);
goto bail;
}
/* Clean up list of requests processed during current epoch. */
/* this extra list walk restart is paranoia,
* to catch requests being barrier-acked "unexpectedly".
* It usually should find the same req again, or some READ preceding it. */
list_for_each_entry(req, &connection->transfer_log, tl_requests)
if (req->epoch == expect_epoch) {
tmp = req;
break;
}
req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests);
list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
struct drbd_peer_device *peer_device;
if (req->epoch != expect_epoch)
break;
peer_device = conn_peer_device(connection, req->device->vnr);
_req_mod(req, BARRIER_ACKED, peer_device);
}
spin_unlock_irq(&connection->resource->req_lock);
return;
bail:
spin_unlock_irq(&connection->resource->req_lock);
conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
/**
* _tl_restart() - Walks the transfer log, and applies an action to all requests
* @connection: DRBD connection to operate on.
* @what: The action/event to perform with all request objects
*
* @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
* RESTART_FROZEN_DISK_IO.
*/
/* must hold resource->req_lock */
void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
struct drbd_peer_device *peer_device;
struct drbd_request *req, *r;
list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
peer_device = conn_peer_device(connection, req->device->vnr);
_req_mod(req, what, peer_device);
}
}
void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
spin_lock_irq(&connection->resource->req_lock);
_tl_restart(connection, what);
spin_unlock_irq(&connection->resource->req_lock);
}
/**
* tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
* @connection: DRBD connection.
*
* This is called after the connection to the peer was lost. The storage covered
* by the requests on the transfer log gets marked as out of sync. Called from the
* receiver thread and the worker thread.
*/
void tl_clear(struct drbd_connection *connection)
{
tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
}
/**
* tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
* @device: DRBD device.
*/
void tl_abort_disk_io(struct drbd_device *device)
{
struct drbd_connection *connection = first_peer_device(device)->connection;
struct drbd_request *req, *r;
spin_lock_irq(&connection->resource->req_lock);
list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
if (!(req->rq_state & RQ_LOCAL_PENDING))
continue;
if (req->device != device)
continue;
_req_mod(req, ABORT_DISK_IO, NULL);
}
spin_unlock_irq(&connection->resource->req_lock);
}
static int drbd_thread_setup(void *arg)
{
struct drbd_thread *thi = (struct drbd_thread *) arg;
struct drbd_resource *resource = thi->resource;
unsigned long flags;
int retval;
snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
thi->name[0],
resource->name);
allow_kernel_signal(DRBD_SIGKILL);
allow_kernel_signal(SIGXCPU);
restart:
retval = thi->function(thi);
spin_lock_irqsave(&thi->t_lock, flags);
/* if the receiver has been "EXITING", the last thing it did
* was set the conn state to "StandAlone",
* if now a re-connect request comes in, conn state goes C_UNCONNECTED,
* and receiver thread will be "started".
* drbd_thread_start needs to set "RESTARTING" in that case.
* t_state check and assignment needs to be within the same spinlock,
* so either thread_start sees EXITING, and can remap to RESTARTING,
* or thread_start sees NONE, and can proceed as normal.
*/
if (thi->t_state == RESTARTING) {
drbd_info(resource, "Restarting %s thread\n", thi->name);
thi->t_state = RUNNING;
spin_unlock_irqrestore(&thi->t_lock, flags);
goto restart;
}
thi->task = NULL;
thi->t_state = NONE;
smp_mb();
complete_all(&thi->stop);
spin_unlock_irqrestore(&thi->t_lock, flags);
drbd_info(resource, "Terminating %s\n", current->comm);
/* Release mod reference taken when thread was started */
if (thi->connection)
kref_put(&thi->connection->kref, drbd_destroy_connection);
kref_put(&resource->kref, drbd_destroy_resource);
module_put(THIS_MODULE);
return retval;
}
static void drbd_thread_init(struct drbd_resource *resource, struct drbd_thread *thi,
int (*func) (struct drbd_thread *), const char *name)
{
spin_lock_init(&thi->t_lock);
thi->task = NULL;
thi->t_state = NONE;
thi->function = func;
thi->resource = resource;
thi->connection = NULL;
thi->name = name;
}
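/*
 * drbd_thread_start() - spawn (or restart) a drbd thread.
 * Takes a module reference plus krefs on the resource and, if set, the
 * connection; drbd_thread_setup() drops them again when the thread exits.
 * Returns true on success or if the thread is already running.
 */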
int drbd_thread_start(struct drbd_thread *thi)
{
struct drbd_resource *resource = thi->resource;
struct task_struct *nt;
unsigned long flags;
/* is used from state engine doing drbd_thread_stop_nowait,
* while holding the req lock irqsave */
spin_lock_irqsave(&thi->t_lock, flags);
switch (thi->t_state) {
case NONE:
drbd_info(resource, "Starting %s thread (from %s [%d])\n",
thi->name, current->comm, current->pid);
/* Get ref on module for thread - this is released when thread exits */
if (!try_module_get(THIS_MODULE)) {
drbd_err(resource, "Failed to get module reference in drbd_thread_start\n");
spin_unlock_irqrestore(&thi->t_lock, flags);
return false;
}
kref_get(&resource->kref);
if (thi->connection)
kref_get(&thi->connection->kref);
init_completion(&thi->stop);
thi->reset_cpu_mask = 1;
thi->t_state = RUNNING;
spin_unlock_irqrestore(&thi->t_lock, flags);
flush_signals(current); /* otherwise we may get -ERESTARTNOINTR */
nt = kthread_create(drbd_thread_setup, (void *) thi,
"drbd_%c_%s", thi->name[0], thi->resource->name);
if (IS_ERR(nt)) {
drbd_err(resource, "Couldn't start thread\n");
if (thi->connection)
kref_put(&thi->connection->kref, drbd_destroy_connection);
kref_put(&resource->kref, drbd_destroy_resource);
module_put(THIS_MODULE);
return false;
}
spin_lock_irqsave(&thi->t_lock, flags);
thi->task = nt;
thi->t_state = RUNNING;
spin_unlock_irqrestore(&thi->t_lock, flags);
wake_up_process(nt);
break;
case EXITING:
thi->t_state = RESTARTING;
drbd_info(resource, "Restarting %s thread (from %s [%d])\n",
thi->name, current->comm, current->pid);
fallthrough;
case RUNNING:
case RESTARTING:
default:
spin_unlock_irqrestore(&thi->t_lock, flags);
break;
}
return true;
}
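/*
 * _drbd_thread_stop() - ask a drbd thread to exit (or be restarted).
 * Marks the thread EXITING or RESTARTING, signals it with DRBD_SIGKILL and,
 * if @wait is set, blocks until the thread has completed &thi->stop.
 */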
void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
unsigned long flags;
enum drbd_thread_state ns = restart ? RESTARTING : EXITING;
/* may be called from state engine, holding the req lock irqsave */
spin_lock_irqsave(&thi->t_lock, flags);
if (thi->t_state == NONE) {
spin_unlock_irqrestore(&thi->t_lock, flags);
if (restart)
drbd_thread_start(thi);
return;
}
if (thi->t_state != ns) {
if (thi->task == NULL) {
spin_unlock_irqrestore(&thi->t_lock, flags);
return;
}
thi->t_state = ns;
smp_mb();
init_completion(&thi->stop);
if (thi->task != current)
send_sig(DRBD_SIGKILL, thi->task, 1);
}
spin_unlock_irqrestore(&thi->t_lock, flags);
if (wait)
wait_for_completion(&thi->stop);
}
int conn_lowest_minor(struct drbd_connection *connection)
{
struct drbd_peer_device *peer_device;
int vnr = 0, minor = -1;
rcu_read_lock();
peer_device = idr_get_next(&connection->peer_devices, &vnr);
if (peer_device)
minor = device_to_minor(peer_device->device);
rcu_read_unlock();
return minor;
}
#ifdef CONFIG_SMP
/*
* drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
*
* Forces all threads of a resource onto the same CPU. This is beneficial for
* DRBD's performance. May be overwritten by user's configuration.
*/
static void drbd_calc_cpu_mask(cpumask_var_t *cpu_mask)
{
unsigned int *resources_per_cpu, min_index = ~0;
resources_per_cpu = kcalloc(nr_cpu_ids, sizeof(*resources_per_cpu),
GFP_KERNEL);
if (resources_per_cpu) {
struct drbd_resource *resource;
unsigned int cpu, min = ~0;
rcu_read_lock();
for_each_resource_rcu(resource, &drbd_resources) {
for_each_cpu(cpu, resource->cpu_mask)
resources_per_cpu[cpu]++;
}
rcu_read_unlock();
for_each_online_cpu(cpu) {
if (resources_per_cpu[cpu] < min) {
min = resources_per_cpu[cpu];
min_index = cpu;
}
}
kfree(resources_per_cpu);
}
if (min_index == ~0) {
cpumask_setall(*cpu_mask);
return;
}
cpumask_set_cpu(min_index, *cpu_mask);
}
/**
* drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
* @thi: drbd_thread object
*
* Call this in the "main loop" of _all_ threads; no mutex is needed, current won't die
* prematurely.
*/
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
struct drbd_resource *resource = thi->resource;
struct task_struct *p = current;
if (!thi->reset_cpu_mask)
return;
thi->reset_cpu_mask = 0;
set_cpus_allowed_ptr(p, resource->cpu_mask);
}
#else
#define drbd_calc_cpu_mask(A) ({})
#endif
/*
* drbd_header_size - size of a packet header
*
* The header size is a multiple of 8, so any payload following the header is
* word aligned on 64-bit architectures. (The bitmap send and receive code
* relies on this.)
*/
unsigned int drbd_header_size(struct drbd_connection *connection)
{
if (connection->agreed_pro_version >= 100) {
BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
return sizeof(struct p_header100);
} else {
BUILD_BUG_ON(sizeof(struct p_header80) !=
sizeof(struct p_header95));
BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
return sizeof(struct p_header80);
}
}
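/*
 * The prepare_header*() helpers fill in the on-wire packet header for the
 * three header formats (protocol 80, 95 and 100) and return the header size;
 * prepare_header() below picks the right one for the agreed protocol version.
 */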
static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
h->magic = cpu_to_be32(DRBD_MAGIC);
h->command = cpu_to_be16(cmd);
h->length = cpu_to_be16(size);
return sizeof(struct p_header80);
}
static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
h->magic = cpu_to_be16(DRBD_MAGIC_BIG);
h->command = cpu_to_be16(cmd);
h->length = cpu_to_be32(size);
return sizeof(struct p_header95);
}
static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
int size, int vnr)
{
h->magic = cpu_to_be32(DRBD_MAGIC_100);
h->volume = cpu_to_be16(vnr);
h->command = cpu_to_be16(cmd);
h->length = cpu_to_be32(size);
h->pad = 0;
return sizeof(struct p_header100);
}
static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
void *buffer, enum drbd_packet cmd, int size)
{
if (connection->agreed_pro_version >= 100)
return prepare_header100(buffer, cmd, size, vnr);
else if (connection->agreed_pro_version >= 95 &&
size > DRBD_MAX_SIZE_H80_PACKET)
return prepare_header95(buffer, cmd, size);
else
return prepare_header80(buffer, cmd, size);
}
static void *__conn_prepare_command(struct drbd_connection *connection,
struct drbd_socket *sock)
{
if (!sock->socket)
return NULL;
return sock->sbuf + drbd_header_size(connection);
}
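/*
 * conn_prepare_command() takes sock->mutex and returns a pointer to the
 * payload area of the pre-allocated send buffer (just past the header), or
 * NULL with the mutex already dropped.  On success the mutex stays held
 * until the matching conn_send_command()/drbd_send_command() releases it.
 * Illustrative sketch only (P_SOMETHING and ->field are placeholders):
 *
 *	p = conn_prepare_command(connection, sock);
 *	if (!p)
 *		return -EIO;
 *	p->field = cpu_to_be32(value);
 *	return conn_send_command(connection, sock, P_SOMETHING, sizeof(*p), NULL, 0);
 */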
void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socket *sock)
{
void *p;
mutex_lock(&sock->mutex);
p = __conn_prepare_command(connection, sock);
if (!p)
mutex_unlock(&sock->mutex);
return p;
}
void *drbd_prepare_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock)
{
return conn_prepare_command(peer_device->connection, sock);
}
static int __send_command(struct drbd_connection *connection, int vnr,
struct drbd_socket *sock, enum drbd_packet cmd,
unsigned int header_size, void *data,
unsigned int size)
{
int msg_flags;
int err;
/*
* Called with @data == NULL and the size of the data blocks in @size
* for commands that send data blocks. For those commands, omit the
* MSG_MORE flag: this will increase the likelihood that data blocks
* which are page aligned on the sender will end up page aligned on the
* receiver.
*/
msg_flags = data ? MSG_MORE : 0;
header_size += prepare_header(connection, vnr, sock->sbuf, cmd,
header_size + size);
err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size,
msg_flags);
if (data && !err)
err = drbd_send_all(connection, sock->socket, data, size, 0);
/* DRBD protocol "pings" are latency critical.
* This is supposed to trigger tcp_push_pending_frames() */
if (!err && (cmd == P_PING || cmd == P_PING_ACK))
tcp_sock_set_nodelay(sock->socket->sk);
return err;
}
static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
enum drbd_packet cmd, unsigned int header_size,
void *data, unsigned int size)
{
return __send_command(connection, 0, sock, cmd, header_size, data, size);
}
int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
enum drbd_packet cmd, unsigned int header_size,
void *data, unsigned int size)
{
int err;
err = __conn_send_command(connection, sock, cmd, header_size, data, size);
mutex_unlock(&sock->mutex);
return err;
}
int drbd_send_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock,
enum drbd_packet cmd, unsigned int header_size,
void *data, unsigned int size)
{
int err;
err = __send_command(peer_device->connection, peer_device->device->vnr,
sock, cmd, header_size, data, size);
mutex_unlock(&sock->mutex);
return err;
}
int drbd_send_ping(struct drbd_connection *connection)
{
struct drbd_socket *sock;
sock = &connection->meta;
if (!conn_prepare_command(connection, sock))
return -EIO;
return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
}
int drbd_send_ping_ack(struct drbd_connection *connection)
{
struct drbd_socket *sock;
sock = &connection->meta;
if (!conn_prepare_command(connection, sock))
return -EIO;
return conn_send_command(connection, sock, P_PING_ACK, 0, NULL, 0);
}
int drbd_send_sync_param(struct drbd_peer_device *peer_device)
{
struct drbd_socket *sock;
struct p_rs_param_95 *p;
int size;
const int apv = peer_device->connection->agreed_pro_version;
enum drbd_packet cmd;
struct net_conf *nc;
struct disk_conf *dc;
sock = &peer_device->connection->data;
p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
rcu_read_lock();
nc = rcu_dereference(peer_device->connection->net_conf);
size = apv <= 87 ? sizeof(struct p_rs_param)
: apv == 88 ? sizeof(struct p_rs_param)
+ strlen(nc->verify_alg) + 1
: apv <= 94 ? sizeof(struct p_rs_param_89)
: /* apv >= 95 */ sizeof(struct p_rs_param_95);
cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
/* initialize verify_alg and csums_alg */
BUILD_BUG_ON(sizeof(p->algs) != 2 * SHARED_SECRET_MAX);
memset(&p->algs, 0, sizeof(p->algs));
if (get_ldev(peer_device->device)) {
dc = rcu_dereference(peer_device->device->ldev->disk_conf);
p->resync_rate = cpu_to_be32(dc->resync_rate);
p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
p->c_delay_target = cpu_to_be32(dc->c_delay_target);
p->c_fill_target = cpu_to_be32(dc->c_fill_target);
p->c_max_rate = cpu_to_be32(dc->c_max_rate);
put_ldev(peer_device->device);
} else {
p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
}
if (apv >= 88)
strcpy(p->verify_alg, nc->verify_alg);
if (apv >= 89)
strcpy(p->csums_alg, nc->csums_alg);
rcu_read_unlock();
return drbd_send_command(peer_device, sock, cmd, size, NULL, 0);
}
int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
{
struct drbd_socket *sock;
struct p_protocol *p;
struct net_conf *nc;
int size, cf;
sock = &connection->data;
p = __conn_prepare_command(connection, sock);
if (!p)
return -EIO;
rcu_read_lock();
nc = rcu_dereference(connection->net_conf);
if (nc->tentative && connection->agreed_pro_version < 92) {
rcu_read_unlock();
drbd_err(connection, "--dry-run is not supported by peer");
return -EOPNOTSUPP;
}
size = sizeof(*p);
if (connection->agreed_pro_version >= 87)
size += strlen(nc->integrity_alg) + 1;
p->protocol = cpu_to_be32(nc->wire_protocol);
p->after_sb_0p = cpu_to_be32(nc->after_sb_0p);
p->after_sb_1p = cpu_to_be32(nc->after_sb_1p);
p->after_sb_2p = cpu_to_be32(nc->after_sb_2p);
p->two_primaries = cpu_to_be32(nc->two_primaries);
cf = 0;
if (nc->discard_my_data)
cf |= CF_DISCARD_MY_DATA;
if (nc->tentative)
cf |= CF_DRY_RUN;
p->conn_flags = cpu_to_be32(cf);
if (connection->agreed_pro_version >= 87)
strcpy(p->integrity_alg, nc->integrity_alg);
rcu_read_unlock();
return __conn_send_command(connection, sock, cmd, size, NULL, 0);
}
int drbd_send_protocol(struct drbd_connection *connection)
{
int err;
mutex_lock(&connection->data.mutex);
err = __drbd_send_protocol(connection, P_PROTOCOL);
mutex_unlock(&connection->data.mutex);
return err;
}
static int _drbd_send_uuids(struct drbd_peer_device *peer_device, u64 uuid_flags)
{
struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_uuids *p;
int i;
if (!get_ldev_if_state(device, D_NEGOTIATING))
return 0;
sock = &peer_device->connection->data;
p = drbd_prepare_command(peer_device, sock);
if (!p) {
put_ldev(device);
return -EIO;
}
spin_lock_irq(&device->ldev->md.uuid_lock);
for (i = UI_CURRENT; i < UI_SIZE; i++)
p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
spin_unlock_irq(&device->ldev->md.uuid_lock);
device->comm_bm_set = drbd_bm_total_weight(device);
p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
rcu_read_lock();
uuid_flags |= rcu_dereference(peer_device->connection->net_conf)->discard_my_data ? 1 : 0;
rcu_read_unlock();
uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
put_ldev(device);
return drbd_send_command(peer_device, sock, P_UUIDS, sizeof(*p), NULL, 0);
}
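/*
 * uuid_flags bits as assembled in _drbd_send_uuids() above:
 * 1 = peer should discard its data, 2 = we were a crashed primary,
 * 4 = local disk is inconsistent; drbd_send_uuids_skip_initial_sync()
 * additionally passes 8 (skip the initial sync).
 */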
int drbd_send_uuids(struct drbd_peer_device *peer_device)
{
return _drbd_send_uuids(peer_device, 0);
}
int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *peer_device)
{
return _drbd_send_uuids(peer_device, 8);
}
void drbd_print_uuids(struct drbd_device *device, const char *text)
{
if (get_ldev_if_state(device, D_NEGOTIATING)) {
u64 *uuid = device->ldev->md.uuid;
drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n",
text,
(unsigned long long)uuid[UI_CURRENT],
(unsigned long long)uuid[UI_BITMAP],
(unsigned long long)uuid[UI_HISTORY_START],
(unsigned long long)uuid[UI_HISTORY_END]);
put_ldev(device);
} else {
drbd_info(device, "%s effective data uuid: %016llX\n",
text,
(unsigned long long)device->ed_uuid);
}
}
void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
{
struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_rs_uuid *p;
u64 uuid;
D_ASSERT(device, device->state.disk == D_UP_TO_DATE);
uuid = device->ldev->md.uuid[UI_BITMAP];
if (uuid && uuid != UUID_JUST_CREATED)
uuid = uuid + UUID_NEW_BM_OFFSET;
else
get_random_bytes(&uuid, sizeof(u64));
drbd_uuid_set(device, UI_BITMAP, uuid);
drbd_print_uuids(device, "updated sync UUID");
drbd_md_sync(device);
sock = &peer_device->connection->data;
p = drbd_prepare_command(peer_device, sock);
if (p) {
p->uuid = cpu_to_be64(uuid);
drbd_send_command(peer_device, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
}
}
int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags)
{
struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_sizes *p;
sector_t d_size, u_size;
int q_order_type;
unsigned int max_bio_size;
unsigned int packet_size;
sock = &peer_device->connection->data;
p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
packet_size = sizeof(*p);
if (peer_device->connection->agreed_features & DRBD_FF_WSAME)
packet_size += sizeof(p->qlim[0]);
memset(p, 0, packet_size);
if (get_ldev_if_state(device, D_NEGOTIATING)) {
struct block_device *bdev = device->ldev->backing_bdev;
struct request_queue *q = bdev_get_queue(bdev);
d_size = drbd_get_max_capacity(device->ldev);
rcu_read_lock();
u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
rcu_read_unlock();
q_order_type = drbd_queue_order_type(device);
max_bio_size = queue_max_hw_sectors(q) << 9;
max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
p->qlim->physical_block_size =
cpu_to_be32(bdev_physical_block_size(bdev));
p->qlim->logical_block_size =
cpu_to_be32(bdev_logical_block_size(bdev));
p->qlim->alignment_offset =
cpu_to_be32(bdev_alignment_offset(bdev));
p->qlim->io_min = cpu_to_be32(bdev_io_min(bdev));
p->qlim->io_opt = cpu_to_be32(bdev_io_opt(bdev));
p->qlim->discard_enabled = !!bdev_max_discard_sectors(bdev);
put_ldev(device);
} else {
struct request_queue *q = device->rq_queue;
p->qlim->physical_block_size =
cpu_to_be32(queue_physical_block_size(q));
p->qlim->logical_block_size =
cpu_to_be32(queue_logical_block_size(q));
p->qlim->alignment_offset = 0;
p->qlim->io_min = cpu_to_be32(queue_io_min(q));
p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
p->qlim->discard_enabled = 0;
d_size = 0;
u_size = 0;
q_order_type = QUEUE_ORDERED_NONE;
max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
}
if (peer_device->connection->agreed_pro_version <= 94)
max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
else if (peer_device->connection->agreed_pro_version < 100)
max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);
p->d_size = cpu_to_be64(d_size);
p->u_size = cpu_to_be64(u_size);
if (trigger_reply)
p->c_size = 0;
else
p->c_size = cpu_to_be64(get_capacity(device->vdisk));
p->max_bio_size = cpu_to_be32(max_bio_size);
p->queue_order_type = cpu_to_be16(q_order_type);
p->dds_flags = cpu_to_be16(flags);
return drbd_send_command(peer_device, sock, P_SIZES, packet_size, NULL, 0);
}
/**
* drbd_send_current_state() - Sends the drbd state to the peer
* @peer_device: DRBD peer device.
*/
int drbd_send_current_state(struct drbd_peer_device *peer_device)
{
struct drbd_socket *sock;
struct p_state *p;
sock = &peer_device->connection->data;
p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->state = cpu_to_be32(peer_device->device->state.i); /* Within the send mutex */
return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}
/**
* drbd_send_state() - After a state change, sends the new state to the peer
* @peer_device: DRBD peer device.
* @state: the state to send, not necessarily the current state.
*
* Each state change queues an "after_state_ch" work, which will eventually
* send the resulting new state to the peer. If more state changes happen
* between queuing and processing of the after_state_ch work, we still
* want to send each intermediary state in the order it occurred.
*/
int drbd_send_state(struct drbd_peer_device *peer_device, union drbd_state state)
{
struct drbd_socket *sock;
struct p_state *p;
sock = &peer_device->connection->data;
p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->state = cpu_to_be32(state.i); /* Within the send mutex */
return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}
int drbd_send_state_req(struct drbd_peer_device *peer_device, union drbd_state mask, union drbd_state val)
{
struct drbd_socket *sock;
struct p_req_state *p;
sock = &peer_device->connection->data;
p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->mask = cpu_to_be32(mask.i);
p->val = cpu_to_be32(val.i);
return drbd_send_command(peer_device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
}
int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{
enum drbd_packet cmd;
struct drbd_socket *sock;
struct p_req_state *p;
cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
sock = &connection->data;
p = conn_prepare_command(connection, sock);
if (!p)
return -EIO;
p->mask = cpu_to_be32(mask.i);
p->val = cpu_to_be32(val.i);
return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
}
void drbd_send_sr_reply(struct drbd_peer_device *peer_device, enum drbd_state_rv retcode)
{
struct drbd_socket *sock;
struct p_req_state_reply *p;
sock = &peer_device->connection->meta;
p = drbd_prepare_command(peer_device, sock);
if (p) {
p->retcode = cpu_to_be32(retcode);
drbd_send_command(peer_device, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
}
}
void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
{
struct drbd_socket *sock;
struct p_req_state_reply *p;
enum drbd_packet cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
sock = &connection->meta;
p = conn_prepare_command(connection, sock);
if (p) {
p->retcode = cpu_to_be32(retcode);
conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
}
}
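/*
 * Layout of the p_compressed_bm encoding byte, as manipulated by the
 * dcbp_set_*() helpers below: bits 0-3 hold the drbd_bitmap_code, bits 4-6
 * the number of pad bits in the last byte, and bit 7 the start value of the
 * first run.
 */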
static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
BUG_ON(code & ~0xf);
p->encoding = (p->encoding & ~0xf) | code;
}
static void dcbp_set_start(struct p_compressed_bm *p, int set)
{
p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}
static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
{
BUG_ON(n & ~0x7);
p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}
static int fill_bitmap_rle_bits(struct drbd_device *device,
struct p_compressed_bm *p,
unsigned int size,
struct bm_xfer_ctx *c)
{
struct bitstream bs;
unsigned long plain_bits;
unsigned long tmp;
unsigned long rl;
unsigned len;
unsigned toggle;
int bits, use_rle;
/* may we use this feature? */
rcu_read_lock();
use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
rcu_read_unlock();
if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
return 0;
if (c->bit_offset >= c->bm_bits)
return 0; /* nothing to do. */
/* use at most thus many bytes */
bitstream_init(&bs, p->code, size, 0);
memset(p->code, 0, size);
/* plain bits covered in this code string */
plain_bits = 0;
/* p->encoding & 0x80 stores whether the first run length is set.
* bit offset is implicit.
* start with toggle == 2 to be able to tell the first iteration */
toggle = 2;
/* see how many plain bits we can stuff into one packet
* using RLE and VLI. */
do {
tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset)
: _drbd_bm_find_next(device, c->bit_offset);
if (tmp == -1UL)
tmp = c->bm_bits;
rl = tmp - c->bit_offset;
if (toggle == 2) { /* first iteration */
if (rl == 0) {
/* the first checked bit was set,
* store start value, */
dcbp_set_start(p, 1);
/* but skip encoding of zero run length */
toggle = !toggle;
continue;
}
dcbp_set_start(p, 0);
}
/* paranoia: catch zero runlength.
* can only happen if bitmap is modified while we scan it. */
if (rl == 0) {
drbd_err(device, "unexpected zero runlength while encoding bitmap "
"t:%u bo:%lu\n", toggle, c->bit_offset);
return -1;
}
bits = vli_encode_bits(&bs, rl);
if (bits == -ENOBUFS) /* buffer full */
break;
if (bits <= 0) {
drbd_err(device, "error while encoding bitmap: %d\n", bits);
return 0;
}
toggle = !toggle;
plain_bits += rl;
c->bit_offset = tmp;
} while (c->bit_offset < c->bm_bits);
len = bs.cur.b - p->code + !!bs.cur.bit;
if (plain_bits < (len << 3)) {
/* incompressible with this method.
* we need to rewind both word and bit position. */
c->bit_offset -= plain_bits;
bm_xfer_ctx_bit_to_word_offset(c);
c->bit_offset = c->word_offset * BITS_PER_LONG;
return 0;
}
/* RLE + VLI was able to compress it just fine.
* update c->word_offset. */
bm_xfer_ctx_bit_to_word_offset(c);
/* store pad_bits */
dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
return len;
}
/*
* send_bitmap_rle_or_plain
*
* Return 0 when done, 1 when another iteration is needed, and a negative error
* code upon failure.
*/
static int
send_bitmap_rle_or_plain(struct drbd_peer_device *peer_device, struct bm_xfer_ctx *c)
{
struct drbd_device *device = peer_device->device;
struct drbd_socket *sock = &peer_device->connection->data;
unsigned int header_size = drbd_header_size(peer_device->connection);
struct p_compressed_bm *p = sock->sbuf + header_size;
int len, err;
len = fill_bitmap_rle_bits(device, p,
DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
if (len < 0)
return -EIO;
if (len) {
dcbp_set_code(p, RLE_VLI_Bits);
err = __send_command(peer_device->connection, device->vnr, sock,
P_COMPRESSED_BITMAP, sizeof(*p) + len,
NULL, 0);
c->packets[0]++;
c->bytes[0] += header_size + sizeof(*p) + len;
if (c->bit_offset >= c->bm_bits)
len = 0; /* DONE */
} else {
/* was not compressible.
* send a buffer full of plain text bits instead. */
unsigned int data_size;
unsigned long num_words;
unsigned long *p = sock->sbuf + header_size;
data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
num_words = min_t(size_t, data_size / sizeof(*p),
c->bm_words - c->word_offset);
len = num_words * sizeof(*p);
if (len)
drbd_bm_get_lel(device, c->word_offset, num_words, p);
err = __send_command(peer_device->connection, device->vnr, sock, P_BITMAP,
len, NULL, 0);
c->word_offset += num_words;
c->bit_offset = c->word_offset * BITS_PER_LONG;
c->packets[1]++;
c->bytes[1] += header_size + len;
if (c->bit_offset > c->bm_bits)
c->bit_offset = c->bm_bits;
}
if (!err) {
if (len == 0) {
INFO_bm_xfer_stats(peer_device, "send", c);
return 0;
} else
return 1;
}
return -EIO;
}
/* See the comment at receive_bitmap() */
static int _drbd_send_bitmap(struct drbd_device *device,
struct drbd_peer_device *peer_device)
{
struct bm_xfer_ctx c;
int err;
if (!expect(device, device->bitmap))
return false;
if (get_ldev(device)) {
if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
drbd_bm_set_all(device);
if (drbd_bm_write(device, peer_device)) {
/* write_bm did fail! Leave full sync flag set in Meta P_DATA
* but otherwise process as per normal - need to tell other
* side that a full resync is required! */
drbd_err(device, "Failed to write bitmap to disk!\n");
} else {
drbd_md_clear_flag(device, MDF_FULL_SYNC);
drbd_md_sync(device);
}
}
put_ldev(device);
}
c = (struct bm_xfer_ctx) {
.bm_bits = drbd_bm_bits(device),
.bm_words = drbd_bm_words(device),
};
do {
err = send_bitmap_rle_or_plain(peer_device, &c);
} while (err > 0);
return err == 0;
}
int drbd_send_bitmap(struct drbd_device *device, struct drbd_peer_device *peer_device)
{
struct drbd_socket *sock = &peer_device->connection->data;
int err = -1;
mutex_lock(&sock->mutex);
if (sock->socket)
err = !_drbd_send_bitmap(device, peer_device);
mutex_unlock(&sock->mutex);
return err;
}
void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
{
struct drbd_socket *sock;
struct p_barrier_ack *p;
if (connection->cstate < C_WF_REPORT_PARAMS)
return;
sock = &connection->meta;
p = conn_prepare_command(connection, sock);
if (!p)
return;
p->barrier = barrier_nr;
p->set_size = cpu_to_be32(set_size);
conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
}
/**
* _drbd_send_ack() - Sends an ack packet
* @peer_device: DRBD peer device.
* @cmd: Packet command code.
* @sector: sector, needs to be in big endian byte order
* @blksize: size in bytes, needs to be in big endian byte order
* @block_id: Id, big endian byte order
*/
static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
u64 sector, u32 blksize, u64 block_id)
{
struct drbd_socket *sock;
struct p_block_ack *p;
if (peer_device->device->state.conn < C_CONNECTED)
return -EIO;
sock = &peer_device->connection->meta;
p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->sector = sector;
p->block_id = block_id;
p->blksize = blksize;
p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
}
/* dp->sector and dp->block_id already/still in network byte order,
* data_size is payload size according to dp->head,
* and may need to be corrected for digest size. */
void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
struct p_data *dp, int data_size)
{
if (peer_device->connection->peer_integrity_tfm)
data_size -= crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
_drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
dp->block_id);
}
void drbd_send_ack_rp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
struct p_block_req *rp)
{
_drbd_send_ack(peer_device, cmd, rp->sector, rp->blksize, rp->block_id);
}
/**
* drbd_send_ack() - Sends an ack packet
* @peer_device: DRBD peer device
* @cmd: packet command code
* @peer_req: peer request
*/
int drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
struct drbd_peer_request *peer_req)
{
return _drbd_send_ack(peer_device, cmd,
cpu_to_be64(peer_req->i.sector),
cpu_to_be32(peer_req->i.size),
peer_req->block_id);
}
/* This function misuses the block_id field to signal if the blocks
* are in sync or not. */
int drbd_send_ack_ex(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
sector_t sector, int blksize, u64 block_id)
{
return _drbd_send_ack(peer_device, cmd,
cpu_to_be64(sector),
cpu_to_be32(blksize),
cpu_to_be64(block_id));
}
int drbd_send_rs_deallocated(struct drbd_peer_device *peer_device,
struct drbd_peer_request *peer_req)
{
struct drbd_socket *sock;
struct p_block_desc *p;
sock = &peer_device->connection->data;
p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->sector = cpu_to_be64(peer_req->i.sector);
p->blksize = cpu_to_be32(peer_req->i.size);
p->pad = 0;
return drbd_send_command(peer_device, sock, P_RS_DEALLOCATED, sizeof(*p), NULL, 0);
}
int drbd_send_drequest(struct drbd_peer_device *peer_device, int cmd,
sector_t sector, int size, u64 block_id)
{
struct drbd_socket *sock;
struct p_block_req *p;
sock = &peer_device->connection->data;
p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->sector = cpu_to_be64(sector);
p->block_id = block_id;
p->blksize = cpu_to_be32(size);
return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
}
int drbd_send_drequest_csum(struct drbd_peer_device *peer_device, sector_t sector, int size,
void *digest, int digest_size, enum drbd_packet cmd)
{
struct drbd_socket *sock;
struct p_block_req *p;
/* FIXME: Put the digest into the preallocated socket buffer. */
sock = &peer_device->connection->data;
p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->sector = cpu_to_be64(sector);
p->block_id = ID_SYNCER /* unused */;
p->blksize = cpu_to_be32(size);
return drbd_send_command(peer_device, sock, cmd, sizeof(*p), digest, digest_size);
}
int drbd_send_ov_request(struct drbd_peer_device *peer_device, sector_t sector, int size)
{
struct drbd_socket *sock;
struct p_block_req *p;
sock = &peer_device->connection->data;
p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->sector = cpu_to_be64(sector);
p->block_id = ID_SYNCER /* unused */;
p->blksize = cpu_to_be32(size);
return drbd_send_command(peer_device, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
}
/* called on sndtimeo
* returns false if we should retry,
* true if we think connection is dead
*/
static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
{
int drop_it;
/* long elapsed = (long)(jiffies - device->last_received); */
drop_it = connection->meta.socket == sock
|| !connection->ack_receiver.task
|| get_t_state(&connection->ack_receiver) != RUNNING
|| connection->cstate < C_WF_REPORT_PARAMS;
if (drop_it)
return true;
drop_it = !--connection->ko_count;
if (!drop_it) {
drbd_err(connection, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
current->comm, current->pid, connection->ko_count);
request_ping(connection);
}
return drop_it; /* && (device->state == R_PRIMARY) */;
}
static void drbd_update_congested(struct drbd_connection *connection)
{
struct sock *sk = connection->data.socket->sk;
if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
set_bit(NET_CONGESTED, &connection->flags);
}
/* The idea of sendpage seems to be to put some kind of reference
* to the page into the skb, and to hand it over to the NIC. In
* this process get_page() gets called.
*
* As soon as the page was really sent over the network put_page()
* gets called by some part of the network layer. [ NIC driver? ]
*
* [ get_page() / put_page() increment/decrement the count. If count
* reaches 0 the page will be freed. ]
*
* This works nicely with pages from FSs.
* But this means that in protocol A we might signal IO completion too early!
*
* In order not to corrupt data during a resync we must make sure
* that we do not reuse our own buffer pages (EEs) too early, therefore
* we have the net_ee list.
*
* XFS seems to have problems, still, it submits pages with page_count == 0!
* As a workaround, we disable sendpage on pages
* with page_count == 0 or PageSlab.
*/
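/* Fallback path: copy the page contents through a temporary kernel mapping
 * instead of handing the page itself to the network stack. */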
static int _drbd_no_send_page(struct drbd_peer_device *peer_device, struct page *page,
int offset, size_t size, unsigned msg_flags)
{
struct socket *socket;
void *addr;
int err;
socket = peer_device->connection->data.socket;
addr = kmap(page) + offset;
err = drbd_send_all(peer_device->connection, socket, addr, size, msg_flags);
kunmap(page);
if (!err)
peer_device->device->send_cnt += size >> 9;
return err;
}
static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *page,
int offset, size_t size, unsigned msg_flags)
{
struct socket *socket = peer_device->connection->data.socket;
struct msghdr msg = { .msg_flags = msg_flags, };
struct bio_vec bvec;
int len = size;
int err = -EIO;
/* e.g. XFS meta- & log-data is in slab pages, which have a
* page_count of 0 and/or have PageSlab() set.
* we cannot use send_page for those, as that does get_page();
* put_page(); and would cause either a VM_BUG directly, or
* __page_cache_release a page that would actually still be referenced
* by someone, leading to some obscure delayed Oops somewhere else. */
if (!drbd_disable_sendpage && sendpage_ok(page))
msg.msg_flags |= MSG_NOSIGNAL | MSG_SPLICE_PAGES;
drbd_update_congested(peer_device->connection);
do {
int sent;
bvec_set_page(&bvec, page, len, offset);
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
sent = sock_sendmsg(socket, &msg);
if (sent <= 0) {
if (sent == -EAGAIN) {
if (we_should_drop_the_connection(peer_device->connection, socket))
break;
continue;
}
drbd_warn(peer_device->device, "%s: size=%d len=%d sent=%d\n",
__func__, (int)size, len, sent);
if (sent < 0)
err = sent;
break;
}
len -= sent;
offset += sent;
} while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
clear_bit(NET_CONGESTED, &peer_device->connection->flags);
if (len == 0) {
err = 0;
peer_device->device->send_cnt += size >> 9;
}
return err;
}
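/*
 * _drbd_send_bio() pushes a bio's segments through the copying path
 * (_drbd_no_send_page), while _drbd_send_zc_bio() and _drbd_send_zc_ee()
 * use _drbd_send_page(), which may splice the pages zero-copy when
 * sendpage_ok() allows it.
 */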
static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{
struct bio_vec bvec;
struct bvec_iter iter;
/* hint all but last page with MSG_MORE */
bio_for_each_segment(bvec, bio, iter) {
int err;
err = _drbd_no_send_page(peer_device, bvec.bv_page,
bvec.bv_offset, bvec.bv_len,
bio_iter_last(bvec, iter)
? 0 : MSG_MORE);
if (err)
return err;
}
return 0;
}
static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{
struct bio_vec bvec;
struct bvec_iter iter;
/* hint all but last page with MSG_MORE */
bio_for_each_segment(bvec, bio, iter) {
int err;
err = _drbd_send_page(peer_device, bvec.bv_page,
bvec.bv_offset, bvec.bv_len,
bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
if (err)
return err;
}
return 0;
}
static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
struct drbd_peer_request *peer_req)
{
struct page *page = peer_req->pages;
unsigned len = peer_req->i.size;
int err;
/* hint all but last page with MSG_MORE */
page_chain_for_each(page) {
unsigned l = min_t(unsigned, len, PAGE_SIZE);
err = _drbd_send_page(peer_device, page, 0, l,
page_chain_next(page) ? MSG_MORE : 0);
if (err)
return err;
len -= l;
}
return 0;
}
static u32 bio_flags_to_wire(struct drbd_connection *connection,
struct bio *bio)
{
if (connection->agreed_pro_version >= 95)
return (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
(bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
(bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
(bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0) |
(bio_op(bio) == REQ_OP_WRITE_ZEROES ?
((connection->agreed_features & DRBD_FF_WZEROES) ?
(DP_ZEROES |(!(bio->bi_opf & REQ_NOUNMAP) ? DP_DISCARD : 0))
: DP_DISCARD)
: 0);
else
return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
}
/* Used to send write or TRIM aka REQ_OP_DISCARD requests
* R_PRIMARY -> Peer (P_DATA, P_TRIM)
*/
int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_data *p;
void *digest_out;
unsigned int dp_flags = 0;
int digest_size;
int err;
sock = &peer_device->connection->data;
p = drbd_prepare_command(peer_device, sock);
digest_size = peer_device->connection->integrity_tfm ?
crypto_shash_digestsize(peer_device->connection->integrity_tfm) : 0;
if (!p)
return -EIO;
p->sector = cpu_to_be64(req->i.sector);
p->block_id = (unsigned long)req;
p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio);
if (device->state.conn >= C_SYNC_SOURCE &&
device->state.conn <= C_PAUSED_SYNC_T)
dp_flags |= DP_MAY_SET_IN_SYNC;
if (peer_device->connection->agreed_pro_version >= 100) {
if (req->rq_state & RQ_EXP_RECEIVE_ACK)
dp_flags |= DP_SEND_RECEIVE_ACK;
/* During resync, request an explicit write ack,
* even in protocol != C */
if (req->rq_state & RQ_EXP_WRITE_ACK
|| (dp_flags & DP_MAY_SET_IN_SYNC))
dp_flags |= DP_SEND_WRITE_ACK;
}
p->dp_flags = cpu_to_be32(dp_flags);
if (dp_flags & (DP_DISCARD|DP_ZEROES)) {
enum drbd_packet cmd = (dp_flags & DP_ZEROES) ? P_ZEROES : P_TRIM;
struct p_trim *t = (struct p_trim*)p;
t->size = cpu_to_be32(req->i.size);
err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*t), NULL, 0);
goto out;
}
digest_out = p + 1;
/* our digest is still only over the payload.
* TRIM does not carry any payload. */
if (digest_size)
drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest_out);
err = __send_command(peer_device->connection, device->vnr, sock, P_DATA,
sizeof(*p) + digest_size, NULL, req->i.size);
if (!err) {
/* For protocol A, we have to memcpy the payload into
* socket buffers, as we may complete right away
* as soon as we handed it over to tcp, at which point the data
* pages may become invalid.
*
* For data-integrity enabled, we copy it as well, so we can be
* sure that even if the bio pages may still be modified, it
* won't change the data on the wire, thus if the digest checks
* out ok after sending on this side, but does not fit on the
* receiving side, we sure have detected corruption elsewhere.
*/
if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || digest_size)
err = _drbd_send_bio(peer_device, req->master_bio);
else
err = _drbd_send_zc_bio(peer_device, req->master_bio);
/* double check digest, sometimes buffers have been modified in flight. */
if (digest_size > 0 && digest_size <= 64) {
/* 64 bytes (512 bits) is the largest digest size
* currently supported in kernel crypto. */
unsigned char digest[64];
drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest);
if (memcmp(p + 1, digest, digest_size)) {
drbd_warn(device,
"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
(unsigned long long)req->i.sector, req->i.size);
}
} /* else if (digest_size > 64) {
... Be noisy about digest too large ...
} */
}
out:
mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
return err;
}
/* answer packet, used to send data back for read requests:
* Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
* C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
*/
int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
struct drbd_peer_request *peer_req)
{
struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_data *p;
int err;
int digest_size;
sock = &peer_device->connection->data;
p = drbd_prepare_command(peer_device, sock);
digest_size = peer_device->connection->integrity_tfm ?
crypto_shash_digestsize(peer_device->connection->integrity_tfm) : 0;
if (!p)
return -EIO;
p->sector = cpu_to_be64(peer_req->i.sector);
p->block_id = peer_req->block_id;
p->seq_num = 0; /* unused */
p->dp_flags = 0;
if (digest_size)
drbd_csum_ee(peer_device->connection->integrity_tfm, peer_req, p + 1);
err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + digest_size, NULL, peer_req->i.size);
if (!err)
err = _drbd_send_zc_ee(peer_device, peer_req);
mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
return err;
}
int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
struct drbd_socket *sock;
struct p_block_desc *p;
sock = &peer_device->connection->data;
p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->sector = cpu_to_be64(req->i.sector);
p->blksize = cpu_to_be32(req->i.size);
return drbd_send_command(peer_device, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
}
/*
drbd_send distinguishes two cases:
Packets sent via the data socket "sock"
and packets sent via the meta data socket "msock"
                  sock                      msock
-----------------+-------------------------+------------------------------
timeout           conf.timeout / 2          conf.timeout / 2
timeout action    send a ping via msock     Abort communication
                                            and close all sockets
*/
/*
* you must have down()ed the appropriate [m]sock_mutex elsewhere!
*/
int drbd_send(struct drbd_connection *connection, struct socket *sock,
void *buf, size_t size, unsigned msg_flags)
{
struct kvec iov = {.iov_base = buf, .iov_len = size};
struct msghdr msg = {.msg_flags = msg_flags | MSG_NOSIGNAL};
int rv, sent = 0;
if (!sock)
return -EBADR;
/* THINK if (signal_pending) return ... ? */
iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, size);
if (sock == connection->data.socket) {
rcu_read_lock();
connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
rcu_read_unlock();
drbd_update_congested(connection);
}
do {
rv = sock_sendmsg(sock, &msg);
if (rv == -EAGAIN) {
if (we_should_drop_the_connection(connection, sock))
break;
else
continue;
}
if (rv == -EINTR) {
flush_signals(current);
rv = 0;
}
if (rv < 0)
break;
sent += rv;
} while (sent < size);
if (sock == connection->data.socket)
clear_bit(NET_CONGESTED, &connection->flags);
if (rv <= 0) {
if (rv != -EAGAIN) {
drbd_err(connection, "%s_sendmsg returned %d\n",
sock == connection->meta.socket ? "msock" : "sock",
rv);
conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
} else
conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
}
return sent;
}
/*
* drbd_send_all - Send an entire buffer
*
* Returns 0 upon success and a negative error value otherwise.
*/
int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer,
size_t size, unsigned msg_flags)
{
int err;
err = drbd_send(connection, sock, buffer, size, msg_flags);
if (err < 0)
return err;
if (err != size)
return -EIO;
return 0;
}
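/*
 * drbd_open() - block device open method.
 * While the device is not Primary, writable opens are refused with -EROFS
 * and read-only opens with -EMEDIUMTYPE unless the allow_oos module
 * parameter is set; open_cnt is updated under resource->req_lock.
 */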
static int drbd_open(struct gendisk *disk, blk_mode_t mode)
{
struct drbd_device *device = disk->private_data;
unsigned long flags;
int rv = 0;
mutex_lock(&drbd_main_mutex);
spin_lock_irqsave(&device->resource->req_lock, flags);
/* to have a stable device->state.role
* and no race with updating open_cnt */
if (device->state.role != R_PRIMARY) {
if (mode & BLK_OPEN_WRITE)
rv = -EROFS;
else if (!drbd_allow_oos)
rv = -EMEDIUMTYPE;
}
if (!rv)
device->open_cnt++;
spin_unlock_irqrestore(&device->resource->req_lock, flags);
mutex_unlock(&drbd_main_mutex);
return rv;
}
static void drbd_release(struct gendisk *gd)
{
struct drbd_device *device = gd->private_data;
mutex_lock(&drbd_main_mutex);
device->open_cnt--;
mutex_unlock(&drbd_main_mutex);
}
/* need to hold resource->req_lock */
void drbd_queue_unplug(struct drbd_device *device)
{
if (device->state.pdsk >= D_INCONSISTENT && device->state.conn >= C_CONNECTED) {
D_ASSERT(device, device->state.role == R_PRIMARY);
if (test_and_clear_bit(UNPLUG_REMOTE, &device->flags)) {
drbd_queue_work_if_unqueued(
&first_peer_device(device)->connection->sender_work,
&device->unplug_work);
}
}
}
static void drbd_set_defaults(struct drbd_device *device)
{
/* Beware! The actual layout differs
* between big endian and little endian */
device->state = (union drbd_dev_state) {
{ .role = R_SECONDARY,
.peer = R_UNKNOWN,
.conn = C_STANDALONE,
.disk = D_DISKLESS,
.pdsk = D_UNKNOWN,
} };
}
void drbd_init_set_defaults(struct drbd_device *device)
{
/* the memset(,0,) did most of this.
* note: only assignments, no allocation in here */
drbd_set_defaults(device);
atomic_set(&device->ap_bio_cnt, 0);
atomic_set(&device->ap_actlog_cnt, 0);
atomic_set(&device->ap_pending_cnt, 0);
atomic_set(&device->rs_pending_cnt, 0);
atomic_set(&device->unacked_cnt, 0);
atomic_set(&device->local_cnt, 0);
atomic_set(&device->pp_in_use_by_net, 0);
atomic_set(&device->rs_sect_in, 0);
atomic_set(&device->rs_sect_ev, 0);
atomic_set(&device->ap_in_flight, 0);
atomic_set(&device->md_io.in_use, 0);
mutex_init(&device->own_state_mutex);
device->state_mutex = &device->own_state_mutex;
spin_lock_init(&device->al_lock);
spin_lock_init(&device->peer_seq_lock);
INIT_LIST_HEAD(&device->active_ee);
INIT_LIST_HEAD(&device->sync_ee);
INIT_LIST_HEAD(&device->done_ee);
INIT_LIST_HEAD(&device->read_ee);
INIT_LIST_HEAD(&device->net_ee);
INIT_LIST_HEAD(&device->resync_reads);
INIT_LIST_HEAD(&device->resync_work.list);
INIT_LIST_HEAD(&device->unplug_work.list);
INIT_LIST_HEAD(&device->bm_io_work.w.list);
INIT_LIST_HEAD(&device->pending_master_completion[0]);
INIT_LIST_HEAD(&device->pending_master_completion[1]);
INIT_LIST_HEAD(&device->pending_completion[0]);
INIT_LIST_HEAD(&device->pending_completion[1]);
device->resync_work.cb = w_resync_timer;
device->unplug_work.cb = w_send_write_hint;
device->bm_io_work.w.cb = w_bitmap_io;
timer_setup(&device->resync_timer, resync_timer_fn, 0);
timer_setup(&device->md_sync_timer, md_sync_timer_fn, 0);
timer_setup(&device->start_resync_timer, start_resync_timer_fn, 0);
timer_setup(&device->request_timer, request_timer_fn, 0);
init_waitqueue_head(&device->misc_wait);
init_waitqueue_head(&device->state_wait);
init_waitqueue_head(&device->ee_wait);
init_waitqueue_head(&device->al_wait);
init_waitqueue_head(&device->seq_wait);
device->resync_wenr = LC_FREE;
device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
}
void drbd_set_my_capacity(struct drbd_device *device, sector_t size)
{
char ppb[10];
set_capacity_and_notify(device->vdisk, size);
drbd_info(device, "size = %s (%llu KB)\n",
ppsize(ppb, size>>1), (unsigned long long)size>>1);
}
void drbd_device_cleanup(struct drbd_device *device)
{
int i;
if (first_peer_device(device)->connection->receiver.t_state != NONE)
drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
first_peer_device(device)->connection->receiver.t_state);
device->al_writ_cnt =
device->bm_writ_cnt =
device->read_cnt =
device->recv_cnt =
device->send_cnt =
device->writ_cnt =
device->p_size =
device->rs_start =
device->rs_total =
device->rs_failed = 0;
device->rs_last_events = 0;
device->rs_last_sect_ev = 0;
for (i = 0; i < DRBD_SYNC_MARKS; i++) {
device->rs_mark_left[i] = 0;
device->rs_mark_time[i] = 0;
}
D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);
set_capacity_and_notify(device->vdisk, 0);
if (device->bitmap) {
/* maybe never allocated. */
drbd_bm_resize(device, 0, 1);
drbd_bm_cleanup(device);
}
drbd_backing_dev_free(device, device->ldev);
device->ldev = NULL;
clear_bit(AL_SUSPENDED, &device->flags);
D_ASSERT(device, list_empty(&device->active_ee));
D_ASSERT(device, list_empty(&device->sync_ee));
D_ASSERT(device, list_empty(&device->done_ee));
D_ASSERT(device, list_empty(&device->read_ee));
D_ASSERT(device, list_empty(&device->net_ee));
D_ASSERT(device, list_empty(&device->resync_reads));
D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
D_ASSERT(device, list_empty(&device->resync_work.list));
D_ASSERT(device, list_empty(&device->unplug_work.list));
drbd_set_defaults(device);
}
static void drbd_destroy_mempools(void)
{
struct page *page;
while (drbd_pp_pool) {
page = drbd_pp_pool;
drbd_pp_pool = (struct page *)page_private(page);
__free_page(page);
drbd_pp_vacant--;
}
/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
bioset_exit(&drbd_io_bio_set);
bioset_exit(&drbd_md_io_bio_set);
mempool_exit(&drbd_md_io_page_pool);
mempool_exit(&drbd_ee_mempool);
mempool_exit(&drbd_request_mempool);
kmem_cache_destroy(drbd_ee_cache);
kmem_cache_destroy(drbd_request_cache);
kmem_cache_destroy(drbd_bm_ext_cache);
kmem_cache_destroy(drbd_al_ext_cache);
drbd_ee_cache = NULL;
drbd_request_cache = NULL;
drbd_bm_ext_cache = NULL;
drbd_al_ext_cache = NULL;
return;
}
static int drbd_create_mempools(void)
{
struct page *page;
const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
int i, ret;
/* caches */
drbd_request_cache = kmem_cache_create(
"drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
if (drbd_request_cache == NULL)
goto Enomem;
drbd_ee_cache = kmem_cache_create(
"drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
if (drbd_ee_cache == NULL)
goto Enomem;
drbd_bm_ext_cache = kmem_cache_create(
"drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
if (drbd_bm_ext_cache == NULL)
goto Enomem;
drbd_al_ext_cache = kmem_cache_create(
"drbd_al", sizeof(struct lc_element), 0, 0, NULL);
if (drbd_al_ext_cache == NULL)
goto Enomem;
/* mempools */
ret = bioset_init(&drbd_io_bio_set, BIO_POOL_SIZE, 0, 0);
if (ret)
goto Enomem;
ret = bioset_init(&drbd_md_io_bio_set, DRBD_MIN_POOL_PAGES, 0,
BIOSET_NEED_BVECS);
if (ret)
goto Enomem;
ret = mempool_init_page_pool(&drbd_md_io_page_pool, DRBD_MIN_POOL_PAGES, 0);
if (ret)
goto Enomem;
ret = mempool_init_slab_pool(&drbd_request_mempool, number,
drbd_request_cache);
if (ret)
goto Enomem;
ret = mempool_init_slab_pool(&drbd_ee_mempool, number, drbd_ee_cache);
if (ret)
goto Enomem;
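/*
 * Preallocate the DRBD page pool: the pages are chained into a simple
 * singly-linked list through their page_private field, with drbd_pp_pool
 * pointing at the head; drbd_destroy_mempools() walks the same chain.
 */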
for (i = 0; i < number; i++) {
page = alloc_page(GFP_HIGHUSER);
if (!page)
goto Enomem;
set_page_private(page, (unsigned long)drbd_pp_pool);
drbd_pp_pool = page;
}
drbd_pp_vacant = number;
return 0;
Enomem:
drbd_destroy_mempools(); /* in case we allocated some */
return -ENOMEM;
}
static void drbd_release_all_peer_reqs(struct drbd_device *device)
{
int rr;
rr = drbd_free_peer_reqs(device, &device->active_ee);
if (rr)
drbd_err(device, "%d EEs in active list found!\n", rr);
rr = drbd_free_peer_reqs(device, &device->sync_ee);
if (rr)
drbd_err(device, "%d EEs in sync list found!\n", rr);
rr = drbd_free_peer_reqs(device, &device->read_ee);
if (rr)
drbd_err(device, "%d EEs in read list found!\n", rr);
rr = drbd_free_peer_reqs(device, &device->done_ee);
if (rr)
drbd_err(device, "%d EEs in done list found!\n", rr);
rr = drbd_free_peer_reqs(device, &device->net_ee);
if (rr)
drbd_err(device, "%d EEs in net list found!\n", rr);
}
/* caution. no locking. */
void drbd_destroy_device(struct kref *kref)
{
struct drbd_device *device = container_of(kref, struct drbd_device, kref);
struct drbd_resource *resource = device->resource;
struct drbd_peer_device *peer_device, *tmp_peer_device;
timer_shutdown_sync(&device->request_timer);
/* paranoia asserts */
D_ASSERT(device, device->open_cnt == 0);
/* end paranoia asserts */
/* cleanup stuff that may have been allocated during
* device (re-)configuration or state changes */
drbd_backing_dev_free(device, device->ldev);
device->ldev = NULL;
drbd_release_all_peer_reqs(device);
lc_destroy(device->act_log);
lc_destroy(device->resync);
kfree(device->p_uuid);
/* device->p_uuid = NULL; */
if (device->bitmap) /* should no longer be there. */
drbd_bm_cleanup(device);
__free_page(device->md_io.page);
put_disk(device->vdisk);
kfree(device->rs_plan_s);
/* not for_each_connection(connection, resource):
* those may have been cleaned up and disassociated already.
*/
for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
kref_put(&peer_device->connection->kref, drbd_destroy_connection);
kfree(peer_device);
}
if (device->submit.wq)
destroy_workqueue(device->submit.wq);
kfree(device);
kref_put(&resource->kref, drbd_destroy_resource);
}
/* One global retry thread, if we need to push back some bio and have it
* reinserted through our make request function.
*/
static struct retry_worker {
struct workqueue_struct *wq;
struct work_struct worker;
spinlock_t lock;
struct list_head writes;
} retry;
static void do_retry(struct work_struct *ws)
{
struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
LIST_HEAD(writes);
struct drbd_request *req, *tmp;
spin_lock_irq(&retry->lock);
list_splice_init(&retry->writes, &writes);
spin_unlock_irq(&retry->lock);
list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
struct drbd_device *device = req->device;
struct bio *bio = req->master_bio;
bool expected;
expected =
expect(device, atomic_read(&req->completion_ref) == 0) &&
expect(device, req->rq_state & RQ_POSTPONED) &&
expect(device, (req->rq_state & RQ_LOCAL_PENDING) == 0 ||
(req->rq_state & RQ_LOCAL_ABORTED) != 0);
if (!expected)
drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n",
req, atomic_read(&req->completion_ref),
req->rq_state);
/* We still need to put one kref associated with the
* "completion_ref" going zero in the code path that queued it
* here. The request object may still be referenced by a
* frozen local req->private_bio, in case we force-detached.
*/
kref_put(&req->kref, drbd_req_destroy);
/* A single suspended or otherwise blocking device may stall
* all others as well. Fortunately, this code path is to
* recover from a situation that "should not happen":
* concurrent writes in multi-primary setup.
* In a "normal" lifecycle, this workqueue is supposed to be
* destroyed without ever doing anything.
* If it turns out to be an issue anyways, we can do per
* resource (replication group) or per device (minor) retry
* workqueues instead.
*/
/* We are not just doing submit_bio_noacct(),
* as we want to keep the start_time information. */
inc_ap_bio(device);
__drbd_make_request(device, bio);
}
}
/* called via drbd_req_put_completion_ref(),
* holds resource->req_lock */
void drbd_restart_request(struct drbd_request *req)
{
unsigned long flags;
spin_lock_irqsave(&retry.lock, flags);
list_move_tail(&req->tl_requests, &retry.writes);
spin_unlock_irqrestore(&retry.lock, flags);
/* Drop the extra reference that would otherwise
* have been dropped by complete_master_bio.
* do_retry() needs to grab a new one. */
dec_ap_bio(req->device);
queue_work(retry.wq, &retry.worker);
}
void drbd_destroy_resource(struct kref *kref)
{
struct drbd_resource *resource =
container_of(kref, struct drbd_resource, kref);
idr_destroy(&resource->devices);
free_cpumask_var(resource->cpu_mask);
kfree(resource->name);
kfree(resource);
}
void drbd_free_resource(struct drbd_resource *resource)
{
struct drbd_connection *connection, *tmp;
for_each_connection_safe(connection, tmp, resource) {
list_del(&connection->connections);
drbd_debugfs_connection_cleanup(connection);
kref_put(&connection->kref, drbd_destroy_connection);
}
drbd_debugfs_resource_cleanup(resource);
kref_put(&resource->kref, drbd_destroy_resource);
}
static void drbd_cleanup(void)
{
unsigned int i;
struct drbd_device *device;
struct drbd_resource *resource, *tmp;
/* first remove proc,
* drbdsetup uses its presence to detect
* whether DRBD is loaded.
* If we were to get stuck in proc removal
* while netlink is already deregistered,
* some drbdsetup commands may wait forever
* for an answer.
*/
if (drbd_proc)
remove_proc_entry("drbd", NULL);
if (retry.wq)
destroy_workqueue(retry.wq);
drbd_genl_unregister();
idr_for_each_entry(&drbd_devices, device, i)
drbd_delete_device(device);
/* not _rcu, since there is no other updater anymore; genl is already unregistered */
for_each_resource_safe(resource, tmp, &drbd_resources) {
list_del(&resource->resources);
drbd_free_resource(resource);
}
drbd_debugfs_cleanup();
drbd_destroy_mempools();
unregister_blkdev(DRBD_MAJOR, "drbd");
idr_destroy(&drbd_devices);
pr_info("module cleanup done.\n");
}
static void drbd_init_workqueue(struct drbd_work_queue* wq)
{
spin_lock_init(&wq->q_lock);
INIT_LIST_HEAD(&wq->q);
init_waitqueue_head(&wq->q_wait);
}
struct completion_work {
struct drbd_work w;
struct completion done;
};
static int w_complete(struct drbd_work *w, int cancel)
{
struct completion_work *completion_work =
container_of(w, struct completion_work, w);
complete(&completion_work->done);
return 0;
}
void drbd_flush_workqueue(struct drbd_work_queue *work_queue)
{
struct completion_work completion_work;
completion_work.w.cb = w_complete;
init_completion(&completion_work.done);
drbd_queue_work(work_queue, &completion_work.w);
wait_for_completion(&completion_work.done);
}
struct drbd_resource *drbd_find_resource(const char *name)
{
struct drbd_resource *resource;
if (!name || !name[0])
return NULL;
rcu_read_lock();
for_each_resource_rcu(resource, &drbd_resources) {
if (!strcmp(resource->name, name)) {
kref_get(&resource->kref);
goto found;
}
}
resource = NULL;
found:
rcu_read_unlock();
return resource;
}
struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
void *peer_addr, int peer_addr_len)
{
struct drbd_resource *resource;
struct drbd_connection *connection;
rcu_read_lock();
for_each_resource_rcu(resource, &drbd_resources) {
for_each_connection_rcu(connection, resource) {
if (connection->my_addr_len == my_addr_len &&
connection->peer_addr_len == peer_addr_len &&
!memcmp(&connection->my_addr, my_addr, my_addr_len) &&
!memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) {
kref_get(&connection->kref);
goto found;
}
}
}
connection = NULL;
found:
rcu_read_unlock();
return connection;
}
static int drbd_alloc_socket(struct drbd_socket *socket)
{
socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
if (!socket->rbuf)
return -ENOMEM;
socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
if (!socket->sbuf)
return -ENOMEM;
return 0;
}
static void drbd_free_socket(struct drbd_socket *socket)
{
free_page((unsigned long) socket->sbuf);
free_page((unsigned long) socket->rbuf);
}
void conn_free_crypto(struct drbd_connection *connection)
{
drbd_free_sock(connection);
crypto_free_shash(connection->csums_tfm);
crypto_free_shash(connection->verify_tfm);
crypto_free_shash(connection->cram_hmac_tfm);
crypto_free_shash(connection->integrity_tfm);
crypto_free_shash(connection->peer_integrity_tfm);
kfree(connection->int_dig_in);
kfree(connection->int_dig_vv);
connection->csums_tfm = NULL;
connection->verify_tfm = NULL;
connection->cram_hmac_tfm = NULL;
connection->integrity_tfm = NULL;
connection->peer_integrity_tfm = NULL;
connection->int_dig_in = NULL;
connection->int_dig_vv = NULL;
}
int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts)
{
struct drbd_connection *connection;
cpumask_var_t new_cpu_mask;
int err;
if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
return -ENOMEM;
/* silently ignore cpu mask on UP kernel */
if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
err = bitmap_parse(res_opts->cpu_mask, DRBD_CPU_MASK_SIZE,
cpumask_bits(new_cpu_mask), nr_cpu_ids);
if (err == -EOVERFLOW) {
/* So what. mask it out. */
cpumask_var_t tmp_cpu_mask;
if (zalloc_cpumask_var(&tmp_cpu_mask, GFP_KERNEL)) {
cpumask_setall(tmp_cpu_mask);
cpumask_and(new_cpu_mask, new_cpu_mask, tmp_cpu_mask);
drbd_warn(resource, "Overflow in bitmap_parse(%.12s%s), truncating to %u bits\n",
res_opts->cpu_mask,
strlen(res_opts->cpu_mask) > 12 ? "..." : "",
nr_cpu_ids);
free_cpumask_var(tmp_cpu_mask);
err = 0;
}
}
if (err) {
drbd_warn(resource, "bitmap_parse() failed with %d\n", err);
/* retcode = ERR_CPU_MASK_PARSE; */
goto fail;
}
}
resource->res_opts = *res_opts;
if (cpumask_empty(new_cpu_mask))
drbd_calc_cpu_mask(&new_cpu_mask);
if (!cpumask_equal(resource->cpu_mask, new_cpu_mask)) {
cpumask_copy(resource->cpu_mask, new_cpu_mask);
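/*
 * The per-connection threads notice reset_cpu_mask the next time they run
 * and presumably re-apply their CPU affinity from the new resource mask.
 */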
for_each_connection_rcu(connection, resource) {
connection->receiver.reset_cpu_mask = 1;
connection->ack_receiver.reset_cpu_mask = 1;
connection->worker.reset_cpu_mask = 1;
}
}
err = 0;
fail:
free_cpumask_var(new_cpu_mask);
return err;
}
struct drbd_resource *drbd_create_resource(const char *name)
{
struct drbd_resource *resource;
resource = kzalloc(sizeof(struct drbd_resource), GFP_KERNEL);
if (!resource)
goto fail;
resource->name = kstrdup(name, GFP_KERNEL);
if (!resource->name)
goto fail_free_resource;
if (!zalloc_cpumask_var(&resource->cpu_mask, GFP_KERNEL))
goto fail_free_name;
kref_init(&resource->kref);
idr_init(&resource->devices);
INIT_LIST_HEAD(&resource->connections);
resource->write_ordering = WO_BDEV_FLUSH;
list_add_tail_rcu(&resource->resources, &drbd_resources);
mutex_init(&resource->conf_update);
mutex_init(&resource->adm_mutex);
spin_lock_init(&resource->req_lock);
drbd_debugfs_resource_add(resource);
return resource;
fail_free_name:
kfree(resource->name);
fail_free_resource:
kfree(resource);
fail:
return NULL;
}
/* caller must be under adm_mutex */
struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
{
struct drbd_resource *resource;
struct drbd_connection *connection;
connection = kzalloc(sizeof(struct drbd_connection), GFP_KERNEL);
if (!connection)
return NULL;
if (drbd_alloc_socket(&connection->data))
goto fail;
if (drbd_alloc_socket(&connection->meta))
goto fail;
connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
if (!connection->current_epoch)
goto fail;
INIT_LIST_HEAD(&connection->transfer_log);
INIT_LIST_HEAD(&connection->current_epoch->list);
connection->epochs = 1;
spin_lock_init(&connection->epoch_lock);
connection->send.seen_any_write_yet = false;
connection->send.current_epoch_nr = 0;
connection->send.current_epoch_writes = 0;
resource = drbd_create_resource(name);
if (!resource)
goto fail;
connection->cstate = C_STANDALONE;
mutex_init(&connection->cstate_mutex);
init_waitqueue_head(&connection->ping_wait);
idr_init(&connection->peer_devices);
drbd_init_workqueue(&connection->sender_work);
mutex_init(&connection->data.mutex);
mutex_init(&connection->meta.mutex);
drbd_thread_init(resource, &connection->receiver, drbd_receiver, "receiver");
connection->receiver.connection = connection;
drbd_thread_init(resource, &connection->worker, drbd_worker, "worker");
connection->worker.connection = connection;
drbd_thread_init(resource, &connection->ack_receiver, drbd_ack_receiver, "ack_recv");
connection->ack_receiver.connection = connection;
kref_init(&connection->kref);
connection->resource = resource;
if (set_resource_options(resource, res_opts))
goto fail_resource;
kref_get(&resource->kref);
list_add_tail_rcu(&connection->connections, &resource->connections);
drbd_debugfs_connection_add(connection);
return connection;
fail_resource:
list_del(&resource->resources);
drbd_free_resource(resource);
fail:
kfree(connection->current_epoch);
drbd_free_socket(&connection->meta);
drbd_free_socket(&connection->data);
kfree(connection);
return NULL;
}
void drbd_destroy_connection(struct kref *kref)
{
struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
struct drbd_resource *resource = connection->resource;
if (atomic_read(&connection->current_epoch->epoch_size) != 0)
drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
kfree(connection->current_epoch);
idr_destroy(&connection->peer_devices);
drbd_free_socket(&connection->meta);
drbd_free_socket(&connection->data);
kfree(connection->int_dig_in);
kfree(connection->int_dig_vv);
kfree(connection);
kref_put(&resource->kref, drbd_destroy_resource);
}
static int init_submitter(struct drbd_device *device)
{
/* opencoded create_singlethread_workqueue(),
* to be able to say "drbd%d", ..., minor */
device->submit.wq =
alloc_ordered_workqueue("drbd%u_submit", WQ_MEM_RECLAIM, device->minor);
if (!device->submit.wq)
return -ENOMEM;
INIT_WORK(&device->submit.worker, do_submit);
INIT_LIST_HEAD(&device->submit.writes);
return 0;
}
enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
{
struct drbd_resource *resource = adm_ctx->resource;
struct drbd_connection *connection, *n;
struct drbd_device *device;
struct drbd_peer_device *peer_device, *tmp_peer_device;
struct gendisk *disk;
int id;
int vnr = adm_ctx->volume;
enum drbd_ret_code err = ERR_NOMEM;
device = minor_to_device(minor);
if (device)
return ERR_MINOR_OR_VOLUME_EXISTS;
/* GFP_KERNEL, we are outside of all write-out paths */
device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
if (!device)
return ERR_NOMEM;
kref_init(&device->kref);
kref_get(&resource->kref);
device->resource = resource;
device->minor = minor;
device->vnr = vnr;
drbd_init_set_defaults(device);
disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
goto out_no_disk;
device->vdisk = disk;
device->rq_queue = disk->queue;
set_disk_ro(disk, true);
disk->major = DRBD_MAJOR;
disk->first_minor = minor;
disk->minors = 1;
disk->fops = &drbd_ops;
disk->flags |= GENHD_FL_NO_PART;
sprintf(disk->disk_name, "drbd%d", minor);
disk->private_data = device;
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue);
blk_queue_write_cache(disk->queue, true, true);
/* Setting the max_hw_sectors to an odd value of 8 KiB here;
this triggers a max_bio_size message upon first attach or connect */
blk_queue_max_hw_sectors(disk->queue, DRBD_MAX_BIO_SIZE_SAFE >> 8);
device->md_io.page = alloc_page(GFP_KERNEL);
if (!device->md_io.page)
goto out_no_io_page;
if (drbd_bm_init(device))
goto out_no_bitmap;
device->read_requests = RB_ROOT;
device->write_requests = RB_ROOT;
id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
if (id < 0) {
if (id == -ENOSPC)
err = ERR_MINOR_OR_VOLUME_EXISTS;
goto out_no_minor_idr;
}
kref_get(&device->kref);
id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
if (id < 0) {
if (id == -ENOSPC)
err = ERR_MINOR_OR_VOLUME_EXISTS;
goto out_idr_remove_minor;
}
kref_get(&device->kref);
INIT_LIST_HEAD(&device->peer_devices);
INIT_LIST_HEAD(&device->pending_bitmap_io);
for_each_connection(connection, resource) {
peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
if (!peer_device)
goto out_idr_remove_from_resource;
peer_device->connection = connection;
peer_device->device = device;
list_add(&peer_device->peer_devices, &device->peer_devices);
kref_get(&device->kref);
id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
if (id < 0) {
if (id == -ENOSPC)
err = ERR_INVALID_REQUEST;
goto out_idr_remove_from_resource;
}
kref_get(&connection->kref);
INIT_WORK(&peer_device->send_acks_work, drbd_send_acks_wf);
}
if (init_submitter(device)) {
err = ERR_NOMEM;
goto out_idr_remove_from_resource;
}
err = add_disk(disk);
if (err)
goto out_destroy_workqueue;
/* inherit the connection state */
device->state.conn = first_connection(resource)->cstate;
if (device->state.conn == C_WF_REPORT_PARAMS) {
for_each_peer_device(peer_device, device)
drbd_connected(peer_device);
}
/* move to create_peer_device() */
for_each_peer_device(peer_device, device)
drbd_debugfs_peer_device_add(peer_device);
drbd_debugfs_device_add(device);
return NO_ERROR;
out_destroy_workqueue:
destroy_workqueue(device->submit.wq);
out_idr_remove_from_resource:
for_each_connection_safe(connection, n, resource) {
peer_device = idr_remove(&connection->peer_devices, vnr);
if (peer_device)
kref_put(&connection->kref, drbd_destroy_connection);
}
for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
list_del(&peer_device->peer_devices);
kfree(peer_device);
}
idr_remove(&resource->devices, vnr);
out_idr_remove_minor:
idr_remove(&drbd_devices, minor);
synchronize_rcu();
out_no_minor_idr:
drbd_bm_cleanup(device);
out_no_bitmap:
__free_page(device->md_io.page);
out_no_io_page:
put_disk(disk);
out_no_disk:
kref_put(&resource->kref, drbd_destroy_resource);
kfree(device);
return err;
}
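/*
 * Reference counting note: drbd_create_device() takes one device kref per
 * successful idr_alloc() (minor idr, resource idr, and one per peer_device),
 * on top of the initial kref_init(). drbd_delete_device() below mirrors this
 * by dropping one reference per idr_remove() and a final one after
 * synchronize_rcu().
 */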
void drbd_delete_device(struct drbd_device *device)
{
struct drbd_resource *resource = device->resource;
struct drbd_connection *connection;
struct drbd_peer_device *peer_device;
/* move to free_peer_device() */
for_each_peer_device(peer_device, device)
drbd_debugfs_peer_device_cleanup(peer_device);
drbd_debugfs_device_cleanup(device);
for_each_connection(connection, resource) {
idr_remove(&connection->peer_devices, device->vnr);
kref_put(&device->kref, drbd_destroy_device);
}
idr_remove(&resource->devices, device->vnr);
kref_put(&device->kref, drbd_destroy_device);
idr_remove(&drbd_devices, device_to_minor(device));
kref_put(&device->kref, drbd_destroy_device);
del_gendisk(device->vdisk);
synchronize_rcu();
kref_put(&device->kref, drbd_destroy_device);
}
static int __init drbd_init(void)
{
int err;
if (drbd_minor_count < DRBD_MINOR_COUNT_MIN || drbd_minor_count > DRBD_MINOR_COUNT_MAX) {
pr_err("invalid minor_count (%d)\n", drbd_minor_count);
#ifdef MODULE
return -EINVAL;
#else
drbd_minor_count = DRBD_MINOR_COUNT_DEF;
#endif
}
err = register_blkdev(DRBD_MAJOR, "drbd");
if (err) {
pr_err("unable to register block device major %d\n",
DRBD_MAJOR);
return err;
}
/*
* allocate all necessary structs
*/
init_waitqueue_head(&drbd_pp_wait);
drbd_proc = NULL; /* play safe for drbd_cleanup */
idr_init(&drbd_devices);
mutex_init(&resources_mutex);
INIT_LIST_HEAD(&drbd_resources);
err = drbd_genl_register();
if (err) {
pr_err("unable to register generic netlink family\n");
goto fail;
}
err = drbd_create_mempools();
if (err)
goto fail;
err = -ENOMEM;
drbd_proc = proc_create_single("drbd", S_IFREG | 0444 , NULL, drbd_seq_show);
if (!drbd_proc) {
pr_err("unable to register proc file\n");
goto fail;
}
retry.wq = create_singlethread_workqueue("drbd-reissue");
if (!retry.wq) {
pr_err("unable to create retry workqueue\n");
goto fail;
}
INIT_WORK(&retry.worker, do_retry);
spin_lock_init(&retry.lock);
INIT_LIST_HEAD(&retry.writes);
drbd_debugfs_init();
pr_info("initialized. "
"Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
GENL_MAGIC_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
pr_info("%s\n", drbd_buildtag());
pr_info("registered as block device major %d\n", DRBD_MAJOR);
return 0; /* Success! */
fail:
drbd_cleanup();
if (err == -ENOMEM)
pr_err("ran out of memory\n");
else
pr_err("initialization failure\n");
return err;
}
static void drbd_free_one_sock(struct drbd_socket *ds)
{
struct socket *s;
mutex_lock(&ds->mutex);
s = ds->socket;
ds->socket = NULL;
mutex_unlock(&ds->mutex);
if (s) {
/* so debugfs does not need to mutex_lock() */
synchronize_rcu();
kernel_sock_shutdown(s, SHUT_RDWR);
sock_release(s);
}
}
void drbd_free_sock(struct drbd_connection *connection)
{
if (connection->data.socket)
drbd_free_one_sock(&connection->data);
if (connection->meta.socket)
drbd_free_one_sock(&connection->meta);
}
/* meta data management */
void conn_md_sync(struct drbd_connection *connection)
{
struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
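/* Hold a device reference so we can drop the RCU read lock across
 * drbd_md_sync(), which may sleep. */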
kref_get(&device->kref);
rcu_read_unlock();
drbd_md_sync(device);
kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
}
rcu_read_unlock();
}
/* aligned 4kByte */
struct meta_data_on_disk {
u64 la_size_sect; /* last agreed size. */
u64 uuid[UI_SIZE]; /* UUIDs. */
u64 device_uuid;
u64 reserved_u64_1;
u32 flags; /* MDF */
u32 magic;
u32 md_size_sect;
u32 al_offset; /* offset to this block */
u32 al_nr_extents; /* important for restoring the AL (userspace) */
/* `-- act_log->nr_elements <-- ldev->dc.al_extents */
u32 bm_offset; /* offset to the bitmap, from here */
u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
u32 la_peer_max_bio_size; /* last peer max_bio_size */
/* see al_tr_number_to_on_disk_sector() */
u32 al_stripes;
u32 al_stripe_size_4k;
u8 reserved_u8[4096 - (7*8 + 10*4)];
} __packed;
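/*
 * Size check (informational): the fixed members above are 7 u64s (56 bytes)
 * plus 10 u32s (40 bytes) = 96 bytes, so reserved_u8[] pads the structure to
 * exactly 4096 bytes; drbd_md_sync() enforces this with a BUILD_BUG_ON().
 */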
void drbd_md_write(struct drbd_device *device, void *b)
{
struct meta_data_on_disk *buffer = b;
sector_t sector;
int i;
memset(buffer, 0, sizeof(*buffer));
buffer->la_size_sect = cpu_to_be64(get_capacity(device->vdisk));
for (i = UI_CURRENT; i < UI_SIZE; i++)
buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
buffer->flags = cpu_to_be32(device->ldev->md.flags);
buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);
buffer->md_size_sect = cpu_to_be32(device->ldev->md.md_size_sect);
buffer->al_offset = cpu_to_be32(device->ldev->md.al_offset);
buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements);
buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid);
buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset);
buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size);
buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);
D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
sector = device->ldev->md.md_offset;
if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
/* this was a try anyways ... */
drbd_err(device, "meta data update failed!\n");
drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
}
}
/**
* drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
* @device: DRBD device.
*/
void drbd_md_sync(struct drbd_device *device)
{
struct meta_data_on_disk *buffer;
/* Don't accidentally change the DRBD meta data layout. */
BUILD_BUG_ON(UI_SIZE != 4);
BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
del_timer(&device->md_sync_timer);
/* timer may be rearmed by drbd_md_mark_dirty() now. */
if (!test_and_clear_bit(MD_DIRTY, &device->flags))
return;
/* We use D_FAILED here and not D_ATTACHING because we try to write
* metadata even if we detach due to a disk failure! */
if (!get_ldev_if_state(device, D_FAILED))
return;
buffer = drbd_md_get_buffer(device, __func__);
if (!buffer)
goto out;
drbd_md_write(device, buffer);
/* Update device->ldev->md.la_size_sect,
* since we updated it on metadata. */
device->ldev->md.la_size_sect = get_capacity(device->vdisk);
drbd_md_put_buffer(device);
out:
put_ldev(device);
}
static int check_activity_log_stripe_size(struct drbd_device *device,
struct meta_data_on_disk *on_disk,
struct drbd_md *in_core)
{
u32 al_stripes = be32_to_cpu(on_disk->al_stripes);
u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k);
u64 al_size_4k;
/* both not set: default to old fixed size activity log */
if (al_stripes == 0 && al_stripe_size_4k == 0) {
al_stripes = 1;
al_stripe_size_4k = MD_32kB_SECT/8;
}
/* some paranoia plausibility checks */
/* we need both values to be set */
if (al_stripes == 0 || al_stripe_size_4k == 0)
goto err;
al_size_4k = (u64)al_stripes * al_stripe_size_4k;
/* Upper limit of activity log area, to avoid potential overflow
* problems in al_tr_number_to_on_disk_sector(). Since, right now, more
* than 72 * 4k blocks total only increases the amount of history,
* limiting this arbitrarily to 16 GB is not a real limitation ;-) */
if (al_size_4k > (16 * 1024 * 1024/4))
goto err;
/* Lower limit: we need at least 8 transaction slots (32kB)
* to not break existing setups */
if (al_size_4k < MD_32kB_SECT/8)
goto err;
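/*
 * For reference (assuming MD_32kB_SECT is 32 KiB expressed in 512-byte
 * sectors, i.e. 64): the lower limit above is 64/8 = 8 4k-blocks = 32 KiB,
 * and the upper limit is 16*1024*1024/4 = 4194304 4k-blocks = 16 GiB.
 */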
in_core->al_stripe_size_4k = al_stripe_size_4k;
in_core->al_stripes = al_stripes;
in_core->al_size_4k = al_size_4k;
return 0;
err:
drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
al_stripes, al_stripe_size_4k);
return -EINVAL;
}
static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
{
sector_t capacity = drbd_get_capacity(bdev->md_bdev);
struct drbd_md *in_core = &bdev->md;
s32 on_disk_al_sect;
s32 on_disk_bm_sect;
/* The on-disk size of the activity log, calculated from offsets, and
* the size of the activity log calculated from the stripe settings,
* should match.
* Though we could relax this a bit: it is ok if the striped activity log
* fits in the available on-disk activity log size.
* Right now, that would break how resize is implemented.
* TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware
* of possible unused padding space in the on disk layout. */
if (in_core->al_offset < 0) {
if (in_core->bm_offset > in_core->al_offset)
goto err;
on_disk_al_sect = -in_core->al_offset;
on_disk_bm_sect = in_core->al_offset - in_core->bm_offset;
} else {
if (in_core->al_offset != MD_4kB_SECT)
goto err;
if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT)
goto err;
on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT;
on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset;
}
/* old fixed size meta data is exactly that: fixed. */
if (in_core->meta_dev_idx >= 0) {
if (in_core->md_size_sect != MD_128MB_SECT
|| in_core->al_offset != MD_4kB_SECT
|| in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT
|| in_core->al_stripes != 1
|| in_core->al_stripe_size_4k != MD_32kB_SECT/8)
goto err;
}
if (capacity < in_core->md_size_sect)
goto err;
if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev))
goto err;
/* should be aligned, and at least 32k */
if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT))
goto err;
/* should fit (for now: exactly) into the available on-disk space;
* overflow prevention is in check_activity_log_stripe_size() above. */
if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT)
goto err;
/* again, should be aligned */
if (in_core->bm_offset & 7)
goto err;
/* FIXME check for device grow with flex external meta data? */
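/*
 * Rough arithmetic of the check below (assuming MD_4kB_SECT == 8 and one
 * bitmap bit per 4 KiB == 8 sectors of data): bits ~ la_size_sect/8,
 * bytes = bits/8, on-disk sectors = bytes/512.
 */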
/* can the available bitmap space cover the last agreed device size? */
if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512)
goto err;
return 0;
err:
drbd_err(device, "meta data offsets don't make sense: idx=%d "
"al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
"md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
in_core->meta_dev_idx,
in_core->al_stripes, in_core->al_stripe_size_4k,
in_core->al_offset, in_core->bm_offset, in_core->md_size_sect,
(unsigned long long)in_core->la_size_sect,
(unsigned long long)capacity);
return -EINVAL;
}
/**
* drbd_md_read() - Reads in the meta data super block
* @device: DRBD device.
* @bdev: Device from which the meta data should be read in.
*
* Returns NO_ERROR on success, or an enum drbd_ret_code in case
* something goes wrong.
*
* Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
* even before @bdev is assigned to @device->ldev.
*/
int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
{
struct meta_data_on_disk *buffer;
u32 magic, flags;
int i, rv = NO_ERROR;
if (device->state.disk != D_DISKLESS)
return ERR_DISK_CONFIGURED;
buffer = drbd_md_get_buffer(device, __func__);
if (!buffer)
return ERR_NOMEM;
/* First, figure out where our meta data superblock is located,
* and read it. */
bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
bdev->md.md_offset = drbd_md_ss(bdev);
/* Even for (flexible or indexed) external meta data,
* initially restrict ourselves to the 4k superblock for now.
* Affects the paranoia out-of-range access check in drbd_md_sync_page_io(). */
bdev->md.md_size_sect = 8;
if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset,
REQ_OP_READ)) {
/* NOTE: can't do normal error processing here as this is
called BEFORE disk is attached */
drbd_err(device, "Error while reading metadata.\n");
rv = ERR_IO_MD_DISK;
goto err;
}
magic = be32_to_cpu(buffer->magic);
flags = be32_to_cpu(buffer->flags);
if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
(magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
/* btw: that's Activity Log clean, not "all" clean. */
drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
rv = ERR_MD_UNCLEAN;
goto err;
}
rv = ERR_MD_INVALID;
if (magic != DRBD_MD_MAGIC_08) {
if (magic == DRBD_MD_MAGIC_07)
drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
else
drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
goto err;
}
if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
goto err;
}
/* convert to in_core endian */
bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
for (i = UI_CURRENT; i < UI_SIZE; i++)
bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
bdev->md.flags = be32_to_cpu(buffer->flags);
bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);
if (check_activity_log_stripe_size(device, buffer, &bdev->md))
goto err;
if (check_offsets_and_sizes(device, bdev))
goto err;
if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
drbd_err(device, "unexpected bm_offset: %d (expected %d)\n",
be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
goto err;
}
if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
drbd_err(device, "unexpected md_size: %u (expected %u)\n",
be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
goto err;
}
rv = NO_ERROR;
spin_lock_irq(&device->resource->req_lock);
if (device->state.conn < C_CONNECTED) {
unsigned int peer;
peer = be32_to_cpu(buffer->la_peer_max_bio_size);
peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
device->peer_max_bio_size = peer;
}
spin_unlock_irq(&device->resource->req_lock);
err:
drbd_md_put_buffer(device);
return rv;
}
/**
* drbd_md_mark_dirty() - Mark meta data super block as dirty
* @device: DRBD device.
*
* Call this function if you change anything that should be written to
* the meta-data super block. This function sets MD_DIRTY, and starts a
* timer that ensures drbd_md_sync() gets called within five seconds.
*/
void drbd_md_mark_dirty(struct drbd_device *device)
{
if (!test_and_set_bit(MD_DIRTY, &device->flags))
mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
}
void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
{
int i;
for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i];
}
void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
if (idx == UI_CURRENT) {
if (device->state.role == R_PRIMARY)
val |= 1;
else
val &= ~((u64)1);
drbd_set_ed_uuid(device, val);
}
device->ldev->md.uuid[idx] = val;
drbd_md_mark_dirty(device);
}
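/*
 * Note: as seen above, the lowest bit of the current UUID is used as a flag;
 * it is set when the UUID is written while this node is Primary and cleared
 * otherwise.
 */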
void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
unsigned long flags;
spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
__drbd_uuid_set(device, idx, val);
spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
}
void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
unsigned long flags;
spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
if (device->ldev->md.uuid[idx]) {
drbd_uuid_move_history(device);
device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx];
}
__drbd_uuid_set(device, idx, val);
spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
}
/**
* drbd_uuid_new_current() - Creates a new current UUID
* @device: DRBD device.
*
* Creates a new current UUID, and rotates the old current UUID into
* the bitmap slot. Causes an incremental resync upon next connect.
*/
void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
{
u64 val;
unsigned long long bm_uuid;
get_random_bytes(&val, sizeof(u64));
spin_lock_irq(&device->ldev->md.uuid_lock);
bm_uuid = device->ldev->md.uuid[UI_BITMAP];
if (bm_uuid)
drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
__drbd_uuid_set(device, UI_CURRENT, val);
spin_unlock_irq(&device->ldev->md.uuid_lock);
drbd_print_uuids(device, "new current UUID");
/* get it to stable storage _now_ */
drbd_md_sync(device);
}
void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
{
unsigned long flags;
if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
return;
spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
if (val == 0) {
drbd_uuid_move_history(device);
device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
device->ldev->md.uuid[UI_BITMAP] = 0;
} else {
unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
if (bm_uuid)
drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
}
spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
drbd_md_mark_dirty(device);
}
/**
* drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
* @device: DRBD device.
*
* Sets all bits in the bitmap and writes the whole bitmap to stable storage.
*/
int drbd_bmio_set_n_write(struct drbd_device *device,
struct drbd_peer_device *peer_device) __must_hold(local)
{
int rv = -EIO;
drbd_md_set_flag(device, MDF_FULL_SYNC);
drbd_md_sync(device);
drbd_bm_set_all(device);
rv = drbd_bm_write(device, peer_device);
if (!rv) {
drbd_md_clear_flag(device, MDF_FULL_SYNC);
drbd_md_sync(device);
}
return rv;
}
/**
* drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
* @device: DRBD device.
*
* Clears all bits in the bitmap and writes the whole bitmap to stable storage.
*/
int drbd_bmio_clear_n_write(struct drbd_device *device,
struct drbd_peer_device *peer_device) __must_hold(local)
{
drbd_resume_al(device);
drbd_bm_clear_all(device);
return drbd_bm_write(device, peer_device);
}
static int w_bitmap_io(struct drbd_work *w, int unused)
{
struct drbd_device *device =
container_of(w, struct drbd_device, bm_io_work.w);
struct bm_io_work *work = &device->bm_io_work;
int rv = -EIO;
if (work->flags != BM_LOCKED_CHANGE_ALLOWED) {
int cnt = atomic_read(&device->ap_bio_cnt);
if (cnt)
drbd_err(device, "FIXME: ap_bio_cnt %d, expected 0; queued for '%s'\n",
cnt, work->why);
}
if (get_ldev(device)) {
drbd_bm_lock(device, work->why, work->flags);
rv = work->io_fn(device, work->peer_device);
drbd_bm_unlock(device);
put_ldev(device);
}
clear_bit_unlock(BITMAP_IO, &device->flags);
wake_up(&device->misc_wait);
if (work->done)
work->done(device, rv);
clear_bit(BITMAP_IO_QUEUED, &device->flags);
work->why = NULL;
work->flags = 0;
return 0;
}
/**
* drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
* @device: DRBD device.
* @io_fn: IO callback to be called when bitmap IO is possible
* @done: callback to be called after the bitmap IO was performed
* @why: Descriptive text of the reason for doing the IO
* @flags: Bitmap flags
*
* While IO on the bitmap happens we freeze application IO, thus ensuring
* that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
* called from worker context. It MUST NOT be used while a previous such
* work is still pending!
*
* Its worker function encloses the call of io_fn() by get_ldev() and
* put_ldev().
*/
void drbd_queue_bitmap_io(struct drbd_device *device,
int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
void (*done)(struct drbd_device *, int),
char *why, enum bm_flag flags,
struct drbd_peer_device *peer_device)
{
D_ASSERT(device, current == peer_device->connection->worker.task);
D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
if (device->bm_io_work.why)
drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
why, device->bm_io_work.why);
device->bm_io_work.peer_device = peer_device;
device->bm_io_work.io_fn = io_fn;
device->bm_io_work.done = done;
device->bm_io_work.why = why;
device->bm_io_work.flags = flags;
spin_lock_irq(&device->resource->req_lock);
set_bit(BITMAP_IO, &device->flags);
/* don't wait for pending application IO if the caller indicates that
* application IO does not conflict anyways. */
if (flags == BM_LOCKED_CHANGE_ALLOWED || atomic_read(&device->ap_bio_cnt) == 0) {
if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
drbd_queue_work(&peer_device->connection->sender_work,
&device->bm_io_work.w);
}
spin_unlock_irq(&device->resource->req_lock);
}
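/*
 * The BITMAP_IO / BITMAP_IO_QUEUED pair set above is cleared again in
 * w_bitmap_io() once io_fn() has run; if application IO is still pending
 * here, queuing the work item is presumably deferred until ap_bio_cnt
 * drops to zero elsewhere.
 */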
/**
* drbd_bitmap_io() - Does an IO operation on the whole bitmap
* @device: DRBD device.
* @io_fn: IO callback to be called when bitmap IO is possible
* @why: Descriptive text of the reason for doing the IO
* @flags: Bitmap flags
*
* Freezes application IO while the actual IO operation runs. This
* function MAY NOT be called from worker context.
*/
int drbd_bitmap_io(struct drbd_device *device,
int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
char *why, enum bm_flag flags,
struct drbd_peer_device *peer_device)
{
/* Only suspend io if some operation is supposed to be locked out */
const bool do_suspend_io = flags & (BM_DONT_CLEAR|BM_DONT_SET|BM_DONT_TEST);
int rv;
D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
if (do_suspend_io)
drbd_suspend_io(device);
drbd_bm_lock(device, why, flags);
rv = io_fn(device, peer_device);
drbd_bm_unlock(device);
if (do_suspend_io)
drbd_resume_io(device);
return rv;
}
void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local)
{
if ((device->ldev->md.flags & flag) != flag) {
drbd_md_mark_dirty(device);
device->ldev->md.flags |= flag;
}
}
void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local)
{
if ((device->ldev->md.flags & flag) != 0) {
drbd_md_mark_dirty(device);
device->ldev->md.flags &= ~flag;
}
}
int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
{
return (bdev->md.flags & flag) != 0;
}
static void md_sync_timer_fn(struct timer_list *t)
{
struct drbd_device *device = from_timer(device, t, md_sync_timer);
drbd_device_post_work(device, MD_SYNC);
}
const char *cmdname(enum drbd_packet cmd)
{
/* THINK may need to become several global tables
* when we want to support more than
* one PRO_VERSION */
static const char *cmdnames[] = {
[P_DATA] = "Data",
[P_DATA_REPLY] = "DataReply",
[P_RS_DATA_REPLY] = "RSDataReply",
[P_BARRIER] = "Barrier",
[P_BITMAP] = "ReportBitMap",
[P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
[P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
[P_UNPLUG_REMOTE] = "UnplugRemote",
[P_DATA_REQUEST] = "DataRequest",
[P_RS_DATA_REQUEST] = "RSDataRequest",
[P_SYNC_PARAM] = "SyncParam",
[P_PROTOCOL] = "ReportProtocol",
[P_UUIDS] = "ReportUUIDs",
[P_SIZES] = "ReportSizes",
[P_STATE] = "ReportState",
[P_SYNC_UUID] = "ReportSyncUUID",
[P_AUTH_CHALLENGE] = "AuthChallenge",
[P_AUTH_RESPONSE] = "AuthResponse",
[P_STATE_CHG_REQ] = "StateChgRequest",
[P_PING] = "Ping",
[P_PING_ACK] = "PingAck",
[P_RECV_ACK] = "RecvAck",
[P_WRITE_ACK] = "WriteAck",
[P_RS_WRITE_ACK] = "RSWriteAck",
[P_SUPERSEDED] = "Superseded",
[P_NEG_ACK] = "NegAck",
[P_NEG_DREPLY] = "NegDReply",
[P_NEG_RS_DREPLY] = "NegRSDReply",
[P_BARRIER_ACK] = "BarrierAck",
[P_STATE_CHG_REPLY] = "StateChgReply",
[P_OV_REQUEST] = "OVRequest",
[P_OV_REPLY] = "OVReply",
[P_OV_RESULT] = "OVResult",
[P_CSUM_RS_REQUEST] = "CsumRSRequest",
[P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
[P_SYNC_PARAM89] = "SyncParam89",
[P_COMPRESSED_BITMAP] = "CBitmap",
[P_DELAY_PROBE] = "DelayProbe",
[P_OUT_OF_SYNC] = "OutOfSync",
[P_RS_CANCEL] = "RSCancel",
[P_CONN_ST_CHG_REQ] = "conn_st_chg_req",
[P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply",
[P_PROTOCOL_UPDATE] = "protocol_update",
[P_TRIM] = "Trim",
[P_RS_THIN_REQ] = "rs_thin_req",
[P_RS_DEALLOCATED] = "rs_deallocated",
[P_WSAME] = "WriteSame",
[P_ZEROES] = "Zeroes",
/* enum drbd_packet, but not commands - obsoleted flags:
* P_MAY_IGNORE
* P_MAX_OPT_CMD
*/
};
/* too big for the array: 0xfffX */
if (cmd == P_INITIAL_META)
return "InitialMeta";
if (cmd == P_INITIAL_DATA)
return "InitialData";
if (cmd == P_CONNECTION_FEATURES)
return "ConnectionFeatures";
if (cmd >= ARRAY_SIZE(cmdnames))
return "Unknown";
return cmdnames[cmd];
}
/**
* drbd_wait_misc - wait for a request to make progress
* @device: device associated with the request
* @i: the struct drbd_interval embedded in struct drbd_request or
* struct drbd_peer_request
*/
int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
{
struct net_conf *nc;
DEFINE_WAIT(wait);
long timeout;
rcu_read_lock();
nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
if (!nc) {
rcu_read_unlock();
return -ETIMEDOUT;
}
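/* nc->timeout is presumably configured in tenths of a second, so
 * timeout * HZ / 10 converts it to jiffies, scaled by ko_count. */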
timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
rcu_read_unlock();
/* Indicate to wake up device->misc_wait on progress. */
i->waiting = true;
prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
spin_unlock_irq(&device->resource->req_lock);
timeout = schedule_timeout(timeout);
finish_wait(&device->misc_wait, &wait);
spin_lock_irq(&device->resource->req_lock);
if (!timeout || device->state.conn < C_CONNECTED)
return -ETIMEDOUT;
if (signal_pending(current))
return -ERESTARTSYS;
return 0;
}
void lock_all_resources(void)
{
struct drbd_resource *resource;
int __maybe_unused i = 0;
mutex_lock(&resources_mutex);
local_irq_disable();
for_each_resource(resource, &drbd_resources)
spin_lock_nested(&resource->req_lock, i++);
}
void unlock_all_resources(void)
{
struct drbd_resource *resource;
for_each_resource(resource, &drbd_resources)
spin_unlock(&resource->req_lock);
local_irq_enable();
mutex_unlock(&resources_mutex);
}
#ifdef CONFIG_DRBD_FAULT_INJECTION
/* Fault insertion support including random number generator shamelessly
* stolen from kernel/rcutorture.c */
struct fault_random_state {
unsigned long state;
unsigned long count;
};
#define FAULT_RANDOM_MULT 39916801 /* prime */
#define FAULT_RANDOM_ADD 479001701 /* prime */
#define FAULT_RANDOM_REFRESH 10000
/*
* Crude but fast random-number generator. Uses a linear congruential
* generator, with occasional help from get_random_bytes().
*/
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
{
long refresh;
if (!rsp->count--) {
get_random_bytes(&refresh, sizeof(refresh));
rsp->state += refresh;
rsp->count = FAULT_RANDOM_REFRESH;
}
rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
return swahw32(rsp->state);
}
static char *
_drbd_fault_str(unsigned int type) {
static char *_faults[] = {
[DRBD_FAULT_MD_WR] = "Meta-data write",
[DRBD_FAULT_MD_RD] = "Meta-data read",
[DRBD_FAULT_RS_WR] = "Resync write",
[DRBD_FAULT_RS_RD] = "Resync read",
[DRBD_FAULT_DT_WR] = "Data write",
[DRBD_FAULT_DT_RD] = "Data read",
[DRBD_FAULT_DT_RA] = "Data read ahead",
[DRBD_FAULT_BM_ALLOC] = "BM allocation",
[DRBD_FAULT_AL_EE] = "EE allocation",
[DRBD_FAULT_RECEIVE] = "receive data corruption",
};
return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
}
unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type)
{
static struct fault_random_state rrs = {0, 0};
unsigned int ret = (
(drbd_fault_devs == 0 ||
((1 << device_to_minor(device)) & drbd_fault_devs) != 0) &&
(((_drbd_fault_random(&rrs) % 100) + 1) <= drbd_fault_rate));
if (ret) {
drbd_fault_count++;
if (drbd_ratelimit())
drbd_warn(device, "***Simulating %s failure\n",
_drbd_fault_str(type));
}
return ret;
}
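/*
 * In the expression above, drbd_fault_devs acts as a bitmask of minor
 * numbers to target (0 means "all devices") and drbd_fault_rate as a
 * percentage probability per candidate IO.
 */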
#endif
module_init(drbd_init)
module_exit(drbd_cleanup)
EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);
| linux-master | drivers/block/drbd/drbd_main.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
drbd.h
This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
Copyright (C) 2003-2008, Philipp Reisner <[email protected]>.
Copyright (C) 2003-2008, Lars Ellenberg <[email protected]>.
*/
#include <linux/drbd.h>
#include "drbd_strings.h"
static const char * const drbd_conn_s_names[] = {
[C_STANDALONE] = "StandAlone",
[C_DISCONNECTING] = "Disconnecting",
[C_UNCONNECTED] = "Unconnected",
[C_TIMEOUT] = "Timeout",
[C_BROKEN_PIPE] = "BrokenPipe",
[C_NETWORK_FAILURE] = "NetworkFailure",
[C_PROTOCOL_ERROR] = "ProtocolError",
[C_WF_CONNECTION] = "WFConnection",
[C_WF_REPORT_PARAMS] = "WFReportParams",
[C_TEAR_DOWN] = "TearDown",
[C_CONNECTED] = "Connected",
[C_STARTING_SYNC_S] = "StartingSyncS",
[C_STARTING_SYNC_T] = "StartingSyncT",
[C_WF_BITMAP_S] = "WFBitMapS",
[C_WF_BITMAP_T] = "WFBitMapT",
[C_WF_SYNC_UUID] = "WFSyncUUID",
[C_SYNC_SOURCE] = "SyncSource",
[C_SYNC_TARGET] = "SyncTarget",
[C_PAUSED_SYNC_S] = "PausedSyncS",
[C_PAUSED_SYNC_T] = "PausedSyncT",
[C_VERIFY_S] = "VerifyS",
[C_VERIFY_T] = "VerifyT",
[C_AHEAD] = "Ahead",
[C_BEHIND] = "Behind",
};
static const char * const drbd_role_s_names[] = {
[R_PRIMARY] = "Primary",
[R_SECONDARY] = "Secondary",
[R_UNKNOWN] = "Unknown"
};
static const char * const drbd_disk_s_names[] = {
[D_DISKLESS] = "Diskless",
[D_ATTACHING] = "Attaching",
[D_FAILED] = "Failed",
[D_NEGOTIATING] = "Negotiating",
[D_INCONSISTENT] = "Inconsistent",
[D_OUTDATED] = "Outdated",
[D_UNKNOWN] = "DUnknown",
[D_CONSISTENT] = "Consistent",
[D_UP_TO_DATE] = "UpToDate",
};
static const char * const drbd_state_sw_errors[] = {
[-SS_TWO_PRIMARIES] = "Multiple primaries not allowed by config",
[-SS_NO_UP_TO_DATE_DISK] = "Need access to UpToDate data",
[-SS_NO_LOCAL_DISK] = "Can not resync without local disk",
[-SS_NO_REMOTE_DISK] = "Can not resync without remote disk",
[-SS_CONNECTED_OUTDATES] = "Refusing to be Outdated while Connected",
[-SS_PRIMARY_NOP] = "Refusing to be Primary while peer is not outdated",
[-SS_RESYNC_RUNNING] = "Can not start OV/resync since it is already active",
[-SS_ALREADY_STANDALONE] = "Can not disconnect a StandAlone device",
[-SS_CW_FAILED_BY_PEER] = "State change was refused by peer node",
[-SS_IS_DISKLESS] = "Device is diskless, the requested operation requires a disk",
[-SS_DEVICE_IN_USE] = "Device is held open by someone",
[-SS_NO_NET_CONFIG] = "Have no net/connection configuration",
[-SS_NO_VERIFY_ALG] = "Need a verify algorithm to start online verify",
[-SS_NEED_CONNECTION] = "Need a connection to start verify or resync",
[-SS_NOT_SUPPORTED] = "Peer does not support protocol",
[-SS_LOWER_THAN_OUTDATED] = "Disk state is lower than outdated",
[-SS_IN_TRANSIENT_STATE] = "In transient state, retry after next state change",
[-SS_CONCURRENT_ST_CHG] = "Concurrent state changes detected and aborted",
[-SS_OUTDATE_WO_CONN] = "Need a connection for a graceful disconnect/outdate peer",
[-SS_O_VOL_PEER_PRI] = "Other vol primary on peer not allowed by config",
};
const char *drbd_conn_str(enum drbd_conns s)
{
/* enums are unsigned... */
return s > C_BEHIND ? "TOO_LARGE" : drbd_conn_s_names[s];
}
const char *drbd_role_str(enum drbd_role s)
{
return s > R_SECONDARY ? "TOO_LARGE" : drbd_role_s_names[s];
}
const char *drbd_disk_str(enum drbd_disk_state s)
{
return s > D_UP_TO_DATE ? "TOO_LARGE" : drbd_disk_s_names[s];
}
const char *drbd_set_st_err_str(enum drbd_state_rv err)
{
return err <= SS_AFTER_LAST_ERROR ? "TOO_SMALL" :
err > SS_TWO_PRIMARIES ? "TOO_LARGE"
: drbd_state_sw_errors[-err];
}
| linux-master | drivers/block/drbd/drbd_strings.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
drbd_worker.c
This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
Copyright (C) 1999-2008, Philipp Reisner <[email protected]>.
Copyright (C) 2002-2008, Lars Ellenberg <[email protected]>.
*/
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
static int make_ov_request(struct drbd_peer_device *, int);
static int make_resync_request(struct drbd_peer_device *, int);
/* endio handlers:
* drbd_md_endio (defined here)
* drbd_request_endio (defined here)
* drbd_peer_request_endio (defined here)
* drbd_bm_endio (defined in drbd_bitmap.c)
*
* For all these callbacks, note the following:
* The callbacks will be called in irq context by the IDE drivers,
* and in Softirqs/Tasklets/BH context by the SCSI drivers.
* Try to get the locking right :)
*
*/
/* used for synchronous meta data and bitmap IO
* submitted by drbd_md_sync_page_io()
*/
void drbd_md_endio(struct bio *bio)
{
struct drbd_device *device;
device = bio->bi_private;
device->md_io.error = blk_status_to_errno(bio->bi_status);
/* special case: drbd_md_read() during drbd_adm_attach() */
if (device->ldev)
put_ldev(device);
bio_put(bio);
/* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
* to timeout on the lower level device, and eventually detach from it.
* If this io completion runs after that timeout expired, this
* drbd_md_put_buffer() may allow us to finally try and re-attach.
* During normal operation, this only puts that extra reference
* down to 1 again.
* Make sure we first drop the reference, and only then signal
* completion, or we may (in drbd_al_read_log()) cycle so fast into the
* next drbd_md_sync_page_io(), that we trigger the
* ASSERT(atomic_read(&device->md_io_in_use) == 1) there.
*/
drbd_md_put_buffer(device);
device->md_io.done = 1;
wake_up(&device->misc_wait);
}
/* reads on behalf of the partner,
* "submitted" by the receiver
*/
static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
unsigned long flags = 0;
struct drbd_peer_device *peer_device = peer_req->peer_device;
struct drbd_device *device = peer_device->device;
spin_lock_irqsave(&device->resource->req_lock, flags);
device->read_cnt += peer_req->i.size >> 9;
list_del(&peer_req->w.list);
if (list_empty(&device->read_ee))
wake_up(&device->ee_wait);
if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
__drbd_chk_io_error(device, DRBD_READ_ERROR);
spin_unlock_irqrestore(&device->resource->req_lock, flags);
drbd_queue_work(&peer_device->connection->sender_work, &peer_req->w);
put_ldev(device);
}
/* writes on behalf of the partner, or resync writes,
* "submitted" by the receiver, final stage. */
void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
unsigned long flags = 0;
struct drbd_peer_device *peer_device = peer_req->peer_device;
struct drbd_device *device = peer_device->device;
struct drbd_connection *connection = peer_device->connection;
struct drbd_interval i;
int do_wake;
u64 block_id;
int do_al_complete_io;
/* after we moved peer_req to done_ee,
* we may no longer access it,
* it may be freed/reused already!
* (as soon as we release the req_lock) */
i = peer_req->i;
do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
block_id = peer_req->block_id;
peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
if (peer_req->flags & EE_WAS_ERROR) {
/* In protocol != C, we usually do not send write acks.
* In case of a write error, send the neg ack anyways. */
if (!__test_and_set_bit(__EE_SEND_WRITE_ACK, &peer_req->flags))
inc_unacked(device);
drbd_set_out_of_sync(peer_device, peer_req->i.sector, peer_req->i.size);
}
spin_lock_irqsave(&device->resource->req_lock, flags);
device->writ_cnt += peer_req->i.size >> 9;
list_move_tail(&peer_req->w.list, &device->done_ee);
/*
* Do not remove from the write_requests tree here: we did not send the
* Ack yet and did not wake possibly waiting conflicting requests.
* Removed from the tree from "drbd_process_done_ee" within the
* appropriate dw.cb (e_end_block/e_end_resync_block) or from
* _drbd_clear_done_ee.
*/
do_wake = list_empty(block_id == ID_SYNCER ? &device->sync_ee : &device->active_ee);
/* FIXME do we want to detach for failed REQ_OP_DISCARD?
* ((peer_req->flags & (EE_WAS_ERROR|EE_TRIM)) == EE_WAS_ERROR) */
if (peer_req->flags & EE_WAS_ERROR)
__drbd_chk_io_error(device, DRBD_WRITE_ERROR);
if (connection->cstate >= C_WF_REPORT_PARAMS) {
kref_get(&device->kref); /* put is in drbd_send_acks_wf() */
if (!queue_work(connection->ack_sender, &peer_device->send_acks_work))
kref_put(&device->kref, drbd_destroy_device);
}
spin_unlock_irqrestore(&device->resource->req_lock, flags);
if (block_id == ID_SYNCER)
drbd_rs_complete_io(device, i.sector);
if (do_wake)
wake_up(&device->ee_wait);
if (do_al_complete_io)
drbd_al_complete_io(device, &i);
put_ldev(device);
}
/* writes on behalf of the partner, or resync writes,
* "submitted" by the receiver.
*/
void drbd_peer_request_endio(struct bio *bio)
{
struct drbd_peer_request *peer_req = bio->bi_private;
struct drbd_device *device = peer_req->peer_device->device;
bool is_write = bio_data_dir(bio) == WRITE;
bool is_discard = bio_op(bio) == REQ_OP_WRITE_ZEROES ||
bio_op(bio) == REQ_OP_DISCARD;
if (bio->bi_status && drbd_ratelimit())
drbd_warn(device, "%s: error=%d s=%llus\n",
is_write ? (is_discard ? "discard" : "write")
: "read", bio->bi_status,
(unsigned long long)peer_req->i.sector);
if (bio->bi_status)
set_bit(__EE_WAS_ERROR, &peer_req->flags);
bio_put(bio); /* no need for the bio anymore */
if (atomic_dec_and_test(&peer_req->pending_bios)) {
if (is_write)
drbd_endio_write_sec_final(peer_req);
else
drbd_endio_read_sec_final(peer_req);
}
}
static void
drbd_panic_after_delayed_completion_of_aborted_request(struct drbd_device *device)
{
panic("drbd%u %s/%u potential random memory corruption caused by delayed completion of aborted local request\n",
device->minor, device->resource->name, device->vnr);
}
/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
*/
void drbd_request_endio(struct bio *bio)
{
unsigned long flags;
struct drbd_request *req = bio->bi_private;
struct drbd_device *device = req->device;
struct bio_and_error m;
enum drbd_req_event what;
/* If this request was aborted locally before,
* but now was completed "successfully",
* chances are that this caused arbitrary data corruption.
*
* "aborting" requests, or force-detaching the disk, is intended for
* completely blocked/hung local backing devices which do no longer
* complete requests at all, not even do error completions. In this
* situation, usually a hard-reset and failover is the only way out.
*
* By "aborting", basically faking a local error-completion,
 * we allow for a more graceful switchover by cleanly migrating services.
* Still the affected node has to be rebooted "soon".
*
* By completing these requests, we allow the upper layers to re-use
* the associated data pages.
*
* If later the local backing device "recovers", and now DMAs some data
* from disk into the original request pages, in the best case it will
* just put random data into unused pages; but typically it will corrupt
* meanwhile completely unrelated data, causing all sorts of damage.
*
* Which means delayed successful completion,
* especially for READ requests,
* is a reason to panic().
*
* We assume that a delayed *error* completion is OK,
* though we still will complain noisily about it.
*/
if (unlikely(req->rq_state & RQ_LOCAL_ABORTED)) {
if (drbd_ratelimit())
drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");
if (!bio->bi_status)
drbd_panic_after_delayed_completion_of_aborted_request(device);
}
/* to avoid recursion in __req_mod */
if (unlikely(bio->bi_status)) {
switch (bio_op(bio)) {
case REQ_OP_WRITE_ZEROES:
case REQ_OP_DISCARD:
if (bio->bi_status == BLK_STS_NOTSUPP)
what = DISCARD_COMPLETED_NOTSUPP;
else
what = DISCARD_COMPLETED_WITH_ERROR;
break;
case REQ_OP_READ:
if (bio->bi_opf & REQ_RAHEAD)
what = READ_AHEAD_COMPLETED_WITH_ERROR;
else
what = READ_COMPLETED_WITH_ERROR;
break;
default:
what = WRITE_COMPLETED_WITH_ERROR;
break;
}
} else {
what = COMPLETED_OK;
}
req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
bio_put(bio);
/* not req_mod(), we need irqsave here! */
spin_lock_irqsave(&device->resource->req_lock, flags);
__req_mod(req, what, NULL, &m);
spin_unlock_irqrestore(&device->resource->req_lock, flags);
put_ldev(device);
if (m.bio)
complete_master_bio(device, &m);
}
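/* Compute a digest over the page chain of a peer request.  All pages
 * but the last are hashed in full; the last page may be only partially
 * used, depending on peer_req->i.size. */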
void drbd_csum_ee(struct crypto_shash *tfm, struct drbd_peer_request *peer_req, void *digest)
{
SHASH_DESC_ON_STACK(desc, tfm);
struct page *page = peer_req->pages;
struct page *tmp;
unsigned len;
void *src;
desc->tfm = tfm;
crypto_shash_init(desc);
src = kmap_atomic(page);
while ((tmp = page_chain_next(page))) {
/* all but the last page will be fully used */
crypto_shash_update(desc, src, PAGE_SIZE);
kunmap_atomic(src);
page = tmp;
src = kmap_atomic(page);
}
/* and now the last, possibly only partially used page */
len = peer_req->i.size & (PAGE_SIZE - 1);
crypto_shash_update(desc, src, len ?: PAGE_SIZE);
kunmap_atomic(src);
crypto_shash_final(desc, digest);
shash_desc_zero(desc);
}
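/* Same as drbd_csum_ee(), but hash the data of a bio, segment by segment. */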
void drbd_csum_bio(struct crypto_shash *tfm, struct bio *bio, void *digest)
{
SHASH_DESC_ON_STACK(desc, tfm);
struct bio_vec bvec;
struct bvec_iter iter;
desc->tfm = tfm;
crypto_shash_init(desc);
bio_for_each_segment(bvec, bio, iter) {
u8 *src;
src = bvec_kmap_local(&bvec);
crypto_shash_update(desc, src, bvec.bv_len);
kunmap_local(src);
}
crypto_shash_final(desc, digest);
shash_desc_zero(desc);
}
/* MAYBE merge common code with w_e_end_ov_req */
static int w_e_send_csum(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
struct drbd_peer_device *peer_device = peer_req->peer_device;
struct drbd_device *device = peer_device->device;
int digest_size;
void *digest;
int err = 0;
if (unlikely(cancel))
goto out;
if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
goto out;
digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
sector_t sector = peer_req->i.sector;
unsigned int size = peer_req->i.size;
drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest);
/* Free peer_req and pages before send.
* In case we block on congestion, we could otherwise run into
* some distributed deadlock, if the other side blocks on
* congestion as well, because our receiver blocks in
* drbd_alloc_pages due to pp_in_use > max_buffers. */
drbd_free_peer_req(device, peer_req);
peer_req = NULL;
inc_rs_pending(peer_device);
err = drbd_send_drequest_csum(peer_device, sector, size,
digest, digest_size,
P_CSUM_RS_REQUEST);
kfree(digest);
} else {
drbd_err(device, "kmalloc() of digest failed.\n");
err = -ENOMEM;
}
out:
if (peer_req)
drbd_free_peer_req(device, peer_req);
if (unlikely(err))
drbd_err(device, "drbd_send_drequest(..., csum) failed\n");
return err;
}
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, int size)
{
struct drbd_device *device = peer_device->device;
struct drbd_peer_request *peer_req;
if (!get_ldev(device))
return -EIO;
/* GFP_TRY, because if there is no memory available right now, this may
* be rescheduled for later. It is "only" background resync, after all. */
peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector,
size, size, GFP_TRY);
if (!peer_req)
goto defer;
peer_req->w.cb = w_e_send_csum;
peer_req->opf = REQ_OP_READ;
spin_lock_irq(&device->resource->req_lock);
list_add_tail(&peer_req->w.list, &device->read_ee);
spin_unlock_irq(&device->resource->req_lock);
atomic_add(size >> 9, &device->rs_sect_ev);
if (drbd_submit_peer_request(peer_req) == 0)
return 0;
/* If it failed because of ENOMEM, retry should help. If it failed
* because bio_add_page failed (probably broken lower level driver),
* retry may or may not help.
* If it does not, you may need to force disconnect. */
spin_lock_irq(&device->resource->req_lock);
list_del(&peer_req->w.list);
spin_unlock_irq(&device->resource->req_lock);
drbd_free_peer_req(device, peer_req);
defer:
put_ldev(device);
return -EAGAIN;
}
int w_resync_timer(struct drbd_work *w, int cancel)
{
struct drbd_device *device =
container_of(w, struct drbd_device, resync_work);
switch (device->state.conn) {
case C_VERIFY_S:
make_ov_request(first_peer_device(device), cancel);
break;
case C_SYNC_TARGET:
make_resync_request(first_peer_device(device), cancel);
break;
}
return 0;
}
void resync_timer_fn(struct timer_list *t)
{
struct drbd_device *device = from_timer(device, t, resync_timer);
drbd_queue_work_if_unqueued(
&first_peer_device(device)->connection->sender_work,
&device->resync_work);
}
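/* Ring buffer helpers for the resync controller's "plan ahead" fifo:
 * fifo_set() fills every slot with the same value, fifo_push() replaces
 * the oldest entry and returns its previous value, fifo_add_val() adds
 * a value to all entries. */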
static void fifo_set(struct fifo_buffer *fb, int value)
{
int i;
for (i = 0; i < fb->size; i++)
fb->values[i] = value;
}
static int fifo_push(struct fifo_buffer *fb, int value)
{
int ov;
ov = fb->values[fb->head_index];
fb->values[fb->head_index++] = value;
if (fb->head_index >= fb->size)
fb->head_index = 0;
return ov;
}
static void fifo_add_val(struct fifo_buffer *fb, int value)
{
int i;
for (i = 0; i < fb->size; i++)
fb->values[i] += value;
}
struct fifo_buffer *fifo_alloc(unsigned int fifo_size)
{
struct fifo_buffer *fb;
fb = kzalloc(struct_size(fb, values, fifo_size), GFP_NOIO);
if (!fb)
return NULL;
fb->head_index = 0;
fb->size = fifo_size;
fb->total = 0;
return fb;
}
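/* Dynamic resync rate controller.  @sect_in is the number of sectors
 * the peer acknowledged since the last invocation.  Compute how many
 * sectors to request next, aiming to keep c_fill_target sectors in
 * flight (or, if that is not set, roughly a c_delay_target round trip),
 * capped at c_max_rate.  Corrections are smoothed over the next
 * "steps" invocations via the plan fifo. */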
static int drbd_rs_controller(struct drbd_peer_device *peer_device, unsigned int sect_in)
{
struct drbd_device *device = peer_device->device;
struct disk_conf *dc;
unsigned int want; /* The number of sectors we want in-flight */
int req_sect; /* Number of sectors to request in this turn */
int correction; /* Number of sectors more we need in-flight */
int cps; /* correction per invocation of drbd_rs_controller() */
int steps; /* Number of time steps to plan ahead */
int curr_corr;
int max_sect;
struct fifo_buffer *plan;
dc = rcu_dereference(device->ldev->disk_conf);
plan = rcu_dereference(device->rs_plan_s);
steps = plan->size; /* (dc->c_plan_ahead * 10 * SLEEP_TIME) / HZ; */
if (device->rs_in_flight + sect_in == 0) { /* At start of resync */
want = ((dc->resync_rate * 2 * SLEEP_TIME) / HZ) * steps;
} else { /* normal path */
want = dc->c_fill_target ? dc->c_fill_target :
sect_in * dc->c_delay_target * HZ / (SLEEP_TIME * 10);
}
correction = want - device->rs_in_flight - plan->total;
/* Plan ahead */
cps = correction / steps;
fifo_add_val(plan, cps);
plan->total += cps * steps;
/* What we do in this step */
curr_corr = fifo_push(plan, 0);
plan->total -= curr_corr;
req_sect = sect_in + curr_corr;
if (req_sect < 0)
req_sect = 0;
max_sect = (dc->c_max_rate * 2 * SLEEP_TIME) / HZ;
if (req_sect > max_sect)
req_sect = max_sect;
/*
drbd_warn(device, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
sect_in, device->rs_in_flight, want, correction,
steps, cps, device->rs_planed, curr_corr, req_sect);
*/
return req_sect;
}
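/* Number of resync requests (in units of BM_BLOCK_SIZE) to issue in
 * this turn: taken from the controller above if a plan is configured,
 * otherwise derived from the static resync_rate; the in-flight cap is
 * applied below. */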
static int drbd_rs_number_requests(struct drbd_peer_device *peer_device)
{
struct drbd_device *device = peer_device->device;
unsigned int sect_in; /* Number of sectors that came in since the last turn */
int number, mxb;
sect_in = atomic_xchg(&device->rs_sect_in, 0);
device->rs_in_flight -= sect_in;
rcu_read_lock();
mxb = drbd_get_max_buffers(device) / 2;
if (rcu_dereference(device->rs_plan_s)->size) {
number = drbd_rs_controller(peer_device, sect_in) >> (BM_BLOCK_SHIFT - 9);
device->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
} else {
device->c_sync_rate = rcu_dereference(device->ldev->disk_conf)->resync_rate;
number = SLEEP_TIME * device->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
}
rcu_read_unlock();
/* Don't have more than "max-buffers"/2 in-flight.
* Otherwise we may cause the remote site to stall on drbd_alloc_pages(),
* potentially causing a distributed deadlock on congestion during
* online-verify or (checksum-based) resync, if max-buffers,
* socket buffer sizes and resync rate settings are mis-configured. */
/* note that "number" is in units of "BM_BLOCK_SIZE" (which is 4k),
* mxb (as used here, and in drbd_alloc_pages on the peer) is
* "number of pages" (typically also 4k),
* but "rs_in_flight" is in "sectors" (512 Byte). */
if (mxb - device->rs_in_flight/8 < number)
number = mxb - device->rs_in_flight/8;
return number;
}
static int make_resync_request(struct drbd_peer_device *const peer_device, int cancel)
{
struct drbd_device *const device = peer_device->device;
struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
unsigned long bit;
sector_t sector;
const sector_t capacity = get_capacity(device->vdisk);
int max_bio_size;
int number, rollback_i, size;
int align, requeue = 0;
int i = 0;
int discard_granularity = 0;
if (unlikely(cancel))
return 0;
if (device->rs_total == 0) {
/* empty resync? */
drbd_resync_finished(peer_device);
return 0;
}
if (!get_ldev(device)) {
/* Since we only need to access device->rsync a
get_ldev_if_state(device,D_FAILED) would be sufficient, but
to continue resync with a broken disk makes no sense at
all */
drbd_err(device, "Disk broke down during resync!\n");
return 0;
}
if (connection->agreed_features & DRBD_FF_THIN_RESYNC) {
rcu_read_lock();
discard_granularity = rcu_dereference(device->ldev->disk_conf)->rs_discard_granularity;
rcu_read_unlock();
}
max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9;
number = drbd_rs_number_requests(peer_device);
if (number <= 0)
goto requeue;
for (i = 0; i < number; i++) {
/* Stop generating RS requests when half of the send buffer is filled,
* but notify TCP that we'd like to have more space. */
mutex_lock(&connection->data.mutex);
if (connection->data.socket) {
struct sock *sk = connection->data.socket->sk;
int queued = sk->sk_wmem_queued;
int sndbuf = sk->sk_sndbuf;
if (queued > sndbuf / 2) {
requeue = 1;
if (sk->sk_socket)
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
}
} else
requeue = 1;
mutex_unlock(&connection->data.mutex);
if (requeue)
goto requeue;
next_sector:
size = BM_BLOCK_SIZE;
bit = drbd_bm_find_next(device, device->bm_resync_fo);
if (bit == DRBD_END_OF_BITMAP) {
device->bm_resync_fo = drbd_bm_bits(device);
put_ldev(device);
return 0;
}
sector = BM_BIT_TO_SECT(bit);
if (drbd_try_rs_begin_io(peer_device, sector)) {
device->bm_resync_fo = bit;
goto requeue;
}
device->bm_resync_fo = bit + 1;
if (unlikely(drbd_bm_test_bit(device, bit) == 0)) {
drbd_rs_complete_io(device, sector);
goto next_sector;
}
#if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE
/* try to find some adjacent bits.
* we stop if we have already the maximum req size.
*
* Additionally always align bigger requests, in order to
* be prepared for all stripe sizes of software RAIDs.
*/
align = 1;
rollback_i = i;
while (i < number) {
if (size + BM_BLOCK_SIZE > max_bio_size)
break;
/* Be always aligned */
if (sector & ((1<<(align+3))-1))
break;
if (discard_granularity && size == discard_granularity)
break;
/* do not cross extent boundaries */
if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
break;
/* now, is it actually dirty, after all?
* caution, drbd_bm_test_bit is tri-state for some
* obscure reason; ( b == 0 ) would get the out-of-band
* only accidentally right because of the "oddly sized"
* adjustment below */
if (drbd_bm_test_bit(device, bit+1) != 1)
break;
bit++;
size += BM_BLOCK_SIZE;
if ((BM_BLOCK_SIZE << align) <= size)
align++;
i++;
}
/* if we merged some,
* reset the offset to start the next drbd_bm_find_next from */
if (size > BM_BLOCK_SIZE)
device->bm_resync_fo = bit + 1;
#endif
/* adjust very last sectors, in case we are oddly sized */
if (sector + (size>>9) > capacity)
size = (capacity-sector)<<9;
if (device->use_csums) {
switch (read_for_csum(peer_device, sector, size)) {
case -EIO: /* Disk failure */
put_ldev(device);
return -EIO;
case -EAGAIN: /* allocation failed, or ldev busy */
drbd_rs_complete_io(device, sector);
device->bm_resync_fo = BM_SECT_TO_BIT(sector);
i = rollback_i;
goto requeue;
case 0:
/* everything ok */
break;
default:
BUG();
}
} else {
int err;
inc_rs_pending(peer_device);
err = drbd_send_drequest(peer_device,
size == discard_granularity ? P_RS_THIN_REQ : P_RS_DATA_REQUEST,
sector, size, ID_SYNCER);
if (err) {
drbd_err(device, "drbd_send_drequest() failed, aborting...\n");
dec_rs_pending(peer_device);
put_ldev(device);
return err;
}
}
}
if (device->bm_resync_fo >= drbd_bm_bits(device)) {
/* last syncer _request_ was sent,
* but the P_RS_DATA_REPLY not yet received. sync will end (and
* next sync group will resume), as soon as we receive the last
* resync data block, and the last bit is cleared.
* until then resync "work" is "inactive" ...
*/
put_ldev(device);
return 0;
}
requeue:
device->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
mod_timer(&device->resync_timer, jiffies + SLEEP_TIME);
put_ldev(device);
return 0;
}
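/* Issue the next batch of online verify requests, starting at
 * device->ov_position and honoring an optional stop sector. */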
static int make_ov_request(struct drbd_peer_device *peer_device, int cancel)
{
struct drbd_device *device = peer_device->device;
int number, i, size;
sector_t sector;
const sector_t capacity = get_capacity(device->vdisk);
bool stop_sector_reached = false;
if (unlikely(cancel))
return 1;
number = drbd_rs_number_requests(peer_device);
sector = device->ov_position;
for (i = 0; i < number; i++) {
if (sector >= capacity)
return 1;
/* We check for "finished" only in the reply path:
* w_e_end_ov_reply().
* We need to send at least one request out. */
stop_sector_reached = i > 0
&& verify_can_do_stop_sector(device)
&& sector >= device->ov_stop_sector;
if (stop_sector_reached)
break;
size = BM_BLOCK_SIZE;
if (drbd_try_rs_begin_io(peer_device, sector)) {
device->ov_position = sector;
goto requeue;
}
if (sector + (size>>9) > capacity)
size = (capacity-sector)<<9;
inc_rs_pending(peer_device);
if (drbd_send_ov_request(first_peer_device(device), sector, size)) {
dec_rs_pending(peer_device);
return 0;
}
sector += BM_SECT_PER_BIT;
}
device->ov_position = sector;
requeue:
device->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
if (i == 0 || !stop_sector_reached)
mod_timer(&device->resync_timer, jiffies + SLEEP_TIME);
return 1;
}
int w_ov_finished(struct drbd_work *w, int cancel)
{
struct drbd_device_work *dw =
container_of(w, struct drbd_device_work, w);
struct drbd_device *device = dw->device;
kfree(dw);
ov_out_of_sync_print(first_peer_device(device));
drbd_resync_finished(first_peer_device(device));
return 0;
}
static int w_resync_finished(struct drbd_work *w, int cancel)
{
struct drbd_device_work *dw =
container_of(w, struct drbd_device_work, w);
struct drbd_device *device = dw->device;
kfree(dw);
drbd_resync_finished(first_peer_device(device));
return 0;
}
static void ping_peer(struct drbd_device *device)
{
struct drbd_connection *connection = first_peer_device(device)->connection;
clear_bit(GOT_PING_ACK, &connection->flags);
request_ping(connection);
wait_event(connection->ping_wait,
test_bit(GOT_PING_ACK, &connection->flags) || device->state.conn < C_CONNECTED);
}
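/* Called when a resync or online verify run has finished: print
 * statistics, update connection/disk states and UUIDs accordingly, and
 * possibly trigger the out-of-sync, after-resync-target or
 * unfence-peer helpers. */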
int drbd_resync_finished(struct drbd_peer_device *peer_device)
{
struct drbd_device *device = peer_device->device;
struct drbd_connection *connection = peer_device->connection;
unsigned long db, dt, dbdt;
unsigned long n_oos;
union drbd_state os, ns;
struct drbd_device_work *dw;
char *khelper_cmd = NULL;
int verify_done = 0;
/* Remove all elements from the resync LRU. Since future actions
* might set bits in the (main) bitmap, then the entries in the
* resync LRU would be wrong. */
if (drbd_rs_del_all(device)) {
/* In case this is not possible now, most probably because
* there are P_RS_DATA_REPLY Packets lingering on the worker's
* queue (or even the read operations for those packets
* is not finished by now). Retry in 100ms. */
schedule_timeout_interruptible(HZ / 10);
dw = kmalloc(sizeof(struct drbd_device_work), GFP_ATOMIC);
if (dw) {
dw->w.cb = w_resync_finished;
dw->device = device;
drbd_queue_work(&connection->sender_work, &dw->w);
return 1;
}
drbd_err(device, "Warn failed to drbd_rs_del_all() and to kmalloc(dw).\n");
}
dt = (jiffies - device->rs_start - device->rs_paused) / HZ;
if (dt <= 0)
dt = 1;
db = device->rs_total;
/* adjust for verify start and stop sectors, respective reached position */
if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
db -= device->ov_left;
dbdt = Bit2KB(db/dt);
device->rs_paused /= HZ;
if (!get_ldev(device))
goto out;
ping_peer(device);
spin_lock_irq(&device->resource->req_lock);
os = drbd_read_state(device);
verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
/* This protects us against multiple calls (that can happen in the presence
of application IO), and against connectivity loss just before we arrive here. */
if (os.conn <= C_CONNECTED)
goto out_unlock;
ns = os;
ns.conn = C_CONNECTED;
drbd_info(device, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
verify_done ? "Online verify" : "Resync",
dt + device->rs_paused, device->rs_paused, dbdt);
n_oos = drbd_bm_total_weight(device);
if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
if (n_oos) {
drbd_alert(device, "Online verify found %lu %dk block out of sync!\n",
n_oos, Bit2KB(1));
khelper_cmd = "out-of-sync";
}
} else {
D_ASSERT(device, (n_oos - device->rs_failed) == 0);
if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
khelper_cmd = "after-resync-target";
if (device->use_csums && device->rs_total) {
const unsigned long s = device->rs_same_csum;
const unsigned long t = device->rs_total;
const int ratio =
(t == 0) ? 0 :
(t < 100000) ? ((s*100)/t) : (s/(t/100));
drbd_info(device, "%u %% had equal checksums, eliminated: %luK; "
"transferred %luK total %luK\n",
ratio,
Bit2KB(device->rs_same_csum),
Bit2KB(device->rs_total - device->rs_same_csum),
Bit2KB(device->rs_total));
}
}
if (device->rs_failed) {
drbd_info(device, " %lu failed blocks\n", device->rs_failed);
if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
ns.disk = D_INCONSISTENT;
ns.pdsk = D_UP_TO_DATE;
} else {
ns.disk = D_UP_TO_DATE;
ns.pdsk = D_INCONSISTENT;
}
} else {
ns.disk = D_UP_TO_DATE;
ns.pdsk = D_UP_TO_DATE;
if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
if (device->p_uuid) {
int i;
for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++)
_drbd_uuid_set(device, i, device->p_uuid[i]);
drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_CURRENT]);
_drbd_uuid_set(device, UI_CURRENT, device->p_uuid[UI_CURRENT]);
} else {
drbd_err(device, "device->p_uuid is NULL! BUG\n");
}
}
if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) {
/* for verify runs, we don't update uuids here,
* so there would be nothing to report. */
drbd_uuid_set_bm(device, 0UL);
drbd_print_uuids(device, "updated UUIDs");
if (device->p_uuid) {
/* Now the two UUID sets are equal, update what we
* know of the peer. */
int i;
for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
device->p_uuid[i] = device->ldev->md.uuid[i];
}
}
}
_drbd_set_state(device, ns, CS_VERBOSE, NULL);
out_unlock:
spin_unlock_irq(&device->resource->req_lock);
/* If we have been sync source, and have an effective fencing-policy,
* once *all* volumes are back in sync, call "unfence". */
if (os.conn == C_SYNC_SOURCE) {
enum drbd_disk_state disk_state = D_MASK;
enum drbd_disk_state pdsk_state = D_MASK;
enum drbd_fencing_p fp = FP_DONT_CARE;
rcu_read_lock();
fp = rcu_dereference(device->ldev->disk_conf)->fencing;
if (fp != FP_DONT_CARE) {
struct drbd_peer_device *peer_device;
int vnr;
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
disk_state = min_t(enum drbd_disk_state, disk_state, device->state.disk);
pdsk_state = min_t(enum drbd_disk_state, pdsk_state, device->state.pdsk);
}
}
rcu_read_unlock();
if (disk_state == D_UP_TO_DATE && pdsk_state == D_UP_TO_DATE)
conn_khelper(connection, "unfence-peer");
}
put_ldev(device);
out:
device->rs_total = 0;
device->rs_failed = 0;
device->rs_paused = 0;
/* reset start sector, if we reached end of device */
if (verify_done && device->ov_left == 0)
device->ov_start_sector = 0;
drbd_md_sync(device);
if (khelper_cmd)
drbd_khelper(device, khelper_cmd);
return 1;
}
/* helper: if sendpage() may still reference the pages of this peer request,
 * park it on the net_ee list instead of freeing it right away */
static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req)
{
if (drbd_peer_req_has_active_page(peer_req)) {
/* This might happen if sendpage() has not finished */
int i = PFN_UP(peer_req->i.size);
atomic_add(i, &device->pp_in_use_by_net);
atomic_sub(i, &device->pp_in_use);
spin_lock_irq(&device->resource->req_lock);
list_add_tail(&peer_req->w.list, &device->net_ee);
spin_unlock_irq(&device->resource->req_lock);
wake_up(&drbd_pp_wait);
} else
drbd_free_peer_req(device, peer_req);
}
/**
* w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
* @w: work object.
* @cancel: The connection will be closed anyways
*/
int w_e_end_data_req(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
struct drbd_peer_device *peer_device = peer_req->peer_device;
struct drbd_device *device = peer_device->device;
int err;
if (unlikely(cancel)) {
drbd_free_peer_req(device, peer_req);
dec_unacked(device);
return 0;
}
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
err = drbd_send_block(peer_device, P_DATA_REPLY, peer_req);
} else {
if (drbd_ratelimit())
drbd_err(device, "Sending NegDReply. sector=%llus.\n",
(unsigned long long)peer_req->i.sector);
err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req);
}
dec_unacked(device);
move_to_net_ee_or_free(device, peer_req);
if (unlikely(err))
drbd_err(device, "drbd_send_block() failed\n");
return err;
}
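/* Return true if the data of the peer request is all zeroes; used to
 * answer thin resync requests with P_RS_DEALLOCATED instead of sending
 * the actual data. */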
static bool all_zero(struct drbd_peer_request *peer_req)
{
struct page *page = peer_req->pages;
unsigned int len = peer_req->i.size;
page_chain_for_each(page) {
unsigned int l = min_t(unsigned int, len, PAGE_SIZE);
unsigned int i, words = l / sizeof(long);
unsigned long *d;
d = kmap_atomic(page);
for (i = 0; i < words; i++) {
if (d[i]) {
kunmap_atomic(d);
return false;
}
}
kunmap_atomic(d);
len -= l;
}
return true;
}
/**
* w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
* @w: work object.
* @cancel: The connection will be closed anyways
*/
int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
struct drbd_peer_device *peer_device = peer_req->peer_device;
struct drbd_device *device = peer_device->device;
int err;
if (unlikely(cancel)) {
drbd_free_peer_req(device, peer_req);
dec_unacked(device);
return 0;
}
if (get_ldev_if_state(device, D_FAILED)) {
drbd_rs_complete_io(device, peer_req->i.sector);
put_ldev(device);
}
if (device->state.conn == C_AHEAD) {
err = drbd_send_ack(peer_device, P_RS_CANCEL, peer_req);
} else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
if (likely(device->state.pdsk >= D_INCONSISTENT)) {
inc_rs_pending(peer_device);
if (peer_req->flags & EE_RS_THIN_REQ && all_zero(peer_req))
err = drbd_send_rs_deallocated(peer_device, peer_req);
else
err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req);
} else {
if (drbd_ratelimit())
drbd_err(device, "Not sending RSDataReply, "
"partner DISKLESS!\n");
err = 0;
}
} else {
if (drbd_ratelimit())
drbd_err(device, "Sending NegRSDReply. sector %llus.\n",
(unsigned long long)peer_req->i.sector);
err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req);
/* update resync data with failure */
drbd_rs_failed_io(peer_device, peer_req->i.sector, peer_req->i.size);
}
dec_unacked(device);
move_to_net_ee_or_free(device, peer_req);
if (unlikely(err))
drbd_err(device, "drbd_send_block() failed\n");
return err;
}
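/* Worker callback for a checksum based resync request: recompute the
 * digest over the locally read block and either confirm the block as
 * in sync (P_RS_IS_IN_SYNC) or send the full data (P_RS_DATA_REPLY). */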
int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
struct drbd_peer_device *peer_device = peer_req->peer_device;
struct drbd_device *device = peer_device->device;
struct digest_info *di;
int digest_size;
void *digest = NULL;
int err, eq = 0;
if (unlikely(cancel)) {
drbd_free_peer_req(device, peer_req);
dec_unacked(device);
return 0;
}
if (get_ldev(device)) {
drbd_rs_complete_io(device, peer_req->i.sector);
put_ldev(device);
}
di = peer_req->digest;
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
/* quick hack to try to avoid a race against reconfiguration.
* a real fix would be much more involved,
* introducing more locking mechanisms */
if (peer_device->connection->csums_tfm) {
digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm);
D_ASSERT(device, digest_size == di->digest_size);
digest = kmalloc(digest_size, GFP_NOIO);
}
if (digest) {
drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest);
eq = !memcmp(digest, di->digest, digest_size);
kfree(digest);
}
if (eq) {
drbd_set_in_sync(peer_device, peer_req->i.sector, peer_req->i.size);
/* rs_same_csums unit is BM_BLOCK_SIZE */
device->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
err = drbd_send_ack(peer_device, P_RS_IS_IN_SYNC, peer_req);
} else {
inc_rs_pending(peer_device);
peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
kfree(di);
err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req);
}
} else {
err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req);
if (drbd_ratelimit())
drbd_err(device, "Sending NegDReply. I guess it gets messy.\n");
}
dec_unacked(device);
move_to_net_ee_or_free(device, peer_req);
if (unlikely(err))
drbd_err(device, "drbd_send_block/ack() failed\n");
return err;
}
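/* Worker callback answering an online verify request: compute the
 * digest of the locally read block and send it to the peer in a
 * P_OV_REPLY packet. */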
int w_e_end_ov_req(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
struct drbd_peer_device *peer_device = peer_req->peer_device;
struct drbd_device *device = peer_device->device;
sector_t sector = peer_req->i.sector;
unsigned int size = peer_req->i.size;
int digest_size;
void *digest;
int err = 0;
if (unlikely(cancel))
goto out;
digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (!digest) {
err = 1; /* terminate the connection in case the allocation failed */
goto out;
}
if (likely(!(peer_req->flags & EE_WAS_ERROR)))
drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
else
memset(digest, 0, digest_size);
/* Free e and pages before send.
* In case we block on congestion, we could otherwise run into
* some distributed deadlock, if the other side blocks on
* congestion as well, because our receiver blocks in
* drbd_alloc_pages due to pp_in_use > max_buffers. */
drbd_free_peer_req(device, peer_req);
peer_req = NULL;
inc_rs_pending(peer_device);
err = drbd_send_drequest_csum(peer_device, sector, size, digest, digest_size, P_OV_REPLY);
if (err)
dec_rs_pending(peer_device);
kfree(digest);
out:
if (peer_req)
drbd_free_peer_req(device, peer_req);
dec_unacked(device);
return err;
}
void drbd_ov_out_of_sync_found(struct drbd_peer_device *peer_device, sector_t sector, int size)
{
struct drbd_device *device = peer_device->device;
if (device->ov_last_oos_start + device->ov_last_oos_size == sector) {
device->ov_last_oos_size += size>>9;
} else {
device->ov_last_oos_start = sector;
device->ov_last_oos_size = size>>9;
}
drbd_set_out_of_sync(peer_device, sector, size);
}
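/* Worker callback handling an online verify reply: compare the peer's
 * digest with the digest of the locally read block, mark mismatches as
 * out of sync and report the result via P_OV_RESULT. */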
int w_e_end_ov_reply(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
struct drbd_peer_device *peer_device = peer_req->peer_device;
struct drbd_device *device = peer_device->device;
struct digest_info *di;
void *digest;
sector_t sector = peer_req->i.sector;
unsigned int size = peer_req->i.size;
int digest_size;
int err, eq = 0;
bool stop_sector_reached = false;
if (unlikely(cancel)) {
drbd_free_peer_req(device, peer_req);
dec_unacked(device);
return 0;
}
/* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
* the resync lru has been cleaned up already */
if (get_ldev(device)) {
drbd_rs_complete_io(device, peer_req->i.sector);
put_ldev(device);
}
di = peer_req->digest;
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
D_ASSERT(device, digest_size == di->digest_size);
eq = !memcmp(digest, di->digest, digest_size);
kfree(digest);
}
}
/* Free peer_req and pages before send.
* In case we block on congestion, we could otherwise run into
* some distributed deadlock, if the other side blocks on
* congestion as well, because our receiver blocks in
* drbd_alloc_pages due to pp_in_use > max_buffers. */
drbd_free_peer_req(device, peer_req);
if (!eq)
drbd_ov_out_of_sync_found(peer_device, sector, size);
else
ov_out_of_sync_print(peer_device);
err = drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size,
eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
dec_unacked(device);
--device->ov_left;
/* let's advance progress step marks only for every other megabyte */
if ((device->ov_left & 0x200) == 0x200)
drbd_advance_rs_marks(peer_device, device->ov_left);
stop_sector_reached = verify_can_do_stop_sector(device) &&
(sector + (size>>9)) >= device->ov_stop_sector;
if (device->ov_left == 0 || stop_sector_reached) {
ov_out_of_sync_print(peer_device);
drbd_resync_finished(peer_device);
}
return err;
}
/* FIXME
* We need to track the number of pending barrier acks,
* and to be able to wait for them.
* See also comment in drbd_adm_attach before drbd_suspend_io.
*/
static int drbd_send_barrier(struct drbd_connection *connection)
{
struct p_barrier *p;
struct drbd_socket *sock;
sock = &connection->data;
p = conn_prepare_command(connection, sock);
if (!p)
return -EIO;
p->barrier = connection->send.current_epoch_nr;
p->pad = 0;
connection->send.current_epoch_writes = 0;
connection->send.last_sent_barrier_jif = jiffies;
return conn_send_command(connection, sock, P_BARRIER, sizeof(*p), NULL, 0);
}
static int pd_send_unplug_remote(struct drbd_peer_device *pd)
{
struct drbd_socket *sock = &pd->connection->data;
if (!drbd_prepare_command(pd, sock))
return -EIO;
return drbd_send_command(pd, sock, P_UNPLUG_REMOTE, 0, NULL, 0);
}
int w_send_write_hint(struct drbd_work *w, int cancel)
{
struct drbd_device *device =
container_of(w, struct drbd_device, unplug_work);
if (cancel)
return 0;
return pd_send_unplug_remote(first_peer_device(device));
}
static void re_init_if_first_write(struct drbd_connection *connection, unsigned int epoch)
{
if (!connection->send.seen_any_write_yet) {
connection->send.seen_any_write_yet = true;
connection->send.current_epoch_nr = epoch;
connection->send.current_epoch_writes = 0;
connection->send.last_sent_barrier_jif = jiffies;
}
}
static void maybe_send_barrier(struct drbd_connection *connection, unsigned int epoch)
{
/* nothing to do before the first write on this connection */
if (!connection->send.seen_any_write_yet)
return;
if (connection->send.current_epoch_nr != epoch) {
if (connection->send.current_epoch_writes)
drbd_send_barrier(connection);
connection->send.current_epoch_nr = epoch;
}
}
int w_send_out_of_sync(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
struct drbd_device *device = req->device;
struct drbd_peer_device *const peer_device = first_peer_device(device);
struct drbd_connection *const connection = peer_device->connection;
int err;
if (unlikely(cancel)) {
req_mod(req, SEND_CANCELED, peer_device);
return 0;
}
req->pre_send_jif = jiffies;
/* this time, no connection->send.current_epoch_writes++;
* If it was sent, it was the closing barrier for the last
* replicated epoch, before we went into AHEAD mode.
* No more barriers will be sent, until we leave AHEAD mode again. */
maybe_send_barrier(connection, req->epoch);
err = drbd_send_out_of_sync(peer_device, req);
req_mod(req, OOS_HANDED_TO_NETWORK, peer_device);
return err;
}
/**
* w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
* @w: work object.
* @cancel: The connection will be closed anyways
*/
int w_send_dblock(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
struct drbd_device *device = req->device;
struct drbd_peer_device *const peer_device = first_peer_device(device);
struct drbd_connection *connection = peer_device->connection;
bool do_send_unplug = req->rq_state & RQ_UNPLUG;
int err;
if (unlikely(cancel)) {
req_mod(req, SEND_CANCELED, peer_device);
return 0;
}
req->pre_send_jif = jiffies;
re_init_if_first_write(connection, req->epoch);
maybe_send_barrier(connection, req->epoch);
connection->send.current_epoch_writes++;
err = drbd_send_dblock(peer_device, req);
req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK, peer_device);
if (do_send_unplug && !err)
pd_send_unplug_remote(peer_device);
return err;
}
/**
* w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet
* @w: work object.
* @cancel: The connection will be closed anyways
*/
int w_send_read_req(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
struct drbd_device *device = req->device;
struct drbd_peer_device *const peer_device = first_peer_device(device);
struct drbd_connection *connection = peer_device->connection;
bool do_send_unplug = req->rq_state & RQ_UNPLUG;
int err;
if (unlikely(cancel)) {
req_mod(req, SEND_CANCELED, peer_device);
return 0;
}
req->pre_send_jif = jiffies;
/* Even read requests may close a write epoch,
* if there was any yet. */
maybe_send_barrier(connection, req->epoch);
err = drbd_send_drequest(peer_device, P_DATA_REQUEST, req->i.sector, req->i.size,
(unsigned long)req);
req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK, peer_device);
if (do_send_unplug && !err)
pd_send_unplug_remote(peer_device);
return err;
}
int w_restart_disk_io(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
struct drbd_device *device = req->device;
if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
drbd_al_begin_io(device, &req->i);
req->private_bio = bio_alloc_clone(device->ldev->backing_bdev,
req->master_bio, GFP_NOIO,
&drbd_io_bio_set);
req->private_bio->bi_private = req;
req->private_bio->bi_end_io = drbd_request_endio;
submit_bio_noacct(req->private_bio);
return 0;
}
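/* Walk the resync-after dependency chain and return 1 if this device
 * may resync now, 0 if some device it depends on is currently syncing
 * or paused. */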
static int _drbd_may_sync_now(struct drbd_device *device)
{
struct drbd_device *odev = device;
int resync_after;
while (1) {
if (!odev->ldev || odev->state.disk == D_DISKLESS)
return 1;
rcu_read_lock();
resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after;
rcu_read_unlock();
if (resync_after == -1)
return 1;
odev = minor_to_device(resync_after);
if (!odev)
return 1;
if ((odev->state.conn >= C_SYNC_SOURCE &&
odev->state.conn <= C_PAUSED_SYNC_T) ||
odev->state.aftr_isp || odev->state.peer_isp ||
odev->state.user_isp)
return 0;
}
}
/**
* drbd_pause_after() - Pause resync on all devices that may not resync now
* @device: DRBD device.
*
* Called from process context only (admin command and after_state_ch).
*/
static bool drbd_pause_after(struct drbd_device *device)
{
bool changed = false;
struct drbd_device *odev;
int i;
rcu_read_lock();
idr_for_each_entry(&drbd_devices, odev, i) {
if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
continue;
if (!_drbd_may_sync_now(odev) &&
_drbd_set_state(_NS(odev, aftr_isp, 1),
CS_HARD, NULL) != SS_NOTHING_TO_DO)
changed = true;
}
rcu_read_unlock();
return changed;
}
/**
* drbd_resume_next() - Resume resync on all devices that may resync now
* @device: DRBD device.
*
* Called from process context only (admin command and worker).
*/
static bool drbd_resume_next(struct drbd_device *device)
{
bool changed = false;
struct drbd_device *odev;
int i;
rcu_read_lock();
idr_for_each_entry(&drbd_devices, odev, i) {
if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
continue;
if (odev->state.aftr_isp) {
if (_drbd_may_sync_now(odev) &&
_drbd_set_state(_NS(odev, aftr_isp, 0),
CS_HARD, NULL) != SS_NOTHING_TO_DO)
changed = true;
}
}
rcu_read_unlock();
return changed;
}
void resume_next_sg(struct drbd_device *device)
{
lock_all_resources();
drbd_resume_next(device);
unlock_all_resources();
}
void suspend_other_sg(struct drbd_device *device)
{
lock_all_resources();
drbd_pause_after(device);
unlock_all_resources();
}
/* caller must lock_all_resources() */
enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor)
{
struct drbd_device *odev;
int resync_after;
if (o_minor == -1)
return NO_ERROR;
if (o_minor < -1 || o_minor > MINORMASK)
return ERR_RESYNC_AFTER;
/* check for loops */
odev = minor_to_device(o_minor);
while (1) {
if (odev == device)
return ERR_RESYNC_AFTER_CYCLE;
/* You are free to depend on diskless, non-existing,
* or not yet/no longer existing minors.
* We only reject dependency loops.
* We cannot follow the dependency chain beyond a detached or
* missing minor.
*/
if (!odev || !odev->ldev || odev->state.disk == D_DISKLESS)
return NO_ERROR;
rcu_read_lock();
resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after;
rcu_read_unlock();
/* dependency chain ends here, no cycles. */
if (resync_after == -1)
return NO_ERROR;
/* follow the dependency chain */
odev = minor_to_device(resync_after);
}
}
/* caller must lock_all_resources() */
void drbd_resync_after_changed(struct drbd_device *device)
{
int changed;
do {
changed = drbd_pause_after(device);
changed |= drbd_resume_next(device);
} while (changed);
}
void drbd_rs_controller_reset(struct drbd_peer_device *peer_device)
{
struct drbd_device *device = peer_device->device;
struct gendisk *disk = device->ldev->backing_bdev->bd_disk;
struct fifo_buffer *plan;
atomic_set(&device->rs_sect_in, 0);
atomic_set(&device->rs_sect_ev, 0);
device->rs_in_flight = 0;
device->rs_last_events =
(int)part_stat_read_accum(disk->part0, sectors);
/* Updating the RCU protected object in place is necessary since
this function gets called from atomic context.
It is valid since all other updates also lead to a completely
empty fifo */
rcu_read_lock();
plan = rcu_dereference(device->rs_plan_s);
plan->total = 0;
fifo_set(plan, 0);
rcu_read_unlock();
}
void start_resync_timer_fn(struct timer_list *t)
{
struct drbd_device *device = from_timer(device, t, start_resync_timer);
drbd_device_post_work(device, RS_START);
}
static void do_start_resync(struct drbd_device *device)
{
if (atomic_read(&device->unacked_cnt) || atomic_read(&device->rs_pending_cnt)) {
drbd_warn(device, "postponing start_resync ...\n");
device->start_resync_timer.expires = jiffies + HZ/10;
add_timer(&device->start_resync_timer);
return;
}
drbd_start_resync(device, C_SYNC_SOURCE);
clear_bit(AHEAD_TO_SYNC_SOURCE, &device->flags);
}
static bool use_checksum_based_resync(struct drbd_connection *connection, struct drbd_device *device)
{
bool csums_after_crash_only;
rcu_read_lock();
csums_after_crash_only = rcu_dereference(connection->net_conf)->csums_after_crash_only;
rcu_read_unlock();
return connection->agreed_pro_version >= 89 && /* supported? */
connection->csums_tfm && /* configured? */
(csums_after_crash_only == false /* use for each resync? */
|| test_bit(CRASHED_PRIMARY, &device->flags)); /* or only after Primary crash? */
}
/**
* drbd_start_resync() - Start the resync process
* @device: DRBD device.
* @side: Either C_SYNC_SOURCE or C_SYNC_TARGET
*
* This function might bring you directly into one of the
* C_PAUSED_SYNC_* states.
*/
void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
{
struct drbd_peer_device *peer_device = first_peer_device(device);
struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
union drbd_state ns;
int r;
if (device->state.conn >= C_SYNC_SOURCE && device->state.conn < C_AHEAD) {
drbd_err(device, "Resync already running!\n");
return;
}
if (!connection) {
drbd_err(device, "No connection to peer, aborting!\n");
return;
}
if (!test_bit(B_RS_H_DONE, &device->flags)) {
if (side == C_SYNC_TARGET) {
/* Since application IO was locked out during C_WF_BITMAP_T and
C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
we check whether we may make the data inconsistent. */
r = drbd_khelper(device, "before-resync-target");
r = (r >> 8) & 0xff;
if (r > 0) {
drbd_info(device, "before-resync-target handler returned %d, "
"dropping connection.\n", r);
conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
return;
}
} else /* C_SYNC_SOURCE */ {
r = drbd_khelper(device, "before-resync-source");
r = (r >> 8) & 0xff;
if (r > 0) {
if (r == 3) {
drbd_info(device, "before-resync-source handler returned %d, "
"ignoring. Old userland tools?", r);
} else {
drbd_info(device, "before-resync-source handler returned %d, "
"dropping connection.\n", r);
conn_request_state(connection,
NS(conn, C_DISCONNECTING), CS_HARD);
return;
}
}
}
}
if (current == connection->worker.task) {
/* The worker should not sleep waiting for state_mutex,
that can take long */
if (!mutex_trylock(device->state_mutex)) {
set_bit(B_RS_H_DONE, &device->flags);
device->start_resync_timer.expires = jiffies + HZ/5;
add_timer(&device->start_resync_timer);
return;
}
} else {
mutex_lock(device->state_mutex);
}
lock_all_resources();
clear_bit(B_RS_H_DONE, &device->flags);
/* Did some connection breakage or IO error race with us? */
if (device->state.conn < C_CONNECTED
|| !get_ldev_if_state(device, D_NEGOTIATING)) {
unlock_all_resources();
goto out;
}
ns = drbd_read_state(device);
ns.aftr_isp = !_drbd_may_sync_now(device);
ns.conn = side;
if (side == C_SYNC_TARGET)
ns.disk = D_INCONSISTENT;
else /* side == C_SYNC_SOURCE */
ns.pdsk = D_INCONSISTENT;
r = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
ns = drbd_read_state(device);
if (ns.conn < C_CONNECTED)
r = SS_UNKNOWN_ERROR;
if (r == SS_SUCCESS) {
unsigned long tw = drbd_bm_total_weight(device);
unsigned long now = jiffies;
int i;
device->rs_failed = 0;
device->rs_paused = 0;
device->rs_same_csum = 0;
device->rs_last_sect_ev = 0;
device->rs_total = tw;
device->rs_start = now;
for (i = 0; i < DRBD_SYNC_MARKS; i++) {
device->rs_mark_left[i] = tw;
device->rs_mark_time[i] = now;
}
drbd_pause_after(device);
/* Forget potentially stale cached per resync extent bit-counts.
* Open coded drbd_rs_cancel_all(device), we already have IRQs
* disabled, and know the disk state is ok. */
spin_lock(&device->al_lock);
lc_reset(device->resync);
device->resync_locked = 0;
device->resync_wenr = LC_FREE;
spin_unlock(&device->al_lock);
}
unlock_all_resources();
if (r == SS_SUCCESS) {
wake_up(&device->al_wait); /* for lc_reset() above */
/* reset rs_last_bcast when a resync or verify is started,
* to deal with potential jiffies wrap. */
device->rs_last_bcast = jiffies - HZ;
drbd_info(device, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
drbd_conn_str(ns.conn),
(unsigned long) device->rs_total << (BM_BLOCK_SHIFT-10),
(unsigned long) device->rs_total);
if (side == C_SYNC_TARGET) {
device->bm_resync_fo = 0;
device->use_csums = use_checksum_based_resync(connection, device);
} else {
device->use_csums = false;
}
/* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid
* with w_send_oos, or the sync target will get confused as to
* how many bits to resync. We cannot always do that, because for an
* empty resync and protocol < 95, we need to do it here, as we call
* drbd_resync_finished from here in that case.
* We drbd_gen_and_send_sync_uuid here for protocol < 96,
* and from after_state_ch otherwise. */
if (side == C_SYNC_SOURCE && connection->agreed_pro_version < 96)
drbd_gen_and_send_sync_uuid(peer_device);
if (connection->agreed_pro_version < 95 && device->rs_total == 0) {
/* This still has a race (about when exactly the peers
* detect connection loss) that can lead to a full sync
* on next handshake. In 8.3.9 we fixed this with explicit
* resync-finished notifications, but the fix
* introduces a protocol change. Sleeping for some
* time longer than the ping interval + timeout on the
* SyncSource, to give the SyncTarget the chance to
* detect connection loss, then waiting for a ping
* response (implicit in drbd_resync_finished) reduces
* the race considerably, but does not solve it. */
if (side == C_SYNC_SOURCE) {
struct net_conf *nc;
int timeo;
rcu_read_lock();
nc = rcu_dereference(connection->net_conf);
timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9;
rcu_read_unlock();
schedule_timeout_interruptible(timeo);
}
drbd_resync_finished(peer_device);
}
drbd_rs_controller_reset(peer_device);
/* ns.conn may already be != device->state.conn,
* we may have been paused in between, or become paused until
* the timer triggers.
* No matter, that is handled in resync_timer_fn() */
if (ns.conn == C_SYNC_TARGET)
mod_timer(&device->resync_timer, jiffies);
drbd_md_sync(device);
}
put_ldev(device);
out:
mutex_unlock(device->state_mutex);
}
static void update_on_disk_bitmap(struct drbd_peer_device *peer_device, bool resync_done)
{
struct drbd_device *device = peer_device->device;
struct sib_info sib = { .sib_reason = SIB_SYNC_PROGRESS, };
device->rs_last_bcast = jiffies;
if (!get_ldev(device))
return;
drbd_bm_write_lazy(device, 0);
if (resync_done && is_sync_state(device->state.conn))
drbd_resync_finished(peer_device);
drbd_bcast_event(device, &sib);
/* update timestamp, in case it took a while to write out stuff */
device->rs_last_bcast = jiffies;
put_ldev(device);
}
static void drbd_ldev_destroy(struct drbd_device *device)
{
lc_destroy(device->resync);
device->resync = NULL;
lc_destroy(device->act_log);
device->act_log = NULL;
__acquire(local);
drbd_backing_dev_free(device, device->ldev);
device->ldev = NULL;
__release(local);
clear_bit(GOING_DISKLESS, &device->flags);
wake_up(&device->misc_wait);
}
static void go_diskless(struct drbd_device *device)
{
struct drbd_peer_device *peer_device = first_peer_device(device);
D_ASSERT(device, device->state.disk == D_FAILED);
/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
* inc/dec it frequently. Once we are D_DISKLESS, no one will touch
* the protected members anymore, though, so once put_ldev reaches zero
* again, it will be safe to free them. */
/* Try to write changed bitmap pages, read errors may have just
* set some bits outside the area covered by the activity log.
*
* If we have an IO error during the bitmap writeout,
* we will want a full sync next time, just in case.
* (Do we want a specific meta data flag for this?)
*
* If that does not make it to stable storage either,
* we cannot do anything about that anymore.
*
* We still need to check if both bitmap and ldev are present, we may
* end up here after a failed attach, before ldev was even assigned.
*/
if (device->bitmap && device->ldev) {
/* An interrupted resync or similar is allowed to recount bits
* while we detach.
* Any modifications would not be expected anymore, though.
*/
if (drbd_bitmap_io_from_worker(device, drbd_bm_write,
"detach", BM_LOCKED_TEST_ALLOWED, peer_device)) {
if (test_bit(WAS_READ_ERROR, &device->flags)) {
drbd_md_set_flag(device, MDF_FULL_SYNC);
drbd_md_sync(device);
}
}
}
drbd_force_state(device, NS(disk, D_DISKLESS));
}
static int do_md_sync(struct drbd_device *device)
{
drbd_warn(device, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
drbd_md_sync(device);
return 0;
}
/* only called from drbd_worker thread, no locking */
void __update_timing_details(
struct drbd_thread_timing_details *tdp,
unsigned int *cb_nr,
void *cb,
const char *fn, const unsigned int line)
{
unsigned int i = *cb_nr % DRBD_THREAD_DETAILS_HIST;
struct drbd_thread_timing_details *td = tdp + i;
td->start_jif = jiffies;
td->cb_addr = cb;
td->caller_fn = fn;
td->line = line;
td->cb_nr = *cb_nr;
i = (i+1) % DRBD_THREAD_DETAILS_HIST;
td = tdp + i;
memset(td, 0, sizeof(*td));
++(*cb_nr);
}
static void do_device_work(struct drbd_device *device, const unsigned long todo)
{
if (test_bit(MD_SYNC, &todo))
do_md_sync(device);
if (test_bit(RS_DONE, &todo) ||
test_bit(RS_PROGRESS, &todo))
update_on_disk_bitmap(first_peer_device(device), test_bit(RS_DONE, &todo));
if (test_bit(GO_DISKLESS, &todo))
go_diskless(device);
if (test_bit(DESTROY_DISK, &todo))
drbd_ldev_destroy(device);
if (test_bit(RS_START, &todo))
do_start_resync(device);
}
#define DRBD_DEVICE_WORK_MASK \
((1UL << GO_DISKLESS) \
|(1UL << DESTROY_DISK) \
|(1UL << MD_SYNC) \
|(1UL << RS_START) \
|(1UL << RS_PROGRESS) \
|(1UL << RS_DONE) \
)
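/* Atomically fetch and clear the device work bits covered by
 * DRBD_DEVICE_WORK_MASK and return those that were set. */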
static unsigned long get_work_bits(unsigned long *flags)
{
unsigned long old, new;
do {
old = *flags;
new = old & ~DRBD_DEVICE_WORK_MASK;
} while (cmpxchg(flags, old, new) != old);
return old & DRBD_DEVICE_WORK_MASK;
}
static void do_unqueued_work(struct drbd_connection *connection)
{
struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
unsigned long todo = get_work_bits(&device->flags);
if (!todo)
continue;
kref_get(&device->kref);
rcu_read_unlock();
do_device_work(device, todo);
kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
}
rcu_read_unlock();
}
static bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_list)
{
spin_lock_irq(&queue->q_lock);
list_splice_tail_init(&queue->q, work_list);
spin_unlock_irq(&queue->q_lock);
return !list_empty(work_list);
}
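/* Wait until there is work on the sender queue.  While idle, uncork
 * the data socket if requested, close the current epoch with a barrier
 * when appropriate, and restore the tcp_cork setting before
 * returning. */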
static void wait_for_work(struct drbd_connection *connection, struct list_head *work_list)
{
DEFINE_WAIT(wait);
struct net_conf *nc;
int uncork, cork;
dequeue_work_batch(&connection->sender_work, work_list);
if (!list_empty(work_list))
return;
/* Still nothing to do?
* Maybe we still need to close the current epoch,
* even if no new requests are queued yet.
*
* Also, poke TCP, just in case.
* Then wait for new work (or signal). */
rcu_read_lock();
nc = rcu_dereference(connection->net_conf);
uncork = nc ? nc->tcp_cork : 0;
rcu_read_unlock();
if (uncork) {
mutex_lock(&connection->data.mutex);
if (connection->data.socket)
tcp_sock_set_cork(connection->data.socket->sk, false);
mutex_unlock(&connection->data.mutex);
}
for (;;) {
int send_barrier;
prepare_to_wait(&connection->sender_work.q_wait, &wait, TASK_INTERRUPTIBLE);
spin_lock_irq(&connection->resource->req_lock);
spin_lock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */
if (!list_empty(&connection->sender_work.q))
list_splice_tail_init(&connection->sender_work.q, work_list);
spin_unlock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */
if (!list_empty(work_list) || signal_pending(current)) {
spin_unlock_irq(&connection->resource->req_lock);
break;
}
/* We found nothing new to do, no to-be-communicated request,
* no other work item. We may still need to close the last
* epoch. Next incoming request epoch will be connection ->
* current transfer log epoch number. If that is different
* from the epoch of the last request we communicated, it is
* safe to send the epoch separating barrier now.
*/
send_barrier =
atomic_read(&connection->current_tle_nr) !=
connection->send.current_epoch_nr;
spin_unlock_irq(&connection->resource->req_lock);
if (send_barrier)
maybe_send_barrier(connection,
connection->send.current_epoch_nr + 1);
if (test_bit(DEVICE_WORK_PENDING, &connection->flags))
break;
/* drbd_send() may have called flush_signals() */
if (get_t_state(&connection->worker) != RUNNING)
break;
schedule();
/* may be woken up for other things but new work, too,
* e.g. if the current epoch got closed.
* In which case we send the barrier above. */
}
finish_wait(&connection->sender_work.q_wait, &wait);
/* someone may have changed the config while we have been waiting above. */
rcu_read_lock();
nc = rcu_dereference(connection->net_conf);
cork = nc ? nc->tcp_cork : 0;
rcu_read_unlock();
mutex_lock(&connection->data.mutex);
if (connection->data.socket) {
if (cork)
tcp_sock_set_cork(connection->data.socket->sk, true);
else if (!uncork)
tcp_sock_set_cork(connection->data.socket->sk, false);
}
mutex_unlock(&connection->data.mutex);
}
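/*
 * Cork handling around the wait above: if tcp_cork is configured, the
 * data socket is uncorked before sleeping so that anything already
 * queued (e.g. a barrier) actually goes out on the wire, and corked
 * again after the wait so subsequent packets can be batched.
 */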
int drbd_worker(struct drbd_thread *thi)
{
struct drbd_connection *connection = thi->connection;
struct drbd_work *w = NULL;
struct drbd_peer_device *peer_device;
LIST_HEAD(work_list);
int vnr;
while (get_t_state(thi) == RUNNING) {
drbd_thread_current_set_cpu(thi);
if (list_empty(&work_list)) {
update_worker_timing_details(connection, wait_for_work);
wait_for_work(connection, &work_list);
}
if (test_and_clear_bit(DEVICE_WORK_PENDING, &connection->flags)) {
update_worker_timing_details(connection, do_unqueued_work);
do_unqueued_work(connection);
}
if (signal_pending(current)) {
flush_signals(current);
if (get_t_state(thi) == RUNNING) {
drbd_warn(connection, "Worker got an unexpected signal\n");
continue;
}
break;
}
if (get_t_state(thi) != RUNNING)
break;
if (!list_empty(&work_list)) {
w = list_first_entry(&work_list, struct drbd_work, list);
list_del_init(&w->list);
update_worker_timing_details(connection, w->cb);
if (w->cb(w, connection->cstate < C_WF_REPORT_PARAMS) == 0)
continue;
if (connection->cstate >= C_WF_REPORT_PARAMS)
conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
}
}
do {
if (test_and_clear_bit(DEVICE_WORK_PENDING, &connection->flags)) {
update_worker_timing_details(connection, do_unqueued_work);
do_unqueued_work(connection);
}
if (!list_empty(&work_list)) {
w = list_first_entry(&work_list, struct drbd_work, list);
list_del_init(&w->list);
update_worker_timing_details(connection, w->cb);
w->cb(w, 1);
} else
dequeue_work_batch(&connection->sender_work, &work_list);
} while (!list_empty(&work_list) || test_bit(DEVICE_WORK_PENDING, &connection->flags));
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
D_ASSERT(device, device->state.disk == D_DISKLESS && device->state.conn == C_STANDALONE);
kref_get(&device->kref);
rcu_read_unlock();
drbd_device_cleanup(device);
kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
}
rcu_read_unlock();
return 0;
}
| linux-master | drivers/block/drbd/drbd_worker.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
drbd_nl.c
This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
Copyright (C) 1999-2008, Philipp Reisner <[email protected]>.
Copyright (C) 2002-2008, Lars Ellenberg <[email protected]>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_state_change.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>
#include <net/genetlink.h>
/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices_done(struct netlink_callback *cb);
int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_connections_done(struct netlink_callback *cb);
int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb);
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb);
#include <linux/drbd_genl_api.h>
#include "drbd_nla.h"
#include <linux/genl_magic_func.h>
static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */
DEFINE_MUTEX(notification_mutex);
/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
if (genlmsg_reply(skb, info))
pr_err("error sending genl reply\n");
}
/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail would be no space in the skb, and there are 4k available. */
static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
{
struct nlattr *nla;
int err = -EMSGSIZE;
if (!info || !info[0])
return 0;
nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
if (!nla)
return err;
err = nla_put_string(skb, T_info_text, info);
if (err) {
nla_nest_cancel(skb, nla);
return err;
} else
nla_nest_end(skb, nla);
return 0;
}
__printf(2, 3)
static int drbd_msg_sprintf_info(struct sk_buff *skb, const char *fmt, ...)
{
va_list args;
struct nlattr *nla, *txt;
int err = -EMSGSIZE;
int len;
nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
if (!nla)
return err;
txt = nla_reserve(skb, T_info_text, 256);
if (!txt) {
nla_nest_cancel(skb, nla);
return err;
}
va_start(args, fmt);
len = vscnprintf(nla_data(txt), 256, fmt, args);
va_end(args);
/* maybe: retry with larger reserve, if truncated */
txt->nla_len = nla_attr_size(len+1);
nlmsg_trim(skb, (char*)txt + NLA_ALIGN(txt->nla_len));
nla_nest_end(skb, nla);
return 0;
}
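/*
 * Reserve-then-trim pattern above: 256 bytes are reserved so that
 * vscnprintf() has a buffer to format into; afterwards nla_len and the
 * skb tail are trimmed back to the actual string length (plus NUL), so
 * short messages do not carry 256 bytes of padding in the reply.
 */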
/* This would be a good candidate for a "pre_doit" hook,
* and per-family private info->pointers.
* But we need to stay compatible with older kernels.
* If it returns successfully, adm_ctx members are valid.
*
* At this point, we still rely on the global genl_lock().
* If we want to avoid that, and allow "genl_family.parallel_ops", we may need
* to add additional synchronization against object destruction/modification.
*/
#define DRBD_ADM_NEED_MINOR 1
#define DRBD_ADM_NEED_RESOURCE 2
#define DRBD_ADM_NEED_CONNECTION 4
static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
struct sk_buff *skb, struct genl_info *info, unsigned flags)
{
struct drbd_genlmsghdr *d_in = genl_info_userhdr(info);
const u8 cmd = info->genlhdr->cmd;
int err;
memset(adm_ctx, 0, sizeof(*adm_ctx));
/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
return -EPERM;
adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!adm_ctx->reply_skb) {
err = -ENOMEM;
goto fail;
}
adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb,
info, &drbd_genl_family, 0, cmd);
/* Putting a few bytes into a fresh skb of >= 4k will always succeed,
 * but be paranoid anyway */
if (!adm_ctx->reply_dh) {
err = -ENOMEM;
goto fail;
}
adm_ctx->reply_dh->minor = d_in->minor;
adm_ctx->reply_dh->ret_code = NO_ERROR;
adm_ctx->volume = VOLUME_UNSPECIFIED;
if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
struct nlattr *nla;
/* parse and validate only */
err = drbd_cfg_context_from_attrs(NULL, info);
if (err)
goto fail;
/* It was present, and valid,
* copy it over to the reply skb. */
err = nla_put_nohdr(adm_ctx->reply_skb,
info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
info->attrs[DRBD_NLA_CFG_CONTEXT]);
if (err)
goto fail;
/* and assign stuff to the adm_ctx */
nla = nested_attr_tb[__nla_type(T_ctx_volume)];
if (nla)
adm_ctx->volume = nla_get_u32(nla);
nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
if (nla)
adm_ctx->resource_name = nla_data(nla);
adm_ctx->my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
adm_ctx->peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
if ((adm_ctx->my_addr &&
nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) ||
(adm_ctx->peer_addr &&
nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) {
err = -EINVAL;
goto fail;
}
}
adm_ctx->minor = d_in->minor;
adm_ctx->device = minor_to_device(d_in->minor);
/* We are protected by the global genl_lock().
* But we may explicitly drop it/retake it in drbd_adm_set_role(),
* so make sure this object stays around. */
if (adm_ctx->device)
kref_get(&adm_ctx->device->kref);
if (adm_ctx->resource_name) {
adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name);
}
if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) {
drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor");
return ERR_MINOR_INVALID;
}
if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource");
if (adm_ctx->resource_name)
return ERR_RES_NOT_KNOWN;
return ERR_INVALID_REQUEST;
}
if (flags & DRBD_ADM_NEED_CONNECTION) {
if (adm_ctx->resource) {
drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected");
return ERR_INVALID_REQUEST;
}
if (adm_ctx->device) {
drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected");
return ERR_INVALID_REQUEST;
}
if (adm_ctx->my_addr && adm_ctx->peer_addr)
adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr),
nla_len(adm_ctx->my_addr),
nla_data(adm_ctx->peer_addr),
nla_len(adm_ctx->peer_addr));
if (!adm_ctx->connection) {
drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection");
return ERR_INVALID_REQUEST;
}
}
/* some more paranoia, if the request was over-determined */
if (adm_ctx->device && adm_ctx->resource &&
adm_ctx->device->resource != adm_ctx->resource) {
pr_warn("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
adm_ctx->minor, adm_ctx->resource->name,
adm_ctx->device->resource->name);
drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource");
return ERR_INVALID_REQUEST;
}
if (adm_ctx->device &&
adm_ctx->volume != VOLUME_UNSPECIFIED &&
adm_ctx->volume != adm_ctx->device->vnr) {
pr_warn("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
adm_ctx->minor, adm_ctx->volume,
adm_ctx->device->vnr, adm_ctx->device->resource->name);
drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume");
return ERR_INVALID_REQUEST;
}
/* still, provide adm_ctx->resource always, if possible. */
if (!adm_ctx->resource) {
adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource
: adm_ctx->connection ? adm_ctx->connection->resource : NULL;
if (adm_ctx->resource)
kref_get(&adm_ctx->resource->kref);
}
return NO_ERROR;
fail:
nlmsg_free(adm_ctx->reply_skb);
adm_ctx->reply_skb = NULL;
return err;
}
static int drbd_adm_finish(struct drbd_config_context *adm_ctx,
struct genl_info *info, int retcode)
{
if (adm_ctx->device) {
kref_put(&adm_ctx->device->kref, drbd_destroy_device);
adm_ctx->device = NULL;
}
if (adm_ctx->connection) {
kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection);
adm_ctx->connection = NULL;
}
if (adm_ctx->resource) {
kref_put(&adm_ctx->resource->kref, drbd_destroy_resource);
adm_ctx->resource = NULL;
}
if (!adm_ctx->reply_skb)
return -ENOMEM;
adm_ctx->reply_dh->ret_code = retcode;
drbd_adm_send_reply(adm_ctx->reply_skb, info);
return 0;
}
static void setup_khelper_env(struct drbd_connection *connection, char **envp)
{
char *afs;
/* FIXME: A future version will not allow this case. */
if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)
return;
switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
case AF_INET6:
afs = "ipv6";
snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
&((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
break;
case AF_INET:
afs = "ipv4";
snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
&((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
break;
default:
afs = "ssocks";
snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
&((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
}
snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}
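/*
 * Example (illustrative addresses): for an IPv4 peer 10.0.0.2 the helper
 * environment ends up containing "DRBD_PEER_AF=ipv4" in envp[3] and
 * "DRBD_PEER_ADDRESS=10.0.0.2" in envp[4]; for the "ssocks" fallback the
 * address is still formatted as IPv4.
 */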
int drbd_khelper(struct drbd_device *device, char *cmd)
{
char *envp[] = { "HOME=/",
"TERM=linux",
"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
(char[20]) { }, /* address family */
(char[60]) { }, /* address */
NULL };
char mb[14];
char *argv[] = {drbd_usermode_helper, cmd, mb, NULL };
struct drbd_connection *connection = first_peer_device(device)->connection;
struct sib_info sib;
int ret;
if (current == connection->worker.task)
set_bit(CALLBACK_PENDING, &connection->flags);
snprintf(mb, 14, "minor-%d", device_to_minor(device));
setup_khelper_env(connection, envp);
/* The helper may take some time.
* write out any unsynced meta data changes now */
drbd_md_sync(device);
drbd_info(device, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, mb);
sib.sib_reason = SIB_HELPER_PRE;
sib.helper_name = cmd;
drbd_bcast_event(device, &sib);
notify_helper(NOTIFY_CALL, device, connection, cmd, 0);
ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
if (ret)
drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
drbd_usermode_helper, cmd, mb,
(ret >> 8) & 0xff, ret);
else
drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
drbd_usermode_helper, cmd, mb,
(ret >> 8) & 0xff, ret);
sib.sib_reason = SIB_HELPER_POST;
sib.helper_exit_code = ret;
drbd_bcast_event(device, &sib);
notify_helper(NOTIFY_RESPONSE, device, connection, cmd, ret);
if (current == connection->worker.task)
clear_bit(CALLBACK_PENDING, &connection->flags);
if (ret < 0) /* Ignore any ERRNOs we got. */
ret = 0;
return ret;
}
enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd)
{
char *envp[] = { "HOME=/",
"TERM=linux",
"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
(char[20]) { }, /* address family */
(char[60]) { }, /* address */
NULL };
char *resource_name = connection->resource->name;
char *argv[] = {drbd_usermode_helper, cmd, resource_name, NULL };
int ret;
setup_khelper_env(connection, envp);
conn_md_sync(connection);
drbd_info(connection, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, resource_name);
/* TODO: conn_bcast_event() ?? */
notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0);
ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
if (ret)
drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
drbd_usermode_helper, cmd, resource_name,
(ret >> 8) & 0xff, ret);
else
drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
drbd_usermode_helper, cmd, resource_name,
(ret >> 8) & 0xff, ret);
/* TODO: conn_bcast_event() ?? */
notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret);
if (ret < 0) /* Ignore any ERRNOs we got. */
ret = 0;
return ret;
}
static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
{
enum drbd_fencing_p fp = FP_NOT_AVAIL;
struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
if (get_ldev_if_state(device, D_CONSISTENT)) {
struct disk_conf *disk_conf =
rcu_dereference(peer_device->device->ldev->disk_conf);
fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing);
put_ldev(device);
}
}
rcu_read_unlock();
return fp;
}
static bool resource_is_suspended(struct drbd_resource *resource)
{
return resource->susp || resource->susp_fen || resource->susp_nod;
}
bool conn_try_outdate_peer(struct drbd_connection *connection)
{
struct drbd_resource * const resource = connection->resource;
unsigned int connect_cnt;
union drbd_state mask = { };
union drbd_state val = { };
enum drbd_fencing_p fp;
char *ex_to_string;
int r;
spin_lock_irq(&resource->req_lock);
if (connection->cstate >= C_WF_REPORT_PARAMS) {
drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
spin_unlock_irq(&resource->req_lock);
return false;
}
connect_cnt = connection->connect_cnt;
spin_unlock_irq(&resource->req_lock);
fp = highest_fencing_policy(connection);
switch (fp) {
case FP_NOT_AVAIL:
drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
spin_lock_irq(&resource->req_lock);
if (connection->cstate < C_WF_REPORT_PARAMS) {
_conn_request_state(connection,
(union drbd_state) { { .susp_fen = 1 } },
(union drbd_state) { { .susp_fen = 0 } },
CS_VERBOSE | CS_HARD | CS_DC_SUSP);
/* We are no longer suspended due to the fencing policy.
* We may still be suspended due to the on-no-data-accessible policy.
* If that was OND_IO_ERROR, fail pending requests. */
if (!resource_is_suspended(resource))
_tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
}
/* Else: in case we raced with a connection handshake,
* let the handshake figure out if we maybe can RESEND,
* and do not resume/fail pending requests here.
* Worst case is we stay suspended for now, which may be
* resolved by either re-establishing the replication link, or
* the next link failure, or eventually the administrator. */
spin_unlock_irq(&resource->req_lock);
return false;
case FP_DONT_CARE:
return true;
default: ;
}
r = conn_khelper(connection, "fence-peer");
switch ((r>>8) & 0xff) {
case P_INCONSISTENT: /* peer is inconsistent */
ex_to_string = "peer is inconsistent or worse";
mask.pdsk = D_MASK;
val.pdsk = D_INCONSISTENT;
break;
case P_OUTDATED: /* peer got outdated, or was already outdated */
ex_to_string = "peer was fenced";
mask.pdsk = D_MASK;
val.pdsk = D_OUTDATED;
break;
case P_DOWN: /* peer was down */
if (conn_highest_disk(connection) == D_UP_TO_DATE) {
/* we will(have) create(d) a new UUID anyways... */
ex_to_string = "peer is unreachable, assumed to be dead";
mask.pdsk = D_MASK;
val.pdsk = D_OUTDATED;
} else {
ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
}
break;
case P_PRIMARY: /* Peer is primary, voluntarily outdate myself.
* This is useful when an unconnected R_SECONDARY is asked to
* become R_PRIMARY, but finds the other peer being active. */
ex_to_string = "peer is active";
drbd_warn(connection, "Peer is primary, outdating myself.\n");
mask.disk = D_MASK;
val.disk = D_OUTDATED;
break;
case P_FENCING:
/* THINK: do we need to handle this
* like case 4, or more like case 5? */
if (fp != FP_STONITH)
drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
ex_to_string = "peer was stonithed";
mask.pdsk = D_MASK;
val.pdsk = D_OUTDATED;
break;
default:
/* The script is broken ... */
drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
return false; /* Eventually leave IO frozen */
}
drbd_info(connection, "fence-peer helper returned %d (%s)\n",
(r>>8) & 0xff, ex_to_string);
/* Not using
conn_request_state(connection, mask, val, CS_VERBOSE);
here, because we might have been able to re-establish the connection in the
meantime. */
spin_lock_irq(&resource->req_lock);
if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
if (connection->connect_cnt != connect_cnt)
/* In case the connection was established and dropped
while the fence-peer handler was running, ignore it */
drbd_info(connection, "Ignoring fence-peer exit code\n");
else
_conn_request_state(connection, mask, val, CS_VERBOSE);
}
spin_unlock_irq(&resource->req_lock);
return conn_highest_pdsk(connection) <= D_OUTDATED;
}
static int _try_outdate_peer_async(void *data)
{
struct drbd_connection *connection = (struct drbd_connection *)data;
conn_try_outdate_peer(connection);
kref_put(&connection->kref, drbd_destroy_connection);
return 0;
}
void conn_try_outdate_peer_async(struct drbd_connection *connection)
{
struct task_struct *opa;
kref_get(&connection->kref);
/* We may have just sent a signal to this thread
* to get it out of some blocking network function.
* Clear signals; otherwise kthread_run(), which internally uses
* wait_on_completion_killable(), will mistake our pending signal
* for a new fatal signal and fail. */
flush_signals(current);
opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
if (IS_ERR(opa)) {
drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
kref_put(&connection->kref, drbd_destroy_connection);
}
}
enum drbd_state_rv
drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force)
{
struct drbd_peer_device *const peer_device = first_peer_device(device);
struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
const int max_tries = 4;
enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
struct net_conf *nc;
int try = 0;
int forced = 0;
union drbd_state mask, val;
if (new_role == R_PRIMARY) {
struct drbd_connection *connection;
/* Detect dead peers as soon as possible. */
rcu_read_lock();
for_each_connection(connection, device->resource)
request_ping(connection);
rcu_read_unlock();
}
mutex_lock(device->state_mutex);
mask.i = 0; mask.role = R_MASK;
val.i = 0; val.role = new_role;
while (try++ < max_tries) {
rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE);
/* in case we first succeeded to outdate,
* but now suddenly could establish a connection */
if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
val.pdsk = 0;
mask.pdsk = 0;
continue;
}
if (rv == SS_NO_UP_TO_DATE_DISK && force &&
(device->state.disk < D_UP_TO_DATE &&
device->state.disk >= D_INCONSISTENT)) {
mask.disk = D_MASK;
val.disk = D_UP_TO_DATE;
forced = 1;
continue;
}
if (rv == SS_NO_UP_TO_DATE_DISK &&
device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
D_ASSERT(device, device->state.pdsk == D_UNKNOWN);
if (conn_try_outdate_peer(connection)) {
val.disk = D_UP_TO_DATE;
mask.disk = D_MASK;
}
continue;
}
if (rv == SS_NOTHING_TO_DO)
goto out;
if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
if (!conn_try_outdate_peer(connection) && force) {
drbd_warn(device, "Forced into split brain situation!\n");
mask.pdsk = D_MASK;
val.pdsk = D_OUTDATED;
}
continue;
}
if (rv == SS_TWO_PRIMARIES) {
/* Maybe the peer is detected as dead very soon...
retry at most once more in this case. */
if (try < max_tries) {
int timeo;
try = max_tries - 1;
rcu_read_lock();
nc = rcu_dereference(connection->net_conf);
timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
rcu_read_unlock();
schedule_timeout_interruptible(timeo);
}
continue;
}
if (rv < SS_SUCCESS) {
rv = _drbd_request_state(device, mask, val,
CS_VERBOSE + CS_WAIT_COMPLETE);
if (rv < SS_SUCCESS)
goto out;
}
break;
}
if (rv < SS_SUCCESS)
goto out;
if (forced)
drbd_warn(device, "Forced to consider local data as UpToDate!\n");
/* Wait until nothing is on the fly :) */
wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0);
/* FIXME also wait for all pending P_BARRIER_ACK? */
if (new_role == R_SECONDARY) {
if (get_ldev(device)) {
device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
put_ldev(device);
}
} else {
mutex_lock(&device->resource->conf_update);
nc = connection->net_conf;
if (nc)
nc->discard_my_data = 0; /* without copy; single bit op is atomic */
mutex_unlock(&device->resource->conf_update);
if (get_ldev(device)) {
if (((device->state.conn < C_CONNECTED ||
device->state.pdsk <= D_FAILED)
&& device->ldev->md.uuid[UI_BITMAP] == 0) || forced)
drbd_uuid_new_current(device);
device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
put_ldev(device);
}
}
/* writeout of activity-log-covered areas of the bitmap
 * to stable storage is done in the after-state-change work already */
if (device->state.conn >= C_WF_REPORT_PARAMS) {
/* if this was forced, we should consider sync */
if (forced)
drbd_send_uuids(peer_device);
drbd_send_current_state(peer_device);
}
drbd_md_sync(device);
set_disk_ro(device->vdisk, new_role == R_SECONDARY);
kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
out:
mutex_unlock(device->state_mutex);
return rv;
}
static const char *from_attrs_err_to_txt(int err)
{
return err == -ENOMSG ? "required attribute missing" :
err == -EOPNOTSUPP ? "unknown mandatory attribute" :
err == -EEXIST ? "can not change invariant setting" :
"invalid attribute value";
}
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
struct set_role_parms parms;
int err;
enum drbd_ret_code retcode;
enum drbd_state_rv rv;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
memset(&parms, 0, sizeof(parms));
if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
err = set_role_parms_from_attrs(&parms, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto out;
}
}
genl_unlock();
mutex_lock(&adm_ctx.resource->adm_mutex);
if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
rv = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
else
rv = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);
mutex_unlock(&adm_ctx.resource->adm_mutex);
genl_lock();
drbd_adm_finish(&adm_ctx, info, rv);
return 0;
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
/* Initializes the md.*_offset members, so we are able to find
* the on disk meta data.
*
* We currently have two possible layouts:
* external:
* |----------- md_size_sect ------------------|
* [ 4k superblock ][ activity log ][ Bitmap ]
* | al_offset == 8 |
* | bm_offset = al_offset + X |
* ==> bitmap sectors = md_size_sect - bm_offset
*
* internal:
* |----------- md_size_sect ------------------|
* [data.....][ Bitmap ][ activity log ][ 4k superblock ]
* | al_offset < 0 |
* | bm_offset = al_offset - Y |
* ==> bitmap sectors = Y = al_offset - bm_offset
*
* Activity log size used to be fixed 32kB,
* but is about to become configurable.
*/
static void drbd_md_set_sector_offsets(struct drbd_device *device,
struct drbd_backing_dev *bdev)
{
sector_t md_size_sect = 0;
unsigned int al_size_sect = bdev->md.al_size_4k * 8;
bdev->md.md_offset = drbd_md_ss(bdev);
switch (bdev->md.meta_dev_idx) {
default:
/* v07 style fixed size indexed meta data */
bdev->md.md_size_sect = MD_128MB_SECT;
bdev->md.al_offset = MD_4kB_SECT;
bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
break;
case DRBD_MD_INDEX_FLEX_EXT:
/* just occupy the full device; unit: sectors */
bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
bdev->md.al_offset = MD_4kB_SECT;
bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
break;
case DRBD_MD_INDEX_INTERNAL:
case DRBD_MD_INDEX_FLEX_INT:
/* al size is still fixed */
bdev->md.al_offset = -al_size_sect;
/* we need (slightly less than) ~ this many bitmap sectors: */
md_size_sect = drbd_get_capacity(bdev->backing_bdev);
md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
md_size_sect = BM_SECT_TO_EXT(md_size_sect);
md_size_sect = ALIGN(md_size_sect, 8);
/* plus the "drbd meta data super block",
* and the activity log; */
md_size_sect += MD_4kB_SECT + al_size_sect;
bdev->md.md_size_sect = md_size_sect;
/* bitmap offset is adjusted by 'super' block size */
bdev->md.bm_offset = -md_size_sect + MD_4kB_SECT;
break;
}
}
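/*
 * Rough illustration for the internal layout (assuming the usual 4 KiB
 * of data per bitmap bit): a 1 TiB backing device needs about 32 MiB of
 * bitmap, i.e. on the order of 65536 sectors, so md_size_sect becomes
 * that (rounded up) plus the 4k superblock and the activity log, and
 * al_offset/bm_offset are negative offsets counted back from the end of
 * the device.
 */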
/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
/* Needs 9 bytes at max including trailing NUL:
* -1ULL ==> "16384 EB" */
static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
int base = 0;
while (size >= 10000 && base < sizeof(units)-1) {
/* shift + round */
size = (size >> 10) + !!(size & (1<<9));
base++;
}
sprintf(buf, "%u %cB", (unsigned)size, units[base]);
return buf;
}
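/*
 * Worked example: ppsize(buf, 1048576) -- 1 GiB given in KB -- shifts
 * once (1048576 >> 10 == 1024, no rounding bit set), ends with base == 1
 * and formats "1024 MB".
 */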
/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
* remote READ does inc_ap_bio, receiver would need to receive answer
* packet from remote to dec_ap_bio again.
* receiver receive_sizes(), comes here,
* waits for ap_bio_cnt == 0. -> deadlock.
* but this cannot happen, actually, because:
* R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
* (not connected, or bad/no disk on peer):
* see drbd_fail_request_early, ap_bio_cnt is zero.
* R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
* peer may not initiate a resize.
*/
/* Note these are not to be confused with
* drbd_adm_suspend_io/drbd_adm_resume_io,
* which are (sub) state changes triggered by admin (drbdsetup),
* and can be long lived.
 * This changes a device->flag, is triggered by drbd internals,
* and should be short-lived. */
/* It needs to be a counter, since multiple threads might
independently suspend and resume IO. */
void drbd_suspend_io(struct drbd_device *device)
{
atomic_inc(&device->suspend_cnt);
if (drbd_suspended(device))
return;
wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt));
}
void drbd_resume_io(struct drbd_device *device)
{
if (atomic_dec_and_test(&device->suspend_cnt))
wake_up(&device->misc_wait);
}
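/*
 * These two nest: every drbd_suspend_io() bumps suspend_cnt and (unless
 * the device is already suspended for other reasons) waits for in-flight
 * application bios to drain; only the matching final drbd_resume_io()
 * wakes up the waiters again.
 */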
/*
* drbd_determine_dev_size() - Sets the right device size obeying all constraints
* @device: DRBD device.
*
 * Returns an enum determine_dev_size value; negative values indicate errors.
* You should call drbd_md_sync() after calling this function.
*/
enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
{
struct md_offsets_and_sizes {
u64 last_agreed_sect;
u64 md_offset;
s32 al_offset;
s32 bm_offset;
u32 md_size_sect;
u32 al_stripes;
u32 al_stripe_size_4k;
} prev;
sector_t u_size, size;
struct drbd_md *md = &device->ldev->md;
void *buffer;
int md_moved, la_size_changed;
enum determine_dev_size rv = DS_UNCHANGED;
/* We may change the on-disk offsets of our meta data below. Lock out
* anything that may cause meta data IO, to avoid acting on incomplete
* layout changes or scribbling over meta data that is in the process
* of being moved.
*
* Move is not exactly correct, btw, currently we have all our meta
* data in core memory, to "move" it we just write it all out, there
* are no reads. */
drbd_suspend_io(device);
buffer = drbd_md_get_buffer(device, __func__); /* Lock meta-data IO */
if (!buffer) {
drbd_resume_io(device);
return DS_ERROR;
}
/* remember current offset and sizes */
prev.last_agreed_sect = md->la_size_sect;
prev.md_offset = md->md_offset;
prev.al_offset = md->al_offset;
prev.bm_offset = md->bm_offset;
prev.md_size_sect = md->md_size_sect;
prev.al_stripes = md->al_stripes;
prev.al_stripe_size_4k = md->al_stripe_size_4k;
if (rs) {
/* rs is non NULL if we should change the AL layout only */
md->al_stripes = rs->al_stripes;
md->al_stripe_size_4k = rs->al_stripe_size / 4;
md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
}
drbd_md_set_sector_offsets(device, device->ldev);
rcu_read_lock();
u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
rcu_read_unlock();
size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED);
if (size < prev.last_agreed_sect) {
if (rs && u_size == 0) {
/* Remove "rs &&" later. This check should always be active, but
right now the receiver expects the permissive behavior */
drbd_warn(device, "Implicit shrink not allowed. "
"Use --size=%llus for explicit shrink.\n",
(unsigned long long)size);
rv = DS_ERROR_SHRINK;
}
if (u_size > size)
rv = DS_ERROR_SPACE_MD;
if (rv != DS_UNCHANGED)
goto err_out;
}
if (get_capacity(device->vdisk) != size ||
drbd_bm_capacity(device) != size) {
int err;
err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
if (unlikely(err)) {
/* currently there is only one error: ENOMEM! */
size = drbd_bm_capacity(device);
if (size == 0) {
drbd_err(device, "OUT OF MEMORY! "
"Could not allocate bitmap!\n");
} else {
drbd_err(device, "BM resizing failed. "
"Leaving size unchanged\n");
}
rv = DS_ERROR;
}
/* racy, see comments above. */
drbd_set_my_capacity(device, size);
md->la_size_sect = size;
}
if (rv <= DS_ERROR)
goto err_out;
la_size_changed = (prev.last_agreed_sect != md->la_size_sect);
md_moved = prev.md_offset != md->md_offset
|| prev.md_size_sect != md->md_size_sect;
if (la_size_changed || md_moved || rs) {
u32 prev_flags;
/* We do some synchronous IO below, which may take some time.
* Clear the timer, to avoid scary "timer expired!" messages,
* "Superblock" is written out at least twice below, anyways. */
del_timer(&device->md_sync_timer);
/* We won't change the "al-extents" setting, we just may need
* to move the on-disk location of the activity log ringbuffer.
* Lock for transaction is good enough, it may well be "dirty"
* or even "starving". */
wait_event(device->al_wait, lc_try_lock_for_transaction(device->act_log));
/* mark current on-disk bitmap and activity log as unreliable */
prev_flags = md->flags;
md->flags |= MDF_FULL_SYNC | MDF_AL_DISABLED;
drbd_md_write(device, buffer);
drbd_al_initialize(device, buffer);
drbd_info(device, "Writing the whole bitmap, %s\n",
la_size_changed && md_moved ? "size changed and md moved" :
la_size_changed ? "size changed" : "md moved");
/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
"size changed", BM_LOCKED_MASK, NULL);
/* on-disk bitmap and activity log is authoritative again
* (unless there was an IO error meanwhile...) */
md->flags = prev_flags;
drbd_md_write(device, buffer);
if (rs)
drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
md->al_stripes, md->al_stripe_size_4k * 4);
}
if (size > prev.last_agreed_sect)
rv = prev.last_agreed_sect ? DS_GREW : DS_GREW_FROM_ZERO;
if (size < prev.last_agreed_sect)
rv = DS_SHRUNK;
if (0) {
err_out:
/* restore previous offset and sizes */
md->la_size_sect = prev.last_agreed_sect;
md->md_offset = prev.md_offset;
md->al_offset = prev.al_offset;
md->bm_offset = prev.bm_offset;
md->md_size_sect = prev.md_size_sect;
md->al_stripes = prev.al_stripes;
md->al_stripe_size_4k = prev.al_stripe_size_4k;
md->al_size_4k = (u64)prev.al_stripes * prev.al_stripe_size_4k;
}
lc_unlock(device->act_log);
wake_up(&device->al_wait);
drbd_md_put_buffer(device);
drbd_resume_io(device);
return rv;
}
sector_t
drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
sector_t u_size, int assume_peer_has_space)
{
sector_t p_size = device->p_size; /* partner's disk size. */
sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
sector_t m_size; /* my size */
sector_t size = 0;
m_size = drbd_get_max_capacity(bdev);
if (device->state.conn < C_CONNECTED && assume_peer_has_space) {
drbd_warn(device, "Resize while not connected was forced by the user!\n");
p_size = m_size;
}
if (p_size && m_size) {
size = min_t(sector_t, p_size, m_size);
} else {
if (la_size_sect) {
size = la_size_sect;
if (m_size && m_size < size)
size = m_size;
if (p_size && p_size < size)
size = p_size;
} else {
if (m_size)
size = m_size;
if (p_size)
size = p_size;
}
}
if (size == 0)
drbd_err(device, "Both nodes diskless!\n");
if (u_size) {
if (u_size > size)
drbd_err(device, "Requested disk size is too big (%lu > %lu)\n",
(unsigned long)u_size>>1, (unsigned long)size>>1);
else
size = u_size;
}
return size;
}
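/*
 * Illustration: connected with a 100 GiB local disk and an 80 GiB peer
 * disk, size becomes min(p_size, m_size) = 80 GiB.  Disconnected (and
 * not forced), the last agreed size is reused, clamped to whatever local
 * or peer size is still known.  A user-requested u_size can only shrink
 * the result, never grow it beyond what was computed above.
 */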
/*
* drbd_check_al_size() - Ensures that the AL is of the right size
* @device: DRBD device.
*
* Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
* failed, and 0 on success. You should call drbd_md_sync() after you called
* this function.
*/
static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
{
struct lru_cache *n, *t;
struct lc_element *e;
unsigned int in_use;
int i;
if (device->act_log &&
device->act_log->nr_elements == dc->al_extents)
return 0;
in_use = 0;
t = device->act_log;
n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
dc->al_extents, sizeof(struct lc_element), 0);
if (n == NULL) {
drbd_err(device, "Cannot allocate act_log lru!\n");
return -ENOMEM;
}
spin_lock_irq(&device->al_lock);
if (t) {
for (i = 0; i < t->nr_elements; i++) {
e = lc_element_by_index(t, i);
if (e->refcnt)
drbd_err(device, "refcnt(%d)==%d\n",
e->lc_number, e->refcnt);
in_use += e->refcnt;
}
}
if (!in_use)
device->act_log = n;
spin_unlock_irq(&device->al_lock);
if (in_use) {
drbd_err(device, "Activity log still in use!\n");
lc_destroy(n);
return -EBUSY;
} else {
lc_destroy(t);
}
drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elements */
return 0;
}
static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity)
{
q->limits.discard_granularity = granularity;
}
static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
{
/* when we introduced REQ_WRITE_SAME support, we also bumped
* our maximum supported batch bio size used for discards. */
if (connection->agreed_features & DRBD_FF_WSAME)
return DRBD_MAX_BBIO_SECTORS;
/* before, with DRBD <= 8.4.6, we only allowed up to one AL_EXTENT_SIZE. */
return AL_EXTENT_SIZE >> 9;
}
static void decide_on_discard_support(struct drbd_device *device,
struct drbd_backing_dev *bdev)
{
struct drbd_connection *connection =
first_peer_device(device)->connection;
struct request_queue *q = device->rq_queue;
unsigned int max_discard_sectors;
if (bdev && !bdev_max_discard_sectors(bdev->backing_bdev))
goto not_supported;
if (connection->cstate >= C_CONNECTED &&
!(connection->agreed_features & DRBD_FF_TRIM)) {
drbd_info(connection,
"peer DRBD too old, does not support TRIM: disabling discards\n");
goto not_supported;
}
/*
* We don't care for the granularity, really.
*
* Stacking limits below should fix it for the local device. Whether or
* not it is a suitable granularity on the remote device is not our
* problem, really. If you care, you need to use devices with similar
* topology on all peers.
*/
blk_queue_discard_granularity(q, 512);
max_discard_sectors = drbd_max_discard_sectors(connection);
blk_queue_max_discard_sectors(q, max_discard_sectors);
blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
return;
not_supported:
blk_queue_discard_granularity(q, 0);
blk_queue_max_discard_sectors(q, 0);
}
static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
{
/* Fixup max_write_zeroes_sectors after blk_stack_limits():
* if we can handle "zeroes" efficiently on the protocol,
* we want to do that, even if our backend does not announce
* max_write_zeroes_sectors itself. */
struct drbd_connection *connection = first_peer_device(device)->connection;
/* If the peer announces WZEROES support, use it. Otherwise, rather
* send explicit zeroes than rely on some discard-zeroes-data magic. */
if (connection->agreed_features & DRBD_FF_WZEROES)
q->limits.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
else
q->limits.max_write_zeroes_sectors = 0;
}
static void fixup_discard_support(struct drbd_device *device, struct request_queue *q)
{
unsigned int max_discard = device->rq_queue->limits.max_discard_sectors;
unsigned int discard_granularity =
device->rq_queue->limits.discard_granularity >> SECTOR_SHIFT;
if (discard_granularity > max_discard) {
blk_queue_discard_granularity(q, 0);
blk_queue_max_discard_sectors(q, 0);
}
}
static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
unsigned int max_bio_size, struct o_qlim *o)
{
struct request_queue * const q = device->rq_queue;
unsigned int max_hw_sectors = max_bio_size >> 9;
unsigned int max_segments = 0;
struct request_queue *b = NULL;
struct disk_conf *dc;
if (bdev) {
b = bdev->backing_bdev->bd_disk->queue;
max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
rcu_read_lock();
dc = rcu_dereference(device->ldev->disk_conf);
max_segments = dc->max_bio_bvecs;
rcu_read_unlock();
blk_set_stacking_limits(&q->limits);
}
blk_queue_max_hw_sectors(q, max_hw_sectors);
/* This is the workaround for "bio would need to, but cannot, be split" */
blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
blk_queue_segment_boundary(q, PAGE_SIZE-1);
decide_on_discard_support(device, bdev);
if (b) {
blk_stack_limits(&q->limits, &b->limits, 0);
disk_update_readahead(device->vdisk);
}
fixup_write_zeroes(device, q);
fixup_discard_support(device, q);
}
void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev, struct o_qlim *o)
{
unsigned int now, new, local, peer;
now = queue_max_hw_sectors(device->rq_queue) << 9;
local = device->local_max_bio_size; /* Eventually last known value, from volatile memory */
peer = device->peer_max_bio_size; /* Eventually last known value, from meta data */
if (bdev) {
local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9;
device->local_max_bio_size = local;
}
local = min(local, DRBD_MAX_BIO_SIZE);
/* We may ignore peer limits if the peer is modern enough.
Because, from 8.3.8 onwards, the peer can use multiple
BIOs for a single peer_request */
if (device->state.conn >= C_WF_REPORT_PARAMS) {
if (first_peer_device(device)->connection->agreed_pro_version < 94)
peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
else if (first_peer_device(device)->connection->agreed_pro_version == 94)
peer = DRBD_MAX_SIZE_H80_PACKET;
else if (first_peer_device(device)->connection->agreed_pro_version < 100)
peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */
else
peer = DRBD_MAX_BIO_SIZE;
/* We may later detach and re-attach on a disconnected Primary.
* Avoid this setting to jump back in that case.
* We want to store what we know the peer DRBD can handle,
* not what the peer IO backend can handle. */
if (peer > device->peer_max_bio_size)
device->peer_max_bio_size = peer;
}
new = min(local, peer);
if (device->state.role == R_PRIMARY && new < now)
drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
if (new != now)
drbd_info(device, "max BIO size = %u\n", new);
drbd_setup_queue_param(device, bdev, new, o);
}
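/*
 * Example: with a local backing queue limited to 128 KiB and a peer
 * speaking protocol >= 100, both local and peer allow at least 128 KiB,
 * so the effective max BIO size becomes 128 KiB; against a protocol-94
 * peer it would instead be capped at DRBD_MAX_SIZE_H80_PACKET (32 KiB).
 */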
/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_connection *connection)
{
drbd_thread_start(&connection->worker);
drbd_flush_workqueue(&connection->sender_work);
}
/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_connection *connection)
{
bool stop_threads;
spin_lock_irq(&connection->resource->req_lock);
stop_threads = conn_all_vols_unconf(connection) &&
connection->cstate == C_STANDALONE;
spin_unlock_irq(&connection->resource->req_lock);
if (stop_threads) {
/* ack_receiver thread and ack_sender workqueue are implicitly
* stopped by receiver in conn_disconnect() */
drbd_thread_stop(&connection->receiver);
drbd_thread_stop(&connection->worker);
}
}
/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_device *device)
{
int s = 0;
if (!lc_try_lock(device->act_log)) {
drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n");
return;
}
drbd_al_shrink(device);
spin_lock_irq(&device->resource->req_lock);
if (device->state.conn < C_CONNECTED)
s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
spin_unlock_irq(&device->resource->req_lock);
lc_unlock(device->act_log);
if (s)
drbd_info(device, "Suspended AL updates\n");
}
static bool should_set_defaults(struct genl_info *info)
{
struct drbd_genlmsghdr *dh = genl_info_userhdr(info);
return 0 != (dh->flags & DRBD_GENL_F_SET_DEFAULTS);
}
static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
{
/* This is limited by 16 bit "slot" numbers,
* and by available on-disk context storage.
*
* Also (u16)~0 is special (denotes a "free" extent).
*
* One transaction occupies one 4kB on-disk block,
* we have n such blocks in the on disk ring buffer,
* the "current" transaction may fail (n-1),
 * and there are 919 context slot numbers per transaction.
*
* 72 transaction blocks amounts to more than 2**16 context slots,
* so cap there first.
*/
const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
const unsigned int sufficient_on_disk =
(max_al_nr + AL_CONTEXT_PER_TRANSACTION -1)
/AL_CONTEXT_PER_TRANSACTION;
unsigned int al_size_4k = bdev->md.al_size_4k;
if (al_size_4k > sufficient_on_disk)
return max_al_nr;
return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
}
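/*
 * Worked example (illustrative numbers): with AL_CONTEXT_PER_TRANSACTION
 * == 919, 72 transaction blocks already cover 72 * 919 = 66168 > 2**16
 * slots, hence the cap above.  For an AL of al_size_4k == 8 (one 32 KiB
 * stripe) the result is (8 - 1) * 919 = 6433 usable extents.
 */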
static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
{
return a->disk_barrier != b->disk_barrier ||
a->disk_flushes != b->disk_flushes ||
a->disk_drain != b->disk_drain;
}
static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *disk_conf,
struct drbd_backing_dev *nbc)
{
struct block_device *bdev = nbc->backing_bdev;
if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
if (disk_conf->al_extents > drbd_al_extents_max(nbc))
disk_conf->al_extents = drbd_al_extents_max(nbc);
if (!bdev_max_discard_sectors(bdev)) {
if (disk_conf->rs_discard_granularity) {
disk_conf->rs_discard_granularity = 0; /* disable feature */
drbd_info(device, "rs_discard_granularity feature disabled\n");
}
}
if (disk_conf->rs_discard_granularity) {
int orig_value = disk_conf->rs_discard_granularity;
sector_t discard_size = bdev_max_discard_sectors(bdev) << 9;
unsigned int discard_granularity = bdev_discard_granularity(bdev);
int remainder;
if (discard_granularity > disk_conf->rs_discard_granularity)
disk_conf->rs_discard_granularity = discard_granularity;
remainder = disk_conf->rs_discard_granularity %
discard_granularity;
disk_conf->rs_discard_granularity += remainder;
if (disk_conf->rs_discard_granularity > discard_size)
disk_conf->rs_discard_granularity = discard_size;
if (disk_conf->rs_discard_granularity != orig_value)
drbd_info(device, "rs_discard_granularity changed to %d\n",
disk_conf->rs_discard_granularity);
}
}
static int disk_opts_check_al_size(struct drbd_device *device, struct disk_conf *dc)
{
int err = -EBUSY;
if (device->act_log &&
device->act_log->nr_elements == dc->al_extents)
return 0;
drbd_suspend_io(device);
/* If IO completion is currently blocked, we would likely wait
* "forever" for the activity log to become unused. So we don't. */
if (atomic_read(&device->ap_bio_cnt))
goto out;
wait_event(device->al_wait, lc_try_lock(device->act_log));
drbd_al_shrink(device);
err = drbd_check_al_size(device, dc);
lc_unlock(device->act_log);
wake_up(&device->al_wait);
out:
drbd_resume_io(device);
return err;
}
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
enum drbd_ret_code retcode;
struct drbd_device *device;
struct disk_conf *new_disk_conf, *old_disk_conf;
struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
int err;
unsigned int fifo_size;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto finish;
device = adm_ctx.device;
mutex_lock(&adm_ctx.resource->adm_mutex);
/* we also need a disk
* to change the options on */
if (!get_ldev(device)) {
retcode = ERR_NO_DISK;
goto out;
}
new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
if (!new_disk_conf) {
retcode = ERR_NOMEM;
goto fail;
}
mutex_lock(&device->resource->conf_update);
old_disk_conf = device->ldev->disk_conf;
*new_disk_conf = *old_disk_conf;
if (should_set_defaults(info))
set_disk_conf_defaults(new_disk_conf);
err = disk_conf_from_attrs_for_change(new_disk_conf, info);
if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto fail_unlock;
}
if (!expect(device, new_disk_conf->resync_rate >= 1))
new_disk_conf->resync_rate = 1;
sanitize_disk_conf(device, new_disk_conf, device->ldev);
if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
if (fifo_size != device->rs_plan_s->size) {
new_plan = fifo_alloc(fifo_size);
if (!new_plan) {
drbd_err(device, "kmalloc of fifo_buffer failed");
retcode = ERR_NOMEM;
goto fail_unlock;
}
}
err = disk_opts_check_al_size(device, new_disk_conf);
if (err) {
/* Could be just "busy". Ignore?
* Introduce dedicated error code? */
drbd_msg_put_info(adm_ctx.reply_skb,
"Try again without changing current al-extents setting");
retcode = ERR_NOMEM;
goto fail_unlock;
}
lock_all_resources();
retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
if (retcode == NO_ERROR) {
rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
drbd_resync_after_changed(device);
}
unlock_all_resources();
if (retcode != NO_ERROR)
goto fail_unlock;
if (new_plan) {
old_plan = device->rs_plan_s;
rcu_assign_pointer(device->rs_plan_s, new_plan);
}
mutex_unlock(&device->resource->conf_update);
if (new_disk_conf->al_updates)
device->ldev->md.flags &= ~MDF_AL_DISABLED;
else
device->ldev->md.flags |= MDF_AL_DISABLED;
if (new_disk_conf->md_flushes)
clear_bit(MD_NO_FUA, &device->flags);
else
set_bit(MD_NO_FUA, &device->flags);
if (write_ordering_changed(old_disk_conf, new_disk_conf))
drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);
if (old_disk_conf->discard_zeroes_if_aligned !=
new_disk_conf->discard_zeroes_if_aligned)
drbd_reconsider_queue_parameters(device, device->ldev, NULL);
drbd_md_sync(device);
if (device->state.conn >= C_CONNECTED) {
struct drbd_peer_device *peer_device;
for_each_peer_device(peer_device, device)
drbd_send_sync_param(peer_device);
}
kvfree_rcu_mightsleep(old_disk_conf);
kfree(old_plan);
mod_timer(&device->request_timer, jiffies + HZ);
goto success;
fail_unlock:
mutex_unlock(&device->resource->conf_update);
fail:
kfree(new_disk_conf);
kfree(new_plan);
success:
put_ldev(device);
out:
mutex_unlock(&adm_ctx.resource->adm_mutex);
finish:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
static struct block_device *open_backing_dev(struct drbd_device *device,
const char *bdev_path, void *claim_ptr, bool do_bd_link)
{
struct block_device *bdev;
int err = 0;
bdev = blkdev_get_by_path(bdev_path, BLK_OPEN_READ | BLK_OPEN_WRITE,
claim_ptr, NULL);
if (IS_ERR(bdev)) {
drbd_err(device, "open(\"%s\") failed with %ld\n",
bdev_path, PTR_ERR(bdev));
return bdev;
}
if (!do_bd_link)
return bdev;
err = bd_link_disk_holder(bdev, device->vdisk);
if (err) {
blkdev_put(bdev, claim_ptr);
drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n",
bdev_path, err);
bdev = ERR_PTR(err);
}
return bdev;
}
static int open_backing_devices(struct drbd_device *device,
struct disk_conf *new_disk_conf,
struct drbd_backing_dev *nbc)
{
struct block_device *bdev;
bdev = open_backing_dev(device, new_disk_conf->backing_dev, device, true);
if (IS_ERR(bdev))
return ERR_OPEN_DISK;
nbc->backing_bdev = bdev;
/*
* meta_dev_idx >= 0: external fixed size, possibly multiple
* drbd sharing one meta device. TODO in that case, paranoia
* check that [md_bdev, meta_dev_idx] is not yet used by some
* other drbd minor! (if you use drbd.conf + drbdadm, that
* should check it for you already; but if you don't, or
* someone fooled it, we need to double check here)
*/
bdev = open_backing_dev(device, new_disk_conf->meta_dev,
/* claim ptr: device, if claimed exclusively; shared drbd_m_holder,
* if potentially shared with other drbd minors */
(new_disk_conf->meta_dev_idx < 0) ? (void*)device : (void*)drbd_m_holder,
/* avoid double bd_claim_by_disk() for the same (source,target) tuple,
* as would happen with internal metadata. */
(new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_FLEX_INT &&
new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_INTERNAL));
if (IS_ERR(bdev))
return ERR_OPEN_MD_DISK;
nbc->md_bdev = bdev;
return NO_ERROR;
}
static void close_backing_dev(struct drbd_device *device, struct block_device *bdev,
void *claim_ptr, bool do_bd_unlink)
{
if (!bdev)
return;
if (do_bd_unlink)
bd_unlink_disk_holder(bdev, device->vdisk);
blkdev_put(bdev, claim_ptr);
}
void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev)
{
if (ldev == NULL)
return;
close_backing_dev(device, ldev->md_bdev,
ldev->md.meta_dev_idx < 0 ?
(void *)device : (void *)drbd_m_holder,
ldev->md_bdev != ldev->backing_bdev);
close_backing_dev(device, ldev->backing_bdev, device, true);
kfree(ldev->disk_conf);
kfree(ldev);
}
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
struct drbd_device *device;
struct drbd_peer_device *peer_device;
struct drbd_connection *connection;
int err;
enum drbd_ret_code retcode;
enum determine_dev_size dd;
sector_t max_possible_sectors;
sector_t min_md_device_sectors;
struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
struct disk_conf *new_disk_conf = NULL;
struct lru_cache *resync_lru = NULL;
struct fifo_buffer *new_plan = NULL;
union drbd_state ns, os;
enum drbd_state_rv rv;
struct net_conf *nc;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto finish;
device = adm_ctx.device;
mutex_lock(&adm_ctx.resource->adm_mutex);
peer_device = first_peer_device(device);
connection = peer_device->connection;
conn_reconfig_start(connection);
/* if you want to reconfigure, please tear down first */
if (device->state.disk > D_DISKLESS) {
retcode = ERR_DISK_CONFIGURED;
goto fail;
}
/* It may just now have detached because of IO error. Make sure
* drbd_ldev_destroy is done already, we may end up here very fast,
* e.g. if someone calls attach from the on-io-error handler,
* to realize a "hot spare" feature (not that I'd recommend that) */
wait_event(device->misc_wait, !test_bit(GOING_DISKLESS, &device->flags));
/* make sure there is no leftover from previous force-detach attempts */
clear_bit(FORCE_DETACH, &device->flags);
clear_bit(WAS_IO_ERROR, &device->flags);
clear_bit(WAS_READ_ERROR, &device->flags);
/* and no leftover from previously aborted resync or verify, either */
device->rs_total = 0;
device->rs_failed = 0;
atomic_set(&device->rs_pending_cnt, 0);
/* allocation not in the IO path, drbdsetup context */
nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
if (!nbc) {
retcode = ERR_NOMEM;
goto fail;
}
spin_lock_init(&nbc->md.uuid_lock);
new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
if (!new_disk_conf) {
retcode = ERR_NOMEM;
goto fail;
}
nbc->disk_conf = new_disk_conf;
set_disk_conf_defaults(new_disk_conf);
err = disk_conf_from_attrs(new_disk_conf, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto fail;
}
if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
if (!new_plan) {
retcode = ERR_NOMEM;
goto fail;
}
if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
retcode = ERR_MD_IDX_INVALID;
goto fail;
}
rcu_read_lock();
nc = rcu_dereference(connection->net_conf);
if (nc) {
if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
rcu_read_unlock();
retcode = ERR_STONITH_AND_PROT_A;
goto fail;
}
}
rcu_read_unlock();
retcode = open_backing_devices(device, new_disk_conf, nbc);
if (retcode != NO_ERROR)
goto fail;
if ((nbc->backing_bdev == nbc->md_bdev) !=
(new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
retcode = ERR_MD_IDX_INVALID;
goto fail;
}
resync_lru = lc_create("resync", drbd_bm_ext_cache,
1, 61, sizeof(struct bm_extent),
offsetof(struct bm_extent, lce));
if (!resync_lru) {
retcode = ERR_NOMEM;
goto fail;
}
/* Read our meta data super block early.
* This also sets other on-disk offsets. */
retcode = drbd_md_read(device, nbc);
if (retcode != NO_ERROR)
goto fail;
sanitize_disk_conf(device, new_disk_conf, nbc);
if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
(unsigned long long) drbd_get_max_capacity(nbc),
(unsigned long long) new_disk_conf->disk_size);
retcode = ERR_DISK_TOO_SMALL;
goto fail;
}
if (new_disk_conf->meta_dev_idx < 0) {
max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
/* at least one MB, otherwise it does not make sense */
min_md_device_sectors = (2<<10);
} else {
max_possible_sectors = DRBD_MAX_SECTORS;
min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
}
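/* With an indexed external meta-data device each index slot occupies 128 MB
 * (MD_128MB_SECT), so using slot meta_dev_idx requires room for
 * meta_dev_idx + 1 slots; internal/flexible meta data only needs the 1 MB
 * minimum computed above. */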
if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
retcode = ERR_MD_DISK_TOO_SMALL;
drbd_warn(device, "refusing attach: md-device too small, "
"at least %llu sectors needed for this meta-disk type\n",
(unsigned long long) min_md_device_sectors);
goto fail;
}
/* Make sure the new disk is big enough
* (we may currently be R_PRIMARY with no local disk...) */
if (drbd_get_max_capacity(nbc) < get_capacity(device->vdisk)) {
retcode = ERR_DISK_TOO_SMALL;
goto fail;
}
nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
if (nbc->known_size > max_possible_sectors) {
drbd_warn(device, "==> truncating very big lower level device "
"to currently maximum possible %llu sectors <==\n",
(unsigned long long) max_possible_sectors);
if (new_disk_conf->meta_dev_idx >= 0)
drbd_warn(device, "==>> using internal or flexible "
"meta data may help <<==\n");
}
drbd_suspend_io(device);
/* also wait for the last barrier ack. */
/* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
* We need a way to either ignore barrier acks for barriers sent before a device
* was attached, or a way to wait for all pending barrier acks to come in.
* As barriers are counted per resource,
* we'd need to suspend io on all devices of a resource.
*/
wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device));
/* and for any other previously queued work */
drbd_flush_workqueue(&connection->sender_work);
rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
retcode = (enum drbd_ret_code)rv;
drbd_resume_io(device);
if (rv < SS_SUCCESS)
goto fail;
if (!get_ldev_if_state(device, D_ATTACHING))
goto force_diskless;
if (!device->bitmap) {
if (drbd_bm_init(device)) {
retcode = ERR_NOMEM;
goto force_diskless_dec;
}
}
if (device->state.pdsk != D_UP_TO_DATE && device->ed_uuid &&
(device->state.role == R_PRIMARY || device->state.peer == R_PRIMARY) &&
(device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
(unsigned long long)device->ed_uuid);
retcode = ERR_DATA_NOT_CURRENT;
goto force_diskless_dec;
}
/* Since we are diskless, fix the activity log first... */
if (drbd_check_al_size(device, new_disk_conf)) {
retcode = ERR_NOMEM;
goto force_diskless_dec;
}
/* Prevent shrinking of consistent devices ! */
{
unsigned long long nsz = drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0);
unsigned long long eff = nbc->md.la_size_sect;
if (drbd_md_test_flag(nbc, MDF_CONSISTENT) && nsz < eff) {
if (nsz == nbc->disk_conf->disk_size) {
drbd_warn(device, "truncating a consistent device during attach (%llu < %llu)\n", nsz, eff);
} else {
drbd_warn(device, "refusing to truncate a consistent device (%llu < %llu)\n", nsz, eff);
drbd_msg_sprintf_info(adm_ctx.reply_skb,
"To-be-attached device has last effective > current size, and is consistent\n"
"(%llu > %llu sectors). Refusing to attach.", eff, nsz);
retcode = ERR_IMPLICIT_SHRINK;
goto force_diskless_dec;
}
}
}
lock_all_resources();
retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
if (retcode != NO_ERROR) {
unlock_all_resources();
goto force_diskless_dec;
}
/* Reset the "barriers don't work" bits here, then force meta data to
* be written, to ensure we determine if barriers are supported. */
if (new_disk_conf->md_flushes)
clear_bit(MD_NO_FUA, &device->flags);
else
set_bit(MD_NO_FUA, &device->flags);
/* Point of no return reached.
* Devices and memory are no longer released by error cleanup below.
 * Now the device takes over responsibility, and the state engine should
* clean it up somewhere. */
D_ASSERT(device, device->ldev == NULL);
device->ldev = nbc;
device->resync = resync_lru;
device->rs_plan_s = new_plan;
nbc = NULL;
resync_lru = NULL;
new_disk_conf = NULL;
new_plan = NULL;
drbd_resync_after_changed(device);
drbd_bump_write_ordering(device->resource, device->ldev, WO_BDEV_FLUSH);
unlock_all_resources();
if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
set_bit(CRASHED_PRIMARY, &device->flags);
else
clear_bit(CRASHED_PRIMARY, &device->flags);
if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
!(device->state.role == R_PRIMARY && device->resource->susp_nod))
set_bit(CRASHED_PRIMARY, &device->flags);
device->send_cnt = 0;
device->recv_cnt = 0;
device->read_cnt = 0;
device->writ_cnt = 0;
drbd_reconsider_queue_parameters(device, device->ldev, NULL);
/* If I am currently not R_PRIMARY,
* but meta data primary indicator is set,
* I just now recover from a hard crash,
* and have been R_PRIMARY before that crash.
*
* Now, if I had no connection before that crash
* (have been degraded R_PRIMARY), chances are that
* I won't find my peer now either.
*
* In that case, and _only_ in that case,
* we use the degr-wfc-timeout instead of the default,
* so we can automatically recover from a crash of a
* degraded but active "cluster" after a certain timeout.
*/
clear_bit(USE_DEGR_WFC_T, &device->flags);
if (device->state.role != R_PRIMARY &&
drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
!drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND))
set_bit(USE_DEGR_WFC_T, &device->flags);
dd = drbd_determine_dev_size(device, 0, NULL);
if (dd <= DS_ERROR) {
retcode = ERR_NOMEM_BITMAP;
goto force_diskless_dec;
} else if (dd == DS_GREW)
set_bit(RESYNC_AFTER_NEG, &device->flags);
if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ||
(test_bit(CRASHED_PRIMARY, &device->flags) &&
drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) {
drbd_info(device, "Assuming that all blocks are out of sync "
"(aka FullSync)\n");
if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
"set_n_write from attaching", BM_LOCKED_MASK,
NULL)) {
retcode = ERR_IO_MD_DISK;
goto force_diskless_dec;
}
} else {
if (drbd_bitmap_io(device, &drbd_bm_read,
"read from attaching", BM_LOCKED_MASK,
NULL)) {
retcode = ERR_IO_MD_DISK;
goto force_diskless_dec;
}
}
if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
drbd_suspend_al(device); /* IO is still suspended here... */
spin_lock_irq(&device->resource->req_lock);
os = drbd_read_state(device);
ns = os;
/* If MDF_CONSISTENT is not set, go into D_INCONSISTENT disk state;
   otherwise investigate MDF_WAS_UP_TO_DATE:
   if MDF_WAS_UP_TO_DATE is not set, go into D_OUTDATED disk state,
   otherwise into D_CONSISTENT state.
*/
if (drbd_md_test_flag(device->ldev, MDF_CONSISTENT)) {
if (drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE))
ns.disk = D_CONSISTENT;
else
ns.disk = D_OUTDATED;
} else {
ns.disk = D_INCONSISTENT;
}
if (drbd_md_test_flag(device->ldev, MDF_PEER_OUT_DATED))
ns.pdsk = D_OUTDATED;
rcu_read_lock();
if (ns.disk == D_CONSISTENT &&
(ns.pdsk == D_OUTDATED || rcu_dereference(device->ldev->disk_conf)->fencing == FP_DONT_CARE))
ns.disk = D_UP_TO_DATE;
/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
this point, because drbd_request_state() modifies these
flags. */
if (rcu_dereference(device->ldev->disk_conf)->al_updates)
device->ldev->md.flags &= ~MDF_AL_DISABLED;
else
device->ldev->md.flags |= MDF_AL_DISABLED;
rcu_read_unlock();
/* In case we are C_CONNECTED postpone any decision on the new disk
state after the negotiation phase. */
if (device->state.conn == C_CONNECTED) {
device->new_state_tmp.i = ns.i;
ns.i = os.i;
ns.disk = D_NEGOTIATING;
/* We expect to receive up-to-date UUIDs soon.
To avoid a race in receive_state, free p_uuid while
holding req_lock. I.e. atomic with the state change */
kfree(device->p_uuid);
device->p_uuid = NULL;
}
rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
spin_unlock_irq(&device->resource->req_lock);
if (rv < SS_SUCCESS)
goto force_diskless_dec;
mod_timer(&device->request_timer, jiffies + HZ);
if (device->state.role == R_PRIMARY)
device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
else
device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
drbd_md_mark_dirty(device);
drbd_md_sync(device);
kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
put_ldev(device);
conn_reconfig_done(connection);
mutex_unlock(&adm_ctx.resource->adm_mutex);
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
force_diskless_dec:
put_ldev(device);
force_diskless:
drbd_force_state(device, NS(disk, D_DISKLESS));
drbd_md_sync(device);
fail:
conn_reconfig_done(connection);
if (nbc) {
close_backing_dev(device, nbc->md_bdev,
nbc->disk_conf->meta_dev_idx < 0 ?
(void *)device : (void *)drbd_m_holder,
nbc->md_bdev != nbc->backing_bdev);
close_backing_dev(device, nbc->backing_bdev, device, true);
kfree(nbc);
}
kfree(new_disk_conf);
lc_destroy(resync_lru);
kfree(new_plan);
mutex_unlock(&adm_ctx.resource->adm_mutex);
finish:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
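/*
 * For reference, and assuming the standard drbdsetup user-space tool (not
 * part of this file): an attach is typically triggered by something like
 *
 *   drbdsetup attach <minor> <lower-device> <meta-device> <meta-index>
 *
 * which arrives here as an attach netlink request carrying the disk_conf
 * attributes parsed by disk_conf_from_attrs() above.
 */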
static int adm_detach(struct drbd_device *device, int force)
{
if (force) {
set_bit(FORCE_DETACH, &device->flags);
drbd_force_state(device, NS(disk, D_FAILED));
return SS_SUCCESS;
}
return drbd_request_detach_interruptible(device);
}
/* Detaching the disk is a process in multiple stages. First we need to lock
* out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
* Then we transition to D_DISKLESS, and wait for put_ldev() to return all
* internal references as well.
 * Only then are we finally detached. */
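/*
 * Typical user-space trigger, again assuming the standard drbdsetup tool:
 *   drbdsetup detach <minor> [--force]
 * The optional force flag is what ends up in parms.force_detach below.
 */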
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
enum drbd_ret_code retcode;
struct detach_parms parms = { };
int err;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
err = detach_parms_from_attrs(&parms, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto out;
}
}
mutex_lock(&adm_ctx.resource->adm_mutex);
retcode = adm_detach(adm_ctx.device, parms.force_detach);
mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
static bool conn_resync_running(struct drbd_connection *connection)
{
struct drbd_peer_device *peer_device;
bool rv = false;
int vnr;
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
if (device->state.conn == C_SYNC_SOURCE ||
device->state.conn == C_SYNC_TARGET ||
device->state.conn == C_PAUSED_SYNC_S ||
device->state.conn == C_PAUSED_SYNC_T) {
rv = true;
break;
}
}
rcu_read_unlock();
return rv;
}
static bool conn_ov_running(struct drbd_connection *connection)
{
struct drbd_peer_device *peer_device;
bool rv = false;
int vnr;
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
if (device->state.conn == C_VERIFY_S ||
device->state.conn == C_VERIFY_T) {
rv = true;
break;
}
}
rcu_read_unlock();
return rv;
}
static enum drbd_ret_code
_check_net_options(struct drbd_connection *connection, struct net_conf *old_net_conf, struct net_conf *new_net_conf)
{
struct drbd_peer_device *peer_device;
int i;
if (old_net_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
if (new_net_conf->wire_protocol != old_net_conf->wire_protocol)
return ERR_NEED_APV_100;
if (new_net_conf->two_primaries != old_net_conf->two_primaries)
return ERR_NEED_APV_100;
if (strcmp(new_net_conf->integrity_alg, old_net_conf->integrity_alg))
return ERR_NEED_APV_100;
}
if (!new_net_conf->two_primaries &&
conn_highest_role(connection) == R_PRIMARY &&
conn_highest_peer(connection) == R_PRIMARY)
return ERR_NEED_ALLOW_TWO_PRI;
if (new_net_conf->two_primaries &&
(new_net_conf->wire_protocol != DRBD_PROT_C))
return ERR_NOT_PROTO_C;
idr_for_each_entry(&connection->peer_devices, peer_device, i) {
struct drbd_device *device = peer_device->device;
if (get_ldev(device)) {
enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
put_ldev(device);
if (new_net_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
return ERR_STONITH_AND_PROT_A;
}
if (device->state.role == R_PRIMARY && new_net_conf->discard_my_data)
return ERR_DISCARD_IMPOSSIBLE;
}
if (new_net_conf->on_congestion != OC_BLOCK && new_net_conf->wire_protocol != DRBD_PROT_A)
return ERR_CONG_NOT_PROTO_A;
return NO_ERROR;
}
static enum drbd_ret_code
check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf)
{
enum drbd_ret_code rv;
struct drbd_peer_device *peer_device;
int i;
rcu_read_lock();
rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf);
rcu_read_unlock();
/* connection->peer_devices protected by genl_lock() here */
idr_for_each_entry(&connection->peer_devices, peer_device, i) {
struct drbd_device *device = peer_device->device;
if (!device->bitmap) {
if (drbd_bm_init(device))
return ERR_NOMEM;
}
}
return rv;
}
struct crypto {
struct crypto_shash *verify_tfm;
struct crypto_shash *csums_tfm;
struct crypto_shash *cram_hmac_tfm;
struct crypto_shash *integrity_tfm;
};
static int
alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg)
{
if (!tfm_name[0])
return NO_ERROR;
*tfm = crypto_alloc_shash(tfm_name, 0, 0);
if (IS_ERR(*tfm)) {
*tfm = NULL;
return err_alg;
}
return NO_ERROR;
}
static enum drbd_ret_code
alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
{
char hmac_name[CRYPTO_MAX_ALG_NAME];
enum drbd_ret_code rv;
rv = alloc_shash(&crypto->csums_tfm, new_net_conf->csums_alg,
ERR_CSUMS_ALG);
if (rv != NO_ERROR)
return rv;
rv = alloc_shash(&crypto->verify_tfm, new_net_conf->verify_alg,
ERR_VERIFY_ALG);
if (rv != NO_ERROR)
return rv;
rv = alloc_shash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
ERR_INTEGRITY_ALG);
if (rv != NO_ERROR)
return rv;
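/* The CRAM-HMAC digest is requested from the crypto API through the
 * "hmac(<alg>)" template, e.g. a cram-hmac-alg of "sha1" is allocated
 * as "hmac(sha1)" below. */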
if (new_net_conf->cram_hmac_alg[0] != 0) {
snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
new_net_conf->cram_hmac_alg);
rv = alloc_shash(&crypto->cram_hmac_tfm, hmac_name,
ERR_AUTH_ALG);
}
return rv;
}
static void free_crypto(struct crypto *crypto)
{
crypto_free_shash(crypto->cram_hmac_tfm);
crypto_free_shash(crypto->integrity_tfm);
crypto_free_shash(crypto->csums_tfm);
crypto_free_shash(crypto->verify_tfm);
}
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
enum drbd_ret_code retcode;
struct drbd_connection *connection;
struct net_conf *old_net_conf, *new_net_conf = NULL;
int err;
int ovr; /* online verify running */
int rsr; /* re-sync running */
struct crypto crypto = { };
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto finish;
connection = adm_ctx.connection;
mutex_lock(&adm_ctx.resource->adm_mutex);
new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
if (!new_net_conf) {
retcode = ERR_NOMEM;
goto out;
}
conn_reconfig_start(connection);
mutex_lock(&connection->data.mutex);
mutex_lock(&connection->resource->conf_update);
old_net_conf = connection->net_conf;
if (!old_net_conf) {
drbd_msg_put_info(adm_ctx.reply_skb, "net conf missing, try connect");
retcode = ERR_INVALID_REQUEST;
goto fail;
}
*new_net_conf = *old_net_conf;
if (should_set_defaults(info))
set_net_conf_defaults(new_net_conf);
err = net_conf_from_attrs_for_change(new_net_conf, info);
if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto fail;
}
retcode = check_net_options(connection, new_net_conf);
if (retcode != NO_ERROR)
goto fail;
/* re-sync running */
rsr = conn_resync_running(connection);
if (rsr && strcmp(new_net_conf->csums_alg, old_net_conf->csums_alg)) {
retcode = ERR_CSUMS_RESYNC_RUNNING;
goto fail;
}
/* online verify running */
ovr = conn_ov_running(connection);
if (ovr && strcmp(new_net_conf->verify_alg, old_net_conf->verify_alg)) {
retcode = ERR_VERIFY_RUNNING;
goto fail;
}
retcode = alloc_crypto(&crypto, new_net_conf);
if (retcode != NO_ERROR)
goto fail;
rcu_assign_pointer(connection->net_conf, new_net_conf);
if (!rsr) {
crypto_free_shash(connection->csums_tfm);
connection->csums_tfm = crypto.csums_tfm;
crypto.csums_tfm = NULL;
}
if (!ovr) {
crypto_free_shash(connection->verify_tfm);
connection->verify_tfm = crypto.verify_tfm;
crypto.verify_tfm = NULL;
}
crypto_free_shash(connection->integrity_tfm);
connection->integrity_tfm = crypto.integrity_tfm;
if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
/* Do this without trying to take connection->data.mutex again. */
__drbd_send_protocol(connection, P_PROTOCOL_UPDATE);
crypto_free_shash(connection->cram_hmac_tfm);
connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
mutex_unlock(&connection->resource->conf_update);
mutex_unlock(&connection->data.mutex);
kvfree_rcu_mightsleep(old_net_conf);
if (connection->cstate >= C_WF_REPORT_PARAMS) {
struct drbd_peer_device *peer_device;
int vnr;
idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
drbd_send_sync_param(peer_device);
}
goto done;
fail:
mutex_unlock(&connection->resource->conf_update);
mutex_unlock(&connection->data.mutex);
free_crypto(&crypto);
kfree(new_net_conf);
done:
conn_reconfig_done(connection);
out:
mutex_unlock(&adm_ctx.resource->adm_mutex);
finish:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
static void connection_to_info(struct connection_info *info,
struct drbd_connection *connection)
{
info->conn_connection_state = connection->cstate;
info->conn_role = conn_highest_peer(connection);
}
static void peer_device_to_info(struct peer_device_info *info,
struct drbd_peer_device *peer_device)
{
struct drbd_device *device = peer_device->device;
info->peer_repl_state =
max_t(enum drbd_conns, C_WF_REPORT_PARAMS, device->state.conn);
info->peer_disk_state = device->state.pdsk;
info->peer_resync_susp_user = device->state.user_isp;
info->peer_resync_susp_peer = device->state.peer_isp;
info->peer_resync_susp_dependency = device->state.aftr_isp;
}
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
struct connection_info connection_info;
enum drbd_notification_type flags;
unsigned int peer_devices = 0;
struct drbd_config_context adm_ctx;
struct drbd_peer_device *peer_device;
struct net_conf *old_net_conf, *new_net_conf = NULL;
struct crypto crypto = { };
struct drbd_resource *resource;
struct drbd_connection *connection;
enum drbd_ret_code retcode;
enum drbd_state_rv rv;
int i;
int err;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
drbd_msg_put_info(adm_ctx.reply_skb, "connection endpoint(s) missing");
retcode = ERR_INVALID_REQUEST;
goto out;
}
/* No need for _rcu here. All reconfiguration is
* strictly serialized on genl_lock(). We are protected against
* concurrent reconfiguration/addition/deletion */
for_each_resource(resource, &drbd_resources) {
for_each_connection(connection, resource) {
if (nla_len(adm_ctx.my_addr) == connection->my_addr_len &&
!memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr,
connection->my_addr_len)) {
retcode = ERR_LOCAL_ADDR;
goto out;
}
if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len &&
!memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr,
connection->peer_addr_len)) {
retcode = ERR_PEER_ADDR;
goto out;
}
}
}
mutex_lock(&adm_ctx.resource->adm_mutex);
connection = first_connection(adm_ctx.resource);
conn_reconfig_start(connection);
if (connection->cstate > C_STANDALONE) {
retcode = ERR_NET_CONFIGURED;
goto fail;
}
/* allocation not in the IO path, drbdsetup / netlink process context */
new_net_conf = kzalloc(sizeof(*new_net_conf), GFP_KERNEL);
if (!new_net_conf) {
retcode = ERR_NOMEM;
goto fail;
}
set_net_conf_defaults(new_net_conf);
err = net_conf_from_attrs(new_net_conf, info);
if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto fail;
}
retcode = check_net_options(connection, new_net_conf);
if (retcode != NO_ERROR)
goto fail;
retcode = alloc_crypto(&crypto, new_net_conf);
if (retcode != NO_ERROR)
goto fail;
((char *)new_net_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
drbd_flush_workqueue(&connection->sender_work);
mutex_lock(&adm_ctx.resource->conf_update);
old_net_conf = connection->net_conf;
if (old_net_conf) {
retcode = ERR_NET_CONFIGURED;
mutex_unlock(&adm_ctx.resource->conf_update);
goto fail;
}
rcu_assign_pointer(connection->net_conf, new_net_conf);
conn_free_crypto(connection);
connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
connection->integrity_tfm = crypto.integrity_tfm;
connection->csums_tfm = crypto.csums_tfm;
connection->verify_tfm = crypto.verify_tfm;
connection->my_addr_len = nla_len(adm_ctx.my_addr);
memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len);
connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);
idr_for_each_entry(&connection->peer_devices, peer_device, i) {
peer_devices++;
}
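/* The create notification is sent as one multi-part group: first the
 * connection itself, then one message per peer device. Every message
 * except the last one carries NOTIFY_CONTINUES. */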
connection_to_info(&connection_info, connection);
flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
mutex_lock(¬ification_mutex);
notify_connection_state(NULL, 0, connection, &connection_info, NOTIFY_CREATE | flags);
idr_for_each_entry(&connection->peer_devices, peer_device, i) {
struct peer_device_info peer_device_info;
peer_device_to_info(&peer_device_info, peer_device);
flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
notify_peer_device_state(NULL, 0, peer_device, &peer_device_info, NOTIFY_CREATE | flags);
}
mutex_unlock(¬ification_mutex);
mutex_unlock(&adm_ctx.resource->conf_update);
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, i) {
struct drbd_device *device = peer_device->device;
device->send_cnt = 0;
device->recv_cnt = 0;
}
rcu_read_unlock();
rv = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
conn_reconfig_done(connection);
mutex_unlock(&adm_ctx.resource->adm_mutex);
drbd_adm_finish(&adm_ctx, info, rv);
return 0;
fail:
free_crypto(&crypto);
kfree(new_net_conf);
conn_reconfig_done(connection);
mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force)
{
enum drbd_conns cstate;
enum drbd_state_rv rv;
repeat:
rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
force ? CS_HARD : 0);
switch (rv) {
case SS_NOTHING_TO_DO:
break;
case SS_ALREADY_STANDALONE:
return SS_SUCCESS;
case SS_PRIMARY_NOP:
/* Our state checking code wants to see the peer outdated. */
rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE);
break;
case SS_CW_FAILED_BY_PEER:
spin_lock_irq(&connection->resource->req_lock);
cstate = connection->cstate;
spin_unlock_irq(&connection->resource->req_lock);
if (cstate <= C_WF_CONNECTION)
goto repeat;
/* The peer probably wants to see us outdated. */
rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING,
disk, D_OUTDATED), 0);
if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
CS_HARD);
}
break;
default:;
/* no special handling necessary */
}
if (rv >= SS_SUCCESS) {
enum drbd_state_rv rv2;
/* No one else can reconfigure the network while I am here.
* The state handling only uses drbd_thread_stop_nowait(),
 * but we really want to wait here until the receiver thread has stopped.
*/
drbd_thread_stop(&connection->receiver);
/* Race breaker. This additional state change request may be
* necessary, if this was a forced disconnect during a receiver
* restart. We may have "killed" the receiver thread just
* after drbd_receiver() returned. Typically, we should be
* C_STANDALONE already, now, and this becomes a no-op.
*/
rv2 = conn_request_state(connection, NS(conn, C_STANDALONE),
CS_VERBOSE | CS_HARD);
if (rv2 < SS_SUCCESS)
drbd_err(connection,
"unexpected rv2=%d in conn_try_disconnect()\n",
rv2);
/* Unlike in DRBD 9, the state engine has generated
* NOTIFY_DESTROY events before clearing connection->net_conf. */
}
return rv;
}
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
struct disconnect_parms parms;
struct drbd_connection *connection;
enum drbd_state_rv rv;
enum drbd_ret_code retcode;
int err;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto fail;
connection = adm_ctx.connection;
memset(&parms, 0, sizeof(parms));
if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
err = disconnect_parms_from_attrs(&parms, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto fail;
}
}
mutex_lock(&adm_ctx.resource->adm_mutex);
rv = conn_try_disconnect(connection, parms.force_disconnect);
mutex_unlock(&adm_ctx.resource->adm_mutex);
if (rv < SS_SUCCESS) {
drbd_adm_finish(&adm_ctx, info, rv);
return 0;
}
retcode = NO_ERROR;
fail:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
void resync_after_online_grow(struct drbd_device *device)
{
int iass; /* I am sync source */
drbd_info(device, "Resync of new storage after online grow\n");
if (device->state.role != device->state.peer)
iass = (device->state.role == R_PRIMARY);
else
iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
if (iass)
drbd_start_resync(device, C_SYNC_SOURCE);
else
_drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
struct resize_parms rs;
struct drbd_device *device;
enum drbd_ret_code retcode;
enum determine_dev_size dd;
bool change_al_layout = false;
enum dds_flags ddsf;
sector_t u_size;
int err;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto finish;
mutex_lock(&adm_ctx.resource->adm_mutex);
device = adm_ctx.device;
if (!get_ldev(device)) {
retcode = ERR_NO_DISK;
goto fail;
}
memset(&rs, 0, sizeof(struct resize_parms));
rs.al_stripes = device->ldev->md.al_stripes;
rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4;
if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
err = resize_parms_from_attrs(&rs, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto fail_ldev;
}
}
if (device->state.conn > C_CONNECTED) {
retcode = ERR_RESIZE_RESYNC;
goto fail_ldev;
}
if (device->state.role == R_SECONDARY &&
device->state.peer == R_SECONDARY) {
retcode = ERR_NO_PRIMARY;
goto fail_ldev;
}
if (rs.no_resync && first_peer_device(device)->connection->agreed_pro_version < 93) {
retcode = ERR_NEED_APV_93;
goto fail_ldev;
}
rcu_read_lock();
u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
rcu_read_unlock();
if (u_size != (sector_t)rs.resize_size) {
new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
if (!new_disk_conf) {
retcode = ERR_NOMEM;
goto fail_ldev;
}
}
if (device->ldev->md.al_stripes != rs.al_stripes ||
device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
u32 al_size_k = rs.al_stripes * rs.al_stripe_size;
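/* al_stripe_size is in kB at this point, so these checks bound the total
 * activity log size to the range [32 kB, 16 GB]. */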
if (al_size_k > (16 * 1024 * 1024)) {
retcode = ERR_MD_LAYOUT_TOO_BIG;
goto fail_ldev;
}
if (al_size_k < MD_32kB_SECT/2) {
retcode = ERR_MD_LAYOUT_TOO_SMALL;
goto fail_ldev;
}
if (device->state.conn != C_CONNECTED && !rs.resize_force) {
retcode = ERR_MD_LAYOUT_CONNECTED;
goto fail_ldev;
}
change_al_layout = true;
}
if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev))
device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
if (new_disk_conf) {
mutex_lock(&device->resource->conf_update);
old_disk_conf = device->ldev->disk_conf;
*new_disk_conf = *old_disk_conf;
new_disk_conf->disk_size = (sector_t)rs.resize_size;
rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
mutex_unlock(&device->resource->conf_update);
kvfree_rcu_mightsleep(old_disk_conf);
new_disk_conf = NULL;
}
ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
dd = drbd_determine_dev_size(device, ddsf, change_al_layout ? &rs : NULL);
drbd_md_sync(device);
put_ldev(device);
if (dd == DS_ERROR) {
retcode = ERR_NOMEM_BITMAP;
goto fail;
} else if (dd == DS_ERROR_SPACE_MD) {
retcode = ERR_MD_LAYOUT_NO_FIT;
goto fail;
} else if (dd == DS_ERROR_SHRINK) {
retcode = ERR_IMPLICIT_SHRINK;
goto fail;
}
if (device->state.conn == C_CONNECTED) {
if (dd == DS_GREW)
set_bit(RESIZE_PENDING, &device->flags);
drbd_send_uuids(first_peer_device(device));
drbd_send_sizes(first_peer_device(device), 1, ddsf);
}
fail:
mutex_unlock(&adm_ctx.resource->adm_mutex);
finish:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
fail_ldev:
put_ldev(device);
kfree(new_disk_conf);
goto fail;
}
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
enum drbd_ret_code retcode;
struct res_opts res_opts;
int err;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto fail;
res_opts = adm_ctx.resource->res_opts;
if (should_set_defaults(info))
set_res_opts_defaults(&res_opts);
err = res_opts_from_attrs(&res_opts, info);
if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto fail;
}
mutex_lock(&adm_ctx.resource->adm_mutex);
err = set_resource_options(adm_ctx.resource, &res_opts);
if (err) {
retcode = ERR_INVALID_REQUEST;
if (err == -ENOMEM)
retcode = ERR_NOMEM;
}
mutex_unlock(&adm_ctx.resource->adm_mutex);
fail:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
struct drbd_device *device;
int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
device = adm_ctx.device;
if (!get_ldev(device)) {
retcode = ERR_NO_DISK;
goto out;
}
mutex_lock(&adm_ctx.resource->adm_mutex);
/* If there is still bitmap IO pending, probably because of a previous
* resync just being finished, wait for it before requesting a new resync.
 * Also wait for its after_state_ch(). */
drbd_suspend_io(device);
wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
/* If we happen to be C_STANDALONE R_SECONDARY, just change to
* D_INCONSISTENT, and set all bits in the bitmap. Otherwise,
* try to start a resync handshake as sync target for full sync.
*/
if (device->state.conn == C_STANDALONE && device->state.role == R_SECONDARY) {
retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT));
if (retcode >= SS_SUCCESS) {
if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
"set_n_write from invalidate", BM_LOCKED_MASK, NULL))
retcode = ERR_IO_MD_DISK;
}
} else
retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
drbd_resume_io(device);
mutex_unlock(&adm_ctx.resource->adm_mutex);
put_ldev(device);
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
union drbd_state mask, union drbd_state val)
{
struct drbd_config_context adm_ctx;
enum drbd_ret_code retcode;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
mutex_lock(&adm_ctx.resource->adm_mutex);
retcode = drbd_request_state(adm_ctx.device, mask, val);
mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
static int drbd_bmio_set_susp_al(struct drbd_device *device,
struct drbd_peer_device *peer_device) __must_hold(local)
{
int rv;
rv = drbd_bmio_set_n_write(device, peer_device);
drbd_suspend_al(device);
return rv;
}
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
int retcode; /* drbd_ret_code, drbd_state_rv */
struct drbd_device *device;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
device = adm_ctx.device;
if (!get_ldev(device)) {
retcode = ERR_NO_DISK;
goto out;
}
mutex_lock(&adm_ctx.resource->adm_mutex);
/* If there is still bitmap IO pending, probably because of a previous
* resync just being finished, wait for it before requesting a new resync.
 * Also wait for its after_state_ch(). */
drbd_suspend_io(device);
wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
/* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
* in the bitmap. Otherwise, try to start a resync handshake
* as sync source for full sync.
*/
if (device->state.conn == C_STANDALONE && device->state.role == R_PRIMARY) {
/* The peer will get a resync upon connect anyway. Just make that
into a full resync. */
retcode = drbd_request_state(device, NS(pdsk, D_INCONSISTENT));
if (retcode >= SS_SUCCESS) {
if (drbd_bitmap_io(device, &drbd_bmio_set_susp_al,
"set_n_write from invalidate_peer",
BM_LOCKED_SET_ALLOWED, NULL))
retcode = ERR_IO_MD_DISK;
}
} else
retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
drbd_resume_io(device);
mutex_unlock(&adm_ctx.resource->adm_mutex);
put_ldev(device);
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
enum drbd_ret_code retcode;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
mutex_lock(&adm_ctx.resource->adm_mutex);
if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
retcode = ERR_PAUSE_IS_SET;
mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
union drbd_dev_state s;
enum drbd_ret_code retcode;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
mutex_lock(&adm_ctx.resource->adm_mutex);
if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
s = adm_ctx.device->state;
if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
} else {
retcode = ERR_PAUSE_IS_CLEAR;
}
}
mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
struct drbd_device *device;
int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
mutex_lock(&adm_ctx.resource->adm_mutex);
device = adm_ctx.device;
if (test_bit(NEW_CUR_UUID, &device->flags)) {
if (get_ldev_if_state(device, D_ATTACHING)) {
drbd_uuid_new_current(device);
put_ldev(device);
} else {
/* This is effectively a multi-stage "forced down".
 * The NEW_CUR_UUID bit is supposedly only set if we
* lost the replication connection, and are configured
* to freeze IO and wait for some fence-peer handler.
* So we still don't have a replication connection.
* And now we don't have a local disk either. After
* resume, we will fail all pending and new IO, because
* we don't have any data anymore. Which means we will
* eventually be able to terminate all users of this
* device, and then take it down. By bumping the
* "effective" data uuid, we make sure that you really
 * need to tear down before you reconfigure: we will
 * then refuse to re-connect or re-attach (because no
* matching real data uuid exists).
*/
u64 val;
get_random_bytes(&val, sizeof(u64));
drbd_set_ed_uuid(device, val);
drbd_warn(device, "Resumed without access to data; please tear down before attempting to re-configure.\n");
}
clear_bit(NEW_CUR_UUID, &device->flags);
}
drbd_suspend_io(device);
retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
if (retcode == SS_SUCCESS) {
if (device->state.conn < C_CONNECTED)
tl_clear(first_peer_device(device)->connection);
if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED)
tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
}
drbd_resume_io(device);
mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}
static int nla_put_drbd_cfg_context(struct sk_buff *skb,
struct drbd_resource *resource,
struct drbd_connection *connection,
struct drbd_device *device)
{
struct nlattr *nla;
nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_CONTEXT);
if (!nla)
goto nla_put_failure;
if (device &&
nla_put_u32(skb, T_ctx_volume, device->vnr))
goto nla_put_failure;
if (nla_put_string(skb, T_ctx_resource_name, resource->name))
goto nla_put_failure;
if (connection) {
if (connection->my_addr_len &&
nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr))
goto nla_put_failure;
if (connection->peer_addr_len &&
nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr))
goto nla_put_failure;
}
nla_nest_end(skb, nla);
return 0;
nla_put_failure:
if (nla)
nla_nest_cancel(skb, nla);
return -EMSGSIZE;
}
/*
* The generic netlink dump callbacks are called outside the genl_lock(), so
* they cannot use the simple attribute parsing code which uses global
* attribute tables.
*/
static struct nlattr *find_cfg_context_attr(const struct nlmsghdr *nlh, int attr)
{
const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
const int maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
struct nlattr *nla;
nla = nla_find(nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen),
DRBD_NLA_CFG_CONTEXT);
if (!nla)
return NULL;
return drbd_nla_find_nested(maxtype, nla, __nla_type(attr));
}
static void resource_to_info(struct resource_info *, struct drbd_resource *);
int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb)
{
struct drbd_genlmsghdr *dh;
struct drbd_resource *resource;
struct resource_info resource_info;
struct resource_statistics resource_statistics;
int err;
rcu_read_lock();
if (cb->args[0]) {
for_each_resource_rcu(resource, &drbd_resources)
if (resource == (struct drbd_resource *)cb->args[0])
goto found_resource;
err = 0; /* resource was probably deleted */
goto out;
}
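/* First call: start from the list head itself, so that the
 * list_for_each_entry_continue_rcu() below begins at the first resource. */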
resource = list_entry(&drbd_resources,
struct drbd_resource, resources);
found_resource:
list_for_each_entry_continue_rcu(resource, &drbd_resources, resources) {
goto put_result;
}
err = 0;
goto out;
put_result:
dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, &drbd_genl_family,
NLM_F_MULTI, DRBD_ADM_GET_RESOURCES);
err = -ENOMEM;
if (!dh)
goto out;
dh->minor = -1U;
dh->ret_code = NO_ERROR;
err = nla_put_drbd_cfg_context(skb, resource, NULL, NULL);
if (err)
goto out;
err = res_opts_to_skb(skb, &resource->res_opts, !capable(CAP_SYS_ADMIN));
if (err)
goto out;
resource_to_info(&resource_info, resource);
err = resource_info_to_skb(skb, &resource_info, !capable(CAP_SYS_ADMIN));
if (err)
goto out;
resource_statistics.res_stat_write_ordering = resource->write_ordering;
err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
if (err)
goto out;
cb->args[0] = (long)resource;
genlmsg_end(skb, dh);
err = 0;
out:
rcu_read_unlock();
if (err)
return err;
return skb->len;
}
static void device_to_statistics(struct device_statistics *s,
struct drbd_device *device)
{
memset(s, 0, sizeof(*s));
s->dev_upper_blocked = !may_inc_ap_bio(device);
if (get_ldev(device)) {
struct drbd_md *md = &device->ldev->md;
u64 *history_uuids = (u64 *)s->history_uuids;
int n;
spin_lock_irq(&md->uuid_lock);
s->dev_current_uuid = md->uuid[UI_CURRENT];
BUILD_BUG_ON(sizeof(s->history_uuids) < UI_HISTORY_END - UI_HISTORY_START + 1);
for (n = 0; n < UI_HISTORY_END - UI_HISTORY_START + 1; n++)
history_uuids[n] = md->uuid[UI_HISTORY_START + n];
for (; n < HISTORY_UUIDS; n++)
history_uuids[n] = 0;
s->history_uuids_len = HISTORY_UUIDS;
spin_unlock_irq(&md->uuid_lock);
s->dev_disk_flags = md->flags;
put_ldev(device);
}
s->dev_size = get_capacity(device->vdisk);
s->dev_read = device->read_cnt;
s->dev_write = device->writ_cnt;
s->dev_al_writes = device->al_writ_cnt;
s->dev_bm_writes = device->bm_writ_cnt;
s->dev_upper_pending = atomic_read(&device->ap_bio_cnt);
s->dev_lower_pending = atomic_read(&device->local_cnt);
s->dev_al_suspended = test_bit(AL_SUSPENDED, &device->flags);
s->dev_exposed_data_uuid = device->ed_uuid;
}
static int put_resource_in_arg0(struct netlink_callback *cb, int holder_nr)
{
if (cb->args[0]) {
struct drbd_resource *resource =
(struct drbd_resource *)cb->args[0];
kref_put(&resource->kref, drbd_destroy_resource);
}
return 0;
}
int drbd_adm_dump_devices_done(struct netlink_callback *cb)
{
return put_resource_in_arg0(cb, 7);
}
static void device_to_info(struct device_info *, struct drbd_device *);
int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nlattr *resource_filter;
struct drbd_resource *resource;
struct drbd_device *device;
int minor, err, retcode;
struct drbd_genlmsghdr *dh;
struct device_info device_info;
struct device_statistics device_statistics;
struct idr *idr_to_search;
resource = (struct drbd_resource *)cb->args[0];
if (!cb->args[0] && !cb->args[1]) {
resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
if (resource_filter) {
retcode = ERR_RES_NOT_KNOWN;
resource = drbd_find_resource(nla_data(resource_filter));
if (!resource)
goto put_result;
cb->args[0] = (long)resource;
}
}
rcu_read_lock();
minor = cb->args[1];
idr_to_search = resource ? &resource->devices : &drbd_devices;
device = idr_get_next(idr_to_search, &minor);
if (!device) {
err = 0;
goto out;
}
idr_for_each_entry_continue(idr_to_search, device, minor) {
retcode = NO_ERROR;
goto put_result; /* only one iteration */
}
err = 0;
goto out; /* no more devices */
put_result:
dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, &drbd_genl_family,
NLM_F_MULTI, DRBD_ADM_GET_DEVICES);
err = -ENOMEM;
if (!dh)
goto out;
dh->ret_code = retcode;
dh->minor = -1U;
if (retcode == NO_ERROR) {
dh->minor = device->minor;
err = nla_put_drbd_cfg_context(skb, device->resource, NULL, device);
if (err)
goto out;
if (get_ldev(device)) {
struct disk_conf *disk_conf =
rcu_dereference(device->ldev->disk_conf);
err = disk_conf_to_skb(skb, disk_conf, !capable(CAP_SYS_ADMIN));
put_ldev(device);
if (err)
goto out;
}
device_to_info(&device_info, device);
err = device_info_to_skb(skb, &device_info, !capable(CAP_SYS_ADMIN));
if (err)
goto out;
device_to_statistics(&device_statistics, device);
err = device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
if (err)
goto out;
cb->args[1] = minor + 1;
}
genlmsg_end(skb, dh);
err = 0;
out:
rcu_read_unlock();
if (err)
return err;
return skb->len;
}
int drbd_adm_dump_connections_done(struct netlink_callback *cb)
{
return put_resource_in_arg0(cb, 6);
}
enum { SINGLE_RESOURCE, ITERATE_RESOURCES };
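/* Dump iterator state kept in the netlink callback arguments:
 * cb->args[0] holds the current resource (we hold a kref on it),
 * cb->args[1] selects SINGLE_RESOURCE vs ITERATE_RESOURCES, and
 * cb->args[2] remembers the connection that was dumped last. */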
int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nlattr *resource_filter;
struct drbd_resource *resource = NULL, *next_resource;
struct drbd_connection *connection;
int err = 0, retcode;
struct drbd_genlmsghdr *dh;
struct connection_info connection_info;
struct connection_statistics connection_statistics;
rcu_read_lock();
resource = (struct drbd_resource *)cb->args[0];
if (!cb->args[0]) {
resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
if (resource_filter) {
retcode = ERR_RES_NOT_KNOWN;
resource = drbd_find_resource(nla_data(resource_filter));
if (!resource)
goto put_result;
cb->args[0] = (long)resource;
cb->args[1] = SINGLE_RESOURCE;
}
}
if (!resource) {
if (list_empty(&drbd_resources))
goto out;
resource = list_first_entry(&drbd_resources, struct drbd_resource, resources);
kref_get(&resource->kref);
cb->args[0] = (long)resource;
cb->args[1] = ITERATE_RESOURCES;
}
next_resource:
rcu_read_unlock();
mutex_lock(&resource->conf_update);
rcu_read_lock();
if (cb->args[2]) {
for_each_connection_rcu(connection, resource)
if (connection == (struct drbd_connection *)cb->args[2])
goto found_connection;
/* connection was probably deleted */
goto no_more_connections;
}
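/* Point at the list head (not a real entry) so that the
 * list_for_each_entry_continue_rcu() below starts with the first
 * connection of this resource. */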
connection = list_entry(&resource->connections, struct drbd_connection, connections);
found_connection:
list_for_each_entry_continue_rcu(connection, &resource->connections, connections) {
if (!has_net_conf(connection))
continue;
retcode = NO_ERROR;
goto put_result; /* only one iteration */
}
no_more_connections:
if (cb->args[1] == ITERATE_RESOURCES) {
for_each_resource_rcu(next_resource, &drbd_resources) {
if (next_resource == resource)
goto found_resource;
}
/* resource was probably deleted */
}
goto out;
found_resource:
list_for_each_entry_continue_rcu(next_resource, &drbd_resources, resources) {
mutex_unlock(&resource->conf_update);
kref_put(&resource->kref, drbd_destroy_resource);
resource = next_resource;
kref_get(&resource->kref);
cb->args[0] = (long)resource;
cb->args[2] = 0;
goto next_resource;
}
goto out; /* no more resources */
put_result:
dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, &drbd_genl_family,
NLM_F_MULTI, DRBD_ADM_GET_CONNECTIONS);
err = -ENOMEM;
if (!dh)
goto out;
dh->ret_code = retcode;
dh->minor = -1U;
if (retcode == NO_ERROR) {
struct net_conf *net_conf;
err = nla_put_drbd_cfg_context(skb, resource, connection, NULL);
if (err)
goto out;
net_conf = rcu_dereference(connection->net_conf);
if (net_conf) {
err = net_conf_to_skb(skb, net_conf, !capable(CAP_SYS_ADMIN));
if (err)
goto out;
}
connection_to_info(&connection_info, connection);
err = connection_info_to_skb(skb, &connection_info, !capable(CAP_SYS_ADMIN));
if (err)
goto out;
connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
err = connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
if (err)
goto out;
cb->args[2] = (long)connection;
}
genlmsg_end(skb, dh);
err = 0;
out:
rcu_read_unlock();
if (resource)
mutex_unlock(&resource->conf_update);
if (err)
return err;
return skb->len;
}
enum mdf_peer_flag {
MDF_PEER_CONNECTED = 1 << 0,
MDF_PEER_OUTDATED = 1 << 1,
MDF_PEER_FENCING = 1 << 2,
MDF_PEER_FULL_SYNC = 1 << 3,
};
static void peer_device_to_statistics(struct peer_device_statistics *s,
struct drbd_peer_device *peer_device)
{
struct drbd_device *device = peer_device->device;
memset(s, 0, sizeof(*s));
s->peer_dev_received = device->recv_cnt;
s->peer_dev_sent = device->send_cnt;
s->peer_dev_pending = atomic_read(&device->ap_pending_cnt) +
atomic_read(&device->rs_pending_cnt);
s->peer_dev_unacked = atomic_read(&device->unacked_cnt);
s->peer_dev_out_of_sync = drbd_bm_total_weight(device) << (BM_BLOCK_SHIFT - 9);
s->peer_dev_resync_failed = device->rs_failed << (BM_BLOCK_SHIFT - 9);
if (get_ldev(device)) {
struct drbd_md *md = &device->ldev->md;
spin_lock_irq(&md->uuid_lock);
s->peer_dev_bitmap_uuid = md->uuid[UI_BITMAP];
spin_unlock_irq(&md->uuid_lock);
s->peer_dev_flags =
(drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND) ?
MDF_PEER_CONNECTED : 0) +
(drbd_md_test_flag(device->ldev, MDF_CONSISTENT) &&
!drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE) ?
MDF_PEER_OUTDATED : 0) +
/* FIXME: MDF_PEER_FENCING? */
(drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ?
MDF_PEER_FULL_SYNC : 0);
put_ldev(device);
}
}
int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb)
{
return put_resource_in_arg0(cb, 9);
}
int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nlattr *resource_filter;
struct drbd_resource *resource;
struct drbd_device *device;
struct drbd_peer_device *peer_device = NULL;
int minor, err, retcode;
struct drbd_genlmsghdr *dh;
struct idr *idr_to_search;
resource = (struct drbd_resource *)cb->args[0];
if (!cb->args[0] && !cb->args[1]) {
resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
if (resource_filter) {
retcode = ERR_RES_NOT_KNOWN;
resource = drbd_find_resource(nla_data(resource_filter));
if (!resource)
goto put_result;
}
cb->args[0] = (long)resource;
}
rcu_read_lock();
minor = cb->args[1];
idr_to_search = resource ? &resource->devices : &drbd_devices;
device = idr_find(idr_to_search, minor);
if (!device) {
next_device:
minor++;
cb->args[2] = 0;
device = idr_get_next(idr_to_search, &minor);
if (!device) {
err = 0;
goto out;
}
}
if (cb->args[2]) {
for_each_peer_device(peer_device, device)
if (peer_device == (struct drbd_peer_device *)cb->args[2])
goto found_peer_device;
/* peer device was probably deleted */
goto next_device;
}
/* Make peer_device point to the list head (not the first entry). */
peer_device = list_entry(&device->peer_devices, struct drbd_peer_device, peer_devices);
found_peer_device:
list_for_each_entry_continue_rcu(peer_device, &device->peer_devices, peer_devices) {
if (!has_net_conf(peer_device->connection))
continue;
retcode = NO_ERROR;
goto put_result; /* only one iteration */
}
goto next_device;
put_result:
dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, &drbd_genl_family,
NLM_F_MULTI, DRBD_ADM_GET_PEER_DEVICES);
err = -ENOMEM;
if (!dh)
goto out;
dh->ret_code = retcode;
dh->minor = -1U;
if (retcode == NO_ERROR) {
struct peer_device_info peer_device_info;
struct peer_device_statistics peer_device_statistics;
dh->minor = minor;
err = nla_put_drbd_cfg_context(skb, device->resource, peer_device->connection, device);
if (err)
goto out;
peer_device_to_info(&peer_device_info, peer_device);
err = peer_device_info_to_skb(skb, &peer_device_info, !capable(CAP_SYS_ADMIN));
if (err)
goto out;
peer_device_to_statistics(&peer_device_statistics, peer_device);
err = peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
if (err)
goto out;
cb->args[1] = minor;
cb->args[2] = (long)peer_device;
}
genlmsg_end(skb, dh);
err = 0;
out:
rcu_read_unlock();
if (err)
return err;
return skb->len;
}
/*
* Return the connection of @resource if @resource has exactly one connection.
*/
static struct drbd_connection *the_only_connection(struct drbd_resource *resource)
{
struct list_head *connections = &resource->connections;
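/* Exactly one connection: the list is non-empty and the first entry's
 * next pointer already links back to the list head. */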
if (list_empty(connections) || connections->next->next != connections)
return NULL;
return list_first_entry(&resource->connections, struct drbd_connection, connections);
}
static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
const struct sib_info *sib)
{
struct drbd_resource *resource = device->resource;
struct state_info *si = NULL; /* for sizeof(si->member); */
struct nlattr *nla;
int got_ldev;
int err = 0;
int exclude_sensitive;
/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
* to. So we better exclude_sensitive information.
*
* If sib == NULL, this is drbd_adm_get_status, executed synchronously
* in the context of the requesting user process. Exclude sensitive
* information, unless current has superuser.
*
* NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
* relies on the current implementation of netlink_dump(), which
* executes the dump callback successively from netlink_recvmsg(),
* always in the context of the receiving process */
exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
got_ldev = get_ldev(device);
/* We still need to add connection name and volume number information.
* Minor number is in drbd_genlmsghdr. */
if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device))
goto nla_put_failure;
if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive))
goto nla_put_failure;
rcu_read_lock();
if (got_ldev) {
struct disk_conf *disk_conf;
disk_conf = rcu_dereference(device->ldev->disk_conf);
err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
}
if (!err) {
struct net_conf *nc;
nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
if (nc)
err = net_conf_to_skb(skb, nc, exclude_sensitive);
}
rcu_read_unlock();
if (err)
goto nla_put_failure;
nla = nla_nest_start_noflag(skb, DRBD_NLA_STATE_INFO);
if (!nla)
goto nla_put_failure;
if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
nla_put_u32(skb, T_current_state, device->state.i) ||
nla_put_u64_0pad(skb, T_ed_uuid, device->ed_uuid) ||
nla_put_u64_0pad(skb, T_capacity, get_capacity(device->vdisk)) ||
nla_put_u64_0pad(skb, T_send_cnt, device->send_cnt) ||
nla_put_u64_0pad(skb, T_recv_cnt, device->recv_cnt) ||
nla_put_u64_0pad(skb, T_read_cnt, device->read_cnt) ||
nla_put_u64_0pad(skb, T_writ_cnt, device->writ_cnt) ||
nla_put_u64_0pad(skb, T_al_writ_cnt, device->al_writ_cnt) ||
nla_put_u64_0pad(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
goto nla_put_failure;
if (got_ldev) {
int err;
spin_lock_irq(&device->ldev->md.uuid_lock);
err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
spin_unlock_irq(&device->ldev->md.uuid_lock);
if (err)
goto nla_put_failure;
if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
nla_put_u64_0pad(skb, T_bits_total, drbd_bm_bits(device)) ||
nla_put_u64_0pad(skb, T_bits_oos,
drbd_bm_total_weight(device)))
goto nla_put_failure;
if (C_SYNC_SOURCE <= device->state.conn &&
C_PAUSED_SYNC_T >= device->state.conn) {
if (nla_put_u64_0pad(skb, T_bits_rs_total,
device->rs_total) ||
nla_put_u64_0pad(skb, T_bits_rs_failed,
device->rs_failed))
goto nla_put_failure;
}
}
if (sib) {
switch(sib->sib_reason) {
case SIB_SYNC_PROGRESS:
case SIB_GET_STATUS_REPLY:
break;
case SIB_STATE_CHANGE:
if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
nla_put_u32(skb, T_new_state, sib->ns.i))
goto nla_put_failure;
break;
case SIB_HELPER_POST:
if (nla_put_u32(skb, T_helper_exit_code,
sib->helper_exit_code))
goto nla_put_failure;
fallthrough;
case SIB_HELPER_PRE:
if (nla_put_string(skb, T_helper, sib->helper_name))
goto nla_put_failure;
break;
}
}
nla_nest_end(skb, nla);
if (0)
nla_put_failure:
err = -EMSGSIZE;
if (got_ldev)
put_ldev(device);
return err;
}
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
enum drbd_ret_code retcode;
int err;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL);
if (err) {
nlmsg_free(adm_ctx.reply_skb);
return err;
}
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
struct drbd_device *device;
struct drbd_genlmsghdr *dh;
struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
struct drbd_resource *resource = NULL;
struct drbd_resource *tmp;
unsigned volume = cb->args[1];
/* Open coded, deferred, iteration:
* for_each_resource_safe(resource, tmp, &drbd_resources) {
* connection = "first connection of resource or undefined";
* idr_for_each_entry(&resource->devices, device, i) {
* ...
* }
* }
* where resource is cb->args[0];
* and i is cb->args[1];
*
* cb->args[2] indicates if we shall loop over all resources,
* or just dump all volumes of a single resource.
*
* This may miss entries inserted after this dump started,
* or entries deleted before they are reached.
*
* We need to make sure the device won't disappear while
* we are looking at it, and revalidate our iterators
* on each iteration.
*/
/* synchronize with conn_create()/drbd_destroy_connection() */
rcu_read_lock();
/* revalidate iterator position */
for_each_resource_rcu(tmp, &drbd_resources) {
if (pos == NULL) {
/* first iteration */
pos = tmp;
resource = pos;
break;
}
if (tmp == pos) {
resource = pos;
break;
}
}
if (resource) {
next_resource:
device = idr_get_next(&resource->devices, &volume);
if (!device) {
/* No more volumes to dump on this resource.
* Advance resource iterator. */
pos = list_entry_rcu(resource->resources.next,
struct drbd_resource, resources);
/* Did we dump any volume of this resource yet? */
if (volume != 0) {
/* If we reached the end of the list,
* or only a single resource dump was requested,
* we are done. */
if (&pos->resources == &drbd_resources || cb->args[2])
goto out;
volume = 0;
resource = pos;
goto next_resource;
}
}
dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, &drbd_genl_family,
NLM_F_MULTI, DRBD_ADM_GET_STATUS);
if (!dh)
goto out;
if (!device) {
/* This is a connection without a single volume.
 * Surprisingly enough, it may have a network
* configuration. */
struct drbd_connection *connection;
dh->minor = -1U;
dh->ret_code = NO_ERROR;
connection = the_only_connection(resource);
if (nla_put_drbd_cfg_context(skb, resource, connection, NULL))
goto cancel;
if (connection) {
struct net_conf *nc;
nc = rcu_dereference(connection->net_conf);
if (nc && net_conf_to_skb(skb, nc, 1) != 0)
goto cancel;
}
goto done;
}
D_ASSERT(device, device->vnr == volume);
D_ASSERT(device, device->resource == resource);
dh->minor = device_to_minor(device);
dh->ret_code = NO_ERROR;
if (nla_put_status_info(skb, device, NULL)) {
cancel:
genlmsg_cancel(skb, dh);
goto out;
}
done:
genlmsg_end(skb, dh);
}
out:
rcu_read_unlock();
/* where to start the next iteration */
cb->args[0] = (long)pos;
cb->args[1] = (pos == resource) ? volume + 1 : 0;
/* No more resources/volumes/minors found results in an empty skb.
* Which will terminate the dump. */
return skb->len;
}
/*
* Request status of all resources, or of all volumes within a single resource.
*
* This is a dump, as the answer may not fit in a single reply skb otherwise.
* Which means we cannot use the family->attrbuf or other such members, because
* dump is NOT protected by the genl_lock(). During dump, we only have access
* to the incoming skb, and need to opencode "parsing" of the nlattr payload.
*
* Once things are setup properly, we call into get_one_status().
*/
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
struct nlattr *nla;
const char *resource_name;
struct drbd_resource *resource;
int maxtype;
/* Is this a followup call? */
if (cb->args[0]) {
/* ... of a single resource dump,
* and the resource iterator has been advanced already? */
if (cb->args[2] && cb->args[2] != cb->args[0])
return 0; /* DONE. */
goto dump;
}
/* First call (from netlink_dump_start). We need to figure out
* which resource(s) the user wants us to dump. */
nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
nlmsg_attrlen(cb->nlh, hdrlen),
DRBD_NLA_CFG_CONTEXT);
/* No explicit context given. Dump all. */
if (!nla)
goto dump;
maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
if (IS_ERR(nla))
return PTR_ERR(nla);
/* context given, but no name present? */
if (!nla)
return -EINVAL;
resource_name = nla_data(nla);
if (!*resource_name)
return -ENODEV;
resource = drbd_find_resource(resource_name);
if (!resource)
return -ENODEV;
kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */
/* prime iterators, and set "filter" mode mark:
 * only dump this resource. */
cb->args[0] = (long)resource;
/* cb->args[1] = 0; passed in this way. */
cb->args[2] = (long)resource;
dump:
return get_one_status(skb, cb);
}
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
enum drbd_ret_code retcode;
struct timeout_parms tp;
int err;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
tp.timeout_type =
adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED :
UT_DEFAULT;
err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
if (err) {
nlmsg_free(adm_ctx.reply_skb);
return err;
}
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
struct drbd_device *device;
enum drbd_ret_code retcode;
struct start_ov_parms parms;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
device = adm_ctx.device;
/* resume from last known position, if possible */
parms.ov_start_sector = device->ov_start_sector;
parms.ov_stop_sector = ULLONG_MAX;
if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
int err = start_ov_parms_from_attrs(&parms, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto out;
}
}
mutex_lock(&adm_ctx.resource->adm_mutex);
/* w_make_ov_request expects position to be aligned */
device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
device->ov_stop_sector = parms.ov_stop_sector;
/* If there is still bitmap IO pending, e.g. previous resync or verify
* just being finished, wait for it before requesting a new resync. */
drbd_suspend_io(device);
wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
drbd_resume_io(device);
mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
struct drbd_device *device;
enum drbd_ret_code retcode;
int skip_initial_sync = 0;
int err;
struct new_c_uuid_parms args;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out_nolock;
device = adm_ctx.device;
memset(&args, 0, sizeof(args));
if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
err = new_c_uuid_parms_from_attrs(&args, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto out_nolock;
}
}
mutex_lock(&adm_ctx.resource->adm_mutex);
mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */
if (!get_ldev(device)) {
retcode = ERR_NO_DISK;
goto out;
}
/* this is "skip initial sync", assume to be clean */
if (device->state.conn == C_CONNECTED &&
first_peer_device(device)->connection->agreed_pro_version >= 90 &&
device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
drbd_info(device, "Preparing to skip initial sync\n");
skip_initial_sync = 1;
} else if (device->state.conn != C_STANDALONE) {
retcode = ERR_CONNECTED;
goto out_dec;
}
drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */
if (args.clear_bm) {
err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
"clear_n_write from new_c_uuid", BM_LOCKED_MASK, NULL);
if (err) {
drbd_err(device, "Writing bitmap failed with %d\n", err);
retcode = ERR_IO_MD_DISK;
}
if (skip_initial_sync) {
drbd_send_uuids_skip_initial_sync(first_peer_device(device));
_drbd_uuid_set(device, UI_BITMAP, 0);
drbd_print_uuids(device, "cleared bitmap UUID");
spin_lock_irq(&device->resource->req_lock);
_drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
CS_VERBOSE, NULL);
spin_unlock_irq(&device->resource->req_lock);
}
}
drbd_md_sync(device);
out_dec:
put_ldev(device);
out:
mutex_unlock(device->state_mutex);
mutex_unlock(&adm_ctx.resource->adm_mutex);
out_nolock:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
static enum drbd_ret_code
drbd_check_resource_name(struct drbd_config_context *adm_ctx)
{
const char *name = adm_ctx->resource_name;
if (!name || !name[0]) {
drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing");
return ERR_MANDATORY_TAG;
}
/* if we want to use these in sysfs/configfs/debugfs some day,
* we must not allow slashes */
if (strchr(name, '/')) {
drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name");
return ERR_INVALID_REQUEST;
}
return NO_ERROR;
}
static void resource_to_info(struct resource_info *info,
struct drbd_resource *resource)
{
info->res_role = conn_highest_role(first_connection(resource));
info->res_susp = resource->susp;
info->res_susp_nod = resource->susp_nod;
info->res_susp_fen = resource->susp_fen;
}
int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_connection *connection;
struct drbd_config_context adm_ctx;
enum drbd_ret_code retcode;
struct res_opts res_opts;
int err;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
set_res_opts_defaults(&res_opts);
err = res_opts_from_attrs(&res_opts, info);
if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
goto out;
}
retcode = drbd_check_resource_name(&adm_ctx);
if (retcode != NO_ERROR)
goto out;
if (adm_ctx.resource) {
if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
retcode = ERR_INVALID_REQUEST;
drbd_msg_put_info(adm_ctx.reply_skb, "resource exists");
}
/* else: still NO_ERROR */
goto out;
}
/* not yet safe for genl_family.parallel_ops */
mutex_lock(&resources_mutex);
connection = conn_create(adm_ctx.resource_name, &res_opts);
mutex_unlock(&resources_mutex);
if (connection) {
struct resource_info resource_info;
mutex_lock(¬ification_mutex);
resource_to_info(&resource_info, connection->resource);
notify_resource_state(NULL, 0, connection->resource,
&resource_info, NOTIFY_CREATE);
mutex_unlock(¬ification_mutex);
} else
retcode = ERR_NOMEM;
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
static void device_to_info(struct device_info *info,
struct drbd_device *device)
{
info->dev_disk_state = device->state.disk;
}
int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
struct drbd_genlmsghdr *dh = genl_info_userhdr(info);
enum drbd_ret_code retcode;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
if (dh->minor > MINORMASK) {
drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range");
retcode = ERR_INVALID_REQUEST;
goto out;
}
if (adm_ctx.volume > DRBD_VOLUME_MAX) {
drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range");
retcode = ERR_INVALID_REQUEST;
goto out;
}
/* drbd_adm_prepare made sure already
* that first_peer_device(device)->connection and device->vnr match the request. */
if (adm_ctx.device) {
if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
retcode = ERR_MINOR_OR_VOLUME_EXISTS;
/* else: still NO_ERROR */
goto out;
}
mutex_lock(&adm_ctx.resource->adm_mutex);
retcode = drbd_create_device(&adm_ctx, dh->minor);
if (retcode == NO_ERROR) {
struct drbd_device *device;
struct drbd_peer_device *peer_device;
struct device_info info;
unsigned int peer_devices = 0;
enum drbd_notification_type flags;
device = minor_to_device(dh->minor);
for_each_peer_device(peer_device, device) {
if (!has_net_conf(peer_device->connection))
continue;
peer_devices++;
}
device_to_info(&info, device);
mutex_lock(¬ification_mutex);
flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
notify_device_state(NULL, 0, device, &info, NOTIFY_CREATE | flags);
for_each_peer_device(peer_device, device) {
struct peer_device_info peer_device_info;
if (!has_net_conf(peer_device->connection))
continue;
peer_device_to_info(&peer_device_info, peer_device);
flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
notify_peer_device_state(NULL, 0, peer_device, &peer_device_info,
NOTIFY_CREATE | flags);
}
mutex_unlock(¬ification_mutex);
}
mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
static enum drbd_ret_code adm_del_minor(struct drbd_device *device)
{
struct drbd_peer_device *peer_device;
if (device->state.disk == D_DISKLESS &&
/* no need to be device->state.conn == C_STANDALONE &&
* we may want to delete a minor from a live replication group.
*/
device->state.role == R_SECONDARY) {
struct drbd_connection *connection =
first_connection(device->resource);
_drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
CS_VERBOSE + CS_WAIT_COMPLETE);
/* If the state engine hasn't stopped the sender thread yet, we
* need to flush the sender work queue before generating the
* DESTROY events here. */
if (get_t_state(&connection->worker) == RUNNING)
drbd_flush_workqueue(&connection->sender_work);
mutex_lock(¬ification_mutex);
for_each_peer_device(peer_device, device) {
if (!has_net_conf(peer_device->connection))
continue;
notify_peer_device_state(NULL, 0, peer_device, NULL,
NOTIFY_DESTROY | NOTIFY_CONTINUES);
}
notify_device_state(NULL, 0, device, NULL, NOTIFY_DESTROY);
mutex_unlock(¬ification_mutex);
drbd_delete_device(device);
return NO_ERROR;
} else
return ERR_MINOR_CONFIGURED;
}
int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
enum drbd_ret_code retcode;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
mutex_lock(&adm_ctx.resource->adm_mutex);
retcode = adm_del_minor(adm_ctx.device);
mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
static int adm_del_resource(struct drbd_resource *resource)
{
struct drbd_connection *connection;
for_each_connection(connection, resource) {
if (connection->cstate > C_STANDALONE)
return ERR_NET_CONFIGURED;
}
if (!idr_is_empty(&resource->devices))
return ERR_RES_IN_USE;
/* The state engine has stopped the sender thread, so we don't
* need to flush the sender work queue before generating the
* DESTROY event here. */
mutex_lock(¬ification_mutex);
notify_resource_state(NULL, 0, resource, NULL, NOTIFY_DESTROY);
mutex_unlock(¬ification_mutex);
mutex_lock(&resources_mutex);
list_del_rcu(&resource->resources);
mutex_unlock(&resources_mutex);
/* Make sure all threads have actually stopped: state handling only
* does drbd_thread_stop_nowait(). */
list_for_each_entry(connection, &resource->connections, connections)
drbd_thread_stop(&connection->worker);
synchronize_rcu();
drbd_free_resource(resource);
return NO_ERROR;
}
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
struct drbd_resource *resource;
struct drbd_connection *connection;
struct drbd_device *device;
int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
unsigned i;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto finish;
resource = adm_ctx.resource;
mutex_lock(&resource->adm_mutex);
/* demote */
for_each_connection(connection, resource) {
struct drbd_peer_device *peer_device;
idr_for_each_entry(&connection->peer_devices, peer_device, i) {
retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
if (retcode < SS_SUCCESS) {
drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote");
goto out;
}
}
retcode = conn_try_disconnect(connection, 0);
if (retcode < SS_SUCCESS) {
drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect");
goto out;
}
}
/* detach */
idr_for_each_entry(&resource->devices, device, i) {
retcode = adm_detach(device, 0);
if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach");
goto out;
}
}
/* delete volumes */
idr_for_each_entry(&resource->devices, device, i) {
retcode = adm_del_minor(device);
if (retcode != NO_ERROR) {
/* "can not happen" */
drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume");
goto out;
}
}
retcode = adm_del_resource(resource);
out:
mutex_unlock(&resource->adm_mutex);
finish:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
struct drbd_resource *resource;
enum drbd_ret_code retcode;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto finish;
resource = adm_ctx.resource;
mutex_lock(&resource->adm_mutex);
retcode = adm_del_resource(resource);
mutex_unlock(&resource->adm_mutex);
finish:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
{
struct sk_buff *msg;
struct drbd_genlmsghdr *d_out;
unsigned seq;
int err = -ENOMEM;
seq = atomic_inc_return(&drbd_genl_seq);
msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
if (!msg)
goto failed;
err = -EMSGSIZE;
d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
if (!d_out) /* cannot happen, but anyway. */
goto nla_put_failure;
d_out->minor = device_to_minor(device);
d_out->ret_code = NO_ERROR;
if (nla_put_status_info(msg, device, sib))
goto nla_put_failure;
genlmsg_end(msg, d_out);
err = drbd_genl_multicast_events(msg, GFP_NOWAIT);
/* msg has been consumed or freed in netlink_broadcast() */
if (err && err != -ESRCH)
goto failed;
return;
nla_put_failure:
nlmsg_free(msg);
failed:
drbd_err(device, "Error %d while broadcasting event. "
"Event seq:%u sib_reason:%u\n",
err, seq, sib->sib_reason);
}
static int nla_put_notification_header(struct sk_buff *msg,
enum drbd_notification_type type)
{
struct drbd_notification_header nh = {
.nh_type = type,
};
return drbd_notification_header_to_skb(msg, &nh, true);
}
int notify_resource_state(struct sk_buff *skb,
unsigned int seq,
struct drbd_resource *resource,
struct resource_info *resource_info,
enum drbd_notification_type type)
{
struct resource_statistics resource_statistics;
struct drbd_genlmsghdr *dh;
bool multicast = false;
int err;
if (!skb) {
seq = atomic_inc_return(¬ify_genl_seq);
skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
err = -ENOMEM;
if (!skb)
goto failed;
multicast = true;
}
err = -EMSGSIZE;
dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_RESOURCE_STATE);
if (!dh)
goto nla_put_failure;
dh->minor = -1U;
dh->ret_code = NO_ERROR;
if (nla_put_drbd_cfg_context(skb, resource, NULL, NULL) ||
nla_put_notification_header(skb, type) ||
((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
resource_info_to_skb(skb, resource_info, true)))
goto nla_put_failure;
resource_statistics.res_stat_write_ordering = resource->write_ordering;
err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
if (err)
goto nla_put_failure;
genlmsg_end(skb, dh);
if (multicast) {
err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
/* skb has been consumed or freed in netlink_broadcast() */
if (err && err != -ESRCH)
goto failed;
}
return 0;
nla_put_failure:
nlmsg_free(skb);
failed:
drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
err, seq);
return err;
}
int notify_device_state(struct sk_buff *skb,
unsigned int seq,
struct drbd_device *device,
struct device_info *device_info,
enum drbd_notification_type type)
{
struct device_statistics device_statistics;
struct drbd_genlmsghdr *dh;
bool multicast = false;
int err;
if (!skb) {
seq = atomic_inc_return(¬ify_genl_seq);
skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
err = -ENOMEM;
if (!skb)
goto failed;
multicast = true;
}
err = -EMSGSIZE;
dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_DEVICE_STATE);
if (!dh)
goto nla_put_failure;
dh->minor = device->minor;
dh->ret_code = NO_ERROR;
if (nla_put_drbd_cfg_context(skb, device->resource, NULL, device) ||
nla_put_notification_header(skb, type) ||
((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
device_info_to_skb(skb, device_info, true)))
goto nla_put_failure;
device_to_statistics(&device_statistics, device);
device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
genlmsg_end(skb, dh);
if (multicast) {
err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
/* skb has been consumed or freed in netlink_broadcast() */
if (err && err != -ESRCH)
goto failed;
}
return 0;
nla_put_failure:
nlmsg_free(skb);
failed:
drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
err, seq);
return err;
}
int notify_connection_state(struct sk_buff *skb,
unsigned int seq,
struct drbd_connection *connection,
struct connection_info *connection_info,
enum drbd_notification_type type)
{
struct connection_statistics connection_statistics;
struct drbd_genlmsghdr *dh;
bool multicast = false;
int err;
if (!skb) {
seq = atomic_inc_return(¬ify_genl_seq);
skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
err = -ENOMEM;
if (!skb)
goto failed;
multicast = true;
}
err = -EMSGSIZE;
dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_CONNECTION_STATE);
if (!dh)
goto nla_put_failure;
dh->minor = -1U;
dh->ret_code = NO_ERROR;
if (nla_put_drbd_cfg_context(skb, connection->resource, connection, NULL) ||
nla_put_notification_header(skb, type) ||
((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
connection_info_to_skb(skb, connection_info, true)))
goto nla_put_failure;
connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
genlmsg_end(skb, dh);
if (multicast) {
err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
/* skb has been consumed or freed in netlink_broadcast() */
if (err && err != -ESRCH)
goto failed;
}
return 0;
nla_put_failure:
nlmsg_free(skb);
failed:
drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
err, seq);
return err;
}
int notify_peer_device_state(struct sk_buff *skb,
unsigned int seq,
struct drbd_peer_device *peer_device,
struct peer_device_info *peer_device_info,
enum drbd_notification_type type)
{
struct peer_device_statistics peer_device_statistics;
struct drbd_resource *resource = peer_device->device->resource;
struct drbd_genlmsghdr *dh;
bool multicast = false;
int err;
if (!skb) {
seq = atomic_inc_return(¬ify_genl_seq);
skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
err = -ENOMEM;
if (!skb)
goto failed;
multicast = true;
}
err = -EMSGSIZE;
dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_PEER_DEVICE_STATE);
if (!dh)
goto nla_put_failure;
dh->minor = -1U;
dh->ret_code = NO_ERROR;
if (nla_put_drbd_cfg_context(skb, resource, peer_device->connection, peer_device->device) ||
nla_put_notification_header(skb, type) ||
((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
peer_device_info_to_skb(skb, peer_device_info, true)))
goto nla_put_failure;
peer_device_to_statistics(&peer_device_statistics, peer_device);
peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
genlmsg_end(skb, dh);
if (multicast) {
err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
/* skb has been consumed or freed in netlink_broadcast() */
if (err && err != -ESRCH)
goto failed;
}
return 0;
nla_put_failure:
nlmsg_free(skb);
failed:
drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
err, seq);
return err;
}
void notify_helper(enum drbd_notification_type type,
struct drbd_device *device, struct drbd_connection *connection,
const char *name, int status)
{
struct drbd_resource *resource = device ? device->resource : connection->resource;
struct drbd_helper_info helper_info;
unsigned int seq = atomic_inc_return(¬ify_genl_seq);
struct sk_buff *skb = NULL;
struct drbd_genlmsghdr *dh;
int err;
strscpy(helper_info.helper_name, name, sizeof(helper_info.helper_name));
helper_info.helper_name_len = min(strlen(name), sizeof(helper_info.helper_name));
helper_info.helper_status = status;
skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
err = -ENOMEM;
if (!skb)
goto fail;
err = -EMSGSIZE;
dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_HELPER);
if (!dh)
goto fail;
dh->minor = device ? device->minor : -1;
dh->ret_code = NO_ERROR;
mutex_lock(¬ification_mutex);
if (nla_put_drbd_cfg_context(skb, resource, connection, device) ||
nla_put_notification_header(skb, type) ||
drbd_helper_info_to_skb(skb, &helper_info, true))
goto unlock_fail;
genlmsg_end(skb, dh);
err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
skb = NULL;
/* skb has been consumed or freed in netlink_broadcast() */
if (err && err != -ESRCH)
goto unlock_fail;
mutex_unlock(¬ification_mutex);
return;
unlock_fail:
mutex_unlock(¬ification_mutex);
fail:
nlmsg_free(skb);
drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
err, seq);
}
static int notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
{
struct drbd_genlmsghdr *dh;
int err;
err = -EMSGSIZE;
dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_INITIAL_STATE_DONE);
if (!dh)
goto nla_put_failure;
dh->minor = -1U;
dh->ret_code = NO_ERROR;
if (nla_put_notification_header(skb, NOTIFY_EXISTS))
goto nla_put_failure;
genlmsg_end(skb, dh);
return 0;
nla_put_failure:
nlmsg_free(skb);
pr_err("Error %d sending event. Event seq:%u\n", err, seq);
return err;
}
static void free_state_changes(struct list_head *list)
{
while (!list_empty(list)) {
struct drbd_state_change *state_change =
list_first_entry(list, struct drbd_state_change, list);
list_del(&state_change->list);
forget_state_change(state_change);
}
}
static unsigned int notifications_for_state_change(struct drbd_state_change *state_change)
{
return 1 +
state_change->n_connections +
state_change->n_devices +
state_change->n_devices * state_change->n_connections;
}
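/*
 * Worked example (illustrative, with assumed numbers): a snapshot of one
 * resource with 2 connections and 3 devices yields
 *
 *	1 + 2 + 3 + 3 * 2 = 12
 *
 * notifications: one for the resource, one per connection, one per device,
 * and one per (device, connection) pair, i.e. per peer device.
 */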
static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
{
struct drbd_state_change *state_change = (struct drbd_state_change *)cb->args[0];
unsigned int seq = cb->args[2];
unsigned int n;
enum drbd_notification_type flags = 0;
int err = 0;
/* There is no need for taking notification_mutex here: it doesn't
matter if the initial state events mix with later state change
events; we can always tell the events apart by the NOTIFY_EXISTS
flag. */
cb->args[5]--;
if (cb->args[5] == 1) {
err = notify_initial_state_done(skb, seq);
goto out;
}
n = cb->args[4]++;
if (cb->args[4] < cb->args[3])
flags |= NOTIFY_CONTINUES;
if (n < 1) {
err = notify_resource_state_change(skb, seq, state_change->resource,
NOTIFY_EXISTS | flags);
goto next;
}
n--;
if (n < state_change->n_connections) {
err = notify_connection_state_change(skb, seq, &state_change->connections[n],
NOTIFY_EXISTS | flags);
goto next;
}
n -= state_change->n_connections;
if (n < state_change->n_devices) {
err = notify_device_state_change(skb, seq, &state_change->devices[n],
NOTIFY_EXISTS | flags);
goto next;
}
n -= state_change->n_devices;
if (n < state_change->n_devices * state_change->n_connections) {
err = notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
NOTIFY_EXISTS | flags);
goto next;
}
next:
if (cb->args[4] == cb->args[3]) {
struct drbd_state_change *next_state_change =
list_entry(state_change->list.next,
struct drbd_state_change, list);
cb->args[0] = (long)next_state_change;
cb->args[3] = notifications_for_state_change(next_state_change);
cb->args[4] = 0;
}
out:
if (err)
return err;
else
return skb->len;
}
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
{
struct drbd_resource *resource;
LIST_HEAD(head);
if (cb->args[5] >= 1) {
if (cb->args[5] > 1)
return get_initial_state(skb, cb);
if (cb->args[0]) {
struct drbd_state_change *state_change =
(struct drbd_state_change *)cb->args[0];
/* connect list to head */
list_add(&head, &state_change->list);
free_state_changes(&head);
}
return 0;
}
cb->args[5] = 2; /* number of iterations */
mutex_lock(&resources_mutex);
for_each_resource(resource, &drbd_resources) {
struct drbd_state_change *state_change;
state_change = remember_old_state(resource, GFP_KERNEL);
if (!state_change) {
if (!list_empty(&head))
free_state_changes(&head);
mutex_unlock(&resources_mutex);
return -ENOMEM;
}
copy_old_to_new_state_change(state_change);
list_add_tail(&state_change->list, &head);
cb->args[5] += notifications_for_state_change(state_change);
}
mutex_unlock(&resources_mutex);
if (!list_empty(&head)) {
struct drbd_state_change *state_change =
list_entry(head.next, struct drbd_state_change, list);
cb->args[0] = (long)state_change;
cb->args[3] = notifications_for_state_change(state_change);
list_del(&head); /* detach list from head */
}
cb->args[2] = cb->nlh->nlmsg_seq;
return get_initial_state(skb, cb);
}
| linux-master | drivers/block/drbd/drbd_nl.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
drbd_proc.c
This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
Copyright (C) 1999-2008, Philipp Reisner <[email protected]>.
Copyright (C) 2002-2008, Lars Ellenberg <[email protected]>.
*/
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/drbd.h>
#include "drbd_int.h"
struct proc_dir_entry *drbd_proc;
static void seq_printf_with_thousands_grouping(struct seq_file *seq, long v)
{
/* v is in kB/sec. We don't expect TiByte/sec yet. */
if (unlikely(v >= 1000000)) {
/* cool: > GiByte/s */
seq_printf(seq, "%ld,", v / 1000000);
v %= 1000000;
seq_printf(seq, "%03ld,%03ld", v/1000, v % 1000);
} else if (likely(v >= 1000))
seq_printf(seq, "%ld,%03ld", v/1000, v % 1000);
else
seq_printf(seq, "%ld", v);
}
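/*
 * Example output (illustrative): with v given in kB/sec,
 *	v = 123     -> "123"
 *	v = 6456    -> "6,456"
 *	v = 1234567 -> "1,234,567"
 */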
static void drbd_get_syncer_progress(struct drbd_device *device,
union drbd_dev_state state, unsigned long *rs_total,
unsigned long *bits_left, unsigned int *per_mil_done)
{
/* this is to break it at compile time when we change that, in case we
* want to support more than (1<<32) bits on a 32bit arch. */
typecheck(unsigned long, device->rs_total);
*rs_total = device->rs_total;
/* note: both rs_total and rs_left are in bits, i.e. in
* units of BM_BLOCK_SIZE.
* for the percentage, we don't care. */
if (state.conn == C_VERIFY_S || state.conn == C_VERIFY_T)
*bits_left = device->ov_left;
else
*bits_left = drbd_bm_total_weight(device) - device->rs_failed;
/* >> 10 to prevent overflow,
* +1 to prevent division by zero */
if (*bits_left > *rs_total) {
/* D'oh. Maybe a logic bug somewhere. More likely just a race
* between state change and reset of rs_total.
*/
*bits_left = *rs_total;
*per_mil_done = *rs_total ? 0 : 1000;
} else {
/* Make sure the division happens in long context.
* We allow up to one petabyte storage right now,
* at a granularity of 4k per bit that is 2**38 bits.
* After shift right and multiplication by 1000,
* this should still fit easily into a 32bit long,
* so we don't need a 64bit division on 32bit arch.
* Note: currently we don't support such large bitmaps on 32bit
* arch anyways, but no harm done to be prepared for it here.
*/
unsigned int shift = *rs_total > UINT_MAX ? 16 : 10;
unsigned long left = *bits_left >> shift;
unsigned long total = 1UL + (*rs_total >> shift);
unsigned long tmp = 1000UL - left * 1000UL/total;
*per_mil_done = tmp;
}
}
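/*
 * Worked example (illustrative, with assumed numbers): for
 * *rs_total = 1000000 bits and *bits_left = 250000 bits, shift is 10, so
 *	left  = 250000 >> 10        = 244
 *	total = 1 + (1000000 >> 10) = 977
 *	*per_mil_done = 1000 - 244 * 1000 / 977 = 751
 * i.e. roughly 75.1% done.
 */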
/*lge
* progress bars shamelessly adapted from driver/md/md.c
* output looks like
* [=====>..............] 33.5% (23456/123456)
* finish: 2:20:20 speed: 6,345 (6,456) K/sec
*/
static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *seq,
union drbd_dev_state state)
{
unsigned long db, dt, dbdt, rt, rs_total, rs_left;
unsigned int res;
int i, x, y;
int stalled = 0;
drbd_get_syncer_progress(device, state, &rs_total, &rs_left, &res);
x = res/50;
y = 20-x;
seq_puts(seq, "\t[");
for (i = 1; i < x; i++)
seq_putc(seq, '=');
seq_putc(seq, '>');
for (i = 0; i < y; i++)
seq_putc(seq, '.');
seq_puts(seq, "] ");
if (state.conn == C_VERIFY_S || state.conn == C_VERIFY_T)
seq_puts(seq, "verified:");
else
seq_puts(seq, "sync'ed:");
seq_printf(seq, "%3u.%u%% ", res / 10, res % 10);
/* if more than a few GB, display in MB */
if (rs_total > (4UL << (30 - BM_BLOCK_SHIFT)))
seq_printf(seq, "(%lu/%lu)M",
(unsigned long) Bit2KB(rs_left >> 10),
(unsigned long) Bit2KB(rs_total >> 10));
else
seq_printf(seq, "(%lu/%lu)K",
(unsigned long) Bit2KB(rs_left),
(unsigned long) Bit2KB(rs_total));
seq_puts(seq, "\n\t");
/* see drivers/md/md.c
* We do not want to overflow, so the order of operands and
* the * 100 / 100 trick are important. We do a +1 to be
* safe against division by zero. We only estimate anyway.
*
* dt: time from mark until now
* db: blocks written from mark until now
* rt: remaining time
*/
/* Rolling marks. last_mark+1 may just now be modified. last_mark+2 is
* at least (DRBD_SYNC_MARKS-2)*DRBD_SYNC_MARK_STEP old, and has at
* least DRBD_SYNC_MARK_STEP time before it will be modified. */
/* ------------------------ ~18s average ------------------------ */
i = (device->rs_last_mark + 2) % DRBD_SYNC_MARKS;
dt = (jiffies - device->rs_mark_time[i]) / HZ;
if (dt > 180)
stalled = 1;
if (!dt)
dt++;
db = device->rs_mark_left[i] - rs_left;
rt = (dt * (rs_left / (db/100+1)))/100; /* seconds */
seq_printf(seq, "finish: %lu:%02lu:%02lu",
rt / 3600, (rt % 3600) / 60, rt % 60);
dbdt = Bit2KB(db/dt);
seq_puts(seq, " speed: ");
seq_printf_with_thousands_grouping(seq, dbdt);
seq_puts(seq, " (");
/* ------------------------- ~3s average ------------------------ */
if (drbd_proc_details >= 1) {
/* this is what drbd_rs_should_slow_down() uses */
i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
dt = (jiffies - device->rs_mark_time[i]) / HZ;
if (!dt)
dt++;
db = device->rs_mark_left[i] - rs_left;
dbdt = Bit2KB(db/dt);
seq_printf_with_thousands_grouping(seq, dbdt);
seq_puts(seq, " -- ");
}
/* --------------------- long term average ---------------------- */
/* mean speed since syncer started
* we do account for PausedSync periods */
dt = (jiffies - device->rs_start - device->rs_paused) / HZ;
if (dt == 0)
dt = 1;
db = rs_total - rs_left;
dbdt = Bit2KB(db/dt);
seq_printf_with_thousands_grouping(seq, dbdt);
seq_putc(seq, ')');
if (state.conn == C_SYNC_TARGET ||
state.conn == C_VERIFY_S) {
seq_puts(seq, " want: ");
seq_printf_with_thousands_grouping(seq, device->c_sync_rate);
}
seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : "");
if (drbd_proc_details >= 1) {
/* 64 bit:
* we convert to sectors in the display below. */
unsigned long bm_bits = drbd_bm_bits(device);
unsigned long bit_pos;
unsigned long long stop_sector = 0;
if (state.conn == C_VERIFY_S ||
state.conn == C_VERIFY_T) {
bit_pos = bm_bits - device->ov_left;
if (verify_can_do_stop_sector(device))
stop_sector = device->ov_stop_sector;
} else
bit_pos = device->bm_resync_fo;
/* Total sectors may be slightly off for oddly
* sized devices. So what. */
seq_printf(seq,
"\t%3d%% sector pos: %llu/%llu",
(int)(bit_pos / (bm_bits/100+1)),
(unsigned long long)bit_pos * BM_SECT_PER_BIT,
(unsigned long long)bm_bits * BM_SECT_PER_BIT);
if (stop_sector != 0 && stop_sector != ULLONG_MAX)
seq_printf(seq, " stop sector: %llu", stop_sector);
seq_putc(seq, '\n');
}
}
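/*
 * Worked example (illustrative, with assumed numbers): with dt = 18 seconds,
 * db = 90000 bits written since the mark and rs_left = 900000 bits,
 *	rt   = (18 * (900000 / (90000/100 + 1))) / 100 = (18 * 998) / 100 = 179 s
 *	dbdt = Bit2KB(90000 / 18) = 4 * 5000 = 20000 kB/sec
 * assuming the default 4 KiB per bitmap bit, so the progress line would show
 * "finish: 0:02:59 speed: 20,000 ... K/sec".
 */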
int drbd_seq_show(struct seq_file *seq, void *v)
{
int i, prev_i = -1;
const char *sn;
struct drbd_device *device;
struct net_conf *nc;
union drbd_dev_state state;
char wp;
static char write_ordering_chars[] = {
[WO_NONE] = 'n',
[WO_DRAIN_IO] = 'd',
[WO_BDEV_FLUSH] = 'f',
};
seq_printf(seq, "version: " REL_VERSION " (api:%d/proto:%d-%d)\n%s\n",
GENL_MAGIC_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX, drbd_buildtag());
/*
cs .. connection state
ro .. node role (local/remote)
ds .. disk state (local/remote)
protocol
various flags
ns .. network send
nr .. network receive
dw .. disk write
dr .. disk read
al .. activity log write count
bm .. bitmap update write count
pe .. pending (waiting for ack or data reply)
ua .. unack'd (still need to send ack or data reply)
ap .. application requests accepted, but not yet completed
ep .. number of epochs currently "on the fly", P_BARRIER_ACK pending
wo .. write ordering mode currently in use
oos .. known out-of-sync kB
*/
rcu_read_lock();
idr_for_each_entry(&drbd_devices, device, i) {
if (prev_i != i - 1)
seq_putc(seq, '\n');
prev_i = i;
state = device->state;
sn = drbd_conn_str(state.conn);
if (state.conn == C_STANDALONE &&
state.disk == D_DISKLESS &&
state.role == R_SECONDARY) {
seq_printf(seq, "%2d: cs:Unconfigured\n", i);
} else {
/* reset device->congestion_reason */
nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
seq_printf(seq,
"%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n"
" ns:%u nr:%u dw:%u dr:%u al:%u bm:%u "
"lo:%d pe:%d ua:%d ap:%d ep:%d wo:%c",
i, sn,
drbd_role_str(state.role),
drbd_role_str(state.peer),
drbd_disk_str(state.disk),
drbd_disk_str(state.pdsk),
wp,
drbd_suspended(device) ? 's' : 'r',
state.aftr_isp ? 'a' : '-',
state.peer_isp ? 'p' : '-',
state.user_isp ? 'u' : '-',
device->congestion_reason ?: '-',
test_bit(AL_SUSPENDED, &device->flags) ? 's' : '-',
device->send_cnt/2,
device->recv_cnt/2,
device->writ_cnt/2,
device->read_cnt/2,
device->al_writ_cnt,
device->bm_writ_cnt,
atomic_read(&device->local_cnt),
atomic_read(&device->ap_pending_cnt) +
atomic_read(&device->rs_pending_cnt),
atomic_read(&device->unacked_cnt),
atomic_read(&device->ap_bio_cnt),
first_peer_device(device)->connection->epochs,
write_ordering_chars[device->resource->write_ordering]
);
seq_printf(seq, " oos:%llu\n",
Bit2KB((unsigned long long)
drbd_bm_total_weight(device)));
}
if (state.conn == C_SYNC_SOURCE ||
state.conn == C_SYNC_TARGET ||
state.conn == C_VERIFY_S ||
state.conn == C_VERIFY_T)
drbd_syncer_progress(device, seq, state);
if (drbd_proc_details >= 1 && get_ldev_if_state(device, D_FAILED)) {
lc_seq_printf_stats(seq, device->resync);
lc_seq_printf_stats(seq, device->act_log);
put_ldev(device);
}
if (drbd_proc_details >= 2)
seq_printf(seq, "\tblocked on activity log: %d\n", atomic_read(&device->ap_actlog_cnt));
}
rcu_read_unlock();
return 0;
}
| linux-master | drivers/block/drbd/drbd_proc.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <asm/bug.h>
#include <linux/rbtree_augmented.h>
#include "drbd_interval.h"
/*
* interval_end - return end of @node
*/
static inline
sector_t interval_end(struct rb_node *node)
{
struct drbd_interval *this = rb_entry(node, struct drbd_interval, rb);
return this->end;
}
#define NODE_END(node) ((node)->sector + ((node)->size >> 9))
RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
struct drbd_interval, rb, sector_t, end, NODE_END);
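/*
 * Illustrative note: RB_DECLARE_CALLBACKS_MAX() turns this into an augmented
 * rbtree where each node's ->end caches the maximum NODE_END() (i.e.
 * sector + size in 512-byte sectors) of its whole subtree.  That is what lets
 * drbd_find_overlap() below prune: if sector >= interval_end(node->rb_left),
 * no interval in the left subtree can overlap [sector, sector + size) and the
 * search proceeds to the right.
 */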
/*
* drbd_insert_interval - insert a new interval into a tree
*/
bool
drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
{
struct rb_node **new = &root->rb_node, *parent = NULL;
sector_t this_end = this->sector + (this->size >> 9);
BUG_ON(!IS_ALIGNED(this->size, 512));
while (*new) {
struct drbd_interval *here =
rb_entry(*new, struct drbd_interval, rb);
parent = *new;
if (here->end < this_end)
here->end = this_end;
if (this->sector < here->sector)
new = &(*new)->rb_left;
else if (this->sector > here->sector)
new = &(*new)->rb_right;
else if (this < here)
new = &(*new)->rb_left;
else if (this > here)
new = &(*new)->rb_right;
else
return false;
}
this->end = this_end;
rb_link_node(&this->rb, parent, new);
rb_insert_augmented(&this->rb, root, &augment_callbacks);
return true;
}
/**
* drbd_contains_interval - check if a tree contains a given interval
* @root: red black tree root
* @sector: start sector of @interval
* @interval: may be an invalid pointer
*
 * Returns %true if the tree contains the node @interval with start sector @sector.
 * Does not dereference @interval until @interval is known to be a valid object
 * in @root. Returns %false if @interval is in the tree but with a different
* sector number.
*/
bool
drbd_contains_interval(struct rb_root *root, sector_t sector,
struct drbd_interval *interval)
{
struct rb_node *node = root->rb_node;
while (node) {
struct drbd_interval *here =
rb_entry(node, struct drbd_interval, rb);
if (sector < here->sector)
node = node->rb_left;
else if (sector > here->sector)
node = node->rb_right;
else if (interval < here)
node = node->rb_left;
else if (interval > here)
node = node->rb_right;
else
return true;
}
return false;
}
/*
* drbd_remove_interval - remove an interval from a tree
*/
void
drbd_remove_interval(struct rb_root *root, struct drbd_interval *this)
{
/* avoid endless loop */
if (drbd_interval_empty(this))
return;
rb_erase_augmented(&this->rb, root, &augment_callbacks);
}
/**
* drbd_find_overlap - search for an interval overlapping with [sector, sector + size)
* @root: red black tree root
* @sector: start sector
* @size: size, aligned to 512 bytes
*
* Returns an interval overlapping with [sector, sector + size), or NULL if
* there is none. When there is more than one overlapping interval in the
* tree, the interval with the lowest start sector is returned, and all other
* overlapping intervals will be on the right side of the tree, reachable with
* rb_next().
*/
struct drbd_interval *
drbd_find_overlap(struct rb_root *root, sector_t sector, unsigned int size)
{
struct rb_node *node = root->rb_node;
struct drbd_interval *overlap = NULL;
sector_t end = sector + (size >> 9);
BUG_ON(!IS_ALIGNED(size, 512));
while (node) {
struct drbd_interval *here =
rb_entry(node, struct drbd_interval, rb);
if (node->rb_left &&
sector < interval_end(node->rb_left)) {
/* Overlap if any must be on left side */
node = node->rb_left;
} else if (here->sector < end &&
sector < here->sector + (here->size >> 9)) {
overlap = here;
break;
} else if (sector >= here->sector) {
/* Overlap if any must be on right side */
node = node->rb_right;
} else
break;
}
return overlap;
}
struct drbd_interval *
drbd_next_overlap(struct drbd_interval *i, sector_t sector, unsigned int size)
{
sector_t end = sector + (size >> 9);
struct rb_node *node;
for (;;) {
node = rb_next(&i->rb);
if (!node)
return NULL;
i = rb_entry(node, struct drbd_interval, rb);
if (i->sector >= end)
return NULL;
if (sector < i->sector + (i->size >> 9))
return i;
}
}
| linux-master | drivers/block/drbd/drbd_interval.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
drbd_bitmap.c
This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
Copyright (C) 2004-2008, LINBIT Information Technologies GmbH.
Copyright (C) 2004-2008, Philipp Reisner <[email protected]>.
Copyright (C) 2004-2008, Lars Ellenberg <[email protected]>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/drbd.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include "drbd_int.h"
/* OPAQUE outside this file!
* interface defined in drbd_int.h
* convention:
* function name drbd_bm_... => used elsewhere, "public".
* function name bm_... => internal to implementation, "private".
*/
/*
* LIMITATIONS:
 * We want to support >= 1 petabyte of backend storage, while for now still using
* a granularity of one bit per 4KiB of storage.
* 1 << 50 bytes backend storage (1 PiB)
* 1 << (50 - 12) bits needed
* 38 --> we need u64 to index and count bits
* 1 << (38 - 3) bitmap bytes needed
* 35 --> we still need u64 to index and count bytes
* (that's 32 GiB of bitmap for 1 PiB storage)
* 1 << (35 - 2) 32bit longs needed
* 33 --> we'd even need u64 to index and count 32bit long words.
* 1 << (35 - 3) 64bit longs needed
* 32 --> we could get away with a 32bit unsigned int to index and count
* 64bit long words, but I rather stay with unsigned long for now.
* We probably should neither count nor point to bytes or long words
* directly, but either by bitnumber, or by page index and offset.
* 1 << (35 - 12)
* 22 --> we need that much 4KiB pages of bitmap.
* 1 << (22 + 3) --> on a 64bit arch,
* we need 32 MiB to store the array of page pointers.
*
* Because I'm lazy, and because the resulting patch was too large, too ugly
* and still incomplete, on 32bit we still "only" support 16 TiB (minus some),
* (1 << 32) bits * 4k storage.
*
* bitmap storage and IO:
* Bitmap is stored little endian on disk, and is kept little endian in
* core memory. Currently we still hold the full bitmap in core as long
* as we are "attached" to a local disk, which at 32 GiB for 1PiB storage
* seems excessive.
*
* We plan to reduce the amount of in-core bitmap pages by paging them in
* and out against their on-disk location as necessary, but need to make
* sure we don't cause too much meta data IO, and must not deadlock in
* tight memory situations. This needs some more work.
*/
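/*
 * Worked example (illustrative, with assumed numbers): a 1 TiB backing device
 * at one bit per 4 KiB needs 1 << (40 - 12) = 1 << 28 bits, i.e. 32 MiB of
 * bitmap spread over 8192 4 KiB pages, plus (on 64bit) a 64 KiB array of
 * page pointers.
 */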
/*
* NOTE
* Access to the *bm_pages is protected by bm_lock.
* It is safe to read the other members within the lock.
*
* drbd_bm_set_bits is called from bio_endio callbacks,
* We may be called with irq already disabled,
* so we need spin_lock_irqsave().
* And we need the kmap_atomic.
*/
struct drbd_bitmap {
struct page **bm_pages;
spinlock_t bm_lock;
/* exclusively to be used by __al_write_transaction(),
* drbd_bm_mark_for_writeout() and
* and drbd_bm_write_hinted() -> bm_rw() called from there.
*/
unsigned int n_bitmap_hints;
unsigned int al_bitmap_hints[AL_UPDATES_PER_TRANSACTION];
/* see LIMITATIONS: above */
unsigned long bm_set; /* nr of set bits; THINK maybe atomic_t? */
unsigned long bm_bits;
size_t bm_words;
size_t bm_number_of_pages;
sector_t bm_dev_capacity;
struct mutex bm_change; /* serializes resize operations */
wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */
enum bm_flag bm_flags;
/* debugging aid, in case we are still racy somewhere */
char *bm_why;
struct task_struct *bm_task;
};
#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
static void __bm_print_lock_info(struct drbd_device *device, const char *func)
{
struct drbd_bitmap *b = device->bitmap;
if (!drbd_ratelimit())
return;
drbd_err(device, "FIXME %s[%d] in %s, bitmap locked for '%s' by %s[%d]\n",
current->comm, task_pid_nr(current),
func, b->bm_why ?: "?",
b->bm_task->comm, task_pid_nr(b->bm_task));
}
void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags)
{
struct drbd_bitmap *b = device->bitmap;
int trylock_failed;
if (!b) {
drbd_err(device, "FIXME no bitmap in drbd_bm_lock!?\n");
return;
}
trylock_failed = !mutex_trylock(&b->bm_change);
if (trylock_failed) {
drbd_warn(device, "%s[%d] going to '%s' but bitmap already locked for '%s' by %s[%d]\n",
current->comm, task_pid_nr(current),
why, b->bm_why ?: "?",
b->bm_task->comm, task_pid_nr(b->bm_task));
mutex_lock(&b->bm_change);
}
if (BM_LOCKED_MASK & b->bm_flags)
drbd_err(device, "FIXME bitmap already locked in bm_lock\n");
b->bm_flags |= flags & BM_LOCKED_MASK;
b->bm_why = why;
b->bm_task = current;
}
void drbd_bm_unlock(struct drbd_device *device)
{
struct drbd_bitmap *b = device->bitmap;
if (!b) {
drbd_err(device, "FIXME no bitmap in drbd_bm_unlock!?\n");
return;
}
if (!(BM_LOCKED_MASK & device->bitmap->bm_flags))
drbd_err(device, "FIXME bitmap not locked in bm_unlock\n");
b->bm_flags &= ~BM_LOCKED_MASK;
b->bm_why = NULL;
b->bm_task = NULL;
mutex_unlock(&b->bm_change);
}
/* we store some "meta" info about our pages in page->private */
/* at a granularity of 4k storage per bitmap bit:
* one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks
* 1<<38 bits,
* 1<<23 4k bitmap pages.
* Use 24 bits as page index, covers 2 peta byte storage
* at a granularity of 4k per bit.
* Used to report the failed page idx on io error from the endio handlers.
*/
#define BM_PAGE_IDX_MASK ((1UL<<24)-1)
/* this page is currently read in, or written back */
#define BM_PAGE_IO_LOCK 31
/* if there has been an IO error for this page */
#define BM_PAGE_IO_ERROR 30
/* this is to be able to intelligently skip disk IO,
* set if bits have been set since last IO. */
#define BM_PAGE_NEED_WRITEOUT 29
/* to mark for lazy writeout once syncer cleared all clearable bits,
 * set if bits have been cleared since last IO. */
#define BM_PAGE_LAZY_WRITEOUT 28
/* pages marked with this "HINT" will be considered for writeout
* on activity log transactions */
#define BM_PAGE_HINT_WRITEOUT 27
/* store_page_idx uses non-atomic assignment. It is only used directly after
* allocating the page. All other bm_set_page_* and bm_clear_page_* need to
* use atomic bit manipulation, as set_out_of_sync (and therefore bitmap
* changes) may happen from various contexts, and wait_on_bit/wake_up_bit
* requires it all to be atomic as well. */
static void bm_store_page_idx(struct page *page, unsigned long idx)
{
BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
set_page_private(page, idx);
}
static unsigned long bm_page_to_idx(struct page *page)
{
return page_private(page) & BM_PAGE_IDX_MASK;
}
/* As it is very unlikely that the same page is under IO from more than one
* context, we can get away with a bit per page and one wait queue per bitmap.
*/
static void bm_page_lock_io(struct drbd_device *device, int page_nr)
{
struct drbd_bitmap *b = device->bitmap;
void *addr = &page_private(b->bm_pages[page_nr]);
wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
}
static void bm_page_unlock_io(struct drbd_device *device, int page_nr)
{
struct drbd_bitmap *b = device->bitmap;
void *addr = &page_private(b->bm_pages[page_nr]);
clear_bit_unlock(BM_PAGE_IO_LOCK, addr);
wake_up(&device->bitmap->bm_io_wait);
}
/* set _before_ submit_io, so it may be reset due to being changed
* while this page is in flight... will get submitted later again */
static void bm_set_page_unchanged(struct page *page)
{
/* use cmpxchg? */
clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}
static void bm_set_page_need_writeout(struct page *page)
{
set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
}
void drbd_bm_reset_al_hints(struct drbd_device *device)
{
device->bitmap->n_bitmap_hints = 0;
}
/**
* drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout
* @device: DRBD device.
* @page_nr: the bitmap page to mark with the "hint" flag
*
* From within an activity log transaction, we mark a few pages with these
* hints, then call drbd_bm_write_hinted(), which will only write out changed
* pages which are flagged with this mark.
*/
void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr)
{
struct drbd_bitmap *b = device->bitmap;
struct page *page;
if (page_nr >= device->bitmap->bm_number_of_pages) {
drbd_warn(device, "BAD: page_nr: %u, number_of_pages: %u\n",
page_nr, (int)device->bitmap->bm_number_of_pages);
return;
}
page = device->bitmap->bm_pages[page_nr];
BUG_ON(b->n_bitmap_hints >= ARRAY_SIZE(b->al_bitmap_hints));
if (!test_and_set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page)))
b->al_bitmap_hints[b->n_bitmap_hints++] = page_nr;
}
static int bm_test_page_unchanged(struct page *page)
{
volatile const unsigned long *addr = &page_private(page);
return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0;
}
static void bm_set_page_io_err(struct page *page)
{
set_bit(BM_PAGE_IO_ERROR, &page_private(page));
}
static void bm_clear_page_io_err(struct page *page)
{
clear_bit(BM_PAGE_IO_ERROR, &page_private(page));
}
static void bm_set_page_lazy_writeout(struct page *page)
{
set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}
static int bm_test_page_lazy_writeout(struct page *page)
{
return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}
/* on a 32bit box, this would allow for exactly (2<<38) bits. */
static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr)
{
/* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */
unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3);
BUG_ON(page_nr >= b->bm_number_of_pages);
return page_nr;
}
static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
{
/* page_nr = (bitnr/8) >> PAGE_SHIFT; */
unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3);
BUG_ON(page_nr >= b->bm_number_of_pages);
return page_nr;
}
static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
struct page *page = b->bm_pages[idx];
return (unsigned long *) kmap_atomic(page);
}
static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
return __bm_map_pidx(b, idx);
}
static void __bm_unmap(unsigned long *p_addr)
{
kunmap_atomic(p_addr);
}
static void bm_unmap(unsigned long *p_addr)
{
return __bm_unmap(p_addr);
}
/* long word offset of _bitmap_ sector */
#define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
/* word offset from start of bitmap to word number _in_page_
* modulo longs per page
#define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long))
hm, well, Philipp thinks gcc might not optimize the % into & (... - 1)
so do it explicitly:
*/
#define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))
/* Long words per page */
#define LWPP (PAGE_SIZE/sizeof(long))
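/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096 and 64bit longs):
 * LWPP == 512, so bitmap word 1500 lives in page 1500 / 512 == 2
 * (bm_word_to_page_idx()) at word offset MLPP(1500) == 1500 & 511 == 476
 * within that page.
 */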
/*
* actually most functions herein should take a struct drbd_bitmap*, not a
* struct drbd_device*, but for the debug macros I like to have the device around
 * to be able to report device-specific messages.
*/
static void bm_free_pages(struct page **pages, unsigned long number)
{
unsigned long i;
if (!pages)
return;
for (i = 0; i < number; i++) {
if (!pages[i]) {
pr_alert("bm_free_pages tried to free a NULL pointer; i=%lu n=%lu\n",
i, number);
continue;
}
__free_page(pages[i]);
pages[i] = NULL;
}
}
static inline void bm_vk_free(void *ptr)
{
kvfree(ptr);
}
/*
* "have" and "want" are NUMBER OF PAGES.
*/
static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
{
struct page **old_pages = b->bm_pages;
struct page **new_pages, *page;
unsigned int i, bytes;
unsigned long have = b->bm_number_of_pages;
BUG_ON(have == 0 && old_pages != NULL);
BUG_ON(have != 0 && old_pages == NULL);
if (have == want)
return old_pages;
/* Trying kmalloc first, falling back to vmalloc.
* GFP_NOIO, as this is called while drbd IO is "suspended",
* and during resize or attach on diskless Primary,
* we must not block on IO to ourselves.
* Context is receiver thread or dmsetup. */
bytes = sizeof(struct page *)*want;
new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);
if (!new_pages) {
new_pages = __vmalloc(bytes, GFP_NOIO | __GFP_ZERO);
if (!new_pages)
return NULL;
}
if (want >= have) {
for (i = 0; i < have; i++)
new_pages[i] = old_pages[i];
for (; i < want; i++) {
page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
if (!page) {
bm_free_pages(new_pages + have, i - have);
bm_vk_free(new_pages);
return NULL;
}
/* we want to know which page it is
* from the endio handlers */
bm_store_page_idx(page, i);
new_pages[i] = page;
}
} else {
for (i = 0; i < want; i++)
new_pages[i] = old_pages[i];
/* NOT HERE, we are outside the spinlock!
bm_free_pages(old_pages + want, have - want);
*/
}
return new_pages;
}
/*
* allocates the drbd_bitmap and stores it in device->bitmap.
*/
int drbd_bm_init(struct drbd_device *device)
{
struct drbd_bitmap *b = device->bitmap;
WARN_ON(b != NULL);
b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
if (!b)
return -ENOMEM;
spin_lock_init(&b->bm_lock);
mutex_init(&b->bm_change);
init_waitqueue_head(&b->bm_io_wait);
device->bitmap = b;
return 0;
}
sector_t drbd_bm_capacity(struct drbd_device *device)
{
if (!expect(device, device->bitmap))
return 0;
return device->bitmap->bm_dev_capacity;
}
/* called on driver unload. TODO: call when a device is destroyed.
*/
void drbd_bm_cleanup(struct drbd_device *device)
{
if (!expect(device, device->bitmap))
return;
bm_free_pages(device->bitmap->bm_pages, device->bitmap->bm_number_of_pages);
bm_vk_free(device->bitmap->bm_pages);
kfree(device->bitmap);
device->bitmap = NULL;
}
/*
* since (b->bm_bits % BITS_PER_LONG) != 0,
* this masks out the remaining bits.
* Returns the number of bits cleared.
*/
#ifndef BITS_PER_PAGE
#define BITS_PER_PAGE (1UL << (PAGE_SHIFT + 3))
#define BITS_PER_PAGE_MASK (BITS_PER_PAGE - 1)
#else
# if BITS_PER_PAGE != (1UL << (PAGE_SHIFT + 3))
# error "ambiguous BITS_PER_PAGE"
# endif
#endif
#define BITS_PER_LONG_MASK (BITS_PER_LONG - 1)
static int bm_clear_surplus(struct drbd_bitmap *b)
{
unsigned long mask;
unsigned long *p_addr, *bm;
int tmp;
int cleared = 0;
/* number of bits modulo bits per page */
tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
/* mask the used bits of the word containing the last bit */
mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
/* bitmap is always stored little endian,
* on disk and in core memory alike */
mask = cpu_to_lel(mask);
p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
bm = p_addr + (tmp/BITS_PER_LONG);
if (mask) {
/* If mask != 0, we are not exactly aligned, so bm now points
* to the long containing the last bit.
* If mask == 0, bm already points to the word immediately
* after the last (long word aligned) bit. */
cleared = hweight_long(*bm & ~mask);
*bm &= mask;
bm++;
}
if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
/* on a 32bit arch, we may need to zero out
* a padding long to align with a 64bit remote */
cleared += hweight_long(*bm);
*bm = 0;
}
bm_unmap(p_addr);
return cleared;
}
static void bm_set_surplus(struct drbd_bitmap *b)
{
unsigned long mask;
unsigned long *p_addr, *bm;
int tmp;
/* number of bits modulo bits per page */
tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
/* mask the used bits of the word containing the last bit */
mask = (1UL << (tmp & BITS_PER_LONG_MASK)) - 1;
/* bitmap is always stored little endian,
* on disk and in core memory alike */
mask = cpu_to_lel(mask);
p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
bm = p_addr + (tmp/BITS_PER_LONG);
if (mask) {
/* If mask != 0, we are not exactly aligned, so bm now points
* to the long containing the last bit.
* If mask == 0, bm already points to the word immediately
* after the last (long word aligned) bit. */
*bm |= ~mask;
bm++;
}
if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
/* on a 32bit arch, we may need to zero out
* a padding long to align with a 64bit remote */
*bm = ~0UL;
}
bm_unmap(p_addr);
}
/* you better not modify the bitmap while this is running,
* or its results will be stale */
static unsigned long bm_count_bits(struct drbd_bitmap *b)
{
unsigned long *p_addr;
unsigned long bits = 0;
unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) - 1;
int idx, last_word;
/* all but last page */
for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
p_addr = __bm_map_pidx(b, idx);
bits += bitmap_weight(p_addr, BITS_PER_PAGE);
__bm_unmap(p_addr);
cond_resched();
}
/* last (or only) page */
last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
p_addr = __bm_map_pidx(b, idx);
bits += bitmap_weight(p_addr, last_word * BITS_PER_LONG);
p_addr[last_word] &= cpu_to_lel(mask);
bits += hweight_long(p_addr[last_word]);
/* 32bit arch, may have an unused padding long */
if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
p_addr[last_word+1] = 0;
__bm_unmap(p_addr);
return bits;
}
/* offset and len in long words. */
static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
{
unsigned long *p_addr, *bm;
unsigned int idx;
size_t do_now, end;
end = offset + len;
if (end > b->bm_words) {
pr_alert("bm_memset end > bm_words\n");
return;
}
while (offset < end) {
do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
idx = bm_word_to_page_idx(b, offset);
p_addr = bm_map_pidx(b, idx);
bm = p_addr + MLPP(offset);
if (bm+do_now > p_addr + LWPP) {
pr_alert("BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
p_addr, bm, (int)do_now);
} else
memset(bm, c, do_now * sizeof(long));
bm_unmap(p_addr);
bm_set_page_need_writeout(b->bm_pages[idx]);
offset += do_now;
}
}
/* For the layout, see comment above drbd_md_set_sector_offsets(). */
static u64 drbd_md_on_disk_bits(struct drbd_backing_dev *ldev)
{
u64 bitmap_sectors;
if (ldev->md.al_offset == 8)
bitmap_sectors = ldev->md.md_size_sect - ldev->md.bm_offset;
else
bitmap_sectors = ldev->md.al_offset - ldev->md.bm_offset;
return bitmap_sectors << (9 + 3);
}
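/*
 * Example of the unit conversion above (illustrative): 8192 on-disk bitmap
 * sectors are 8192 * 512 bytes * 8 = 33,554,432 bits; with DRBD's 4 KiB
 * resync granularity (BM_BLOCK_SIZE) per bit that covers 128 TiB of backing
 * storage.
 */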
/*
* make sure the bitmap has enough room for the attached storage,
* if necessary, resize.
* called whenever we may have changed the device size.
* returns -ENOMEM if we could not allocate enough memory, 0 on success.
* In case this is actually a resize, we copy the old bitmap into the new one.
* Otherwise, the bitmap is initialized to all bits set.
*/
int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bits)
{
struct drbd_bitmap *b = device->bitmap;
unsigned long bits, words, owords, obits;
unsigned long want, have, onpages; /* number of pages */
struct page **npages, **opages = NULL;
int err = 0;
bool growing;
if (!expect(device, b))
return -ENOMEM;
drbd_bm_lock(device, "resize", BM_LOCKED_MASK);
drbd_info(device, "drbd_bm_resize called with capacity == %llu\n",
(unsigned long long)capacity);
if (capacity == b->bm_dev_capacity)
goto out;
if (capacity == 0) {
spin_lock_irq(&b->bm_lock);
opages = b->bm_pages;
onpages = b->bm_number_of_pages;
owords = b->bm_words;
b->bm_pages = NULL;
b->bm_number_of_pages =
b->bm_set =
b->bm_bits =
b->bm_words =
b->bm_dev_capacity = 0;
spin_unlock_irq(&b->bm_lock);
bm_free_pages(opages, onpages);
bm_vk_free(opages);
goto out;
}
bits = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));
/* If we used
   words = ALIGN(bits, BITS_PER_LONG) >> LN2_BPL;
   a 32bit host could present the wrong number of words
   to a 64bit host.
*/
words = ALIGN(bits, 64) >> LN2_BPL;
if (get_ldev(device)) {
u64 bits_on_disk = drbd_md_on_disk_bits(device->ldev);
put_ldev(device);
if (bits > bits_on_disk) {
drbd_info(device, "bits = %lu\n", bits);
drbd_info(device, "bits_on_disk = %llu\n", bits_on_disk);
err = -ENOSPC;
goto out;
}
}
want = PFN_UP(words*sizeof(long));
have = b->bm_number_of_pages;
if (want == have) {
D_ASSERT(device, b->bm_pages != NULL);
npages = b->bm_pages;
} else {
if (drbd_insert_fault(device, DRBD_FAULT_BM_ALLOC))
npages = NULL;
else
npages = bm_realloc_pages(b, want);
}
if (!npages) {
err = -ENOMEM;
goto out;
}
spin_lock_irq(&b->bm_lock);
opages = b->bm_pages;
owords = b->bm_words;
obits = b->bm_bits;
growing = bits > obits;
if (opages && growing && set_new_bits)
bm_set_surplus(b);
b->bm_pages = npages;
b->bm_number_of_pages = want;
b->bm_bits = bits;
b->bm_words = words;
b->bm_dev_capacity = capacity;
if (growing) {
if (set_new_bits) {
bm_memset(b, owords, 0xff, words-owords);
b->bm_set += bits - obits;
} else
bm_memset(b, owords, 0x00, words-owords);
}
if (want < have) {
/* implicit: (opages != NULL) && (opages != npages) */
bm_free_pages(opages + want, have - want);
}
(void)bm_clear_surplus(b);
spin_unlock_irq(&b->bm_lock);
if (opages != npages)
bm_vk_free(opages);
if (!growing)
b->bm_set = bm_count_bits(b);
drbd_info(device, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);
out:
drbd_bm_unlock(device);
return err;
}
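/*
 * Caller sketch (assumption, not enforced here): drbd_bm_resize() is
 * typically driven from the attach and resize paths (e.g.
 * drbd_determine_dev_size()), while DRBD IO is suspended or not yet
 * possible; the BM_LOCKED_MASK lock taken above keeps the bitmap stable
 * while the page array is swapped.
 */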
/* inherently racy:
* if not protected by other means, return value may be out of date when
* leaving this function...
* we still need to lock it, since it is important that this returns
* bm_set == 0 precisely.
*
* maybe bm_set should be atomic_t ?
*/
unsigned long _drbd_bm_total_weight(struct drbd_device *device)
{
struct drbd_bitmap *b = device->bitmap;
unsigned long s;
unsigned long flags;
if (!expect(device, b))
return 0;
if (!expect(device, b->bm_pages))
return 0;
spin_lock_irqsave(&b->bm_lock, flags);
s = b->bm_set;
spin_unlock_irqrestore(&b->bm_lock, flags);
return s;
}
unsigned long drbd_bm_total_weight(struct drbd_device *device)
{
unsigned long s;
/* if I don't have a disk, I don't know about out-of-sync status */
if (!get_ldev_if_state(device, D_NEGOTIATING))
return 0;
s = _drbd_bm_total_weight(device);
put_ldev(device);
return s;
}
size_t drbd_bm_words(struct drbd_device *device)
{
struct drbd_bitmap *b = device->bitmap;
if (!expect(device, b))
return 0;
if (!expect(device, b->bm_pages))
return 0;
return b->bm_words;
}
unsigned long drbd_bm_bits(struct drbd_device *device)
{
struct drbd_bitmap *b = device->bitmap;
if (!expect(device, b))
return 0;
return b->bm_bits;
}
/* merge number words from buffer into the bitmap starting at offset.
* buffer[i] is expected to be little endian unsigned long.
* bitmap must be locked by drbd_bm_lock.
* currently only used from receive_bitmap.
*/
void drbd_bm_merge_lel(struct drbd_device *device, size_t offset, size_t number,
unsigned long *buffer)
{
struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr, *bm;
unsigned long word, bits;
unsigned int idx;
size_t end, do_now;
end = offset + number;
if (!expect(device, b))
return;
if (!expect(device, b->bm_pages))
return;
if (number == 0)
return;
WARN_ON(offset >= b->bm_words);
WARN_ON(end > b->bm_words);
spin_lock_irq(&b->bm_lock);
while (offset < end) {
do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
idx = bm_word_to_page_idx(b, offset);
p_addr = bm_map_pidx(b, idx);
bm = p_addr + MLPP(offset);
offset += do_now;
while (do_now--) {
bits = hweight_long(*bm);
word = *bm | *buffer++;
*bm++ = word;
b->bm_set += hweight_long(word) - bits;
}
bm_unmap(p_addr);
bm_set_page_need_writeout(b->bm_pages[idx]);
}
/* With a 32bit <-> 64bit cross-platform connection
 * this is only correct for the current usage,
 * where we _know_ that we are 64 bit aligned,
 * and that this function is only ever used in this way.
 */
if (end == b->bm_words)
b->bm_set -= bm_clear_surplus(b);
spin_unlock_irq(&b->bm_lock);
}
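/*
 * Merge example (illustrative): if the bitmap word currently holds 0x03 and
 * the incoming little endian word is 0x0f, the stored word becomes 0x0f and
 * bm_set grows by hweight_long(0x0f) - hweight_long(0x03) = 4 - 2 = 2.
 */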
/* copy number words from the bitmap starting at offset into the buffer.
* buffer[i] will be little endian unsigned long.
*/
void drbd_bm_get_lel(struct drbd_device *device, size_t offset, size_t number,
unsigned long *buffer)
{
struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr, *bm;
size_t end, do_now;
end = offset + number;
if (!expect(device, b))
return;
if (!expect(device, b->bm_pages))
return;
spin_lock_irq(&b->bm_lock);
if ((offset >= b->bm_words) ||
(end > b->bm_words) ||
(number <= 0))
drbd_err(device, "offset=%lu number=%lu bm_words=%lu\n",
(unsigned long) offset,
(unsigned long) number,
(unsigned long) b->bm_words);
else {
while (offset < end) {
do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset));
bm = p_addr + MLPP(offset);
offset += do_now;
while (do_now--)
*buffer++ = *bm++;
bm_unmap(p_addr);
}
}
spin_unlock_irq(&b->bm_lock);
}
/* set all bits in the bitmap */
void drbd_bm_set_all(struct drbd_device *device)
{
struct drbd_bitmap *b = device->bitmap;
if (!expect(device, b))
return;
if (!expect(device, b->bm_pages))
return;
spin_lock_irq(&b->bm_lock);
bm_memset(b, 0, 0xff, b->bm_words);
(void)bm_clear_surplus(b);
b->bm_set = b->bm_bits;
spin_unlock_irq(&b->bm_lock);
}
/* clear all bits in the bitmap */
void drbd_bm_clear_all(struct drbd_device *device)
{
struct drbd_bitmap *b = device->bitmap;
if (!expect(device, b))
return;
if (!expect(device, b->bm_pages))
return;
spin_lock_irq(&b->bm_lock);
bm_memset(b, 0, 0, b->bm_words);
b->bm_set = 0;
spin_unlock_irq(&b->bm_lock);
}
static void drbd_bm_aio_ctx_destroy(struct kref *kref)
{
struct drbd_bm_aio_ctx *ctx = container_of(kref, struct drbd_bm_aio_ctx, kref);
unsigned long flags;
spin_lock_irqsave(&ctx->device->resource->req_lock, flags);
list_del(&ctx->list);
spin_unlock_irqrestore(&ctx->device->resource->req_lock, flags);
put_ldev(ctx->device);
kfree(ctx);
}
/* bv_page may be a copy, or may be the original */
static void drbd_bm_endio(struct bio *bio)
{
struct drbd_bm_aio_ctx *ctx = bio->bi_private;
struct drbd_device *device = ctx->device;
struct drbd_bitmap *b = device->bitmap;
unsigned int idx = bm_page_to_idx(bio_first_page_all(bio));
if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
!bm_test_page_unchanged(b->bm_pages[idx]))
drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);
if (bio->bi_status) {
/* ctx->error will hold the non-zero error code of the bio that
 * completed last, in case the error codes differ. */
ctx->error = blk_status_to_errno(bio->bi_status);
bm_set_page_io_err(b->bm_pages[idx]);
/* The in-core page is no longer identical to its on-disk version.
 * Is BM_PAGE_IO_ERROR enough? */
if (drbd_ratelimit())
drbd_err(device, "IO ERROR %d on bitmap page idx %u\n",
bio->bi_status, idx);
} else {
bm_clear_page_io_err(b->bm_pages[idx]);
dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
}
bm_page_unlock_io(device, idx);
if (ctx->flags & BM_AIO_COPY_PAGES)
mempool_free(bio->bi_io_vec[0].bv_page, &drbd_md_io_page_pool);
bio_put(bio);
if (atomic_dec_and_test(&ctx->in_flight)) {
ctx->done = 1;
wake_up(&device->misc_wait);
kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
}
}
/* For the layout, see comment above drbd_md_set_sector_offsets(). */
static inline sector_t drbd_md_last_bitmap_sector(struct drbd_backing_dev *bdev)
{
switch (bdev->md.meta_dev_idx) {
case DRBD_MD_INDEX_INTERNAL:
case DRBD_MD_INDEX_FLEX_INT:
return bdev->md.md_offset + bdev->md.al_offset -1;
case DRBD_MD_INDEX_FLEX_EXT:
default:
return bdev->md.md_offset + bdev->md.md_size_sect -1;
}
}
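/*
 * Sector math used below (illustrative, assuming 4 KiB pages): each bitmap
 * page occupies PAGE_SIZE / 512 = 8 on-disk sectors, so page_nr 5 starts at
 * first_bm_sect + (5 << (PAGE_SHIFT - SECTOR_SHIFT)) = first_bm_sect + 40.
 */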
static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local)
{
struct drbd_device *device = ctx->device;
enum req_op op = ctx->flags & BM_AIO_READ ? REQ_OP_READ : REQ_OP_WRITE;
struct drbd_bitmap *b = device->bitmap;
struct bio *bio;
struct page *page;
sector_t last_bm_sect;
sector_t first_bm_sect;
sector_t on_disk_sector;
unsigned int len;
first_bm_sect = device->ldev->md.md_offset + device->ldev->md.bm_offset;
on_disk_sector = first_bm_sect + (((sector_t)page_nr) << (PAGE_SHIFT-SECTOR_SHIFT));
/* The last bitmap page may only be partially backed by on-disk bitmap
 * sectors: this can happen with a very small flexible external meta data
 * device, or with PAGE_SIZE > 4k */
last_bm_sect = drbd_md_last_bitmap_sector(device->ldev);
if (first_bm_sect <= on_disk_sector && last_bm_sect >= on_disk_sector) {
sector_t len_sect = last_bm_sect - on_disk_sector + 1;
if (len_sect < PAGE_SIZE/SECTOR_SIZE)
len = (unsigned int)len_sect*SECTOR_SIZE;
else
len = PAGE_SIZE;
} else {
if (drbd_ratelimit()) {
drbd_err(device, "Invalid offset during on-disk bitmap access: "
"page idx %u, sector %llu\n", page_nr, on_disk_sector);
}
ctx->error = -EIO;
bm_set_page_io_err(b->bm_pages[page_nr]);
if (atomic_dec_and_test(&ctx->in_flight)) {
ctx->done = 1;
wake_up(&device->misc_wait);
kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
}
return;
}
/* serialize IO on this page */
bm_page_lock_io(device, page_nr);
/* before memcpy and submit,
* so it can be redirtied any time */
bm_set_page_unchanged(b->bm_pages[page_nr]);
if (ctx->flags & BM_AIO_COPY_PAGES) {
page = mempool_alloc(&drbd_md_io_page_pool,
GFP_NOIO | __GFP_HIGHMEM);
copy_highpage(page, b->bm_pages[page_nr]);
bm_store_page_idx(page, page_nr);
} else
page = b->bm_pages[page_nr];
bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op, GFP_NOIO,
&drbd_md_io_bio_set);
bio->bi_iter.bi_sector = on_disk_sector;
__bio_add_page(bio, page, len, 0);
bio->bi_private = ctx;
bio->bi_end_io = drbd_bm_endio;
if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
bio_io_error(bio);
} else {
submit_bio(bio);
/* this should not count as user activity and cause the
* resync to throttle -- see drbd_rs_should_slow_down(). */
atomic_add(len >> 9, &device->rs_sect_ev);
}
}
/*
* bm_rw: read/write the whole bitmap from/to its on disk location.
*/
static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
struct drbd_bm_aio_ctx *ctx;
struct drbd_bitmap *b = device->bitmap;
unsigned int num_pages, i, count = 0;
unsigned long now;
char ppb[10];
int err = 0;
/*
* We are protected against bitmap disappearing/resizing by holding an
* ldev reference (caller must have called get_ldev()).
* For read/write, we are protected against changes to the bitmap by
* the bitmap lock (see drbd_bitmap_io).
* For lazy writeout, we don't care for ongoing changes to the bitmap,
* as we submit copies of pages anyways.
*/
ctx = kmalloc(sizeof(struct drbd_bm_aio_ctx), GFP_NOIO);
if (!ctx)
return -ENOMEM;
*ctx = (struct drbd_bm_aio_ctx) {
.device = device,
.start_jif = jiffies,
.in_flight = ATOMIC_INIT(1),
.done = 0,
.flags = flags,
.error = 0,
.kref = KREF_INIT(2),
};
if (!get_ldev_if_state(device, D_ATTACHING)) { /* put is in drbd_bm_aio_ctx_destroy() */
drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
kfree(ctx);
return -ENODEV;
}
/* Here D_ATTACHING is sufficient since drbd_bm_read() is called only from
drbd_adm_attach(), after device->ldev was assigned. */
if (0 == (ctx->flags & ~BM_AIO_READ))
WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));
spin_lock_irq(&device->resource->req_lock);
list_add_tail(&ctx->list, &device->pending_bitmap_io);
spin_unlock_irq(&device->resource->req_lock);
num_pages = b->bm_number_of_pages;
now = jiffies;
/* let the layers below us try to merge these bios... */
if (flags & BM_AIO_READ) {
for (i = 0; i < num_pages; i++) {
atomic_inc(&ctx->in_flight);
bm_page_io_async(ctx, i);
++count;
cond_resched();
}
} else if (flags & BM_AIO_WRITE_HINTED) {
/* ASSERT: BM_AIO_WRITE_ALL_PAGES is not set. */
unsigned int hint;
for (hint = 0; hint < b->n_bitmap_hints; hint++) {
i = b->al_bitmap_hints[hint];
if (i >= num_pages) /* == -1U: no hint here. */
continue;
/* Several AL-extents may point to the same page. */
if (!test_and_clear_bit(BM_PAGE_HINT_WRITEOUT,
&page_private(b->bm_pages[i])))
continue;
/* Has it even changed? */
if (bm_test_page_unchanged(b->bm_pages[i]))
continue;
atomic_inc(&ctx->in_flight);
bm_page_io_async(ctx, i);
++count;
}
} else {
for (i = 0; i < num_pages; i++) {
/* ignore completely unchanged pages */
if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
break;
if (!(flags & BM_AIO_WRITE_ALL_PAGES) &&
bm_test_page_unchanged(b->bm_pages[i])) {
dynamic_drbd_dbg(device, "skipped bm write for idx %u\n", i);
continue;
}
/* during lazy writeout,
* ignore those pages not marked for lazy writeout. */
if (lazy_writeout_upper_idx &&
!bm_test_page_lazy_writeout(b->bm_pages[i])) {
dynamic_drbd_dbg(device, "skipped bm lazy write for idx %u\n", i);
continue;
}
atomic_inc(&ctx->in_flight);
bm_page_io_async(ctx, i);
++count;
cond_resched();
}
}
/*
* We initialize ctx->in_flight to one to make sure drbd_bm_endio
* will not set ctx->done early, and decrement / test it here. If there
* are still some bios in flight, we need to wait for them here.
* If all IO is done already (or nothing had been submitted), there is
* no need to wait. Still, we need to put the kref associated with the
* "in_flight reached zero, all done" event.
*/
if (!atomic_dec_and_test(&ctx->in_flight))
wait_until_done_or_force_detached(device, device->ldev, &ctx->done);
else
kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
/* summary for global bitmap IO */
if (flags == 0) {
unsigned int ms = jiffies_to_msecs(jiffies - now);
if (ms > 5) {
drbd_info(device, "bitmap %s of %u pages took %u ms\n",
(flags & BM_AIO_READ) ? "READ" : "WRITE",
count, ms);
}
}
if (ctx->error) {
drbd_alert(device, "we had at least one MD IO ERROR during bitmap IO\n");
drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
err = -EIO; /* ctx->error ? */
}
if (atomic_read(&ctx->in_flight))
err = -EIO; /* Disk timeout/force-detach during IO... */
now = jiffies;
if (flags & BM_AIO_READ) {
b->bm_set = bm_count_bits(b);
drbd_info(device, "recounting of set bits took additional %lu jiffies\n",
jiffies - now);
}
now = b->bm_set;
if ((flags & ~BM_AIO_READ) == 0)
drbd_info(device, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
return err;
}
/**
 * drbd_bm_read() - Read the whole bitmap from its on disk location.
 * @device: DRBD device.
 * @peer_device: Peer device (not used by this function).
 */
int drbd_bm_read(struct drbd_device *device,
struct drbd_peer_device *peer_device) __must_hold(local)
{
return bm_rw(device, BM_AIO_READ, 0);
}
/**
 * drbd_bm_write() - Write the whole bitmap to its on disk location.
 * @device: DRBD device.
 * @peer_device: Peer device (not used by this function).
 *
 * Will only write pages that have changed since last IO.
 */
int drbd_bm_write(struct drbd_device *device,
struct drbd_peer_device *peer_device) __must_hold(local)
{
return bm_rw(device, 0, 0);
}
/**
 * drbd_bm_write_all() - Write the whole bitmap to its on disk location.
 * @device: DRBD device.
 * @peer_device: Peer device (not used by this function).
 *
 * Will write all pages.
 */
int drbd_bm_write_all(struct drbd_device *device,
struct drbd_peer_device *peer_device) __must_hold(local)
{
return bm_rw(device, BM_AIO_WRITE_ALL_PAGES, 0);
}
/**
* drbd_bm_write_lazy() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
* @device: DRBD device.
* @upper_idx: 0: write all changed pages; +ve: page index to stop scanning for changed pages
*/
int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local)
{
return bm_rw(device, BM_AIO_COPY_PAGES, upper_idx);
}
/**
 * drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
 * @device: DRBD device.
 * @peer_device: Peer device (not used by this function).
 *
 * Will only write pages that have changed since last IO.
 * In contrast to drbd_bm_write(), this will copy the bitmap pages
 * to temporary writeout pages. It is intended to trigger a full write-out
 * while still allowing the bitmap to change, for example if a resync or online
 * verify is aborted due to a failed peer disk, while local IO continues, or
 * pending resync acks are still being processed.
 */
int drbd_bm_write_copy_pages(struct drbd_device *device,
struct drbd_peer_device *peer_device) __must_hold(local)
{
return bm_rw(device, BM_AIO_COPY_PAGES, 0);
}
/**
* drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed.
* @device: DRBD device.
*/
int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local)
{
return bm_rw(device, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
}
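/*
 * Overview of the bm_rw() flag combinations used by the wrappers above
 * (summary of this file only, not of all BM_AIO_* semantics):
 *
 *	drbd_bm_read()             BM_AIO_READ
 *	drbd_bm_write()            0
 *	drbd_bm_write_all()        BM_AIO_WRITE_ALL_PAGES
 *	drbd_bm_write_lazy()       BM_AIO_COPY_PAGES, upper_idx != 0
 *	drbd_bm_write_copy_pages() BM_AIO_COPY_PAGES
 *	drbd_bm_write_hinted()     BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES
 */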
/* NOTE
* find_first_bit returns int, we return unsigned long.
* For this to work on 32bit arch with bitnumbers > (1<<32),
* we'd need to return u64, and get a whole lot of other places
* fixed where we still use unsigned long.
*
* this returns a bit number, NOT a sector!
*/
static unsigned long __bm_find_next(struct drbd_device *device, unsigned long bm_fo,
const int find_zero_bit)
{
struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr;
unsigned long bit_offset;
unsigned i;
if (bm_fo > b->bm_bits) {
drbd_err(device, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
bm_fo = DRBD_END_OF_BITMAP;
} else {
while (bm_fo < b->bm_bits) {
/* bit offset of the first bit in the page */
bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo));
if (find_zero_bit)
i = find_next_zero_bit_le(p_addr,
PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
else
i = find_next_bit_le(p_addr,
PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
__bm_unmap(p_addr);
if (i < PAGE_SIZE*8) {
bm_fo = bit_offset + i;
if (bm_fo >= b->bm_bits)
break;
goto found;
}
bm_fo = bit_offset + PAGE_SIZE*8;
}
bm_fo = DRBD_END_OF_BITMAP;
}
found:
return bm_fo;
}
static unsigned long bm_find_next(struct drbd_device *device,
unsigned long bm_fo, const int find_zero_bit)
{
struct drbd_bitmap *b = device->bitmap;
unsigned long i = DRBD_END_OF_BITMAP;
if (!expect(device, b))
return i;
if (!expect(device, b->bm_pages))
return i;
spin_lock_irq(&b->bm_lock);
if (BM_DONT_TEST & b->bm_flags)
bm_print_lock_info(device);
i = __bm_find_next(device, bm_fo, find_zero_bit);
spin_unlock_irq(&b->bm_lock);
return i;
}
unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo)
{
return bm_find_next(device, bm_fo, 0);
}
#if 0
/* not yet needed for anything. */
unsigned long drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo)
{
return bm_find_next(device, bm_fo, 1);
}
#endif
/* does not spin_lock_irqsave.
* you must take drbd_bm_lock() first */
unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo)
{
/* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */
return __bm_find_next(device, bm_fo, 0);
}
unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo)
{
/* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */
return __bm_find_next(device, bm_fo, 1);
}
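/*
 * Typical scan pattern (sketch only; the real resync code, e.g.
 * make_resync_request() in drbd_worker.c, adds locking and throttling
 * on top of this):
 *
 *	for (;;) {
 *		bit = drbd_bm_find_next(device, bit);
 *		if (bit == DRBD_END_OF_BITMAP)
 *			break;			// nothing (more) out of sync
 *		sector = BM_BIT_TO_SECT(bit);	// bit number -> start sector
 *		...				// queue resync for that block
 *		bit++;
 *	}
 */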
/* returns number of bits actually changed.
* for val != 0, we change 0 -> 1, return code positive
* for val == 0, we change 1 -> 0, return code negative
* wants bitnr, not sector.
* expected to be called for only a few bits (e - s about BITS_PER_LONG).
* Must hold bitmap lock already. */
static int __bm_change_bits_to(struct drbd_device *device, const unsigned long s,
unsigned long e, int val)
{
struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr = NULL;
unsigned long bitnr;
unsigned int last_page_nr = -1U;
int c = 0;
int changed_total = 0;
if (e >= b->bm_bits) {
drbd_err(device, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
s, e, b->bm_bits);
e = b->bm_bits ? b->bm_bits -1 : 0;
}
for (bitnr = s; bitnr <= e; bitnr++) {
unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
if (page_nr != last_page_nr) {
if (p_addr)
__bm_unmap(p_addr);
if (c < 0)
bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
else if (c > 0)
bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
changed_total += c;
c = 0;
p_addr = __bm_map_pidx(b, page_nr);
last_page_nr = page_nr;
}
if (val)
c += (0 == __test_and_set_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
else
c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
}
if (p_addr)
__bm_unmap(p_addr);
if (c < 0)
bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
else if (c > 0)
bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
changed_total += c;
b->bm_set += changed_total;
return changed_total;
}
/* returns number of bits actually changed.
* for val != 0, we change 0 -> 1, return code positive
* for val == 0, we change 1 -> 0, return code negative
* wants bitnr, not sector */
static int bm_change_bits_to(struct drbd_device *device, const unsigned long s,
const unsigned long e, int val)
{
unsigned long flags;
struct drbd_bitmap *b = device->bitmap;
int c = 0;
if (!expect(device, b))
return 1;
if (!expect(device, b->bm_pages))
return 0;
spin_lock_irqsave(&b->bm_lock, flags);
if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
bm_print_lock_info(device);
c = __bm_change_bits_to(device, s, e, val);
spin_unlock_irqrestore(&b->bm_lock, flags);
return c;
}
/* returns number of bits changed 0 -> 1 */
int drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
return bm_change_bits_to(device, s, e, 1);
}
/* returns number of bits changed 1 -> 0 */
int drbd_bm_clear_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
return -bm_change_bits_to(device, s, e, 0);
}
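/*
 * Return convention example (illustrative): on a previously clear range,
 * drbd_bm_set_bits(device, 10, 13) sets bits 10..13 inclusive and returns 4;
 * a subsequent drbd_bm_clear_bits(device, 10, 13) also returns 4, because it
 * negates the (negative) count delivered by bm_change_bits_to().
 */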
/* sets all bits in full words,
* from first_word up to, but not including, last_word */
static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
int page_nr, int first_word, int last_word)
{
int i;
int bits;
int changed = 0;
unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]);
/* It is probably more cache-line friendly to hweight_long() each word and
 * then set it to ~0UL, than to first bitmap_weight() all words and then
 * bitmap_fill() them all. */
for (i = first_word; i < last_word; i++) {
bits = hweight_long(paddr[i]);
paddr[i] = ~0UL;
changed += BITS_PER_LONG - bits;
}
kunmap_atomic(paddr);
if (changed) {
/* We only need lazy writeout, the information is still in the
* remote bitmap as well, and is reconstructed during the next
* bitmap exchange, if lost locally due to a crash. */
bm_set_page_lazy_writeout(b->bm_pages[page_nr]);
b->bm_set += changed;
}
}
/* Same thing as drbd_bm_set_bits,
* but more efficient for a large bit range.
* You must first drbd_bm_lock().
* Can be called to set the whole bitmap in one go.
* Sets bits from s to e _inclusive_. */
void _drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
/* First set_bit from the first bit (s)
* up to the next long boundary (sl),
* then assign full words up to the last long boundary (el),
* then set_bit up to and including the last bit (e).
*
* Do not use memset, because we must account for changes,
* so we need to loop over the words with hweight() anyway.
*/
struct drbd_bitmap *b = device->bitmap;
unsigned long sl = ALIGN(s,BITS_PER_LONG);
unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
int first_page;
int last_page;
int page_nr;
int first_word;
int last_word;
if (e - s <= 3*BITS_PER_LONG) {
/* don't bother; el and sl may even be wrong. */
spin_lock_irq(&b->bm_lock);
__bm_change_bits_to(device, s, e, 1);
spin_unlock_irq(&b->bm_lock);
return;
}
/* difference is large enough that we can trust sl and el */
spin_lock_irq(&b->bm_lock);
/* bits filling the current long */
if (sl)
__bm_change_bits_to(device, s, sl-1, 1);
first_page = sl >> (3 + PAGE_SHIFT);
last_page = el >> (3 + PAGE_SHIFT);
/* MLPP: modulo longs per page */
/* LWPP: long words per page */
first_word = MLPP(sl >> LN2_BPL);
last_word = LWPP;
/* first and full pages, unless first page == last page */
for (page_nr = first_page; page_nr < last_page; page_nr++) {
bm_set_full_words_within_one_page(device->bitmap, page_nr, first_word, last_word);
spin_unlock_irq(&b->bm_lock);
cond_resched();
first_word = 0;
spin_lock_irq(&b->bm_lock);
}
/* last page (respectively only page, for first page == last page) */
last_word = MLPP(el >> LN2_BPL);
/* Consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1
 * (or multiples thereof, assuming 4 KiB pages).
 * ==> e = 32767, el = 32768, last_page == bm_number_of_pages,
 * and now last_word = 0.
 * We do not want to touch last_page in this case,
 * as we did not allocate it, it is not present in bitmap->bm_pages.
 */
if (last_word)
bm_set_full_words_within_one_page(device->bitmap, last_page, first_word, last_word);
/* possibly trailing bits.
* example: (e & 63) == 63, el will be e+1.
* if that even was the very last bit,
* it would trigger an assert in __bm_change_bits_to()
*/
if (el <= e)
__bm_change_bits_to(device, el, e, 1);
spin_unlock_irq(&b->bm_lock);
}
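/*
 * Worked example for the sl/el split above (64bit longs, illustrative):
 * s = 70, e = 200
 *	sl = ALIGN(70, 64)   = 128  -> bits 70..127 set bit by bit
 *	el = (200+1) & ~63UL = 192  -> words holding bits 128..191 set as
 *	                               full words, one page at a time
 *	                            -> bits 192..200 again set bit by bit
 * (A range this small actually takes the e - s <= 3*BITS_PER_LONG shortcut;
 *  the numbers are only meant to show how sl and el are derived.)
 */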
/* returns bit state
* wants bitnr, NOT sector.
* inherently racy... area needs to be locked by means of {al,rs}_lru
* 1 ... bit set
* 0 ... bit not set
* -1 ... first out of bounds access, stop testing for bits!
*/
int drbd_bm_test_bit(struct drbd_device *device, const unsigned long bitnr)
{
unsigned long flags;
struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr;
int i;
if (!expect(device, b))
return 0;
if (!expect(device, b->bm_pages))
return 0;
spin_lock_irqsave(&b->bm_lock, flags);
if (BM_DONT_TEST & b->bm_flags)
bm_print_lock_info(device);
if (bitnr < b->bm_bits) {
p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
bm_unmap(p_addr);
} else if (bitnr == b->bm_bits) {
i = -1;
} else { /* (bitnr > b->bm_bits) */
drbd_err(device, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
i = 0;
}
spin_unlock_irqrestore(&b->bm_lock, flags);
return i;
}
/* returns number of bits set in the range [s, e] */
int drbd_bm_count_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
unsigned long flags;
struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr = NULL;
unsigned long bitnr;
unsigned int page_nr = -1U;
int c = 0;
/* If this is called without a bitmap, that is a bug. But just to be
* robust in case we screwed up elsewhere, in that case pretend there
* was one dirty bit in the requested area, so we won't try to do a
* local read there (no bitmap probably implies no disk) */
if (!expect(device, b))
return 1;
if (!expect(device, b->bm_pages))
return 1;
spin_lock_irqsave(&b->bm_lock, flags);
if (BM_DONT_TEST & b->bm_flags)
bm_print_lock_info(device);
for (bitnr = s; bitnr <= e; bitnr++) {
unsigned int idx = bm_bit_to_page_idx(b, bitnr);
if (page_nr != idx) {
page_nr = idx;
if (p_addr)
bm_unmap(p_addr);
p_addr = bm_map_pidx(b, idx);
}
if (expect(device, bitnr < b->bm_bits))
c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
else
drbd_err(device, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
}
if (p_addr)
bm_unmap(p_addr);
spin_unlock_irqrestore(&b->bm_lock, flags);
return c;
}
/* inherently racy...
* return value may be already out-of-date when this function returns.
* but the general usage is that this is only use during a cstate when bits are
* only cleared, not set, and typically only care for the case when the return
* value is zero, or we already "locked" this "bitmap extent" by other means.
*
* enr is the bm-extent number, since we chose to name one sector (512 bytes)
* worth of the bitmap a "bitmap extent": 4096 bits, covering 16 MiB of
* backing storage at the default 4 KiB of storage per bit.
*
* TODO
* I think since we use it like a reference count, we should use the real
* reference count of some bitmap extent element from some lru instead...
*
*/
int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr)
{
struct drbd_bitmap *b = device->bitmap;
int count, s, e;
unsigned long flags;
unsigned long *p_addr, *bm;
if (!expect(device, b))
return 0;
if (!expect(device, b->bm_pages))
return 0;
spin_lock_irqsave(&b->bm_lock, flags);
if (BM_DONT_TEST & b->bm_flags)
bm_print_lock_info(device);
s = S2W(enr);
e = min((size_t)S2W(enr+1), b->bm_words);
count = 0;
if (s < b->bm_words) {
int n = e-s;
p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
bm = p_addr + MLPP(s);
count += bitmap_weight(bm, n * BITS_PER_LONG);
bm_unmap(p_addr);
} else {
drbd_err(device, "start offset (%d) too large in drbd_bm_e_weight\n", s);
}
spin_unlock_irqrestore(&b->bm_lock, flags);
return count;
}
| linux-master | drivers/block/drbd/drbd_bitmap.c |
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "drbd debugfs: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_debugfs.h"
/**********************************************************************
* Whenever you change the file format, remember to bump the version. *
**********************************************************************/
static struct dentry *drbd_debugfs_root;
static struct dentry *drbd_debugfs_version;
static struct dentry *drbd_debugfs_resources;
static struct dentry *drbd_debugfs_minors;
static void seq_print_age_or_dash(struct seq_file *m, bool valid, unsigned long dt)
{
if (valid)
seq_printf(m, "\t%d", jiffies_to_msecs(dt));
else
seq_printf(m, "\t-");
}
static void __seq_print_rq_state_bit(struct seq_file *m,
bool is_set, char *sep, const char *set_name, const char *unset_name)
{
if (is_set && set_name) {
seq_putc(m, *sep);
seq_puts(m, set_name);
*sep = '|';
} else if (!is_set && unset_name) {
seq_putc(m, *sep);
seq_puts(m, unset_name);
*sep = '|';
}
}
static void seq_print_rq_state_bit(struct seq_file *m,
bool is_set, char *sep, const char *set_name)
{
__seq_print_rq_state_bit(m, is_set, sep, set_name, NULL);
}
/* pretty print enum drbd_req_state_bits req->rq_state */
static void seq_print_request_state(struct seq_file *m, struct drbd_request *req)
{
unsigned int s = req->rq_state;
char sep = ' ';
seq_printf(m, "\t0x%08x", s);
seq_printf(m, "\tmaster: %s", req->master_bio ? "pending" : "completed");
/* RQ_WRITE ignored, already reported */
seq_puts(m, "\tlocal:");
seq_print_rq_state_bit(m, s & RQ_IN_ACT_LOG, &sep, "in-AL");
seq_print_rq_state_bit(m, s & RQ_POSTPONED, &sep, "postponed");
seq_print_rq_state_bit(m, s & RQ_COMPLETION_SUSP, &sep, "suspended");
sep = ' ';
seq_print_rq_state_bit(m, s & RQ_LOCAL_PENDING, &sep, "pending");
seq_print_rq_state_bit(m, s & RQ_LOCAL_COMPLETED, &sep, "completed");
seq_print_rq_state_bit(m, s & RQ_LOCAL_ABORTED, &sep, "aborted");
seq_print_rq_state_bit(m, s & RQ_LOCAL_OK, &sep, "ok");
if (sep == ' ')
seq_puts(m, " -");
/* for_each_connection ... */
seq_printf(m, "\tnet:");
sep = ' ';
seq_print_rq_state_bit(m, s & RQ_NET_PENDING, &sep, "pending");
seq_print_rq_state_bit(m, s & RQ_NET_QUEUED, &sep, "queued");
seq_print_rq_state_bit(m, s & RQ_NET_SENT, &sep, "sent");
seq_print_rq_state_bit(m, s & RQ_NET_DONE, &sep, "done");
seq_print_rq_state_bit(m, s & RQ_NET_SIS, &sep, "sis");
seq_print_rq_state_bit(m, s & RQ_NET_OK, &sep, "ok");
if (sep == ' ')
seq_puts(m, " -");
seq_printf(m, " :");
sep = ' ';
seq_print_rq_state_bit(m, s & RQ_EXP_RECEIVE_ACK, &sep, "B");
seq_print_rq_state_bit(m, s & RQ_EXP_WRITE_ACK, &sep, "C");
seq_print_rq_state_bit(m, s & RQ_EXP_BARR_ACK, &sep, "barr");
if (sep == ' ')
seq_puts(m, " -");
seq_printf(m, "\n");
}
static void seq_print_one_request(struct seq_file *m, struct drbd_request *req, unsigned long now)
{
/* change anything here, fixup header below! */
unsigned int s = req->rq_state;
#define RQ_HDR_1 "epoch\tsector\tsize\trw"
seq_printf(m, "0x%x\t%llu\t%u\t%s",
req->epoch,
(unsigned long long)req->i.sector, req->i.size >> 9,
(s & RQ_WRITE) ? "W" : "R");
#define RQ_HDR_2 "\tstart\tin AL\tsubmit"
seq_printf(m, "\t%d", jiffies_to_msecs(now - req->start_jif));
seq_print_age_or_dash(m, s & RQ_IN_ACT_LOG, now - req->in_actlog_jif);
seq_print_age_or_dash(m, s & RQ_LOCAL_PENDING, now - req->pre_submit_jif);
#define RQ_HDR_3 "\tsent\tacked\tdone"
seq_print_age_or_dash(m, s & RQ_NET_SENT, now - req->pre_send_jif);
seq_print_age_or_dash(m, (s & RQ_NET_SENT) && !(s & RQ_NET_PENDING), now - req->acked_jif);
seq_print_age_or_dash(m, s & RQ_NET_DONE, now - req->net_done_jif);
#define RQ_HDR_4 "\tstate\n"
seq_print_request_state(m, req);
}
#define RQ_HDR RQ_HDR_1 RQ_HDR_2 RQ_HDR_3 RQ_HDR_4
static void seq_print_minor_vnr_req(struct seq_file *m, struct drbd_request *req, unsigned long now)
{
seq_printf(m, "%u\t%u\t", req->device->minor, req->device->vnr);
seq_print_one_request(m, req, now);
}
static void seq_print_resource_pending_meta_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
{
struct drbd_device *device;
unsigned int i;
seq_puts(m, "minor\tvnr\tstart\tsubmit\tintent\n");
rcu_read_lock();
idr_for_each_entry(&resource->devices, device, i) {
struct drbd_md_io tmp;
/* In theory this is racy: a
 * drbd_md_put_buffer(); drbd_md_get_buffer();
 * pair could have happened between the accesses
 * to these members here. */
tmp = device->md_io;
if (atomic_read(&tmp.in_use)) {
seq_printf(m, "%u\t%u\t%d\t",
device->minor, device->vnr,
jiffies_to_msecs(now - tmp.start_jif));
if (time_before(tmp.submit_jif, tmp.start_jif))
seq_puts(m, "-\t");
else
seq_printf(m, "%d\t", jiffies_to_msecs(now - tmp.submit_jif));
seq_printf(m, "%s\n", tmp.current_use);
}
}
rcu_read_unlock();
}
static void seq_print_waiting_for_AL(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
{
struct drbd_device *device;
unsigned int i;
seq_puts(m, "minor\tvnr\tage\t#waiting\n");
rcu_read_lock();
idr_for_each_entry(&resource->devices, device, i) {
unsigned long jif;
struct drbd_request *req;
int n = atomic_read(&device->ap_actlog_cnt);
if (n) {
spin_lock_irq(&device->resource->req_lock);
req = list_first_entry_or_null(&device->pending_master_completion[1],
struct drbd_request, req_pending_master_completion);
/* if the oldest request does not wait for the activity log
* it is not interesting for us here */
if (req && !(req->rq_state & RQ_IN_ACT_LOG))
jif = req->start_jif;
else
req = NULL;
spin_unlock_irq(&device->resource->req_lock);
}
if (n) {
seq_printf(m, "%u\t%u\t", device->minor, device->vnr);
if (req)
seq_printf(m, "%u\t", jiffies_to_msecs(now - jif));
else
seq_puts(m, "-\t");
seq_printf(m, "%u\n", n);
}
}
rcu_read_unlock();
}
static void seq_print_device_bitmap_io(struct seq_file *m, struct drbd_device *device, unsigned long now)
{
struct drbd_bm_aio_ctx *ctx;
unsigned long start_jif;
unsigned int in_flight;
unsigned int flags;
spin_lock_irq(&device->resource->req_lock);
ctx = list_first_entry_or_null(&device->pending_bitmap_io, struct drbd_bm_aio_ctx, list);
if (ctx && ctx->done)
ctx = NULL;
if (ctx) {
start_jif = ctx->start_jif;
in_flight = atomic_read(&ctx->in_flight);
flags = ctx->flags;
}
spin_unlock_irq(&device->resource->req_lock);
if (ctx) {
seq_printf(m, "%u\t%u\t%c\t%u\t%u\n",
device->minor, device->vnr,
(flags & BM_AIO_READ) ? 'R' : 'W',
jiffies_to_msecs(now - start_jif),
in_flight);
}
}
static void seq_print_resource_pending_bitmap_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
{
struct drbd_device *device;
unsigned int i;
seq_puts(m, "minor\tvnr\trw\tage\t#in-flight\n");
rcu_read_lock();
idr_for_each_entry(&resource->devices, device, i) {
seq_print_device_bitmap_io(m, device, now);
}
rcu_read_unlock();
}
/* pretty print enum peer_req->flags */
static void seq_print_peer_request_flags(struct seq_file *m, struct drbd_peer_request *peer_req)
{
unsigned long f = peer_req->flags;
char sep = ' ';
__seq_print_rq_state_bit(m, f & EE_SUBMITTED, &sep, "submitted", "preparing");
__seq_print_rq_state_bit(m, f & EE_APPLICATION, &sep, "application", "internal");
seq_print_rq_state_bit(m, f & EE_CALL_AL_COMPLETE_IO, &sep, "in-AL");
seq_print_rq_state_bit(m, f & EE_SEND_WRITE_ACK, &sep, "C");
seq_print_rq_state_bit(m, f & EE_MAY_SET_IN_SYNC, &sep, "set-in-sync");
seq_print_rq_state_bit(m, f & EE_TRIM, &sep, "trim");
seq_print_rq_state_bit(m, f & EE_ZEROOUT, &sep, "zero-out");
seq_print_rq_state_bit(m, f & EE_WRITE_SAME, &sep, "write-same");
seq_putc(m, '\n');
}
static void seq_print_peer_request(struct seq_file *m,
struct drbd_device *device, struct list_head *lh,
unsigned long now)
{
bool reported_preparing = false;
struct drbd_peer_request *peer_req;
list_for_each_entry(peer_req, lh, w.list) {
if (reported_preparing && !(peer_req->flags & EE_SUBMITTED))
continue;
if (device)
seq_printf(m, "%u\t%u\t", device->minor, device->vnr);
seq_printf(m, "%llu\t%u\t%c\t%u\t",
(unsigned long long)peer_req->i.sector, peer_req->i.size >> 9,
(peer_req->flags & EE_WRITE) ? 'W' : 'R',
jiffies_to_msecs(now - peer_req->submit_jif));
seq_print_peer_request_flags(m, peer_req);
if (peer_req->flags & EE_SUBMITTED)
break;
else
reported_preparing = true;
}
}
static void seq_print_device_peer_requests(struct seq_file *m,
struct drbd_device *device, unsigned long now)
{
seq_puts(m, "minor\tvnr\tsector\tsize\trw\tage\tflags\n");
spin_lock_irq(&device->resource->req_lock);
seq_print_peer_request(m, device, &device->active_ee, now);
seq_print_peer_request(m, device, &device->read_ee, now);
seq_print_peer_request(m, device, &device->sync_ee, now);
spin_unlock_irq(&device->resource->req_lock);
if (test_bit(FLUSH_PENDING, &device->flags)) {
seq_printf(m, "%u\t%u\t-\t-\tF\t%u\tflush\n",
device->minor, device->vnr,
jiffies_to_msecs(now - device->flush_jif));
}
}
static void seq_print_resource_pending_peer_requests(struct seq_file *m,
struct drbd_resource *resource, unsigned long now)
{
struct drbd_device *device;
unsigned int i;
rcu_read_lock();
idr_for_each_entry(&resource->devices, device, i) {
seq_print_device_peer_requests(m, device, now);
}
rcu_read_unlock();
}
static void seq_print_resource_transfer_log_summary(struct seq_file *m,
struct drbd_resource *resource,
struct drbd_connection *connection,
unsigned long now)
{
struct drbd_request *req;
unsigned int count = 0;
unsigned int show_state = 0;
seq_puts(m, "n\tdevice\tvnr\t" RQ_HDR);
spin_lock_irq(&resource->req_lock);
list_for_each_entry(req, &connection->transfer_log, tl_requests) {
unsigned int tmp = 0;
unsigned int s;
++count;
/* don't disable irq "forever" */
if (!(count & 0x1ff)) {
struct drbd_request *req_next;
kref_get(&req->kref);
spin_unlock_irq(&resource->req_lock);
cond_resched();
spin_lock_irq(&resource->req_lock);
req_next = list_next_entry(req, tl_requests);
if (kref_put(&req->kref, drbd_req_destroy))
req = req_next;
if (&req->tl_requests == &connection->transfer_log)
break;
}
s = req->rq_state;
/* This is meant to summarize timing issues, to be able to tell
 * local disk problems from network problems.
 * Skip a request if we have already shown an even older request
 * with similar aspects. */
if (req->master_bio == NULL)
tmp |= 1;
if ((s & RQ_LOCAL_MASK) && (s & RQ_LOCAL_PENDING))
tmp |= 2;
if (s & RQ_NET_MASK) {
if (!(s & RQ_NET_SENT))
tmp |= 4;
if (s & RQ_NET_PENDING)
tmp |= 8;
if (!(s & RQ_NET_DONE))
tmp |= 16;
}
if ((tmp & show_state) == tmp)
continue;
show_state |= tmp;
seq_printf(m, "%u\t", count);
seq_print_minor_vnr_req(m, req, now);
if (show_state == 0x1f)
break;
}
spin_unlock_irq(&resource->req_lock);
}
/* TODO: transfer_log and friends should be moved to resource */
static int in_flight_summary_show(struct seq_file *m, void *pos)
{
struct drbd_resource *resource = m->private;
struct drbd_connection *connection;
unsigned long jif = jiffies;
connection = first_connection(resource);
/* This does not happen, actually.
* But be robust and prepare for future code changes. */
if (!connection || !kref_get_unless_zero(&connection->kref))
return -ESTALE;
/* BUMP me if you change the file format/content/presentation */
seq_printf(m, "v: %u\n\n", 0);
seq_puts(m, "oldest bitmap IO\n");
seq_print_resource_pending_bitmap_io(m, resource, jif);
seq_putc(m, '\n');
seq_puts(m, "meta data IO\n");
seq_print_resource_pending_meta_io(m, resource, jif);
seq_putc(m, '\n');
seq_puts(m, "socket buffer stats\n");
/* for each connection ... once we have more than one */
rcu_read_lock();
if (connection->data.socket) {
/* open coded SIOCINQ, the "relevant" part */
struct tcp_sock *tp = tcp_sk(connection->data.socket->sk);
int answ = tp->rcv_nxt - tp->copied_seq;
seq_printf(m, "unread receive buffer: %u Byte\n", answ);
/* open coded SIOCOUTQ, the "relevant" part */
answ = tp->write_seq - tp->snd_una;
seq_printf(m, "unacked send buffer: %u Byte\n", answ);
}
rcu_read_unlock();
seq_putc(m, '\n');
seq_puts(m, "oldest peer requests\n");
seq_print_resource_pending_peer_requests(m, resource, jif);
seq_putc(m, '\n');
seq_puts(m, "application requests waiting for activity log\n");
seq_print_waiting_for_AL(m, resource, jif);
seq_putc(m, '\n');
seq_puts(m, "oldest application requests\n");
seq_print_resource_transfer_log_summary(m, resource, connection, jif);
seq_putc(m, '\n');
jif = jiffies - jif;
if (jif)
seq_printf(m, "generated in %d ms\n", jiffies_to_msecs(jif));
kref_put(&connection->kref, drbd_destroy_connection);
return 0;
}
/* make sure at *open* time that the respective object won't go away. */
static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, void *),
void *data, struct kref *kref,
void (*release)(struct kref *))
{
struct dentry *parent;
int ret = -ESTALE;
/* Are we still linked,
* or has debugfs_remove() already been called? */
parent = file->f_path.dentry->d_parent;
/* serialize with d_delete() */
inode_lock(d_inode(parent));
/* Make sure the object is still alive */
if (simple_positive(file->f_path.dentry)
&& kref_get_unless_zero(kref))
ret = 0;
inode_unlock(d_inode(parent));
if (!ret) {
ret = single_open(file, show, data);
if (ret)
kref_put(kref, release);
}
return ret;
}
static int in_flight_summary_open(struct inode *inode, struct file *file)
{
struct drbd_resource *resource = inode->i_private;
return drbd_single_open(file, in_flight_summary_show, resource,
&resource->kref, drbd_destroy_resource);
}
static int in_flight_summary_release(struct inode *inode, struct file *file)
{
struct drbd_resource *resource = inode->i_private;
kref_put(&resource->kref, drbd_destroy_resource);
return single_release(inode, file);
}
static const struct file_operations in_flight_summary_fops = {
.owner = THIS_MODULE,
.open = in_flight_summary_open,
.read = seq_read,
.llseek = seq_lseek,
.release = in_flight_summary_release,
};
void drbd_debugfs_resource_add(struct drbd_resource *resource)
{
struct dentry *dentry;
dentry = debugfs_create_dir(resource->name, drbd_debugfs_resources);
resource->debugfs_res = dentry;
dentry = debugfs_create_dir("volumes", resource->debugfs_res);
resource->debugfs_res_volumes = dentry;
dentry = debugfs_create_dir("connections", resource->debugfs_res);
resource->debugfs_res_connections = dentry;
dentry = debugfs_create_file("in_flight_summary", 0440,
resource->debugfs_res, resource,
&in_flight_summary_fops);
resource->debugfs_res_in_flight_summary = dentry;
}
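/*
 * Resulting debugfs layout (sketch, assuming debugfs is mounted at
 * /sys/kernel/debug; built up by drbd_debugfs_init() and the *_add()
 * helpers in this file):
 *
 *	drbd/version
 *	drbd/resources/<resource>/in_flight_summary
 *	drbd/resources/<resource>/volumes/<vnr>/{oldest_requests,act_log_extents,
 *						 resync_extents,data_gen_id,ed_gen_id}
 *	drbd/resources/<resource>/connections/peer/{callback_history,oldest_requests}
 *	drbd/minors/<minor> -> ../resources/<resource>/volumes/<vnr>
 */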
static void drbd_debugfs_remove(struct dentry **dp)
{
debugfs_remove(*dp);
*dp = NULL;
}
void drbd_debugfs_resource_cleanup(struct drbd_resource *resource)
{
/* it is ok to call debugfs_remove(NULL) */
drbd_debugfs_remove(&resource->debugfs_res_in_flight_summary);
drbd_debugfs_remove(&resource->debugfs_res_connections);
drbd_debugfs_remove(&resource->debugfs_res_volumes);
drbd_debugfs_remove(&resource->debugfs_res);
}
static void seq_print_one_timing_detail(struct seq_file *m,
const struct drbd_thread_timing_details *tdp,
unsigned long now)
{
struct drbd_thread_timing_details td;
/* No locking...
* use temporary assignment to get at consistent data. */
do {
td = *tdp;
} while (td.cb_nr != tdp->cb_nr);
if (!td.cb_addr)
return;
seq_printf(m, "%u\t%d\t%s:%u\t%ps\n",
td.cb_nr,
jiffies_to_msecs(now - td.start_jif),
td.caller_fn, td.line,
td.cb_addr);
}
static void seq_print_timing_details(struct seq_file *m,
const char *title,
unsigned int cb_nr, struct drbd_thread_timing_details *tdp, unsigned long now)
{
unsigned int start_idx;
unsigned int i;
seq_printf(m, "%s\n", title);
/* If not much is going on, this will result in natural ordering.
* If it is very busy, we will possibly skip events, or even see wrap
* arounds, which could only be avoided with locking.
*/
start_idx = cb_nr % DRBD_THREAD_DETAILS_HIST;
for (i = start_idx; i < DRBD_THREAD_DETAILS_HIST; i++)
seq_print_one_timing_detail(m, tdp+i, now);
for (i = 0; i < start_idx; i++)
seq_print_one_timing_detail(m, tdp+i, now);
}
static int callback_history_show(struct seq_file *m, void *ignored)
{
struct drbd_connection *connection = m->private;
unsigned long jif = jiffies;
/* BUMP me if you change the file format/content/presentation */
seq_printf(m, "v: %u\n\n", 0);
seq_puts(m, "n\tage\tcallsite\tfn\n");
seq_print_timing_details(m, "worker", connection->w_cb_nr, connection->w_timing_details, jif);
seq_print_timing_details(m, "receiver", connection->r_cb_nr, connection->r_timing_details, jif);
return 0;
}
static int callback_history_open(struct inode *inode, struct file *file)
{
struct drbd_connection *connection = inode->i_private;
return drbd_single_open(file, callback_history_show, connection,
&connection->kref, drbd_destroy_connection);
}
static int callback_history_release(struct inode *inode, struct file *file)
{
struct drbd_connection *connection = inode->i_private;
kref_put(&connection->kref, drbd_destroy_connection);
return single_release(inode, file);
}
static const struct file_operations connection_callback_history_fops = {
.owner = THIS_MODULE,
.open = callback_history_open,
.read = seq_read,
.llseek = seq_lseek,
.release = callback_history_release,
};
static int connection_oldest_requests_show(struct seq_file *m, void *ignored)
{
struct drbd_connection *connection = m->private;
unsigned long now = jiffies;
struct drbd_request *r1, *r2;
/* BUMP me if you change the file format/content/presentation */
seq_printf(m, "v: %u\n\n", 0);
spin_lock_irq(&connection->resource->req_lock);
r1 = connection->req_next;
if (r1)
seq_print_minor_vnr_req(m, r1, now);
r2 = connection->req_ack_pending;
if (r2 && r2 != r1) {
r1 = r2;
seq_print_minor_vnr_req(m, r1, now);
}
r2 = connection->req_not_net_done;
if (r2 && r2 != r1)
seq_print_minor_vnr_req(m, r2, now);
spin_unlock_irq(&connection->resource->req_lock);
return 0;
}
static int connection_oldest_requests_open(struct inode *inode, struct file *file)
{
struct drbd_connection *connection = inode->i_private;
return drbd_single_open(file, connection_oldest_requests_show, connection,
&connection->kref, drbd_destroy_connection);
}
static int connection_oldest_requests_release(struct inode *inode, struct file *file)
{
struct drbd_connection *connection = inode->i_private;
kref_put(&connection->kref, drbd_destroy_connection);
return single_release(inode, file);
}
static const struct file_operations connection_oldest_requests_fops = {
.owner = THIS_MODULE,
.open = connection_oldest_requests_open,
.read = seq_read,
.llseek = seq_lseek,
.release = connection_oldest_requests_release,
};
void drbd_debugfs_connection_add(struct drbd_connection *connection)
{
struct dentry *conns_dir = connection->resource->debugfs_res_connections;
struct dentry *dentry;
/* Once we enable multiple peers,
* these connections will have descriptive names.
* For now, it is just the one connection to the (only) "peer". */
dentry = debugfs_create_dir("peer", conns_dir);
connection->debugfs_conn = dentry;
dentry = debugfs_create_file("callback_history", 0440,
connection->debugfs_conn, connection,
&connection_callback_history_fops);
connection->debugfs_conn_callback_history = dentry;
dentry = debugfs_create_file("oldest_requests", 0440,
connection->debugfs_conn, connection,
&connection_oldest_requests_fops);
connection->debugfs_conn_oldest_requests = dentry;
}
void drbd_debugfs_connection_cleanup(struct drbd_connection *connection)
{
drbd_debugfs_remove(&connection->debugfs_conn_callback_history);
drbd_debugfs_remove(&connection->debugfs_conn_oldest_requests);
drbd_debugfs_remove(&connection->debugfs_conn);
}
static void resync_dump_detail(struct seq_file *m, struct lc_element *e)
{
struct bm_extent *bme = lc_entry(e, struct bm_extent, lce);
seq_printf(m, "%5d %s %s %s", bme->rs_left,
test_bit(BME_NO_WRITES, &bme->flags) ? "NO_WRITES" : "---------",
test_bit(BME_LOCKED, &bme->flags) ? "LOCKED" : "------",
test_bit(BME_PRIORITY, &bme->flags) ? "PRIORITY" : "--------"
);
}
static int device_resync_extents_show(struct seq_file *m, void *ignored)
{
struct drbd_device *device = m->private;
/* BUMP me if you change the file format/content/presentation */
seq_printf(m, "v: %u\n\n", 0);
if (get_ldev_if_state(device, D_FAILED)) {
lc_seq_printf_stats(m, device->resync);
lc_seq_dump_details(m, device->resync, "rs_left flags", resync_dump_detail);
put_ldev(device);
}
return 0;
}
static int device_act_log_extents_show(struct seq_file *m, void *ignored)
{
struct drbd_device *device = m->private;
/* BUMP me if you change the file format/content/presentation */
seq_printf(m, "v: %u\n\n", 0);
if (get_ldev_if_state(device, D_FAILED)) {
lc_seq_printf_stats(m, device->act_log);
lc_seq_dump_details(m, device->act_log, "", NULL);
put_ldev(device);
}
return 0;
}
static int device_oldest_requests_show(struct seq_file *m, void *ignored)
{
struct drbd_device *device = m->private;
struct drbd_resource *resource = device->resource;
unsigned long now = jiffies;
struct drbd_request *r1, *r2;
int i;
/* BUMP me if you change the file format/content/presentation */
seq_printf(m, "v: %u\n\n", 0);
seq_puts(m, RQ_HDR);
spin_lock_irq(&resource->req_lock);
/* WRITE, then READ */
for (i = 1; i >= 0; --i) {
r1 = list_first_entry_or_null(&device->pending_master_completion[i],
struct drbd_request, req_pending_master_completion);
r2 = list_first_entry_or_null(&device->pending_completion[i],
struct drbd_request, req_pending_local);
if (r1)
seq_print_one_request(m, r1, now);
if (r2 && r2 != r1)
seq_print_one_request(m, r2, now);
}
spin_unlock_irq(&resource->req_lock);
return 0;
}
static int device_data_gen_id_show(struct seq_file *m, void *ignored)
{
struct drbd_device *device = m->private;
struct drbd_md *md;
enum drbd_uuid_index idx;
if (!get_ldev_if_state(device, D_FAILED))
return -ENODEV;
md = &device->ldev->md;
spin_lock_irq(&md->uuid_lock);
for (idx = UI_CURRENT; idx <= UI_HISTORY_END; idx++) {
seq_printf(m, "0x%016llX\n", md->uuid[idx]);
}
spin_unlock_irq(&md->uuid_lock);
put_ldev(device);
return 0;
}
static int device_ed_gen_id_show(struct seq_file *m, void *ignored)
{
struct drbd_device *device = m->private;
seq_printf(m, "0x%016llX\n", (unsigned long long)device->ed_uuid);
return 0;
}
#define drbd_debugfs_device_attr(name) \
static int device_ ## name ## _open(struct inode *inode, struct file *file) \
{ \
struct drbd_device *device = inode->i_private; \
return drbd_single_open(file, device_ ## name ## _show, device, \
&device->kref, drbd_destroy_device); \
} \
static int device_ ## name ## _release(struct inode *inode, struct file *file) \
{ \
struct drbd_device *device = inode->i_private; \
kref_put(&device->kref, drbd_destroy_device); \
return single_release(inode, file); \
} \
static const struct file_operations device_ ## name ## _fops = { \
.owner = THIS_MODULE, \
.open = device_ ## name ## _open, \
.read = seq_read, \
.llseek = seq_lseek, \
.release = device_ ## name ## _release, \
};
drbd_debugfs_device_attr(oldest_requests)
drbd_debugfs_device_attr(act_log_extents)
drbd_debugfs_device_attr(resync_extents)
drbd_debugfs_device_attr(data_gen_id)
drbd_debugfs_device_attr(ed_gen_id)
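/*
 * The macro above stamps out the open/release boilerplate for each per-device
 * attribute; e.g. drbd_debugfs_device_attr(oldest_requests) defines
 * device_oldest_requests_open()/_release() and device_oldest_requests_fops,
 * all wired to device_oldest_requests_show(), with the device kref held
 * while the file is open.
 */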
void drbd_debugfs_device_add(struct drbd_device *device)
{
struct dentry *vols_dir = device->resource->debugfs_res_volumes;
char minor_buf[8]; /* MINORMASK, MINORBITS == 20; */
char vnr_buf[8]; /* volume number vnr is even 16 bit only; */
char *slink_name = NULL;
struct dentry *dentry;
if (!vols_dir || !drbd_debugfs_minors)
return;
snprintf(vnr_buf, sizeof(vnr_buf), "%u", device->vnr);
dentry = debugfs_create_dir(vnr_buf, vols_dir);
device->debugfs_vol = dentry;
snprintf(minor_buf, sizeof(minor_buf), "%u", device->minor);
slink_name = kasprintf(GFP_KERNEL, "../resources/%s/volumes/%u",
device->resource->name, device->vnr);
if (!slink_name)
goto fail;
dentry = debugfs_create_symlink(minor_buf, drbd_debugfs_minors, slink_name);
device->debugfs_minor = dentry;
kfree(slink_name);
slink_name = NULL;
#define DCF(name) do { \
dentry = debugfs_create_file(#name, 0440, \
device->debugfs_vol, device, \
&device_ ## name ## _fops); \
device->debugfs_vol_ ## name = dentry; \
} while (0)
DCF(oldest_requests);
DCF(act_log_extents);
DCF(resync_extents);
DCF(data_gen_id);
DCF(ed_gen_id);
#undef DCF
return;
fail:
drbd_debugfs_device_cleanup(device);
drbd_err(device, "failed to create debugfs entries\n");
}
void drbd_debugfs_device_cleanup(struct drbd_device *device)
{
drbd_debugfs_remove(&device->debugfs_minor);
drbd_debugfs_remove(&device->debugfs_vol_oldest_requests);
drbd_debugfs_remove(&device->debugfs_vol_act_log_extents);
drbd_debugfs_remove(&device->debugfs_vol_resync_extents);
drbd_debugfs_remove(&device->debugfs_vol_data_gen_id);
drbd_debugfs_remove(&device->debugfs_vol_ed_gen_id);
drbd_debugfs_remove(&device->debugfs_vol);
}
void drbd_debugfs_peer_device_add(struct drbd_peer_device *peer_device)
{
struct dentry *conn_dir = peer_device->connection->debugfs_conn;
struct dentry *dentry;
char vnr_buf[8];
snprintf(vnr_buf, sizeof(vnr_buf), "%u", peer_device->device->vnr);
dentry = debugfs_create_dir(vnr_buf, conn_dir);
peer_device->debugfs_peer_dev = dentry;
}
void drbd_debugfs_peer_device_cleanup(struct drbd_peer_device *peer_device)
{
drbd_debugfs_remove(&peer_device->debugfs_peer_dev);
}
static int drbd_version_show(struct seq_file *m, void *ignored)
{
seq_printf(m, "# %s\n", drbd_buildtag());
seq_printf(m, "VERSION=%s\n", REL_VERSION);
seq_printf(m, "API_VERSION=%u\n", GENL_MAGIC_VERSION);
seq_printf(m, "PRO_VERSION_MIN=%u\n", PRO_VERSION_MIN);
seq_printf(m, "PRO_VERSION_MAX=%u\n", PRO_VERSION_MAX);
return 0;
}
static int drbd_version_open(struct inode *inode, struct file *file)
{
return single_open(file, drbd_version_show, NULL);
}
static const struct file_operations drbd_version_fops = {
.owner = THIS_MODULE,
.open = drbd_version_open,
.llseek = seq_lseek,
.read = seq_read,
.release = single_release,
};
/* not __exit, may be indirectly called
* from the module-load-failure path as well. */
void drbd_debugfs_cleanup(void)
{
drbd_debugfs_remove(&drbd_debugfs_resources);
drbd_debugfs_remove(&drbd_debugfs_minors);
drbd_debugfs_remove(&drbd_debugfs_version);
drbd_debugfs_remove(&drbd_debugfs_root);
}
void __init drbd_debugfs_init(void)
{
struct dentry *dentry;
dentry = debugfs_create_dir("drbd", NULL);
drbd_debugfs_root = dentry;
dentry = debugfs_create_file("version", 0444, drbd_debugfs_root, NULL, &drbd_version_fops);
drbd_debugfs_version = dentry;
dentry = debugfs_create_dir("resources", drbd_debugfs_root);
drbd_debugfs_resources = dentry;
dentry = debugfs_create_dir("minors", drbd_debugfs_root);
drbd_debugfs_minors = dentry;
}
| linux-master | drivers/block/drbd/drbd_debugfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
drbd_receiver.c
This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
Copyright (C) 1999-2008, Philipp Reisner <[email protected]>.
Copyright (C) 2002-2008, Lars Ellenberg <[email protected]>.
*/
#include <linux/module.h>
#include <linux/uaccess.h>
#include <net/sock.h>
#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <uapi/linux/sched/types.h>
#include <linux/sched/signal.h>
#include <linux/pkt_sched.h>
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_vli.h"
#define PRO_FEATURES (DRBD_FF_TRIM|DRBD_FF_THIN_RESYNC|DRBD_FF_WSAME|DRBD_FF_WZEROES)
struct packet_info {
enum drbd_packet cmd;
unsigned int size;
unsigned int vnr;
void *data;
};
enum finish_epoch {
FE_STILL_LIVE,
FE_DESTROYED,
FE_RECYCLED,
};
static int drbd_do_features(struct drbd_connection *connection);
static int drbd_do_auth(struct drbd_connection *connection);
static int drbd_disconnected(struct drbd_peer_device *);
static void conn_wait_active_ee_empty(struct drbd_connection *connection);
static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
/*
* some helper functions to deal with single linked page lists,
* page->private being our "next" pointer.
*/
/* If at least n pages are linked at head, get n pages off.
* Otherwise, don't modify head, and return NULL.
* Locking is the responsibility of the caller.
*/
static struct page *page_chain_del(struct page **head, int n)
{
struct page *page;
struct page *tmp;
BUG_ON(!n);
BUG_ON(!head);
page = *head;
if (!page)
return NULL;
while (page) {
tmp = page_chain_next(page);
if (--n == 0)
break; /* found sufficient pages */
if (tmp == NULL)
/* insufficient pages, don't use any of them. */
return NULL;
page = tmp;
}
/* add end of list marker for the returned list */
set_page_private(page, 0);
/* actual return value, and adjustment of head */
page = *head;
*head = tmp;
return page;
}
/* may be used outside of locks to find the tail of a (usually short)
* "private" page chain, before adding it back to a global chain head
* with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
struct page *tmp;
int i = 1;
while ((tmp = page_chain_next(page))) {
++i;
page = tmp;
}
if (len)
*len = i;
return page;
}
static int page_chain_free(struct page *page)
{
struct page *tmp;
int i = 0;
page_chain_for_each_safe(page, tmp) {
put_page(page);
++i;
}
return i;
}
static void page_chain_add(struct page **head,
struct page *chain_first, struct page *chain_last)
{
#if 1
struct page *tmp;
tmp = page_chain_tail(chain_first, NULL);
BUG_ON(tmp != chain_last);
#endif
/* add chain to head */
set_page_private(chain_last, (unsigned long)*head);
*head = chain_first;
}
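/* Illustrative sketch (not part of the driver, guarded out): how the
 * page_chain_* helpers above compose when moving pages between a private
 * chain and the global pool. The real callers additionally adjust
 * drbd_pp_vacant under drbd_pp_lock, as __drbd_alloc_pages() and
 * drbd_free_pages() below do. */
#if 0
static void page_chain_example(void)
{
	struct page *chain, *tail;

	spin_lock(&drbd_pp_lock);
	chain = page_chain_del(&drbd_pp_pool, 3);	/* take 3 pages, or NULL */
	spin_unlock(&drbd_pp_lock);
	if (!chain)
		return;
	/* ... use the chain; page->private links each page to the next ... */
	tail = page_chain_tail(chain, NULL);	/* may be found outside the lock */
	spin_lock(&drbd_pp_lock);
	page_chain_add(&drbd_pp_pool, chain, tail);	/* splice back to the pool */
	spin_unlock(&drbd_pp_lock);
}
#endif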
static struct page *__drbd_alloc_pages(struct drbd_device *device,
unsigned int number)
{
struct page *page = NULL;
struct page *tmp = NULL;
unsigned int i = 0;
/* Yes, testing drbd_pp_vacant outside the lock is racy.
* So what. It saves a spin_lock. */
if (drbd_pp_vacant >= number) {
spin_lock(&drbd_pp_lock);
page = page_chain_del(&drbd_pp_pool, number);
if (page)
drbd_pp_vacant -= number;
spin_unlock(&drbd_pp_lock);
if (page)
return page;
}
/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
for (i = 0; i < number; i++) {
tmp = alloc_page(GFP_TRY);
if (!tmp)
break;
set_page_private(tmp, (unsigned long)page);
page = tmp;
}
if (i == number)
return page;
/* Not enough pages immediately available this time.
* No need to jump around here, drbd_alloc_pages will retry this
* function "soon". */
if (page) {
tmp = page_chain_tail(page, NULL);
spin_lock(&drbd_pp_lock);
page_chain_add(&drbd_pp_pool, page, tmp);
drbd_pp_vacant += i;
spin_unlock(&drbd_pp_lock);
}
return NULL;
}
static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
struct list_head *to_be_freed)
{
struct drbd_peer_request *peer_req, *tmp;
/* The EEs are always appended to the end of the list. Since
they are sent in order over the wire, they have to finish
in order. As soon as we see the first unfinished one, we can
stop examining the list... */
list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
if (drbd_peer_req_has_active_page(peer_req))
break;
list_move(&peer_req->w.list, to_be_freed);
}
}
static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
{
LIST_HEAD(reclaimed);
struct drbd_peer_request *peer_req, *t;
spin_lock_irq(&device->resource->req_lock);
reclaim_finished_net_peer_reqs(device, &reclaimed);
spin_unlock_irq(&device->resource->req_lock);
list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
drbd_free_net_peer_req(device, peer_req);
}
static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
{
struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
if (!atomic_read(&device->pp_in_use_by_net))
continue;
kref_get(&device->kref);
rcu_read_unlock();
drbd_reclaim_net_peer_reqs(device);
kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
}
rcu_read_unlock();
}
/**
* drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
* @peer_device: DRBD device.
* @number: number of pages requested
* @retry: whether to retry, if not enough pages are available right now
*
* Tries to allocate number pages, first from our own page pool, then from
* the kernel.
* Possibly retry until DRBD frees sufficient pages somewhere else.
*
* If this allocation would exceed the max_buffers setting, we throttle
* allocation (schedule_timeout) to give the system some room to breathe.
*
* We do not use max-buffers as hard limit, because it could lead to
* congestion and further to a distributed deadlock during online-verify or
* (checksum based) resync, if the max-buffers, socket buffer sizes and
* resync-rate settings are mis-configured.
*
* Returns a page chain linked via page->private.
*/
struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int number,
bool retry)
{
struct drbd_device *device = peer_device->device;
struct page *page = NULL;
struct net_conf *nc;
DEFINE_WAIT(wait);
unsigned int mxb;
rcu_read_lock();
nc = rcu_dereference(peer_device->connection->net_conf);
mxb = nc ? nc->max_buffers : 1000000;
rcu_read_unlock();
if (atomic_read(&device->pp_in_use) < mxb)
page = __drbd_alloc_pages(device, number);
/* Try to keep the fast path fast, but occasionally we need
* to reclaim the pages we lent to the network stack. */
if (page && atomic_read(&device->pp_in_use_by_net) > 512)
drbd_reclaim_net_peer_reqs(device);
while (page == NULL) {
prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
drbd_reclaim_net_peer_reqs(device);
if (atomic_read(&device->pp_in_use) < mxb) {
page = __drbd_alloc_pages(device, number);
if (page)
break;
}
if (!retry)
break;
if (signal_pending(current)) {
drbd_warn(device, "drbd_alloc_pages interrupted!\n");
break;
}
if (schedule_timeout(HZ/10) == 0)
mxb = UINT_MAX;
}
finish_wait(&drbd_pp_wait, &wait);
if (page)
atomic_add(number, &device->pp_in_use);
return page;
}
/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
* Is also used from inside another spin_lock_irq(&resource->req_lock);
* Either links the page chain back to the global pool,
* or returns all pages to the system. */
static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
{
atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
int i;
if (page == NULL)
return;
if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)
i = page_chain_free(page);
else {
struct page *tmp;
tmp = page_chain_tail(page, &i);
spin_lock(&drbd_pp_lock);
page_chain_add(&drbd_pp_pool, page, tmp);
drbd_pp_vacant += i;
spin_unlock(&drbd_pp_lock);
}
i = atomic_sub_return(i, a);
if (i < 0)
drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
is_net ? "pp_in_use_by_net" : "pp_in_use", i);
wake_up(&drbd_pp_wait);
}
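/* Sketch of the typical allocate/free pairing (illustrative only):
 * drbd_alloc_pages() already accounts the chain in device->pp_in_use,
 * drbd_free_pages() undoes that accounting and wakes drbd_pp_wait. */
#if 0
static void page_pool_usage_example(struct drbd_peer_device *peer_device,
				    unsigned int nr_pages)
{
	struct page *chain = drbd_alloc_pages(peer_device, nr_pages, true);

	if (!chain)
		return;
	/* ... attach to a peer request, submit I/O, wait for completion ... */
	drbd_free_pages(peer_device->device, chain, 0 /* !is_net */);
}
#endif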
/*
You need to hold the req_lock:
_drbd_wait_ee_list_empty()
You must not have the req_lock:
drbd_free_peer_req()
drbd_alloc_peer_req()
drbd_free_peer_reqs()
drbd_ee_fix_bhs()
drbd_finish_peer_reqs()
drbd_clear_done_ee()
drbd_wait_ee_list_empty()
*/
/* normal: payload_size == request size (bi_size)
* w_same: payload_size == logical_block_size
* trim: payload_size == 0 */
struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
unsigned int request_size, unsigned int payload_size, gfp_t gfp_mask) __must_hold(local)
{
struct drbd_device *device = peer_device->device;
struct drbd_peer_request *peer_req;
struct page *page = NULL;
unsigned int nr_pages = PFN_UP(payload_size);
if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
return NULL;
peer_req = mempool_alloc(&drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
if (!peer_req) {
if (!(gfp_mask & __GFP_NOWARN))
drbd_err(device, "%s: allocation failed\n", __func__);
return NULL;
}
if (nr_pages) {
page = drbd_alloc_pages(peer_device, nr_pages,
gfpflags_allow_blocking(gfp_mask));
if (!page)
goto fail;
}
memset(peer_req, 0, sizeof(*peer_req));
INIT_LIST_HEAD(&peer_req->w.list);
drbd_clear_interval(&peer_req->i);
peer_req->i.size = request_size;
peer_req->i.sector = sector;
peer_req->submit_jif = jiffies;
peer_req->peer_device = peer_device;
peer_req->pages = page;
/*
* The block_id is opaque to the receiver. It is not endianness
* converted, and sent back to the sender unchanged.
*/
peer_req->block_id = id;
return peer_req;
fail:
mempool_free(peer_req, &drbd_ee_mempool);
return NULL;
}
void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
int is_net)
{
might_sleep();
if (peer_req->flags & EE_HAS_DIGEST)
kfree(peer_req->digest);
drbd_free_pages(device, peer_req->pages, is_net);
D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
D_ASSERT(device, drbd_interval_empty(&peer_req->i));
if (!expect(device, !(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
drbd_al_complete_io(device, &peer_req->i);
}
mempool_free(peer_req, &drbd_ee_mempool);
}
int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
{
LIST_HEAD(work_list);
struct drbd_peer_request *peer_req, *t;
int count = 0;
int is_net = list == &device->net_ee;
spin_lock_irq(&device->resource->req_lock);
list_splice_init(list, &work_list);
spin_unlock_irq(&device->resource->req_lock);
list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
__drbd_free_peer_req(device, peer_req, is_net);
count++;
}
return count;
}
/*
* See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
*/
static int drbd_finish_peer_reqs(struct drbd_device *device)
{
LIST_HEAD(work_list);
LIST_HEAD(reclaimed);
struct drbd_peer_request *peer_req, *t;
int err = 0;
spin_lock_irq(&device->resource->req_lock);
reclaim_finished_net_peer_reqs(device, &reclaimed);
list_splice_init(&device->done_ee, &work_list);
spin_unlock_irq(&device->resource->req_lock);
list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
drbd_free_net_peer_req(device, peer_req);
/* possible callbacks here:
* e_end_block, and e_end_resync_block, e_send_superseded.
* all ignore the last argument.
*/
list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
int err2;
/* list_del not necessary, next/prev members not touched */
err2 = peer_req->w.cb(&peer_req->w, !!err);
if (!err)
err = err2;
drbd_free_peer_req(device, peer_req);
}
wake_up(&device->ee_wait);
return err;
}
static void _drbd_wait_ee_list_empty(struct drbd_device *device,
struct list_head *head)
{
DEFINE_WAIT(wait);
/* avoids spin_lock/unlock
* and calling prepare_to_wait in the fast path */
while (!list_empty(head)) {
prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&device->resource->req_lock);
io_schedule();
finish_wait(&device->ee_wait, &wait);
spin_lock_irq(&device->resource->req_lock);
}
}
static void drbd_wait_ee_list_empty(struct drbd_device *device,
struct list_head *head)
{
spin_lock_irq(&device->resource->req_lock);
_drbd_wait_ee_list_empty(device, head);
spin_unlock_irq(&device->resource->req_lock);
}
static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
struct kvec iov = {
.iov_base = buf,
.iov_len = size,
};
struct msghdr msg = {
.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
};
iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, size);
return sock_recvmsg(sock, &msg, msg.msg_flags);
}
static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
{
int rv;
rv = drbd_recv_short(connection->data.socket, buf, size, 0);
if (rv < 0) {
if (rv == -ECONNRESET)
drbd_info(connection, "sock was reset by peer\n");
else if (rv != -ERESTARTSYS)
drbd_err(connection, "sock_recvmsg returned %d\n", rv);
} else if (rv == 0) {
if (test_bit(DISCONNECT_SENT, &connection->flags)) {
long t;
rcu_read_lock();
t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
rcu_read_unlock();
t = wait_event_timeout(connection->ping_wait, connection->cstate < C_WF_REPORT_PARAMS, t);
if (t)
goto out;
}
drbd_info(connection, "sock was shut down by peer\n");
}
if (rv != size)
conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
out:
return rv;
}
static int drbd_recv_all(struct drbd_connection *connection, void *buf, size_t size)
{
int err;
err = drbd_recv(connection, buf, size);
if (err != size) {
if (err >= 0)
err = -EIO;
} else
err = 0;
return err;
}
static int drbd_recv_all_warn(struct drbd_connection *connection, void *buf, size_t size)
{
int err;
err = drbd_recv_all(connection, buf, size);
if (err && !signal_pending(current))
drbd_warn(connection, "short read (expected size %d)\n", (int)size);
return err;
}
/* quoting tcp(7):
* On individual connections, the socket buffer size must be set prior to the
* listen(2) or connect(2) calls in order to have it take effect.
* This is our wrapper to do so.
*/
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
unsigned int rcv)
{
/* open coded SO_SNDBUF, SO_RCVBUF */
if (snd) {
sock->sk->sk_sndbuf = snd;
sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
}
if (rcv) {
sock->sk->sk_rcvbuf = rcv;
sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
}
}
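/* For reference, a userspace sketch of what the open coded assignments above
 * correspond to; per tcp(7) this must be done before connect(2)/listen(2) to
 * take effect. "fd" is an already created socket and the buffer sizes are
 * made-up example values. */
#if 0
	int snd = 128 * 1024, rcv = 128 * 1024;

	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &snd, sizeof(snd));
	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, sizeof(rcv));
#endif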
static struct socket *drbd_try_connect(struct drbd_connection *connection)
{
const char *what;
struct socket *sock;
struct sockaddr_in6 src_in6;
struct sockaddr_in6 peer_in6;
struct net_conf *nc;
int err, peer_addr_len, my_addr_len;
int sndbuf_size, rcvbuf_size, connect_int;
int disconnect_on_error = 1;
rcu_read_lock();
nc = rcu_dereference(connection->net_conf);
if (!nc) {
rcu_read_unlock();
return NULL;
}
sndbuf_size = nc->sndbuf_size;
rcvbuf_size = nc->rcvbuf_size;
connect_int = nc->connect_int;
rcu_read_unlock();
my_addr_len = min_t(int, connection->my_addr_len, sizeof(src_in6));
memcpy(&src_in6, &connection->my_addr, my_addr_len);
if (((struct sockaddr *)&connection->my_addr)->sa_family == AF_INET6)
src_in6.sin6_port = 0;
else
((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
peer_addr_len = min_t(int, connection->peer_addr_len, sizeof(src_in6));
memcpy(&peer_in6, &connection->peer_addr, peer_addr_len);
what = "sock_create_kern";
err = sock_create_kern(&init_net, ((struct sockaddr *)&src_in6)->sa_family,
SOCK_STREAM, IPPROTO_TCP, &sock);
if (err < 0) {
sock = NULL;
goto out;
}
sock->sk->sk_rcvtimeo =
sock->sk->sk_sndtimeo = connect_int * HZ;
drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);
/* explicitly bind to the configured IP as source IP
* for the outgoing connections.
* This is needed for multihomed hosts and to be
* able to use lo: interfaces for drbd.
* Make sure to use 0 as port number, so linux selects
* a free one dynamically.
*/
what = "bind before connect";
err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
if (err < 0)
goto out;
/* connect may fail, peer not yet available.
* stay C_WF_CONNECTION, don't go Disconnecting! */
disconnect_on_error = 0;
what = "connect";
err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);
out:
if (err < 0) {
if (sock) {
sock_release(sock);
sock = NULL;
}
switch (-err) {
/* timeout, busy, signal pending */
case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
case EINTR: case ERESTARTSYS:
/* peer not (yet) available, network problem */
case ECONNREFUSED: case ENETUNREACH:
case EHOSTDOWN: case EHOSTUNREACH:
disconnect_on_error = 0;
break;
default:
drbd_err(connection, "%s failed, err = %d\n", what, err);
}
if (disconnect_on_error)
conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
}
return sock;
}
struct accept_wait_data {
struct drbd_connection *connection;
struct socket *s_listen;
struct completion door_bell;
void (*original_sk_state_change)(struct sock *sk);
};
static void drbd_incoming_connection(struct sock *sk)
{
struct accept_wait_data *ad = sk->sk_user_data;
void (*state_change)(struct sock *sk);
state_change = ad->original_sk_state_change;
if (sk->sk_state == TCP_ESTABLISHED)
complete(&ad->door_bell);
state_change(sk);
}
static int prepare_listen_socket(struct drbd_connection *connection, struct accept_wait_data *ad)
{
int err, sndbuf_size, rcvbuf_size, my_addr_len;
struct sockaddr_in6 my_addr;
struct socket *s_listen;
struct net_conf *nc;
const char *what;
rcu_read_lock();
nc = rcu_dereference(connection->net_conf);
if (!nc) {
rcu_read_unlock();
return -EIO;
}
sndbuf_size = nc->sndbuf_size;
rcvbuf_size = nc->rcvbuf_size;
rcu_read_unlock();
my_addr_len = min_t(int, connection->my_addr_len, sizeof(struct sockaddr_in6));
memcpy(&my_addr, &connection->my_addr, my_addr_len);
what = "sock_create_kern";
err = sock_create_kern(&init_net, ((struct sockaddr *)&my_addr)->sa_family,
SOCK_STREAM, IPPROTO_TCP, &s_listen);
if (err) {
s_listen = NULL;
goto out;
}
s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);
what = "bind before listen";
err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
if (err < 0)
goto out;
ad->s_listen = s_listen;
write_lock_bh(&s_listen->sk->sk_callback_lock);
ad->original_sk_state_change = s_listen->sk->sk_state_change;
s_listen->sk->sk_state_change = drbd_incoming_connection;
s_listen->sk->sk_user_data = ad;
write_unlock_bh(&s_listen->sk->sk_callback_lock);
what = "listen";
err = s_listen->ops->listen(s_listen, 5);
if (err < 0)
goto out;
return 0;
out:
if (s_listen)
sock_release(s_listen);
if (err < 0) {
if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
drbd_err(connection, "%s failed, err = %d\n", what, err);
conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
}
}
return -EIO;
}
static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
{
write_lock_bh(&sk->sk_callback_lock);
sk->sk_state_change = ad->original_sk_state_change;
sk->sk_user_data = NULL;
write_unlock_bh(&sk->sk_callback_lock);
}
static struct socket *drbd_wait_for_connect(struct drbd_connection *connection, struct accept_wait_data *ad)
{
int timeo, connect_int, err = 0;
struct socket *s_estab = NULL;
struct net_conf *nc;
rcu_read_lock();
nc = rcu_dereference(connection->net_conf);
if (!nc) {
rcu_read_unlock();
return NULL;
}
connect_int = nc->connect_int;
rcu_read_unlock();
timeo = connect_int * HZ;
/* 28.5% random jitter */
timeo += get_random_u32_below(2) ? timeo / 7 : -timeo / 7;
err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
if (err <= 0)
return NULL;
err = kernel_accept(ad->s_listen, &s_estab, 0);
if (err < 0) {
if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
drbd_err(connection, "accept failed, err = %d\n", err);
conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
}
}
if (s_estab)
unregister_state_change(s_estab->sk, ad);
return s_estab;
}
static int decode_header(struct drbd_connection *, void *, struct packet_info *);
static int send_first_packet(struct drbd_connection *connection, struct drbd_socket *sock,
enum drbd_packet cmd)
{
if (!conn_prepare_command(connection, sock))
return -EIO;
return conn_send_command(connection, sock, cmd, 0, NULL, 0);
}
static int receive_first_packet(struct drbd_connection *connection, struct socket *sock)
{
unsigned int header_size = drbd_header_size(connection);
struct packet_info pi;
struct net_conf *nc;
int err;
rcu_read_lock();
nc = rcu_dereference(connection->net_conf);
if (!nc) {
rcu_read_unlock();
return -EIO;
}
sock->sk->sk_rcvtimeo = nc->ping_timeo * 4 * HZ / 10;
rcu_read_unlock();
err = drbd_recv_short(sock, connection->data.rbuf, header_size, 0);
if (err != header_size) {
if (err >= 0)
err = -EIO;
return err;
}
err = decode_header(connection, connection->data.rbuf, &pi);
if (err)
return err;
return pi.cmd;
}
/**
* drbd_socket_okay() - Free the socket if its connection is not okay
* @sock: pointer to the pointer to the socket.
*/
static bool drbd_socket_okay(struct socket **sock)
{
int rr;
char tb[4];
if (!*sock)
return false;
rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
if (rr > 0 || rr == -EAGAIN) {
return true;
} else {
sock_release(*sock);
*sock = NULL;
return false;
}
}
static bool connection_established(struct drbd_connection *connection,
struct socket **sock1,
struct socket **sock2)
{
struct net_conf *nc;
int timeout;
bool ok;
if (!*sock1 || !*sock2)
return false;
rcu_read_lock();
nc = rcu_dereference(connection->net_conf);
timeout = (nc->sock_check_timeo ?: nc->ping_timeo) * HZ / 10;
rcu_read_unlock();
schedule_timeout_interruptible(timeout);
ok = drbd_socket_okay(sock1);
ok = drbd_socket_okay(sock2) && ok;
return ok;
}
/* Gets called if a connection is established, or if a new minor gets created
in a connection */
int drbd_connected(struct drbd_peer_device *peer_device)
{
struct drbd_device *device = peer_device->device;
int err;
atomic_set(&device->packet_seq, 0);
device->peer_seq = 0;
device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
&peer_device->connection->cstate_mutex :
&device->own_state_mutex;
err = drbd_send_sync_param(peer_device);
if (!err)
err = drbd_send_sizes(peer_device, 0, 0);
if (!err)
err = drbd_send_uuids(peer_device);
if (!err)
err = drbd_send_current_state(peer_device);
clear_bit(USE_DEGR_WFC_T, &device->flags);
clear_bit(RESIZE_PENDING, &device->flags);
atomic_set(&device->ap_in_flight, 0);
mod_timer(&device->request_timer, jiffies + HZ); /* just start it here. */
return err;
}
/*
* return values:
* 1 yes, we have a valid connection
* 0 oops, did not work out, please try again
* -1 peer talks different language,
* no point in trying again, please go standalone.
* -2 We do not have a network config...
*/
static int conn_connect(struct drbd_connection *connection)
{
struct drbd_socket sock, msock;
struct drbd_peer_device *peer_device;
struct net_conf *nc;
int vnr, timeout, h;
bool discard_my_data, ok;
enum drbd_state_rv rv;
struct accept_wait_data ad = {
.connection = connection,
.door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
};
clear_bit(DISCONNECT_SENT, &connection->flags);
if (conn_request_state(connection, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
return -2;
mutex_init(&sock.mutex);
sock.sbuf = connection->data.sbuf;
sock.rbuf = connection->data.rbuf;
sock.socket = NULL;
mutex_init(&msock.mutex);
msock.sbuf = connection->meta.sbuf;
msock.rbuf = connection->meta.rbuf;
msock.socket = NULL;
/* Assume that the peer only understands protocol 80 until we know better. */
connection->agreed_pro_version = 80;
if (prepare_listen_socket(connection, &ad))
return 0;
do {
struct socket *s;
s = drbd_try_connect(connection);
if (s) {
if (!sock.socket) {
sock.socket = s;
send_first_packet(connection, &sock, P_INITIAL_DATA);
} else if (!msock.socket) {
clear_bit(RESOLVE_CONFLICTS, &connection->flags);
msock.socket = s;
send_first_packet(connection, &msock, P_INITIAL_META);
} else {
drbd_err(connection, "Logic error in conn_connect()\n");
goto out_release_sockets;
}
}
if (connection_established(connection, &sock.socket, &msock.socket))
break;
retry:
s = drbd_wait_for_connect(connection, &ad);
if (s) {
int fp = receive_first_packet(connection, s);
drbd_socket_okay(&sock.socket);
drbd_socket_okay(&msock.socket);
switch (fp) {
case P_INITIAL_DATA:
if (sock.socket) {
drbd_warn(connection, "initial packet S crossed\n");
sock_release(sock.socket);
sock.socket = s;
goto randomize;
}
sock.socket = s;
break;
case P_INITIAL_META:
set_bit(RESOLVE_CONFLICTS, &connection->flags);
if (msock.socket) {
drbd_warn(connection, "initial packet M crossed\n");
sock_release(msock.socket);
msock.socket = s;
goto randomize;
}
msock.socket = s;
break;
default:
drbd_warn(connection, "Error receiving initial packet\n");
sock_release(s);
randomize:
if (get_random_u32_below(2))
goto retry;
}
}
if (connection->cstate <= C_DISCONNECTING)
goto out_release_sockets;
if (signal_pending(current)) {
flush_signals(current);
smp_rmb();
if (get_t_state(&connection->receiver) == EXITING)
goto out_release_sockets;
}
ok = connection_established(connection, &sock.socket, &msock.socket);
} while (!ok);
if (ad.s_listen)
sock_release(ad.s_listen);
sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
sock.socket->sk->sk_allocation = GFP_NOIO;
msock.socket->sk->sk_allocation = GFP_NOIO;
sock.socket->sk->sk_use_task_frag = false;
msock.socket->sk->sk_use_task_frag = false;
sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;
/* NOT YET ...
* sock.socket->sk->sk_sndtimeo = connection->net_conf->timeout*HZ/10;
* sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
* first set it to the P_CONNECTION_FEATURES timeout,
* which we set to 4x the configured ping_timeout. */
rcu_read_lock();
nc = rcu_dereference(connection->net_conf);
sock.socket->sk->sk_sndtimeo =
sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
timeout = nc->timeout * HZ / 10;
discard_my_data = nc->discard_my_data;
rcu_read_unlock();
msock.socket->sk->sk_sndtimeo = timeout;
/* we don't want delays.
* we use TCP_CORK where appropriate, though */
tcp_sock_set_nodelay(sock.socket->sk);
tcp_sock_set_nodelay(msock.socket->sk);
connection->data.socket = sock.socket;
connection->meta.socket = msock.socket;
connection->last_received = jiffies;
h = drbd_do_features(connection);
if (h <= 0)
return h;
if (connection->cram_hmac_tfm) {
/* drbd_request_state(device, NS(conn, WFAuth)); */
switch (drbd_do_auth(connection)) {
case -1:
drbd_err(connection, "Authentication of peer failed\n");
return -1;
case 0:
drbd_err(connection, "Authentication of peer failed, trying again.\n");
return 0;
}
}
connection->data.socket->sk->sk_sndtimeo = timeout;
connection->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
if (drbd_send_protocol(connection) == -EOPNOTSUPP)
return -1;
/* Prevent a race between resync-handshake and
* being promoted to Primary.
*
* Grab and release the state mutex, so we know that any current
* drbd_set_role() is finished, and any incoming drbd_set_role
* will see the STATE_SENT flag, and wait for it to be cleared.
*/
idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
mutex_lock(peer_device->device->state_mutex);
/* avoid a race with conn_request_state( C_DISCONNECTING ) */
spin_lock_irq(&connection->resource->req_lock);
set_bit(STATE_SENT, &connection->flags);
spin_unlock_irq(&connection->resource->req_lock);
idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
mutex_unlock(peer_device->device->state_mutex);
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
kref_get(&device->kref);
rcu_read_unlock();
if (discard_my_data)
set_bit(DISCARD_MY_DATA, &device->flags);
else
clear_bit(DISCARD_MY_DATA, &device->flags);
drbd_connected(peer_device);
kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
}
rcu_read_unlock();
rv = conn_request_state(connection, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
if (rv < SS_SUCCESS || connection->cstate != C_WF_REPORT_PARAMS) {
clear_bit(STATE_SENT, &connection->flags);
return 0;
}
drbd_thread_start(&connection->ack_receiver);
/* open-coded create_singlethread_workqueue(),
* to be able to use format string arguments */
connection->ack_sender =
alloc_ordered_workqueue("drbd_as_%s", WQ_MEM_RECLAIM, connection->resource->name);
if (!connection->ack_sender) {
drbd_err(connection, "Failed to create workqueue ack_sender\n");
return 0;
}
mutex_lock(&connection->resource->conf_update);
/* The discard_my_data flag is a single-shot modifier to the next
* connection attempt, the handshake of which is now well underway.
* No need for rcu style copying of the whole struct
* just to clear a single value. */
connection->net_conf->discard_my_data = 0;
mutex_unlock(&connection->resource->conf_update);
return h;
out_release_sockets:
if (ad.s_listen)
sock_release(ad.s_listen);
if (sock.socket)
sock_release(sock.socket);
if (msock.socket)
sock_release(msock.socket);
return -1;
}
static int decode_header(struct drbd_connection *connection, void *header, struct packet_info *pi)
{
unsigned int header_size = drbd_header_size(connection);
if (header_size == sizeof(struct p_header100) &&
*(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
struct p_header100 *h = header;
if (h->pad != 0) {
drbd_err(connection, "Header padding is not zero\n");
return -EINVAL;
}
pi->vnr = be16_to_cpu(h->volume);
pi->cmd = be16_to_cpu(h->command);
pi->size = be32_to_cpu(h->length);
} else if (header_size == sizeof(struct p_header95) &&
*(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
struct p_header95 *h = header;
pi->cmd = be16_to_cpu(h->command);
pi->size = be32_to_cpu(h->length);
pi->vnr = 0;
} else if (header_size == sizeof(struct p_header80) &&
*(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
struct p_header80 *h = header;
pi->cmd = be16_to_cpu(h->command);
pi->size = be16_to_cpu(h->length);
pi->vnr = 0;
} else {
drbd_err(connection, "Wrong magic value 0x%08x in protocol version %d\n",
be32_to_cpu(*(__be32 *)header),
connection->agreed_pro_version);
return -EINVAL;
}
pi->data = header + header_size;
return 0;
}
static void drbd_unplug_all_devices(struct drbd_connection *connection)
{
if (current->plug == &connection->receiver_plug) {
blk_finish_plug(&connection->receiver_plug);
blk_start_plug(&connection->receiver_plug);
} /* else: maybe just schedule() ?? */
}
static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi)
{
void *buffer = connection->data.rbuf;
int err;
err = drbd_recv_all_warn(connection, buffer, drbd_header_size(connection));
if (err)
return err;
err = decode_header(connection, buffer, pi);
connection->last_received = jiffies;
return err;
}
static int drbd_recv_header_maybe_unplug(struct drbd_connection *connection, struct packet_info *pi)
{
void *buffer = connection->data.rbuf;
unsigned int size = drbd_header_size(connection);
int err;
err = drbd_recv_short(connection->data.socket, buffer, size, MSG_NOSIGNAL|MSG_DONTWAIT);
if (err != size) {
/* If we have nothing in the receive buffer now, to reduce
* application latency, try to drain the backend queues as
* quickly as possible, and let remote TCP know what we have
* received so far. */
if (err == -EAGAIN) {
tcp_sock_set_quickack(connection->data.socket->sk, 2);
drbd_unplug_all_devices(connection);
}
if (err > 0) {
buffer += err;
size -= err;
}
err = drbd_recv_all_warn(connection, buffer, size);
if (err)
return err;
}
err = decode_header(connection, connection->data.rbuf, pi);
connection->last_received = jiffies;
return err;
}
/* This is blkdev_issue_flush, but asynchronous.
* We want to submit to all component volumes in parallel,
* then wait for all completions.
*/
struct issue_flush_context {
atomic_t pending;
int error;
struct completion done;
};
struct one_flush_context {
struct drbd_device *device;
struct issue_flush_context *ctx;
};
static void one_flush_endio(struct bio *bio)
{
struct one_flush_context *octx = bio->bi_private;
struct drbd_device *device = octx->device;
struct issue_flush_context *ctx = octx->ctx;
if (bio->bi_status) {
ctx->error = blk_status_to_errno(bio->bi_status);
drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status);
}
kfree(octx);
bio_put(bio);
clear_bit(FLUSH_PENDING, &device->flags);
put_ldev(device);
kref_put(&device->kref, drbd_destroy_device);
if (atomic_dec_and_test(&ctx->pending))
complete(&ctx->done);
}
static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
{
struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0,
REQ_OP_WRITE | REQ_PREFLUSH, GFP_NOIO);
struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);
if (!octx) {
drbd_warn(device, "Could not allocate a octx, CANNOT ISSUE FLUSH\n");
/* FIXME: what else can I do now? disconnecting or detaching
* really does not help to improve the state of the world, either.
*/
bio_put(bio);
ctx->error = -ENOMEM;
put_ldev(device);
kref_put(&device->kref, drbd_destroy_device);
return;
}
octx->device = device;
octx->ctx = ctx;
bio->bi_private = octx;
bio->bi_end_io = one_flush_endio;
device->flush_jif = jiffies;
set_bit(FLUSH_PENDING, &device->flags);
atomic_inc(&ctx->pending);
submit_bio(bio);
}
static void drbd_flush(struct drbd_connection *connection)
{
if (connection->resource->write_ordering >= WO_BDEV_FLUSH) {
struct drbd_peer_device *peer_device;
struct issue_flush_context ctx;
int vnr;
atomic_set(&ctx.pending, 1);
ctx.error = 0;
init_completion(&ctx.done);
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
if (!get_ldev(device))
continue;
kref_get(&device->kref);
rcu_read_unlock();
submit_one_flush(device, &ctx);
rcu_read_lock();
}
rcu_read_unlock();
/* Do we want to add a timeout,
* if disk-timeout is set? */
if (!atomic_dec_and_test(&ctx.pending))
wait_for_completion(&ctx.done);
if (ctx.error) {
/* would rather check on EOPNOTSUPP, but that is not reliable.
* don't try again for ANY return value != 0
* if (rv == -EOPNOTSUPP) */
/* Any error is already reported by bio_endio callback. */
drbd_bump_write_ordering(connection->resource, NULL, WO_DRAIN_IO);
}
}
}
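/* Sketch of the completion pattern drbd_flush() relies on (illustrative
 * only): ctx.pending starts at 1 so the submitter holds its own reference
 * while it iterates; submit_one_flush() takes one reference per bio,
 * one_flush_endio() drops one per bio, and whoever drops the last reference
 * completes ctx.done. "for_each_volume" below is pseudo-code standing in for
 * the idr loop above. */
#if 0
	atomic_set(&ctx.pending, 1);			/* submitter's own reference */
	init_completion(&ctx.done);
	for_each_volume(device)
		submit_one_flush(device, &ctx);		/* does atomic_inc(&ctx.pending) */
	if (!atomic_dec_and_test(&ctx.pending))		/* drop the submitter's reference */
		wait_for_completion(&ctx.done);		/* last endio completes ctx.done */
#endif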
/**
* drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
* @connection: DRBD connection.
* @epoch: Epoch object.
* @ev: Epoch event.
*/
static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connection,
struct drbd_epoch *epoch,
enum epoch_event ev)
{
int epoch_size;
struct drbd_epoch *next_epoch;
enum finish_epoch rv = FE_STILL_LIVE;
spin_lock(&connection->epoch_lock);
do {
next_epoch = NULL;
epoch_size = atomic_read(&epoch->epoch_size);
switch (ev & ~EV_CLEANUP) {
case EV_PUT:
atomic_dec(&epoch->active);
break;
case EV_GOT_BARRIER_NR:
set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
break;
case EV_BECAME_LAST:
/* nothing to do */
break;
}
if (epoch_size != 0 &&
atomic_read(&epoch->active) == 0 &&
(test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
if (!(ev & EV_CLEANUP)) {
spin_unlock(&connection->epoch_lock);
drbd_send_b_ack(epoch->connection, epoch->barrier_nr, epoch_size);
spin_lock(&connection->epoch_lock);
}
#if 0
/* FIXME: dec unacked on connection, once we have
* something to count pending connection packets in. */
if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
dec_unacked(epoch->connection);
#endif
if (connection->current_epoch != epoch) {
next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
list_del(&epoch->list);
ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
connection->epochs--;
kfree(epoch);
if (rv == FE_STILL_LIVE)
rv = FE_DESTROYED;
} else {
epoch->flags = 0;
atomic_set(&epoch->epoch_size, 0);
/* atomic_set(&epoch->active, 0); is already zero */
if (rv == FE_STILL_LIVE)
rv = FE_RECYCLED;
}
}
if (!next_epoch)
break;
epoch = next_epoch;
} while (1);
spin_unlock(&connection->epoch_lock);
return rv;
}
static enum write_ordering_e
max_allowed_wo(struct drbd_backing_dev *bdev, enum write_ordering_e wo)
{
struct disk_conf *dc;
dc = rcu_dereference(bdev->disk_conf);
if (wo == WO_BDEV_FLUSH && !dc->disk_flushes)
wo = WO_DRAIN_IO;
if (wo == WO_DRAIN_IO && !dc->disk_drain)
wo = WO_NONE;
return wo;
}
/*
* drbd_bump_write_ordering() - Fall back to another write ordering method
* @wo: Write ordering method to try.
*/
void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
enum write_ordering_e wo)
{
struct drbd_device *device;
enum write_ordering_e pwo;
int vnr;
static char *write_ordering_str[] = {
[WO_NONE] = "none",
[WO_DRAIN_IO] = "drain",
[WO_BDEV_FLUSH] = "flush",
};
pwo = resource->write_ordering;
if (wo != WO_BDEV_FLUSH)
wo = min(pwo, wo);
rcu_read_lock();
idr_for_each_entry(&resource->devices, device, vnr) {
if (get_ldev(device)) {
wo = max_allowed_wo(device->ldev, wo);
if (device->ldev == bdev)
bdev = NULL;
put_ldev(device);
}
}
if (bdev)
wo = max_allowed_wo(bdev, wo);
rcu_read_unlock();
resource->write_ordering = wo;
if (pwo != resource->write_ordering || wo == WO_BDEV_FLUSH)
drbd_info(resource, "Method to ensure write ordering: %s\n", write_ordering_str[resource->write_ordering]);
}
/*
* Mapping "discard" to ZEROOUT with UNMAP does not work for us:
* Drivers have to "announce" q->limits.max_write_zeroes_sectors, or it
* will directly go to fallback mode, submitting normal writes, and
* never even try to UNMAP.
*
* And dm-thin does not do this (yet), mostly because in general it has
* to assume that "skip_block_zeroing" is set. See also:
* https://www.mail-archive.com/dm-devel%40redhat.com/msg07965.html
* https://www.redhat.com/archives/dm-devel/2018-January/msg00271.html
*
* We *may* ignore the discard-zeroes-data setting, if so configured.
*
* Assumption is that this "discard_zeroes_data=0" is only because the backend
* may ignore partial unaligned discards.
*
* LVM/DM thin as of at least
* LVM version: 2.02.115(2)-RHEL7 (2015-01-28)
* Library version: 1.02.93-RHEL7 (2015-01-28)
* Driver version: 4.29.0
* still behaves this way.
*
* For unaligned (wrt. alignment and granularity) or too small discards,
* we zero-out the initial (and/or) trailing unaligned partial chunks,
* but discard all the aligned full chunks.
*
* At least for LVM/DM thin, with skip_block_zeroing=false,
* the result is effectively "discard_zeroes_data=1".
*/
/* flags: EE_TRIM|EE_ZEROOUT */
int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, int flags)
{
struct block_device *bdev = device->ldev->backing_bdev;
sector_t tmp, nr;
unsigned int max_discard_sectors, granularity;
int alignment;
int err = 0;
if ((flags & EE_ZEROOUT) || !(flags & EE_TRIM))
goto zero_out;
/* Zero-sector (unknown) and one-sector granularities are the same. */
granularity = max(bdev_discard_granularity(bdev) >> 9, 1U);
alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
max_discard_sectors = min(bdev_max_discard_sectors(bdev), (1U << 22));
max_discard_sectors -= max_discard_sectors % granularity;
if (unlikely(!max_discard_sectors))
goto zero_out;
if (nr_sectors < granularity)
goto zero_out;
tmp = start;
if (sector_div(tmp, granularity) != alignment) {
if (nr_sectors < 2*granularity)
goto zero_out;
/* start + gran - (start + gran - align) % gran */
tmp = start + granularity - alignment;
tmp = start + granularity - sector_div(tmp, granularity);
nr = tmp - start;
/* don't flag BLKDEV_ZERO_NOUNMAP, we don't know how many
* layers are below us, some may have smaller granularity */
err |= blkdev_issue_zeroout(bdev, start, nr, GFP_NOIO, 0);
nr_sectors -= nr;
start = tmp;
}
while (nr_sectors >= max_discard_sectors) {
err |= blkdev_issue_discard(bdev, start, max_discard_sectors,
GFP_NOIO);
nr_sectors -= max_discard_sectors;
start += max_discard_sectors;
}
if (nr_sectors) {
/* max_discard_sectors is unsigned int (and a multiple of
* granularity, we made sure of that above already);
* nr is < max_discard_sectors;
* I don't need sector_div here, even though nr is sector_t */
nr = nr_sectors;
nr -= (unsigned int)nr % granularity;
if (nr) {
err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO);
nr_sectors -= nr;
start += nr;
}
}
zero_out:
if (nr_sectors) {
err |= blkdev_issue_zeroout(bdev, start, nr_sectors, GFP_NOIO,
(flags & EE_TRIM) ? 0 : BLKDEV_ZERO_NOUNMAP);
}
return err != 0;
}
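/* Worked example for the split above (illustrative numbers only):
 * granularity = 8 sectors, alignment = 0, start = 13, nr_sectors = 50,
 * max_discard_sectors = 32. The function then effectively issues: */
#if 0
	blkdev_issue_zeroout(bdev, 13,  3, GFP_NOIO, 0);	/* unaligned head, up to sector 16 */
	blkdev_issue_discard(bdev, 16, 32, GFP_NOIO);		/* one full max-sized chunk */
	blkdev_issue_discard(bdev, 48,  8, GFP_NOIO);		/* remaining aligned full chunk */
	blkdev_issue_zeroout(bdev, 56,  7, GFP_NOIO, 0);	/* unaligned tail; EE_TRIM set, so no NOUNMAP */
#endif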
static bool can_do_reliable_discards(struct drbd_device *device)
{
struct disk_conf *dc;
bool can_do;
if (!bdev_max_discard_sectors(device->ldev->backing_bdev))
return false;
rcu_read_lock();
dc = rcu_dereference(device->ldev->disk_conf);
can_do = dc->discard_zeroes_if_aligned;
rcu_read_unlock();
return can_do;
}
static void drbd_issue_peer_discard_or_zero_out(struct drbd_device *device, struct drbd_peer_request *peer_req)
{
/* If the backend cannot discard, or does not guarantee
* read-back zeroes in discarded ranges, we fall back to
* zero-out. Unless configuration specifically requested
* otherwise. */
if (!can_do_reliable_discards(device))
peer_req->flags |= EE_ZEROOUT;
if (drbd_issue_discard_or_zero_out(device, peer_req->i.sector,
peer_req->i.size >> 9, peer_req->flags & (EE_ZEROOUT|EE_TRIM)))
peer_req->flags |= EE_WAS_ERROR;
drbd_endio_write_sec_final(peer_req);
}
static int peer_request_fault_type(struct drbd_peer_request *peer_req)
{
if (peer_req_op(peer_req) == REQ_OP_READ) {
return peer_req->flags & EE_APPLICATION ?
DRBD_FAULT_DT_RD : DRBD_FAULT_RS_RD;
} else {
return peer_req->flags & EE_APPLICATION ?
DRBD_FAULT_DT_WR : DRBD_FAULT_RS_WR;
}
}
/**
* drbd_submit_peer_request()
* @peer_req: peer request
*
* May spread the pages to multiple bios,
* depending on bio_add_page restrictions.
*
* Returns 0 if all bios have been submitted,
* -ENOMEM if we could not allocate enough bios,
* -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
* single page to an empty bio (which should never happen and likely indicates
* that the lower level IO stack is in some way broken). This has been observed
* on certain Xen deployments.
*/
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_peer_request *peer_req)
{
struct drbd_device *device = peer_req->peer_device->device;
struct bio *bios = NULL;
struct bio *bio;
struct page *page = peer_req->pages;
sector_t sector = peer_req->i.sector;
unsigned int data_size = peer_req->i.size;
unsigned int n_bios = 0;
unsigned int nr_pages = PFN_UP(data_size);
/* TRIM/DISCARD: for now, always use the helper function
* blkdev_issue_zeroout(..., discard=true).
* It's synchronous, but it does the right thing wrt. bio splitting.
* Correctness first, performance later. Next step is to code an
* asynchronous variant of the same.
*/
if (peer_req->flags & (EE_TRIM | EE_ZEROOUT)) {
/* wait for all pending IO completions, before we start
* zeroing things out. */
conn_wait_active_ee_empty(peer_req->peer_device->connection);
/* add it to the active list now,
* so we can find it to present it in debugfs */
peer_req->submit_jif = jiffies;
peer_req->flags |= EE_SUBMITTED;
/* If this was a resync request from receive_rs_deallocated(),
* it is already on the sync_ee list */
if (list_empty(&peer_req->w.list)) {
spin_lock_irq(&device->resource->req_lock);
list_add_tail(&peer_req->w.list, &device->active_ee);
spin_unlock_irq(&device->resource->req_lock);
}
drbd_issue_peer_discard_or_zero_out(device, peer_req);
return 0;
}
/* In most cases, we will only need one bio. But in case the lower
* level restrictions happen to be different at this offset on this
* side than those of the sending peer, we may need to submit the
* request in more than one bio.
*
* Plain bio_alloc is good enough here, this is no DRBD internally
* generated bio, but a bio allocated on behalf of the peer.
*/
next_bio:
/* _DISCARD, _WRITE_ZEROES handled above.
* REQ_OP_FLUSH (empty flush) not expected,
* should have been mapped to a "drbd protocol barrier".
* REQ_OP_SECURE_ERASE: I don't see how we could ever support that.
*/
if (!(peer_req_op(peer_req) == REQ_OP_WRITE ||
peer_req_op(peer_req) == REQ_OP_READ)) {
drbd_err(device, "Invalid bio op received: 0x%x\n", peer_req->opf);
return -EINVAL;
}
bio = bio_alloc(device->ldev->backing_bdev, nr_pages, peer_req->opf, GFP_NOIO);
/* > peer_req->i.sector, unless this is the first bio */
bio->bi_iter.bi_sector = sector;
bio->bi_private = peer_req;
bio->bi_end_io = drbd_peer_request_endio;
bio->bi_next = bios;
bios = bio;
++n_bios;
page_chain_for_each(page) {
unsigned len = min_t(unsigned, data_size, PAGE_SIZE);
if (!bio_add_page(bio, page, len, 0))
goto next_bio;
data_size -= len;
sector += len >> 9;
--nr_pages;
}
D_ASSERT(device, data_size == 0);
D_ASSERT(device, page == NULL);
atomic_set(&peer_req->pending_bios, n_bios);
/* for debugfs: update timestamp, mark as submitted */
peer_req->submit_jif = jiffies;
peer_req->flags |= EE_SUBMITTED;
do {
bio = bios;
bios = bios->bi_next;
bio->bi_next = NULL;
drbd_submit_bio_noacct(device, peer_request_fault_type(peer_req), bio);
} while (bios);
return 0;
}
static void drbd_remove_epoch_entry_interval(struct drbd_device *device,
struct drbd_peer_request *peer_req)
{
struct drbd_interval *i = &peer_req->i;
drbd_remove_interval(&device->write_requests, i);
drbd_clear_interval(i);
/* Wake up any processes waiting for this peer request to complete. */
if (i->waiting)
wake_up(&device->misc_wait);
}
static void conn_wait_active_ee_empty(struct drbd_connection *connection)
{
struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
kref_get(&device->kref);
rcu_read_unlock();
drbd_wait_ee_list_empty(device, &device->active_ee);
kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
}
rcu_read_unlock();
}
static int receive_Barrier(struct drbd_connection *connection, struct packet_info *pi)
{
int rv;
struct p_barrier *p = pi->data;
struct drbd_epoch *epoch;
/* FIXME these are unacked on connection,
* not a specific (peer)device.
*/
connection->current_epoch->barrier_nr = p->barrier;
connection->current_epoch->connection = connection;
rv = drbd_may_finish_epoch(connection, connection->current_epoch, EV_GOT_BARRIER_NR);
/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
* the activity log, which means it would not be resynced in case the
* R_PRIMARY crashes now.
* Therefore we must send the barrier_ack after the barrier request was
* completed. */
switch (connection->resource->write_ordering) {
case WO_NONE:
if (rv == FE_RECYCLED)
return 0;
/* receiver context, in the writeout path of the other node.
* avoid potential distributed deadlock */
epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
if (epoch)
break;
else
drbd_warn(connection, "Allocation of an epoch failed, slowing down\n");
fallthrough;
case WO_BDEV_FLUSH:
case WO_DRAIN_IO:
conn_wait_active_ee_empty(connection);
drbd_flush(connection);
if (atomic_read(&connection->current_epoch->epoch_size)) {
epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
if (epoch)
break;
}
return 0;
default:
drbd_err(connection, "Strangeness in connection->write_ordering %d\n",
connection->resource->write_ordering);
return -EIO;
}
epoch->flags = 0;
atomic_set(&epoch->epoch_size, 0);
atomic_set(&epoch->active, 0);
spin_lock(&connection->epoch_lock);
if (atomic_read(&connection->current_epoch->epoch_size)) {
list_add(&epoch->list, &connection->current_epoch->list);
connection->current_epoch = epoch;
connection->epochs++;
} else {
/* The current_epoch got recycled while we allocated this one... */
kfree(epoch);
}
spin_unlock(&connection->epoch_lock);
return 0;
}
/* quick wrapper in case payload size != request_size (write same) */
static void drbd_csum_ee_size(struct crypto_shash *h,
struct drbd_peer_request *r, void *d,
unsigned int payload_size)
{
unsigned int tmp = r->i.size;
r->i.size = payload_size;
drbd_csum_ee(h, r, d);
r->i.size = tmp;
}
/* used from receive_RSDataReply (recv_resync_read)
* and from receive_Data.
* data_size: actual payload ("data in")
* for normal writes that is bi_size.
* for discards, that is zero.
* for write same, it is logical_block_size.
* both trim and write same have the bi_size ("data len to be affected")
* as extra argument in the packet header.
*/
static struct drbd_peer_request *
read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
struct packet_info *pi) __must_hold(local)
{
struct drbd_device *device = peer_device->device;
const sector_t capacity = get_capacity(device->vdisk);
struct drbd_peer_request *peer_req;
struct page *page;
int digest_size, err;
unsigned int data_size = pi->size, ds;
void *dig_in = peer_device->connection->int_dig_in;
void *dig_vv = peer_device->connection->int_dig_vv;
unsigned long *data;
struct p_trim *trim = (pi->cmd == P_TRIM) ? pi->data : NULL;
struct p_trim *zeroes = (pi->cmd == P_ZEROES) ? pi->data : NULL;
digest_size = 0;
if (!trim && peer_device->connection->peer_integrity_tfm) {
digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
/*
* FIXME: Receive the incoming digest into the receive buffer
* here, together with its struct p_data?
*/
err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
if (err)
return NULL;
data_size -= digest_size;
}
/* assume request_size == data_size, but special case trim. */
ds = data_size;
if (trim) {
if (!expect(peer_device, data_size == 0))
return NULL;
ds = be32_to_cpu(trim->size);
} else if (zeroes) {
if (!expect(peer_device, data_size == 0))
return NULL;
ds = be32_to_cpu(zeroes->size);
}
if (!expect(peer_device, IS_ALIGNED(ds, 512)))
return NULL;
if (trim || zeroes) {
if (!expect(peer_device, ds <= (DRBD_MAX_BBIO_SECTORS << 9)))
return NULL;
} else if (!expect(peer_device, ds <= DRBD_MAX_BIO_SIZE))
return NULL;
/* even though we trust our peer,
* we sometimes have to double check. */
if (sector + (ds>>9) > capacity) {
drbd_err(device, "request from peer beyond end of local disk: "
"capacity: %llus < sector: %llus + size: %u\n",
(unsigned long long)capacity,
(unsigned long long)sector, ds);
return NULL;
}
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO);
if (!peer_req)
return NULL;
peer_req->flags |= EE_WRITE;
if (trim) {
peer_req->flags |= EE_TRIM;
return peer_req;
}
if (zeroes) {
peer_req->flags |= EE_ZEROOUT;
return peer_req;
}
/* receive payload size bytes into page chain */
ds = data_size;
page = peer_req->pages;
page_chain_for_each(page) {
unsigned len = min_t(int, ds, PAGE_SIZE);
data = kmap(page);
err = drbd_recv_all_warn(peer_device->connection, data, len);
if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) {
drbd_err(device, "Fault injection: Corrupting data on receive\n");
data[0] = data[0] ^ (unsigned long)-1;
}
kunmap(page);
if (err) {
drbd_free_peer_req(device, peer_req);
return NULL;
}
ds -= len;
}
if (digest_size) {
drbd_csum_ee_size(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv, data_size);
if (memcmp(dig_in, dig_vv, digest_size)) {
drbd_err(device, "Digest integrity check FAILED: %llus +%u\n",
(unsigned long long)sector, data_size);
drbd_free_peer_req(device, peer_req);
return NULL;
}
}
device->recv_cnt += data_size >> 9;
return peer_req;
}
/* drbd_drain_block() just takes a data block
* out of the socket input buffer, and discards it.
*/
static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
{
struct page *page;
int err = 0;
void *data;
if (!data_size)
return 0;
page = drbd_alloc_pages(peer_device, 1, 1);
data = kmap(page);
while (data_size) {
unsigned int len = min_t(int, data_size, PAGE_SIZE);
err = drbd_recv_all_warn(peer_device->connection, data, len);
if (err)
break;
data_size -= len;
}
kunmap(page);
drbd_free_pages(peer_device->device, page, 0);
return err;
}
static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_request *req,
sector_t sector, int data_size)
{
struct bio_vec bvec;
struct bvec_iter iter;
struct bio *bio;
int digest_size, err, expect;
void *dig_in = peer_device->connection->int_dig_in;
void *dig_vv = peer_device->connection->int_dig_vv;
digest_size = 0;
if (peer_device->connection->peer_integrity_tfm) {
digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
if (err)
return err;
data_size -= digest_size;
}
/* optimistically update recv_cnt. if receiving fails below,
* we disconnect anyway, and counters will be reset. */
peer_device->device->recv_cnt += data_size>>9;
bio = req->master_bio;
D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector);
bio_for_each_segment(bvec, bio, iter) {
void *mapped = bvec_kmap_local(&bvec);
expect = min_t(int, data_size, bvec.bv_len);
err = drbd_recv_all_warn(peer_device->connection, mapped, expect);
kunmap_local(mapped);
if (err)
return err;
data_size -= expect;
}
if (digest_size) {
drbd_csum_bio(peer_device->connection->peer_integrity_tfm, bio, dig_vv);
if (memcmp(dig_in, dig_vv, digest_size)) {
drbd_err(peer_device, "Digest integrity check FAILED. Broken NICs?\n");
return -EINVAL;
}
}
D_ASSERT(peer_device->device, data_size == 0);
return 0;
}
/*
* e_end_resync_block() is called in ack_sender context via
* drbd_finish_peer_reqs().
*/
static int e_end_resync_block(struct drbd_work *w, int unused)
{
struct drbd_peer_request *peer_req =
container_of(w, struct drbd_peer_request, w);
struct drbd_peer_device *peer_device = peer_req->peer_device;
struct drbd_device *device = peer_device->device;
sector_t sector = peer_req->i.sector;
int err;
D_ASSERT(device, drbd_interval_empty(&peer_req->i));
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
drbd_set_in_sync(peer_device, sector, peer_req->i.size);
err = drbd_send_ack(peer_device, P_RS_WRITE_ACK, peer_req);
} else {
/* Record failure to sync */
drbd_rs_failed_io(peer_device, sector, peer_req->i.size);
err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
}
dec_unacked(device);
return err;
}
static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector,
struct packet_info *pi) __releases(local)
{
struct drbd_device *device = peer_device->device;
struct drbd_peer_request *peer_req;
peer_req = read_in_block(peer_device, ID_SYNCER, sector, pi);
if (!peer_req)
goto fail;
dec_rs_pending(peer_device);
inc_unacked(device);
/* corresponding dec_unacked() in e_end_resync_block(),
* or in _drbd_clear_done_ee(), respectively */
peer_req->w.cb = e_end_resync_block;
peer_req->opf = REQ_OP_WRITE;
peer_req->submit_jif = jiffies;
spin_lock_irq(&device->resource->req_lock);
list_add_tail(&peer_req->w.list, &device->sync_ee);
spin_unlock_irq(&device->resource->req_lock);
atomic_add(pi->size >> 9, &device->rs_sect_ev);
if (drbd_submit_peer_request(peer_req) == 0)
return 0;
/* don't care for the reason here */
drbd_err(device, "submit failed, triggering re-connect\n");
spin_lock_irq(&device->resource->req_lock);
list_del(&peer_req->w.list);
spin_unlock_irq(&device->resource->req_lock);
drbd_free_peer_req(device, peer_req);
fail:
put_ldev(device);
return -EIO;
}
static struct drbd_request *
find_request(struct drbd_device *device, struct rb_root *root, u64 id,
sector_t sector, bool missing_ok, const char *func)
{
struct drbd_request *req;
/* Request object according to our peer */
req = (struct drbd_request *)(unsigned long)id;
if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
return req;
if (!missing_ok) {
drbd_err(device, "%s: failed to find request 0x%lx, sector %llus\n", func,
(unsigned long)id, (unsigned long long)sector);
}
return NULL;
}
static int receive_DataReply(struct drbd_connection *connection, struct packet_info *pi)
{
struct drbd_peer_device *peer_device;
struct drbd_device *device;
struct drbd_request *req;
sector_t sector;
int err;
struct p_data *p = pi->data;
peer_device = conn_peer_device(connection, pi->vnr);
if (!peer_device)
return -EIO;
device = peer_device->device;
sector = be64_to_cpu(p->sector);
spin_lock_irq(&device->resource->req_lock);
req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__);
spin_unlock_irq(&device->resource->req_lock);
if (unlikely(!req))
return -EIO;
err = recv_dless_read(peer_device, req, sector, pi->size);
if (!err)
req_mod(req, DATA_RECEIVED, peer_device);
/* else: nothing; this is handled from drbd_disconnect().
* We had better not complete this request just yet,
* in case we are "on-disconnect: freeze". */
return err;
}
static int receive_RSDataReply(struct drbd_connection *connection, struct packet_info *pi)
{
struct drbd_peer_device *peer_device;
struct drbd_device *device;
sector_t sector;
int err;
struct p_data *p = pi->data;
peer_device = conn_peer_device(connection, pi->vnr);
if (!peer_device)
return -EIO;
device = peer_device->device;
sector = be64_to_cpu(p->sector);
D_ASSERT(device, p->block_id == ID_SYNCER);
if (get_ldev(device)) {
/* data is submitted to disk within recv_resync_read.
* corresponding put_ldev done below on error,
* or in drbd_peer_request_endio. */
err = recv_resync_read(peer_device, sector, pi);
} else {
if (drbd_ratelimit())
drbd_err(device, "Can not write resync data to local disk.\n");
err = drbd_drain_block(peer_device, pi->size);
drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
}
atomic_add(pi->size >> 9, &device->rs_sect_in);
return err;
}
static void restart_conflicting_writes(struct drbd_device *device,
sector_t sector, int size)
{
struct drbd_interval *i;
struct drbd_request *req;
drbd_for_each_overlap(i, &device->write_requests, sector, size) {
if (!i->local)
continue;
req = container_of(i, struct drbd_request, i);
if (req->rq_state & RQ_LOCAL_PENDING ||
!(req->rq_state & RQ_POSTPONED))
continue;
/* as it is RQ_POSTPONED, this will cause it to
* be queued on the retry workqueue. */
__req_mod(req, CONFLICT_RESOLVED, NULL, NULL);
}
}
/*
* e_end_block() is called in ack_sender context via drbd_finish_peer_reqs().
*/
static int e_end_block(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req =
container_of(w, struct drbd_peer_request, w);
struct drbd_peer_device *peer_device = peer_req->peer_device;
struct drbd_device *device = peer_device->device;
sector_t sector = peer_req->i.sector;
int err = 0, pcmd;
if (peer_req->flags & EE_SEND_WRITE_ACK) {
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
pcmd = (device->state.conn >= C_SYNC_SOURCE &&
device->state.conn <= C_PAUSED_SYNC_T &&
peer_req->flags & EE_MAY_SET_IN_SYNC) ?
P_RS_WRITE_ACK : P_WRITE_ACK;
err = drbd_send_ack(peer_device, pcmd, peer_req);
if (pcmd == P_RS_WRITE_ACK)
drbd_set_in_sync(peer_device, sector, peer_req->i.size);
} else {
err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
/* we expect it to be marked out of sync anyways...
* maybe assert this? */
}
dec_unacked(device);
}
/* we delete from the conflict detection hash _after_ we sent out the
* P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
if (peer_req->flags & EE_IN_INTERVAL_TREE) {
spin_lock_irq(&device->resource->req_lock);
D_ASSERT(device, !drbd_interval_empty(&peer_req->i));
drbd_remove_epoch_entry_interval(device, peer_req);
if (peer_req->flags & EE_RESTART_REQUESTS)
restart_conflicting_writes(device, sector, peer_req->i.size);
spin_unlock_irq(&device->resource->req_lock);
} else
D_ASSERT(device, drbd_interval_empty(&peer_req->i));
drbd_may_finish_epoch(peer_device->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
return err;
}
static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
{
struct drbd_peer_request *peer_req =
container_of(w, struct drbd_peer_request, w);
struct drbd_peer_device *peer_device = peer_req->peer_device;
int err;
err = drbd_send_ack(peer_device, ack, peer_req);
dec_unacked(peer_device->device);
return err;
}
static int e_send_superseded(struct drbd_work *w, int unused)
{
return e_send_ack(w, P_SUPERSEDED);
}
static int e_send_retry_write(struct drbd_work *w, int unused)
{
struct drbd_peer_request *peer_req =
container_of(w, struct drbd_peer_request, w);
struct drbd_connection *connection = peer_req->peer_device->connection;
return e_send_ack(w, connection->agreed_pro_version >= 100 ?
P_RETRY_WRITE : P_SUPERSEDED);
}
static bool seq_greater(u32 a, u32 b)
{
/*
* We assume 32-bit wrap-around here.
* For 24-bit wrap-around, we would have to shift:
* a <<= 8; b <<= 8;
*/
return (s32)a - (s32)b > 0;
}
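/*
* Example: with 32-bit wrap-around, seq_greater(5, 0xfffffffe) computes
* (s32)5 - (s32)0xfffffffe == 5 - (-2) == 7 > 0, so 5 is correctly
* considered the newer sequence number even though it is numerically
* smaller.
*/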
static u32 seq_max(u32 a, u32 b)
{
return seq_greater(a, b) ? a : b;
}
static void update_peer_seq(struct drbd_peer_device *peer_device, unsigned int peer_seq)
{
struct drbd_device *device = peer_device->device;
unsigned int newest_peer_seq;
if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)) {
spin_lock(&device->peer_seq_lock);
newest_peer_seq = seq_max(device->peer_seq, peer_seq);
device->peer_seq = newest_peer_seq;
spin_unlock(&device->peer_seq_lock);
/* wake up only if we actually changed device->peer_seq */
if (peer_seq == newest_peer_seq)
wake_up(&device->seq_wait);
}
}
static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
{
return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
}
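/*
* Example (lengths are in bytes, sectors of 512): overlaps(0, 4096, 8, 512)
* is false, since the first range covers sectors [0, 8) and the second
* starts at sector 8; overlaps(0, 4096, 7, 512) is true, because [0, 8)
* and [7, 8) intersect.
*/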
/* maybe change sync_ee into interval trees as well? */
static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req)
{
struct drbd_peer_request *rs_req;
bool rv = false;
spin_lock_irq(&device->resource->req_lock);
list_for_each_entry(rs_req, &device->sync_ee, w.list) {
if (overlaps(peer_req->i.sector, peer_req->i.size,
rs_req->i.sector, rs_req->i.size)) {
rv = true;
break;
}
}
spin_unlock_irq(&device->resource->req_lock);
return rv;
}
/* Called from receive_Data.
* Synchronize packets on sock with packets on msock.
*
* This is here so even when a P_DATA packet traveling via sock overtook an Ack
* packet traveling on msock, they are still processed in the order they have
* been sent.
*
* Note: we don't care for Ack packets overtaking P_DATA packets.
*
* In case packet_seq is larger than device->peer_seq number, there are
* outstanding packets on the msock. We wait for them to arrive.
* In case we are the logically next packet, we update device->peer_seq
* ourselves. Correctly handles 32bit wrap around.
*
* Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
* about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
* for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
* 1<<9 == 512 seconds aka ages for the 32bit wrap around...
*
* returns 0 if we may process the packet,
* -ERESTARTSYS if we were interrupted (by disconnect signal). */
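/*
* Example: if device->peer_seq is currently 41, a P_DATA packet carrying
* seq_num 42 passes immediately (seq_greater(41, 41) is false) and bumps
* peer_seq to 42; a packet carrying 43 makes seq_greater(42, 41) true, so
* we wait until the outstanding msock packet has advanced peer_seq to 42.
*/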
static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, const u32 peer_seq)
{
struct drbd_device *device = peer_device->device;
DEFINE_WAIT(wait);
long timeout;
int ret = 0, tp;
if (!test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags))
return 0;
spin_lock(&device->peer_seq_lock);
for (;;) {
if (!seq_greater(peer_seq - 1, device->peer_seq)) {
device->peer_seq = seq_max(device->peer_seq, peer_seq);
break;
}
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
rcu_read_lock();
tp = rcu_dereference(peer_device->connection->net_conf)->two_primaries;
rcu_read_unlock();
if (!tp)
break;
/* Only need to wait if two_primaries is enabled */
prepare_to_wait(&device->seq_wait, &wait, TASK_INTERRUPTIBLE);
spin_unlock(&device->peer_seq_lock);
rcu_read_lock();
timeout = rcu_dereference(peer_device->connection->net_conf)->ping_timeo*HZ/10;
rcu_read_unlock();
timeout = schedule_timeout(timeout);
spin_lock(&device->peer_seq_lock);
if (!timeout) {
ret = -ETIMEDOUT;
drbd_err(device, "Timed out waiting for missing ack packets; disconnecting\n");
break;
}
}
spin_unlock(&device->peer_seq_lock);
finish_wait(&device->seq_wait, &wait);
return ret;
}
static enum req_op wire_flags_to_bio_op(u32 dpf)
{
if (dpf & DP_ZEROES)
return REQ_OP_WRITE_ZEROES;
if (dpf & DP_DISCARD)
return REQ_OP_DISCARD;
else
return REQ_OP_WRITE;
}
/* see also bio_flags_to_wire() */
static blk_opf_t wire_flags_to_bio(struct drbd_connection *connection, u32 dpf)
{
return wire_flags_to_bio_op(dpf) |
(dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
(dpf & DP_FUA ? REQ_FUA : 0) |
(dpf & DP_FLUSH ? REQ_PREFLUSH : 0);
}
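/*
* Example: dpf == (DP_DISCARD | DP_FUA) maps to REQ_OP_DISCARD | REQ_FUA.
* If DP_ZEROES and DP_DISCARD are both set, DP_ZEROES wins and the
* operation becomes REQ_OP_WRITE_ZEROES; the DP_DISCARD bit then only
* influences the EE_TRIM handling in receive_Data().
*/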
static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
unsigned int size)
{
struct drbd_peer_device *peer_device = first_peer_device(device);
struct drbd_interval *i;
repeat:
drbd_for_each_overlap(i, &device->write_requests, sector, size) {
struct drbd_request *req;
struct bio_and_error m;
if (!i->local)
continue;
req = container_of(i, struct drbd_request, i);
if (!(req->rq_state & RQ_POSTPONED))
continue;
req->rq_state &= ~RQ_POSTPONED;
__req_mod(req, NEG_ACKED, peer_device, &m);
spin_unlock_irq(&device->resource->req_lock);
if (m.bio)
complete_master_bio(device, &m);
spin_lock_irq(&device->resource->req_lock);
goto repeat;
}
}
static int handle_write_conflicts(struct drbd_device *device,
struct drbd_peer_request *peer_req)
{
struct drbd_connection *connection = peer_req->peer_device->connection;
bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &connection->flags);
sector_t sector = peer_req->i.sector;
const unsigned int size = peer_req->i.size;
struct drbd_interval *i;
bool equal;
int err;
/*
* Inserting the peer request into the write_requests tree will prevent
* new conflicting local requests from being added.
*/
drbd_insert_interval(&device->write_requests, &peer_req->i);
repeat:
drbd_for_each_overlap(i, &device->write_requests, sector, size) {
if (i == &peer_req->i)
continue;
if (i->completed)
continue;
if (!i->local) {
/*
* Our peer has sent a conflicting remote request; this
* should not happen in a two-node setup. Wait for the
* earlier peer request to complete.
*/
err = drbd_wait_misc(device, i);
if (err)
goto out;
goto repeat;
}
equal = i->sector == sector && i->size == size;
if (resolve_conflicts) {
/*
* If the peer request is fully contained within the
* overlapping request, it can be considered overwritten
* and thus superseded; otherwise, it will be retried
* once all overlapping requests have completed.
*/
bool superseded = i->sector <= sector && i->sector +
(i->size >> 9) >= sector + (size >> 9);
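/* Example: a local write covering sectors [0, 64) fully contains
* a peer write covering [8, 16), so the peer write is considered
* superseded; a peer write covering [60, 68) only partially
* overlaps and is queued for e_send_retry_write instead. */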
if (!equal)
drbd_alert(device, "Concurrent writes detected: "
"local=%llus +%u, remote=%llus +%u, "
"assuming %s came first\n",
(unsigned long long)i->sector, i->size,
(unsigned long long)sector, size,
superseded ? "local" : "remote");
peer_req->w.cb = superseded ? e_send_superseded :
e_send_retry_write;
list_add_tail(&peer_req->w.list, &device->done_ee);
queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work);
err = -ENOENT;
goto out;
} else {
struct drbd_request *req =
container_of(i, struct drbd_request, i);
if (!equal)
drbd_alert(device, "Concurrent writes detected: "
"local=%llus +%u, remote=%llus +%u\n",
(unsigned long long)i->sector, i->size,
(unsigned long long)sector, size);
if (req->rq_state & RQ_LOCAL_PENDING ||
!(req->rq_state & RQ_POSTPONED)) {
/*
* Wait for the node with the discard flag to
* decide if this request has been superseded
* or needs to be retried.
* Requests that have been superseded will
* disappear from the write_requests tree.
*
* In addition, wait for the conflicting
* request to finish locally before submitting
* the conflicting peer request.
*/
err = drbd_wait_misc(device, &req->i);
if (err) {
_conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
fail_postponed_requests(device, sector, size);
goto out;
}
goto repeat;
}
/*
* Remember to restart the conflicting requests after
* the new peer request has completed.
*/
peer_req->flags |= EE_RESTART_REQUESTS;
}
}
err = 0;
out:
if (err)
drbd_remove_epoch_entry_interval(device, peer_req);
return err;
}
/* mirrored write */
static int receive_Data(struct drbd_connection *connection, struct packet_info *pi)
{
struct drbd_peer_device *peer_device;
struct drbd_device *device;
struct net_conf *nc;
sector_t sector;
struct drbd_peer_request *peer_req;
struct p_data *p = pi->data;
u32 peer_seq = be32_to_cpu(p->seq_num);
u32 dp_flags;
int err, tp;
peer_device = conn_peer_device(connection, pi->vnr);
if (!peer_device)
return -EIO;
device = peer_device->device;
if (!get_ldev(device)) {
int err2;
err = wait_for_and_update_peer_seq(peer_device, peer_seq);
drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
atomic_inc(&connection->current_epoch->epoch_size);
err2 = drbd_drain_block(peer_device, pi->size);
if (!err)
err = err2;
return err;
}
/*
* Corresponding put_ldev done either below (on various errors), or in
* drbd_peer_request_endio, if we successfully submit the data at the
* end of this function.
*/
sector = be64_to_cpu(p->sector);
peer_req = read_in_block(peer_device, p->block_id, sector, pi);
if (!peer_req) {
put_ldev(device);
return -EIO;
}
peer_req->w.cb = e_end_block;
peer_req->submit_jif = jiffies;
peer_req->flags |= EE_APPLICATION;
dp_flags = be32_to_cpu(p->dp_flags);
peer_req->opf = wire_flags_to_bio(connection, dp_flags);
if (pi->cmd == P_TRIM) {
D_ASSERT(peer_device, peer_req->i.size > 0);
D_ASSERT(peer_device, peer_req_op(peer_req) == REQ_OP_DISCARD);
D_ASSERT(peer_device, peer_req->pages == NULL);
/* need to play safe: an older DRBD sender
* may mean zero-out while sending P_TRIM. */
if (0 == (connection->agreed_features & DRBD_FF_WZEROES))
peer_req->flags |= EE_ZEROOUT;
} else if (pi->cmd == P_ZEROES) {
D_ASSERT(peer_device, peer_req->i.size > 0);
D_ASSERT(peer_device, peer_req_op(peer_req) == REQ_OP_WRITE_ZEROES);
D_ASSERT(peer_device, peer_req->pages == NULL);
/* Do (not) pass down BLKDEV_ZERO_NOUNMAP? */
if (dp_flags & DP_DISCARD)
peer_req->flags |= EE_TRIM;
} else if (peer_req->pages == NULL) {
D_ASSERT(device, peer_req->i.size == 0);
D_ASSERT(device, dp_flags & DP_FLUSH);
}
if (dp_flags & DP_MAY_SET_IN_SYNC)
peer_req->flags |= EE_MAY_SET_IN_SYNC;
spin_lock(&connection->epoch_lock);
peer_req->epoch = connection->current_epoch;
atomic_inc(&peer_req->epoch->epoch_size);
atomic_inc(&peer_req->epoch->active);
spin_unlock(&connection->epoch_lock);
rcu_read_lock();
nc = rcu_dereference(peer_device->connection->net_conf);
tp = nc->two_primaries;
if (peer_device->connection->agreed_pro_version < 100) {
switch (nc->wire_protocol) {
case DRBD_PROT_C:
dp_flags |= DP_SEND_WRITE_ACK;
break;
case DRBD_PROT_B:
dp_flags |= DP_SEND_RECEIVE_ACK;
break;
}
}
rcu_read_unlock();
if (dp_flags & DP_SEND_WRITE_ACK) {
peer_req->flags |= EE_SEND_WRITE_ACK;
inc_unacked(device);
/* corresponding dec_unacked() in e_end_block(),
* or in _drbd_clear_done_ee(), respectively */
}
if (dp_flags & DP_SEND_RECEIVE_ACK) {
/* I really don't like it that the receiver thread
* sends on the msock, but anyways */
drbd_send_ack(peer_device, P_RECV_ACK, peer_req);
}
if (tp) {
/* two primaries implies protocol C */
D_ASSERT(device, dp_flags & DP_SEND_WRITE_ACK);
peer_req->flags |= EE_IN_INTERVAL_TREE;
err = wait_for_and_update_peer_seq(peer_device, peer_seq);
if (err)
goto out_interrupted;
spin_lock_irq(&device->resource->req_lock);
err = handle_write_conflicts(device, peer_req);
if (err) {
spin_unlock_irq(&device->resource->req_lock);
if (err == -ENOENT) {
put_ldev(device);
return 0;
}
goto out_interrupted;
}
} else {
update_peer_seq(peer_device, peer_seq);
spin_lock_irq(&device->resource->req_lock);
}
/* TRIM and ZEROOUT requests are processed synchronously:
* we wait for all pending requests, respectively wait for
* active_ee to become empty, in drbd_submit_peer_request();
* better not add ourselves here. */
if ((peer_req->flags & (EE_TRIM | EE_ZEROOUT)) == 0)
list_add_tail(&peer_req->w.list, &device->active_ee);
spin_unlock_irq(&device->resource->req_lock);
if (device->state.conn == C_SYNC_TARGET)
wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req));
if (device->state.pdsk < D_INCONSISTENT) {
/* In case we have the only disk of the cluster: mark the range
* out of sync towards the peer and never set it in sync. */
drbd_set_out_of_sync(peer_device, peer_req->i.sector, peer_req->i.size);
peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
drbd_al_begin_io(device, &peer_req->i);
peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
}
err = drbd_submit_peer_request(peer_req);
if (!err)
return 0;
/* don't care for the reason here */
drbd_err(device, "submit failed, triggering re-connect\n");
spin_lock_irq(&device->resource->req_lock);
list_del(&peer_req->w.list);
drbd_remove_epoch_entry_interval(device, peer_req);
spin_unlock_irq(&device->resource->req_lock);
if (peer_req->flags & EE_CALL_AL_COMPLETE_IO) {
peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
drbd_al_complete_io(device, &peer_req->i);
}
out_interrupted:
drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT | EV_CLEANUP);
put_ldev(device);
drbd_free_peer_req(device, peer_req);
return err;
}
/* We may throttle resync, if the lower device seems to be busy,
* and current sync rate is above c_min_rate.
*
* To decide whether or not the lower device is busy, we use a scheme similar
* to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
* activity (more than 64 sectors) that we cannot account for with our own
* resync activity, the device obviously is "busy".
*
* The current sync rate used here uses only the most recent two step marks,
* to have a short time average so we can react faster.
*/
bool drbd_rs_should_slow_down(struct drbd_peer_device *peer_device, sector_t sector,
bool throttle_if_app_is_waiting)
{
struct drbd_device *device = peer_device->device;
struct lc_element *tmp;
bool throttle = drbd_rs_c_min_rate_throttle(device);
if (!throttle || throttle_if_app_is_waiting)
return throttle;
spin_lock_irq(&device->al_lock);
tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector));
if (tmp) {
struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
if (test_bit(BME_PRIORITY, &bm_ext->flags))
throttle = false;
/* Do not slow down if app IO is already waiting for this extent,
* and our progress is necessary for application IO to complete. */
}
spin_unlock_irq(&device->al_lock);
return throttle;
}
bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
{
struct gendisk *disk = device->ldev->backing_bdev->bd_disk;
unsigned long db, dt, dbdt;
unsigned int c_min_rate;
int curr_events;
rcu_read_lock();
c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate;
rcu_read_unlock();
/* feature disabled? */
if (c_min_rate == 0)
return false;
curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
atomic_read(&device->rs_sect_ev);
if (atomic_read(&device->ap_actlog_cnt)
|| curr_events - device->rs_last_events > 64) {
unsigned long rs_left;
int i;
device->rs_last_events = curr_events;
/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
* approx. */
i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
rs_left = device->ov_left;
else
rs_left = drbd_bm_total_weight(device) - device->rs_failed;
dt = ((long)jiffies - (long)device->rs_mark_time[i]) / HZ;
if (!dt)
dt++;
db = device->rs_mark_left[i] - rs_left;
dbdt = Bit2KB(db/dt);
if (dbdt > c_min_rate)
return true;
}
return false;
}
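/*
* Example, assuming the usual 4 KiB of resync data per bitmap bit
* (Bit2KB(x) == x << 2): if the last two sync marks show 1024 bits
* cleared within dt == 2 seconds, db/dt == 512 and dbdt ==
* Bit2KB(512) == 2048 KiB/s, so this returns true whenever c_min_rate
* is configured below 2048 and the activity check above triggered.
*/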
static int receive_DataRequest(struct drbd_connection *connection, struct packet_info *pi)
{
struct drbd_peer_device *peer_device;
struct drbd_device *device;
sector_t sector;
sector_t capacity;
struct drbd_peer_request *peer_req;
struct digest_info *di = NULL;
int size, verb;
struct p_block_req *p = pi->data;
peer_device = conn_peer_device(connection, pi->vnr);
if (!peer_device)
return -EIO;
device = peer_device->device;
capacity = get_capacity(device->vdisk);
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
(unsigned long long)sector, size);
return -EINVAL;
}
if (sector + (size>>9) > capacity) {
drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
(unsigned long long)sector, size);
return -EINVAL;
}
if (!get_ldev_if_state(device, D_UP_TO_DATE)) {
verb = 1;
switch (pi->cmd) {
case P_DATA_REQUEST:
drbd_send_ack_rp(peer_device, P_NEG_DREPLY, p);
break;
case P_RS_THIN_REQ:
case P_RS_DATA_REQUEST:
case P_CSUM_RS_REQUEST:
case P_OV_REQUEST:
drbd_send_ack_rp(peer_device, P_NEG_RS_DREPLY, p);
break;
case P_OV_REPLY:
verb = 0;
dec_rs_pending(peer_device);
drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, ID_IN_SYNC);
break;
default:
BUG();
}
if (verb && drbd_ratelimit())
drbd_err(device, "Can not satisfy peer's read request, "
"no local data.\n");
/* drain a possible payload */
return drbd_drain_block(peer_device, pi->size);
}
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size,
size, GFP_NOIO);
if (!peer_req) {
put_ldev(device);
return -ENOMEM;
}
peer_req->opf = REQ_OP_READ;
switch (pi->cmd) {
case P_DATA_REQUEST:
peer_req->w.cb = w_e_end_data_req;
/* application IO, don't drbd_rs_begin_io */
peer_req->flags |= EE_APPLICATION;
goto submit;
case P_RS_THIN_REQ:
/* If at some point in the future we have a smart way to
find out if this data block is completely deallocated,
then we would do something smarter here than reading
the block... */
peer_req->flags |= EE_RS_THIN_REQ;
fallthrough;
case P_RS_DATA_REQUEST:
peer_req->w.cb = w_e_end_rsdata_req;
/* used in the sector offset progress display */
device->bm_resync_fo = BM_SECT_TO_BIT(sector);
break;
case P_OV_REPLY:
case P_CSUM_RS_REQUEST:
di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
if (!di)
goto out_free_e;
di->digest_size = pi->size;
di->digest = (((char *)di)+sizeof(struct digest_info));
peer_req->digest = di;
peer_req->flags |= EE_HAS_DIGEST;
if (drbd_recv_all(peer_device->connection, di->digest, pi->size))
goto out_free_e;
if (pi->cmd == P_CSUM_RS_REQUEST) {
D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
peer_req->w.cb = w_e_end_csum_rs_req;
/* used in the sector offset progress display */
device->bm_resync_fo = BM_SECT_TO_BIT(sector);
/* remember to report stats in drbd_resync_finished */
device->use_csums = true;
} else if (pi->cmd == P_OV_REPLY) {
/* track progress, we may need to throttle */
atomic_add(size >> 9, &device->rs_sect_in);
peer_req->w.cb = w_e_end_ov_reply;
dec_rs_pending(peer_device);
/* drbd_rs_begin_io done when we sent this request,
* but accounting still needs to be done. */
goto submit_for_resync;
}
break;
case P_OV_REQUEST:
if (device->ov_start_sector == ~(sector_t)0 &&
peer_device->connection->agreed_pro_version >= 90) {
unsigned long now = jiffies;
int i;
device->ov_start_sector = sector;
device->ov_position = sector;
device->ov_left = drbd_bm_bits(device) - BM_SECT_TO_BIT(sector);
device->rs_total = device->ov_left;
for (i = 0; i < DRBD_SYNC_MARKS; i++) {
device->rs_mark_left[i] = device->ov_left;
device->rs_mark_time[i] = now;
}
drbd_info(device, "Online Verify start sector: %llu\n",
(unsigned long long)sector);
}
peer_req->w.cb = w_e_end_ov_req;
break;
default:
BUG();
}
/* Throttle, drbd_rs_begin_io and submit should become asynchronous
* wrt the receiver, but it is not as straightforward as it may seem.
* Various places in the resync start and stop logic assume resync
* requests are processed in order, requeuing this on the worker thread
* introduces a bunch of new code for synchronization between threads.
*
* Unlimited throttling before drbd_rs_begin_io may stall the resync
* "forever", throttling after drbd_rs_begin_io will lock that extent
* for application writes for the same time. For now, just throttle
* here, where the rest of the code expects the receiver to sleep for
* a while, anyways.
*/
/* Throttle before drbd_rs_begin_io, as that locks out application IO;
* this defers syncer requests for some time, before letting at least
* one request through. The resync controller on the receiving side
* will adapt to the incoming rate accordingly.
*
* We cannot throttle here if remote is Primary/SyncTarget:
* we would also throttle its application reads.
* In that case, throttling is done on the SyncTarget only.
*/
/* Even though this may be a resync request, we do add to "read_ee";
* "sync_ee" is only used for resync WRITEs.
* Add to list early, so debugfs can find this request
* even if we have to sleep below. */
spin_lock_irq(&device->resource->req_lock);
list_add_tail(&peer_req->w.list, &device->read_ee);
spin_unlock_irq(&device->resource->req_lock);
update_receiver_timing_details(connection, drbd_rs_should_slow_down);
if (device->state.peer != R_PRIMARY
&& drbd_rs_should_slow_down(peer_device, sector, false))
schedule_timeout_uninterruptible(HZ/10);
update_receiver_timing_details(connection, drbd_rs_begin_io);
if (drbd_rs_begin_io(device, sector))
goto out_free_e;
submit_for_resync:
atomic_add(size >> 9, &device->rs_sect_ev);
submit:
update_receiver_timing_details(connection, drbd_submit_peer_request);
inc_unacked(device);
if (drbd_submit_peer_request(peer_req) == 0)
return 0;
/* don't care for the reason here */
drbd_err(device, "submit failed, triggering re-connect\n");
out_free_e:
spin_lock_irq(&device->resource->req_lock);
list_del(&peer_req->w.list);
spin_unlock_irq(&device->resource->req_lock);
/* no drbd_rs_complete_io(), we are dropping the connection anyways */
put_ldev(device);
drbd_free_peer_req(device, peer_req);
return -EIO;
}
/*
* drbd_asb_recover_0p - Recover after split-brain with no remaining primaries
*/
static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold(local)
{
struct drbd_device *device = peer_device->device;
int self, peer, rv = -100;
unsigned long ch_self, ch_peer;
enum drbd_after_sb_p after_sb_0p;
self = device->ldev->md.uuid[UI_BITMAP] & 1;
peer = device->p_uuid[UI_BITMAP] & 1;
ch_peer = device->p_uuid[UI_SIZE];
ch_self = device->comm_bm_set;
rcu_read_lock();
after_sb_0p = rcu_dereference(peer_device->connection->net_conf)->after_sb_0p;
rcu_read_unlock();
switch (after_sb_0p) {
case ASB_CONSENSUS:
case ASB_DISCARD_SECONDARY:
case ASB_CALL_HELPER:
case ASB_VIOLENTLY:
drbd_err(device, "Configuration error.\n");
break;
case ASB_DISCONNECT:
break;
case ASB_DISCARD_YOUNGER_PRI:
if (self == 0 && peer == 1) {
rv = -1;
break;
}
if (self == 1 && peer == 0) {
rv = 1;
break;
}
fallthrough; /* to one of the other strategies */
case ASB_DISCARD_OLDER_PRI:
if (self == 0 && peer == 1) {
rv = 1;
break;
}
if (self == 1 && peer == 0) {
rv = -1;
break;
}
/* Else fall through to one of the other strategies... */
drbd_warn(device, "Discard younger/older primary did not find a decision\n"
"Using discard-least-changes instead\n");
fallthrough;
case ASB_DISCARD_ZERO_CHG:
if (ch_peer == 0 && ch_self == 0) {
rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
? -1 : 1;
break;
} else {
if (ch_peer == 0) { rv = 1; break; }
if (ch_self == 0) { rv = -1; break; }
}
if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
break;
fallthrough;
case ASB_DISCARD_LEAST_CHG:
if (ch_self < ch_peer)
rv = -1;
else if (ch_self > ch_peer)
rv = 1;
else /* ( ch_self == ch_peer ) */
/* Well, then use something else. */
rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
? -1 : 1;
break;
case ASB_DISCARD_LOCAL:
rv = -1;
break;
case ASB_DISCARD_REMOTE:
rv = 1;
}
return rv;
}
/*
* drbd_asb_recover_1p - Recover after split-brain with one remaining primary
*/
static int drbd_asb_recover_1p(struct drbd_peer_device *peer_device) __must_hold(local)
{
struct drbd_device *device = peer_device->device;
int hg, rv = -100;
enum drbd_after_sb_p after_sb_1p;
rcu_read_lock();
after_sb_1p = rcu_dereference(peer_device->connection->net_conf)->after_sb_1p;
rcu_read_unlock();
switch (after_sb_1p) {
case ASB_DISCARD_YOUNGER_PRI:
case ASB_DISCARD_OLDER_PRI:
case ASB_DISCARD_LEAST_CHG:
case ASB_DISCARD_LOCAL:
case ASB_DISCARD_REMOTE:
case ASB_DISCARD_ZERO_CHG:
drbd_err(device, "Configuration error.\n");
break;
case ASB_DISCONNECT:
break;
case ASB_CONSENSUS:
hg = drbd_asb_recover_0p(peer_device);
if (hg == -1 && device->state.role == R_SECONDARY)
rv = hg;
if (hg == 1 && device->state.role == R_PRIMARY)
rv = hg;
break;
case ASB_VIOLENTLY:
rv = drbd_asb_recover_0p(peer_device);
break;
case ASB_DISCARD_SECONDARY:
return device->state.role == R_PRIMARY ? 1 : -1;
case ASB_CALL_HELPER:
hg = drbd_asb_recover_0p(peer_device);
if (hg == -1 && device->state.role == R_PRIMARY) {
enum drbd_state_rv rv2;
/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
* we might be here in C_WF_REPORT_PARAMS which is transient.
* we do not need to wait for the after state change work either. */
rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
if (rv2 != SS_SUCCESS) {
drbd_khelper(device, "pri-lost-after-sb");
} else {
drbd_warn(device, "Successfully gave up primary role.\n");
rv = hg;
}
} else
rv = hg;
}
return rv;
}
/*
* drbd_asb_recover_2p - Recover after split-brain with two remaining primaries
*/
static int drbd_asb_recover_2p(struct drbd_peer_device *peer_device) __must_hold(local)
{
struct drbd_device *device = peer_device->device;
int hg, rv = -100;
enum drbd_after_sb_p after_sb_2p;
rcu_read_lock();
after_sb_2p = rcu_dereference(peer_device->connection->net_conf)->after_sb_2p;
rcu_read_unlock();
switch (after_sb_2p) {
case ASB_DISCARD_YOUNGER_PRI:
case ASB_DISCARD_OLDER_PRI:
case ASB_DISCARD_LEAST_CHG:
case ASB_DISCARD_LOCAL:
case ASB_DISCARD_REMOTE:
case ASB_CONSENSUS:
case ASB_DISCARD_SECONDARY:
case ASB_DISCARD_ZERO_CHG:
drbd_err(device, "Configuration error.\n");
break;
case ASB_VIOLENTLY:
rv = drbd_asb_recover_0p(peer_device);
break;
case ASB_DISCONNECT:
break;
case ASB_CALL_HELPER:
hg = drbd_asb_recover_0p(peer_device);
if (hg == -1) {
enum drbd_state_rv rv2;
/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
* we might be here in C_WF_REPORT_PARAMS which is transient.
* we do not need to wait for the after state change work either. */
rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
if (rv2 != SS_SUCCESS) {
drbd_khelper(device, "pri-lost-after-sb");
} else {
drbd_warn(device, "Successfully gave up primary role.\n");
rv = hg;
}
} else
rv = hg;
}
return rv;
}
static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid,
u64 bits, u64 flags)
{
if (!uuid) {
drbd_info(device, "%s uuid info vanished while I was looking!\n", text);
return;
}
drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
text,
(unsigned long long)uuid[UI_CURRENT],
(unsigned long long)uuid[UI_BITMAP],
(unsigned long long)uuid[UI_HISTORY_START],
(unsigned long long)uuid[UI_HISTORY_END],
(unsigned long long)bits,
(unsigned long long)flags);
}
/*
100 after split brain try auto recover
2 C_SYNC_SOURCE set BitMap
1 C_SYNC_SOURCE use BitMap
0 no Sync
-1 C_SYNC_TARGET use BitMap
-2 C_SYNC_TARGET set BitMap
-100 after split brain, disconnect
-1000 unrelated data
-1091 requires proto 91
-1096 requires proto 96
*/
static int drbd_uuid_compare(struct drbd_peer_device *const peer_device,
enum drbd_role const peer_role, int *rule_nr) __must_hold(local)
{
struct drbd_connection *const connection = peer_device->connection;
struct drbd_device *device = peer_device->device;
u64 self, peer;
int i, j;
self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
*rule_nr = 10;
if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
return 0;
*rule_nr = 20;
if ((self == UUID_JUST_CREATED || self == (u64)0) &&
peer != UUID_JUST_CREATED)
return -2;
*rule_nr = 30;
if (self != UUID_JUST_CREATED &&
(peer == UUID_JUST_CREATED || peer == (u64)0))
return 2;
if (self == peer) {
int rct, dc; /* roles at crash time */
if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) {
if (connection->agreed_pro_version < 91)
return -1091;
if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
(device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
drbd_info(device, "was SyncSource, missed the resync finished event, corrected myself:\n");
drbd_uuid_move_history(device);
device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
device->ldev->md.uuid[UI_BITMAP] = 0;
drbd_uuid_dump(device, "self", device->ldev->md.uuid,
device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
*rule_nr = 34;
} else {
drbd_info(device, "was SyncSource (peer failed to write sync_uuid)\n");
*rule_nr = 36;
}
return 1;
}
if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) {
if (connection->agreed_pro_version < 91)
return -1091;
if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) &&
(device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
drbd_info(device, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
device->p_uuid[UI_HISTORY_START + 1] = device->p_uuid[UI_HISTORY_START];
device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_BITMAP];
device->p_uuid[UI_BITMAP] = 0UL;
drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
*rule_nr = 35;
} else {
drbd_info(device, "was SyncTarget (failed to write sync_uuid)\n");
*rule_nr = 37;
}
return -1;
}
/* Common power [off|failure] */
rct = (test_bit(CRASHED_PRIMARY, &device->flags) ? 1 : 0) +
(device->p_uuid[UI_FLAGS] & 2);
/* lowest bit is set when we were primary,
* next bit (weight 2) is set when peer was primary */
*rule_nr = 40;
/* Neither has the "crashed primary" flag set,
* only a replication link hiccup. */
if (rct == 0)
return 0;
/* Current UUID equal and no bitmap uuid; does not necessarily
* mean this was a "simultaneous hard crash", maybe IO was
* frozen, so no UUID-bump happened.
* This is a protocol change, overload DRBD_FF_WSAME as flag
* for "new-enough" peer DRBD version. */
if (device->state.role == R_PRIMARY || peer_role == R_PRIMARY) {
*rule_nr = 41;
if (!(connection->agreed_features & DRBD_FF_WSAME)) {
drbd_warn(peer_device, "Equivalent unrotated UUIDs, but current primary present.\n");
return -(0x10000 | PRO_VERSION_MAX | (DRBD_FF_WSAME << 8));
}
if (device->state.role == R_PRIMARY && peer_role == R_PRIMARY) {
/* At least one has the "crashed primary" bit set,
* both are primary now, but neither has rotated its UUIDs?
* "Can not happen." */
drbd_err(peer_device, "Equivalent unrotated UUIDs, but both are primary. Can not resolve this.\n");
return -100;
}
if (device->state.role == R_PRIMARY)
return 1;
return -1;
}
/* Both are secondary.
* Really looks like recovery from simultaneous hard crash.
* Check which had been primary before, and arbitrate. */
switch (rct) {
case 0: /* !self_pri && !peer_pri */ return 0; /* already handled */
case 1: /* self_pri && !peer_pri */ return 1;
case 2: /* !self_pri && peer_pri */ return -1;
case 3: /* self_pri && peer_pri */
dc = test_bit(RESOLVE_CONFLICTS, &connection->flags);
return dc ? -1 : 1;
}
}
*rule_nr = 50;
peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
if (self == peer)
return -1;
*rule_nr = 51;
peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1);
if (self == peer) {
if (connection->agreed_pro_version < 96 ?
(device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
(device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) {
/* The last P_SYNC_UUID did not get through. Undo the peer's UUID
modifications from the last start of a resync as sync source. */
if (connection->agreed_pro_version < 91)
return -1091;
device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START];
device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_HISTORY_START + 1];
drbd_info(device, "Lost last syncUUID packet, corrected:\n");
drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
return -1;
}
}
*rule_nr = 60;
self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
peer = device->p_uuid[i] & ~((u64)1);
if (self == peer)
return -2;
}
*rule_nr = 70;
self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
if (self == peer)
return 1;
*rule_nr = 71;
self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
if (self == peer) {
if (connection->agreed_pro_version < 96 ?
(device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
(device->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
/* The last P_SYNC_UUID did not get through. Undo our own UUID
modifications from the last start of a resync as sync source. */
if (connection->agreed_pro_version < 91)
return -1091;
__drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]);
__drbd_uuid_set(device, UI_HISTORY_START, device->ldev->md.uuid[UI_HISTORY_START + 1]);
drbd_info(device, "Last syncUUID did not get through, corrected:\n");
drbd_uuid_dump(device, "self", device->ldev->md.uuid,
device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
return 1;
}
}
*rule_nr = 80;
peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
self = device->ldev->md.uuid[i] & ~((u64)1);
if (self == peer)
return 2;
}
*rule_nr = 90;
self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
if (self == peer && self != ((u64)0))
return 100;
*rule_nr = 100;
for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
self = device->ldev->md.uuid[i] & ~((u64)1);
for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
peer = device->p_uuid[j] & ~((u64)1);
if (self == peer)
return -100;
}
}
return -1000;
}
/* drbd_sync_handshake() returns the new conn state on success, or
CONN_MASK (-1) on failure.
*/
static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
enum drbd_role peer_role,
enum drbd_disk_state peer_disk) __must_hold(local)
{
struct drbd_device *device = peer_device->device;
enum drbd_conns rv = C_MASK;
enum drbd_disk_state mydisk;
struct net_conf *nc;
int hg, rule_nr, rr_conflict, tentative, always_asbp;
mydisk = device->state.disk;
if (mydisk == D_NEGOTIATING)
mydisk = device->new_state_tmp.disk;
drbd_info(device, "drbd_sync_handshake:\n");
spin_lock_irq(&device->ldev->md.uuid_lock);
drbd_uuid_dump(device, "self", device->ldev->md.uuid, device->comm_bm_set, 0);
drbd_uuid_dump(device, "peer", device->p_uuid,
device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
hg = drbd_uuid_compare(peer_device, peer_role, &rule_nr);
spin_unlock_irq(&device->ldev->md.uuid_lock);
drbd_info(device, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
if (hg == -1000) {
drbd_alert(device, "Unrelated data, aborting!\n");
return C_MASK;
}
if (hg < -0x10000) {
int proto, fflags;
hg = -hg;
proto = hg & 0xff;
fflags = (hg >> 8) & 0xff;
drbd_alert(device, "To resolve this both sides have to support at least protocol %d and feature flags 0x%x\n",
proto, fflags);
return C_MASK;
}
if (hg < -1000) {
drbd_alert(device, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
return C_MASK;
}
if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
(peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
int f = (hg == -100) || abs(hg) == 2;
hg = mydisk > D_INCONSISTENT ? 1 : -1;
if (f)
hg = hg*2;
drbd_info(device, "Becoming sync %s due to disk states.\n",
hg > 0 ? "source" : "target");
}
if (abs(hg) == 100)
drbd_khelper(device, "initial-split-brain");
rcu_read_lock();
nc = rcu_dereference(peer_device->connection->net_conf);
always_asbp = nc->always_asbp;
rr_conflict = nc->rr_conflict;
tentative = nc->tentative;
rcu_read_unlock();
if (hg == 100 || (hg == -100 && always_asbp)) {
int pcount = (device->state.role == R_PRIMARY)
+ (peer_role == R_PRIMARY);
int forced = (hg == -100);
switch (pcount) {
case 0:
hg = drbd_asb_recover_0p(peer_device);
break;
case 1:
hg = drbd_asb_recover_1p(peer_device);
break;
case 2:
hg = drbd_asb_recover_2p(peer_device);
break;
}
if (abs(hg) < 100) {
drbd_warn(device, "Split-Brain detected, %d primaries, "
"automatically solved. Sync from %s node\n",
pcount, (hg < 0) ? "peer" : "this");
if (forced) {
drbd_warn(device, "Doing a full sync, since"
" UUIDs where ambiguous.\n");
hg = hg*2;
}
}
}
if (hg == -100) {
if (test_bit(DISCARD_MY_DATA, &device->flags) && !(device->p_uuid[UI_FLAGS]&1))
hg = -1;
if (!test_bit(DISCARD_MY_DATA, &device->flags) && (device->p_uuid[UI_FLAGS]&1))
hg = 1;
if (abs(hg) < 100)
drbd_warn(device, "Split-Brain detected, manually solved. "
"Sync from %s node\n",
(hg < 0) ? "peer" : "this");
}
if (hg == -100) {
/* FIXME this log message is not correct if we end up here
* after an attempted attach on a diskless node.
* We just refuse to attach -- well, we drop the "connection"
* to that disk, in a way... */
drbd_alert(device, "Split-Brain detected but unresolved, dropping connection!\n");
drbd_khelper(device, "split-brain");
return C_MASK;
}
if (hg > 0 && mydisk <= D_INCONSISTENT) {
drbd_err(device, "I shall become SyncSource, but I am inconsistent!\n");
return C_MASK;
}
if (hg < 0 && /* by intention we do not use mydisk here. */
device->state.role == R_PRIMARY && device->state.disk >= D_CONSISTENT) {
switch (rr_conflict) {
case ASB_CALL_HELPER:
drbd_khelper(device, "pri-lost");
fallthrough;
case ASB_DISCONNECT:
drbd_err(device, "I shall become SyncTarget, but I am primary!\n");
return C_MASK;
case ASB_VIOLENTLY:
drbd_warn(device, "Becoming SyncTarget, violating the stable-data"
"assumption\n");
}
}
if (tentative || test_bit(CONN_DRY_RUN, &peer_device->connection->flags)) {
if (hg == 0)
drbd_info(device, "dry-run connect: No resync, would become Connected immediately.\n");
else
drbd_info(device, "dry-run connect: Would become %s, doing a %s resync.",
drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
abs(hg) >= 2 ? "full" : "bit-map based");
return C_MASK;
}
if (abs(hg) >= 2) {
drbd_info(device, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
BM_LOCKED_SET_ALLOWED, NULL))
return C_MASK;
}
if (hg > 0) { /* become sync source. */
rv = C_WF_BITMAP_S;
} else if (hg < 0) { /* become sync target */
rv = C_WF_BITMAP_T;
} else {
rv = C_CONNECTED;
if (drbd_bm_total_weight(device)) {
drbd_info(device, "No resync, but %lu bits in bitmap!\n",
drbd_bm_total_weight(device));
}
}
return rv;
}
static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
{
/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
if (peer == ASB_DISCARD_REMOTE)
return ASB_DISCARD_LOCAL;
/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
if (peer == ASB_DISCARD_LOCAL)
return ASB_DISCARD_REMOTE;
/* everything else is valid if they are equal on both sides. */
return peer;
}
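/*
* Example: if our after-sb-0pri is "discard-local", a compatible peer is
* configured with "discard-remote"; convert_after_sb(ASB_DISCARD_REMOTE)
* returns ASB_DISCARD_LOCAL, which matches our local setting in the
* comparison done by receive_protocol(). Any other asymmetric pairing of
* these two values fails that comparison and leads to a disconnect.
*/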
static int receive_protocol(struct drbd_connection *connection, struct packet_info *pi)
{
struct p_protocol *p = pi->data;
enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
int p_proto, p_discard_my_data, p_two_primaries, cf;
struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
char integrity_alg[SHARED_SECRET_MAX] = "";
struct crypto_shash *peer_integrity_tfm = NULL;
void *int_dig_in = NULL, *int_dig_vv = NULL;
p_proto = be32_to_cpu(p->protocol);
p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
p_two_primaries = be32_to_cpu(p->two_primaries);
cf = be32_to_cpu(p->conn_flags);
p_discard_my_data = cf & CF_DISCARD_MY_DATA;
if (connection->agreed_pro_version >= 87) {
int err;
if (pi->size > sizeof(integrity_alg))
return -EIO;
err = drbd_recv_all(connection, integrity_alg, pi->size);
if (err)
return err;
integrity_alg[SHARED_SECRET_MAX - 1] = 0;
}
if (pi->cmd != P_PROTOCOL_UPDATE) {
clear_bit(CONN_DRY_RUN, &connection->flags);
if (cf & CF_DRY_RUN)
set_bit(CONN_DRY_RUN, &connection->flags);
rcu_read_lock();
nc = rcu_dereference(connection->net_conf);
if (p_proto != nc->wire_protocol) {
drbd_err(connection, "incompatible %s settings\n", "protocol");
goto disconnect_rcu_unlock;
}
if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
drbd_err(connection, "incompatible %s settings\n", "after-sb-0pri");
goto disconnect_rcu_unlock;
}
if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
drbd_err(connection, "incompatible %s settings\n", "after-sb-1pri");
goto disconnect_rcu_unlock;
}
if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
drbd_err(connection, "incompatible %s settings\n", "after-sb-2pri");
goto disconnect_rcu_unlock;
}
if (p_discard_my_data && nc->discard_my_data) {
drbd_err(connection, "incompatible %s settings\n", "discard-my-data");
goto disconnect_rcu_unlock;
}
if (p_two_primaries != nc->two_primaries) {
drbd_err(connection, "incompatible %s settings\n", "allow-two-primaries");
goto disconnect_rcu_unlock;
}
if (strcmp(integrity_alg, nc->integrity_alg)) {
drbd_err(connection, "incompatible %s settings\n", "data-integrity-alg");
goto disconnect_rcu_unlock;
}
rcu_read_unlock();
}
if (integrity_alg[0]) {
int hash_size;
/*
* We can only change the peer data integrity algorithm
* here. Changing our own data integrity algorithm
* requires that we send a P_PROTOCOL_UPDATE packet at
* the same time; otherwise, the peer has no way to
* tell between which packets the algorithm should
* change.
*/
peer_integrity_tfm = crypto_alloc_shash(integrity_alg, 0, 0);
if (IS_ERR(peer_integrity_tfm)) {
peer_integrity_tfm = NULL;
drbd_err(connection, "peer data-integrity-alg %s not supported\n",
integrity_alg);
goto disconnect;
}
hash_size = crypto_shash_digestsize(peer_integrity_tfm);
int_dig_in = kmalloc(hash_size, GFP_KERNEL);
int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
if (!(int_dig_in && int_dig_vv)) {
drbd_err(connection, "Allocation of buffers for data integrity checking failed\n");
goto disconnect;
}
}
new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
if (!new_net_conf)
goto disconnect;
mutex_lock(&connection->data.mutex);
mutex_lock(&connection->resource->conf_update);
old_net_conf = connection->net_conf;
*new_net_conf = *old_net_conf;
new_net_conf->wire_protocol = p_proto;
new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
new_net_conf->two_primaries = p_two_primaries;
rcu_assign_pointer(connection->net_conf, new_net_conf);
mutex_unlock(&connection->resource->conf_update);
mutex_unlock(&connection->data.mutex);
crypto_free_shash(connection->peer_integrity_tfm);
kfree(connection->int_dig_in);
kfree(connection->int_dig_vv);
connection->peer_integrity_tfm = peer_integrity_tfm;
connection->int_dig_in = int_dig_in;
connection->int_dig_vv = int_dig_vv;
if (strcmp(old_net_conf->integrity_alg, integrity_alg))
drbd_info(connection, "peer data-integrity-alg: %s\n",
integrity_alg[0] ? integrity_alg : "(none)");
kvfree_rcu_mightsleep(old_net_conf);
return 0;
disconnect_rcu_unlock:
rcu_read_unlock();
disconnect:
crypto_free_shash(peer_integrity_tfm);
kfree(int_dig_in);
kfree(int_dig_vv);
conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
return -EIO;
}
/* helper function
* input: alg name, feature name
* return: NULL (alg name was "")
* ERR_PTR(error) if something goes wrong
* or the crypto hash ptr, if it worked out ok. */
static struct crypto_shash *drbd_crypto_alloc_digest_safe(
const struct drbd_device *device,
const char *alg, const char *name)
{
struct crypto_shash *tfm;
if (!alg[0])
return NULL;
tfm = crypto_alloc_shash(alg, 0, 0);
if (IS_ERR(tfm)) {
drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n",
alg, name, PTR_ERR(tfm));
return tfm;
}
return tfm;
}
static int ignore_remaining_packet(struct drbd_connection *connection, struct packet_info *pi)
{
void *buffer = connection->data.rbuf;
int size = pi->size;
while (size) {
int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
s = drbd_recv(connection, buffer, s);
if (s <= 0) {
if (s < 0)
return s;
break;
}
size -= s;
}
if (size)
return -EIO;
return 0;
}
/*
* config_unknown_volume - device configuration command for unknown volume
*
* When a device is added to an existing connection, the node on which the
* device is added first will send configuration commands to its peer but the
* peer will not know about the device yet. It will warn and ignore these
* commands. Once the device is added on the second node, the second node will
* send the same device configuration commands, but in the other direction.
*
* (We can also end up here if drbd is misconfigured.)
*/
static int config_unknown_volume(struct drbd_connection *connection, struct packet_info *pi)
{
drbd_warn(connection, "%s packet received for volume %u, which is not configured locally\n",
cmdname(pi->cmd), pi->vnr);
return ignore_remaining_packet(connection, pi);
}
static int receive_SyncParam(struct drbd_connection *connection, struct packet_info *pi)
{
struct drbd_peer_device *peer_device;
struct drbd_device *device;
struct p_rs_param_95 *p;
unsigned int header_size, data_size, exp_max_sz;
struct crypto_shash *verify_tfm = NULL;
struct crypto_shash *csums_tfm = NULL;
struct net_conf *old_net_conf, *new_net_conf = NULL;
struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
const int apv = connection->agreed_pro_version;
struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
unsigned int fifo_size = 0;
int err;
peer_device = conn_peer_device(connection, pi->vnr);
if (!peer_device)
return config_unknown_volume(connection, pi);
device = peer_device->device;
exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
: apv == 88 ? sizeof(struct p_rs_param)
+ SHARED_SECRET_MAX
: apv <= 94 ? sizeof(struct p_rs_param_89)
: /* apv >= 95 */ sizeof(struct p_rs_param_95);
if (pi->size > exp_max_sz) {
drbd_err(device, "SyncParam packet too long: received %u, expected <= %u bytes\n",
pi->size, exp_max_sz);
return -EIO;
}
if (apv <= 88) {
header_size = sizeof(struct p_rs_param);
data_size = pi->size - header_size;
} else if (apv <= 94) {
header_size = sizeof(struct p_rs_param_89);
data_size = pi->size - header_size;
D_ASSERT(device, data_size == 0);
} else {
header_size = sizeof(struct p_rs_param_95);
data_size = pi->size - header_size;
D_ASSERT(device, data_size == 0);
}
/* initialize verify_alg and csums_alg */
p = pi->data;
BUILD_BUG_ON(sizeof(p->algs) != 2 * SHARED_SECRET_MAX);
memset(&p->algs, 0, sizeof(p->algs));
err = drbd_recv_all(peer_device->connection, p, header_size);
if (err)
return err;
mutex_lock(&connection->resource->conf_update);
old_net_conf = peer_device->connection->net_conf;
if (get_ldev(device)) {
new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
if (!new_disk_conf) {
put_ldev(device);
mutex_unlock(&connection->resource->conf_update);
drbd_err(device, "Allocation of new disk_conf failed\n");
return -ENOMEM;
}
old_disk_conf = device->ldev->disk_conf;
*new_disk_conf = *old_disk_conf;
new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
}
if (apv >= 88) {
if (apv == 88) {
if (data_size > SHARED_SECRET_MAX || data_size == 0) {
drbd_err(device, "verify-alg of wrong size, "
"peer wants %u, accepting only up to %u byte\n",
data_size, SHARED_SECRET_MAX);
goto reconnect;
}
err = drbd_recv_all(peer_device->connection, p->verify_alg, data_size);
if (err)
goto reconnect;
/* we expect NUL terminated string */
/* but just in case someone tries to be evil */
D_ASSERT(device, p->verify_alg[data_size-1] == 0);
p->verify_alg[data_size-1] = 0;
} else /* apv >= 89 */ {
/* we still expect NUL terminated strings */
/* but just in case someone tries to be evil */
D_ASSERT(device, p->verify_alg[SHARED_SECRET_MAX-1] == 0);
D_ASSERT(device, p->csums_alg[SHARED_SECRET_MAX-1] == 0);
p->verify_alg[SHARED_SECRET_MAX-1] = 0;
p->csums_alg[SHARED_SECRET_MAX-1] = 0;
}
if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
if (device->state.conn == C_WF_REPORT_PARAMS) {
drbd_err(device, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
old_net_conf->verify_alg, p->verify_alg);
goto disconnect;
}
verify_tfm = drbd_crypto_alloc_digest_safe(device,
p->verify_alg, "verify-alg");
if (IS_ERR(verify_tfm)) {
verify_tfm = NULL;
goto disconnect;
}
}
if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
if (device->state.conn == C_WF_REPORT_PARAMS) {
drbd_err(device, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
old_net_conf->csums_alg, p->csums_alg);
goto disconnect;
}
csums_tfm = drbd_crypto_alloc_digest_safe(device,
p->csums_alg, "csums-alg");
if (IS_ERR(csums_tfm)) {
csums_tfm = NULL;
goto disconnect;
}
}
if (apv > 94 && new_disk_conf) {
new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
if (fifo_size != device->rs_plan_s->size) {
new_plan = fifo_alloc(fifo_size);
if (!new_plan) {
drbd_err(device, "kmalloc of fifo_buffer failed");
put_ldev(device);
goto disconnect;
}
}
}
if (verify_tfm || csums_tfm) {
new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
if (!new_net_conf)
goto disconnect;
*new_net_conf = *old_net_conf;
if (verify_tfm) {
strcpy(new_net_conf->verify_alg, p->verify_alg);
new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
crypto_free_shash(peer_device->connection->verify_tfm);
peer_device->connection->verify_tfm = verify_tfm;
drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg);
}
if (csums_tfm) {
strcpy(new_net_conf->csums_alg, p->csums_alg);
new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
crypto_free_shash(peer_device->connection->csums_tfm);
peer_device->connection->csums_tfm = csums_tfm;
drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg);
}
rcu_assign_pointer(connection->net_conf, new_net_conf);
}
}
if (new_disk_conf) {
rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
put_ldev(device);
}
if (new_plan) {
old_plan = device->rs_plan_s;
rcu_assign_pointer(device->rs_plan_s, new_plan);
}
mutex_unlock(&connection->resource->conf_update);
synchronize_rcu();
if (new_net_conf)
kfree(old_net_conf);
kfree(old_disk_conf);
kfree(old_plan);
return 0;
reconnect:
if (new_disk_conf) {
put_ldev(device);
kfree(new_disk_conf);
}
mutex_unlock(&connection->resource->conf_update);
return -EIO;
disconnect:
kfree(new_plan);
if (new_disk_conf) {
put_ldev(device);
kfree(new_disk_conf);
}
mutex_unlock(&connection->resource->conf_update);
/* just for completeness: actually not needed,
* as this is not reached if csums_tfm was ok. */
crypto_free_shash(csums_tfm);
/* but free the verify_tfm again, if csums_tfm did not work out */
crypto_free_shash(verify_tfm);
conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
return -EIO;
}
/* warn if the arguments differ by more than 12.5% */
static void warn_if_differ_considerably(struct drbd_device *device,
const char *s, sector_t a, sector_t b)
{
sector_t d;
if (a == 0 || b == 0)
return;
d = (a > b) ? (a - b) : (b - a);
if (d > (a>>3) || d > (b>>3))
drbd_warn(device, "Considerable difference in %s: %llus vs. %llus\n", s,
(unsigned long long)a, (unsigned long long)b);
}
static int receive_sizes(struct drbd_connection *connection, struct packet_info *pi)
{
struct drbd_peer_device *peer_device;
struct drbd_device *device;
struct p_sizes *p = pi->data;
struct o_qlim *o = (connection->agreed_features & DRBD_FF_WSAME) ? p->qlim : NULL;
enum determine_dev_size dd = DS_UNCHANGED;
sector_t p_size, p_usize, p_csize, my_usize;
sector_t new_size, cur_size;
int ldsc = 0; /* local disk size changed */
enum dds_flags ddsf;
peer_device = conn_peer_device(connection, pi->vnr);
if (!peer_device)
return config_unknown_volume(connection, pi);
device = peer_device->device;
cur_size = get_capacity(device->vdisk);
p_size = be64_to_cpu(p->d_size);
p_usize = be64_to_cpu(p->u_size);
p_csize = be64_to_cpu(p->c_size);
/* just store the peer's disk size for now.
* we still need to figure out whether we accept that. */
device->p_size = p_size;
if (get_ldev(device)) {
rcu_read_lock();
my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size;
rcu_read_unlock();
warn_if_differ_considerably(device, "lower level device sizes",
p_size, drbd_get_max_capacity(device->ldev));
warn_if_differ_considerably(device, "user requested size",
p_usize, my_usize);
/* if this is the first connect, or an otherwise expected
* param exchange, choose the minimum */
if (device->state.conn == C_WF_REPORT_PARAMS)
p_usize = min_not_zero(my_usize, p_usize);
/* Never shrink a device with usable data during connect,
* or "attach" on the peer.
* But allow online shrinking if we are connected. */
new_size = drbd_new_dev_size(device, device->ldev, p_usize, 0);
if (new_size < cur_size &&
device->state.disk >= D_OUTDATED &&
(device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS)) {
drbd_err(device, "The peer's disk size is too small! (%llu < %llu sectors)\n",
(unsigned long long)new_size, (unsigned long long)cur_size);
conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
put_ldev(device);
return -EIO;
}
if (my_usize != p_usize) {
struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
if (!new_disk_conf) {
put_ldev(device);
return -ENOMEM;
}
mutex_lock(&connection->resource->conf_update);
old_disk_conf = device->ldev->disk_conf;
*new_disk_conf = *old_disk_conf;
new_disk_conf->disk_size = p_usize;
rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
mutex_unlock(&connection->resource->conf_update);
kvfree_rcu_mightsleep(old_disk_conf);
drbd_info(device, "Peer sets u_size to %lu sectors (old: %lu)\n",
(unsigned long)p_usize, (unsigned long)my_usize);
}
put_ldev(device);
}
device->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
/* Leave drbd_reconsider_queue_parameters() before drbd_determine_dev_size().
In case we cleared the QUEUE_FLAG_DISCARD from our queue in
drbd_reconsider_queue_parameters(), we can be sure that after
drbd_determine_dev_size() no REQ_DISCARDs are in the queue. */
ddsf = be16_to_cpu(p->dds_flags);
if (get_ldev(device)) {
drbd_reconsider_queue_parameters(device, device->ldev, o);
dd = drbd_determine_dev_size(device, ddsf, NULL);
put_ldev(device);
if (dd == DS_ERROR)
return -EIO;
drbd_md_sync(device);
} else {
/*
* I am diskless, need to accept the peer's *current* size.
* I must NOT accept the peers backing disk size,
* it may have been larger than mine all along...
*
* At this point, the peer knows more about my disk, or at
* least about what we last agreed upon, than myself.
* So if his c_size is less than his d_size, the most likely
* reason is that *my* d_size was smaller last time we checked.
*
* However, if he sends a zero current size,
* take his (user-capped or) backing disk size anyways.
*
* Unless of course he does not have a disk himself.
* In which case we ignore this completely.
*/
sector_t new_size = p_csize ?: p_usize ?: p_size;
drbd_reconsider_queue_parameters(device, NULL, o);
if (new_size == 0) {
/* Ignore, the peer has no size information yet. */
} else if (new_size == cur_size) {
/* nothing to do */
} else if (cur_size != 0 && p_size == 0) {
drbd_warn(device, "Ignored diskless peer device size (peer:%llu != me:%llu sectors)!\n",
(unsigned long long)new_size, (unsigned long long)cur_size);
} else if (new_size < cur_size && device->state.role == R_PRIMARY) {
drbd_err(device, "The peer's device size is too small! (%llu < %llu sectors); demote me first!\n",
(unsigned long long)new_size, (unsigned long long)cur_size);
conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
return -EIO;
} else {
/* I believe the peer, if
* - I don't have a current size myself
* - we agree on the size anyways
* - I do have a current size, am Secondary,
* and he has the only disk
* - I do have a current size, am Primary,
* and he has the only disk,
* which is larger than my current size
*/
drbd_set_my_capacity(device, new_size);
}
}
if (get_ldev(device)) {
if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev)) {
device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
ldsc = 1;
}
put_ldev(device);
}
if (device->state.conn > C_WF_REPORT_PARAMS) {
if (be64_to_cpu(p->c_size) != get_capacity(device->vdisk) ||
ldsc) {
/* we have different sizes, probably peer
* needs to know my new size... */
drbd_send_sizes(peer_device, 0, ddsf);
}
if (test_and_clear_bit(RESIZE_PENDING, &device->flags) ||
(dd == DS_GREW && device->state.conn == C_CONNECTED)) {
if (device->state.pdsk >= D_INCONSISTENT &&
device->state.disk >= D_INCONSISTENT) {
if (ddsf & DDSF_NO_RESYNC)
drbd_info(device, "Resync of new storage suppressed with --assume-clean\n");
else
resync_after_online_grow(device);
} else
set_bit(RESYNC_AFTER_NEG, &device->flags);
}
}
return 0;
}
static int receive_uuids(struct drbd_connection *connection, struct packet_info *pi)
{
struct drbd_peer_device *peer_device;
struct drbd_device *device;
struct p_uuids *p = pi->data;
u64 *p_uuid;
int i, updated_uuids = 0;
peer_device = conn_peer_device(connection, pi->vnr);
if (!peer_device)
return config_unknown_volume(connection, pi);
device = peer_device->device;
p_uuid = kmalloc_array(UI_EXTENDED_SIZE, sizeof(*p_uuid), GFP_NOIO);
if (!p_uuid)
return false;
for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
p_uuid[i] = be64_to_cpu(p->uuid[i]);
kfree(device->p_uuid);
device->p_uuid = p_uuid;
if ((device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS) &&
device->state.disk < D_INCONSISTENT &&
device->state.role == R_PRIMARY &&
(device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
drbd_err(device, "Can only connect to data with current UUID=%016llX\n",
(unsigned long long)device->ed_uuid);
conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
return -EIO;
}
if (get_ldev(device)) {
int skip_initial_sync =
device->state.conn == C_CONNECTED &&
peer_device->connection->agreed_pro_version >= 90 &&
device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
(p_uuid[UI_FLAGS] & 8);
if (skip_initial_sync) {
drbd_info(device, "Accepted new current UUID, preparing to skip initial sync\n");
drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
"clear_n_write from receive_uuids",
BM_LOCKED_TEST_ALLOWED, NULL);
_drbd_uuid_set(device, UI_CURRENT, p_uuid[UI_CURRENT]);
_drbd_uuid_set(device, UI_BITMAP, 0);
_drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
CS_VERBOSE, NULL);
drbd_md_sync(device);
updated_uuids = 1;
}
put_ldev(device);
} else if (device->state.disk < D_INCONSISTENT &&
device->state.role == R_PRIMARY) {
/* I am a diskless primary, the peer just created a new current UUID
for me. */
updated_uuids = drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
}
/* Before we test for the disk state, we should wait until a possibly
ongoing cluster-wide state change is finished. That is important if
we are primary and are detaching from our disk. We need to see the
new disk state... */
mutex_lock(device->state_mutex);
mutex_unlock(device->state_mutex);
if (device->state.conn >= C_CONNECTED && device->state.disk < D_INCONSISTENT)
updated_uuids |= drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
if (updated_uuids)
drbd_print_uuids(device, "receiver updated UUIDs to");
return 0;
}
/**
* convert_state() - Converts the peer's view of the cluster state to our point of view
* @ps: The state as seen by the peer.
*/
static union drbd_state convert_state(union drbd_state ps)
{
union drbd_state ms;
static enum drbd_conns c_tab[] = {
[C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
[C_CONNECTED] = C_CONNECTED,
[C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
[C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
[C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
[C_VERIFY_S] = C_VERIFY_T,
[C_MASK] = C_MASK,
};
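/* Mirror the peer's view: its role becomes our notion of "peer", its disk
* our "pdsk" (and vice versa); connection states map to their counterpart
* via c_tab above. */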
ms.i = ps.i;
ms.conn = c_tab[ps.conn];
ms.peer = ps.role;
ms.role = ps.peer;
ms.pdsk = ps.disk;
ms.disk = ps.pdsk;
ms.peer_isp = (ps.aftr_isp | ps.user_isp);
return ms;
}
static int receive_req_state(struct drbd_connection *connection, struct packet_info *pi)
{
struct drbd_peer_device *peer_device;
struct drbd_device *device;
struct p_req_state *p = pi->data;
union drbd_state mask, val;
enum drbd_state_rv rv;
peer_device = conn_peer_device(connection, pi->vnr);
if (!peer_device)
return -EIO;
device = peer_device->device;
mask.i = be32_to_cpu(p->mask);
val.i = be32_to_cpu(p->val);
if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) &&
mutex_is_locked(device->state_mutex)) {
drbd_send_sr_reply(peer_device, SS_CONCURRENT_ST_CHG);
return 0;
}
mask = convert_state(mask);
val = convert_state(val);
rv = drbd_change_state(device, CS_VERBOSE, mask, val);
drbd_send_sr_reply(peer_device, rv);
drbd_md_sync(device);
return 0;
}
static int receive_req_conn_state(struct drbd_connection *connection, struct packet_info *pi)
{
struct p_req_state *p = pi->data;
union drbd_state mask, val;
enum drbd_state_rv rv;
mask.i = be32_to_cpu(p->mask);
val.i = be32_to_cpu(p->val);
if (test_bit(RESOLVE_CONFLICTS, &connection->flags) &&
mutex_is_locked(&connection->cstate_mutex)) {
conn_send_sr_reply(connection, SS_CONCURRENT_ST_CHG);
return 0;
}
mask = convert_state(mask);
val = convert_state(val);
rv = conn_request_state(connection, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
conn_send_sr_reply(connection, rv);
return 0;
}
static int receive_state(struct drbd_connection *connection, struct packet_info *pi)
{
struct drbd_peer_device *peer_device;
struct drbd_device *device;
struct p_state *p = pi->data;
union drbd_state os, ns, peer_state;
enum drbd_disk_state real_peer_disk;
enum chg_state_flags cs_flags;
int rv;
peer_device = conn_peer_device(connection, pi->vnr);
if (!peer_device)
return config_unknown_volume(connection, pi);
device = peer_device->device;
peer_state.i = be32_to_cpu(p->state);
real_peer_disk = peer_state.disk;
if (peer_state.disk == D_NEGOTIATING) {
real_peer_disk = device->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
drbd_info(device, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
}
spin_lock_irq(&device->resource->req_lock);
retry:
os = ns = drbd_read_state(device);
spin_unlock_irq(&device->resource->req_lock);
/* If some other part of the code (ack_receiver thread, timeout)
* already decided to close the connection again,
* we must not "re-establish" it here. */
if (os.conn <= C_TEAR_DOWN)
return -ECONNRESET;
/* If this is the "end of sync" confirmation, usually the peer disk
* transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
* set) resync started in PausedSyncT, or if the timing of pause-/
* unpause-sync events has been "just right", the peer disk may
* transition from D_CONSISTENT to D_UP_TO_DATE as well.
*/
if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
real_peer_disk == D_UP_TO_DATE &&
os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
/* If we are (becoming) SyncSource, but peer is still in sync
* preparation, ignore its uptodate-ness to avoid flapping, it
* will change to inconsistent once the peer reaches active
* syncing states.
* It may have changed syncer-paused flags, however, so we
* cannot ignore this completely. */
if (peer_state.conn > C_CONNECTED &&
peer_state.conn < C_SYNC_SOURCE)
real_peer_disk = D_INCONSISTENT;
/* if peer_state changes to connected at the same time,
* it explicitly notifies us that it finished resync.
* Maybe we should finish it up, too? */
else if (os.conn >= C_SYNC_SOURCE &&
peer_state.conn == C_CONNECTED) {
if (drbd_bm_total_weight(device) <= device->rs_failed)
drbd_resync_finished(peer_device);
return 0;
}
}
/* explicit verify finished notification, stop sector reached. */
if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
ov_out_of_sync_print(peer_device);
drbd_resync_finished(peer_device);
return 0;
}
/* peer says his disk is inconsistent, while we think it is uptodate,
* and this happens while the peer still thinks we have a sync going on,
* but we think we are already done with the sync.
* We ignore this to avoid flapping pdsk.
* This should not happen, if the peer is a recent version of drbd. */
if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
real_peer_disk = D_UP_TO_DATE;
if (ns.conn == C_WF_REPORT_PARAMS)
ns.conn = C_CONNECTED;
if (peer_state.conn == C_AHEAD)
ns.conn = C_BEHIND;
/* TODO:
* if (primary and diskless and peer uuid != effective uuid)
* abort attach on peer;
*
* If this node does not have good data, was already connected, but
* the peer did a late attach only now, trying to "negotiate" with me,
* AND I am currently Primary, possibly frozen, with some specific
* "effective" uuid, this should never be reached, really, because
* we first send the uuids, then the current state.
*
* In this scenario, we already dropped the connection hard
* when we received the unsuitable uuids (receive_uuids()).
*
* Should we want to change this, that is: not drop the connection in
* receive_uuids() already, then we would need to add a branch here
* that aborts the attach of "unsuitable uuids" on the peer in case
* this node is currently Diskless Primary.
*/
if (device->p_uuid && peer_state.disk >= D_NEGOTIATING &&
get_ldev_if_state(device, D_NEGOTIATING)) {
int cr; /* consider resync */
/* if we established a new connection */
cr = (os.conn < C_CONNECTED);
/* if we had an established connection
* and one of the nodes newly attaches a disk */
cr |= (os.conn == C_CONNECTED &&
(peer_state.disk == D_NEGOTIATING ||
os.disk == D_NEGOTIATING));
/* if we have both been inconsistent, and the peer has been
* forced to be UpToDate with --force */
cr |= test_bit(CONSIDER_RESYNC, &device->flags);
/* if we had been plain connected, and the admin requested to
* start a sync by "invalidate" or "invalidate-remote" */
cr |= (os.conn == C_CONNECTED &&
(peer_state.conn >= C_STARTING_SYNC_S &&
peer_state.conn <= C_WF_BITMAP_T));
if (cr)
ns.conn = drbd_sync_handshake(peer_device, peer_state.role, real_peer_disk);
put_ldev(device);
if (ns.conn == C_MASK) {
ns.conn = C_CONNECTED;
if (device->state.disk == D_NEGOTIATING) {
drbd_force_state(device, NS(disk, D_FAILED));
} else if (peer_state.disk == D_NEGOTIATING) {
drbd_err(device, "Disk attach process on the peer node was aborted.\n");
peer_state.disk = D_DISKLESS;
real_peer_disk = D_DISKLESS;
} else {
if (test_and_clear_bit(CONN_DRY_RUN, &peer_device->connection->flags))
return -EIO;
D_ASSERT(device, os.conn == C_WF_REPORT_PARAMS);
conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
return -EIO;
}
}
}
spin_lock_irq(&device->resource->req_lock);
if (os.i != drbd_read_state(device).i)
goto retry;
clear_bit(CONSIDER_RESYNC, &device->flags);
ns.peer = peer_state.role;
ns.pdsk = real_peer_disk;
ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
ns.disk = device->new_state_tmp.disk;
cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
if (ns.pdsk == D_CONSISTENT && drbd_suspended(device) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
test_bit(NEW_CUR_UUID, &device->flags)) {
/* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
for temporary network outages! */
spin_unlock_irq(&device->resource->req_lock);
drbd_err(device, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
tl_clear(peer_device->connection);
drbd_uuid_new_current(device);
clear_bit(NEW_CUR_UUID, &device->flags);
conn_request_state(peer_device->connection, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
return -EIO;
}
rv = _drbd_set_state(device, ns, cs_flags, NULL);
ns = drbd_read_state(device);
spin_unlock_irq(&device->resource->req_lock);
if (rv < SS_SUCCESS) {
conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
return -EIO;
}
if (os.conn > C_WF_REPORT_PARAMS) {
if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
peer_state.disk != D_NEGOTIATING ) {
/* we want resync, peer has not yet decided to sync... */
/* Nowadays only used when forcing a node into primary role and
setting its disk to UpToDate with that */
drbd_send_uuids(peer_device);
drbd_send_current_state(peer_device);
}
}
clear_bit(DISCARD_MY_DATA, &device->flags);
drbd_md_sync(device); /* update connected indicator, la_size_sect, ... */
return 0;
}
static int receive_sync_uuid(struct drbd_connection *connection, struct packet_info *pi)
{
struct drbd_peer_device *peer_device;
struct drbd_device *device;
struct p_rs_uuid *p = pi->data;
peer_device = conn_peer_device(connection, pi->vnr);
if (!peer_device)
return -EIO;
device = peer_device->device;
wait_event(device->misc_wait,
device->state.conn == C_WF_SYNC_UUID ||
device->state.conn == C_BEHIND ||
device->state.conn < C_CONNECTED ||
device->state.disk < D_NEGOTIATING);
/* D_ASSERT(device, device->state.conn == C_WF_SYNC_UUID ); */
/* Here the _drbd_uuid_ functions are right, current should
_not_ be rotated into the history */
if (get_ldev_if_state(device, D_NEGOTIATING)) {
_drbd_uuid_set(device, UI_CURRENT, be64_to_cpu(p->uuid));
_drbd_uuid_set(device, UI_BITMAP, 0UL);
drbd_print_uuids(device, "updated sync uuid");
drbd_start_resync(device, C_SYNC_TARGET);
put_ldev(device);
} else
drbd_err(device, "Ignoring SyncUUID packet!\n");
return 0;
}
/*
* receive_bitmap_plain
*
* Return 0 when done, 1 when another iteration is needed, and a negative error
* code upon failure.
*/
static int
receive_bitmap_plain(struct drbd_peer_device *peer_device, unsigned int size,
unsigned long *p, struct bm_xfer_ctx *c)
{
unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
drbd_header_size(peer_device->connection);
unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
c->bm_words - c->word_offset);
unsigned int want = num_words * sizeof(*p);
int err;
if (want != size) {
drbd_err(peer_device, "%s:want (%u) != size (%u)\n", __func__, want, size);
return -EIO;
}
if (want == 0)
return 0;
err = drbd_recv_all(peer_device->connection, p, want);
if (err)
return err;
drbd_bm_merge_lel(peer_device->device, c->word_offset, num_words, p);
c->word_offset += num_words;
c->bit_offset = c->word_offset * BITS_PER_LONG;
if (c->bit_offset > c->bm_bits)
c->bit_offset = c->bm_bits;
return 1;
}
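/* The p_compressed_bm encoding byte packs three fields: bits 0-3 hold the
* bitmap code (enum drbd_bitmap_code), bits 4-6 the number of padding bits,
* and bit 7 the initial toggle value for the RLE decoder. */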
static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
{
return (enum drbd_bitmap_code)(p->encoding & 0x0f);
}
static int dcbp_get_start(struct p_compressed_bm *p)
{
return (p->encoding & 0x80) != 0;
}
static int dcbp_get_pad_bits(struct p_compressed_bm *p)
{
return (p->encoding >> 4) & 0x7;
}
/*
* recv_bm_rle_bits
*
* Return 0 when done, 1 when another iteration is needed, and a negative error
* code upon failure.
*/
static int
recv_bm_rle_bits(struct drbd_peer_device *peer_device,
struct p_compressed_bm *p,
struct bm_xfer_ctx *c,
unsigned int len)
{
struct bitstream bs;
u64 look_ahead;
u64 rl;
u64 tmp;
unsigned long s = c->bit_offset;
unsigned long e;
int toggle = dcbp_get_start(p);
int have;
int bits;
bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
bits = bitstream_get_bits(&bs, &look_ahead, 64);
if (bits < 0)
return -EIO;
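/* look_ahead caches up to 64 not-yet-decoded bits; each VLI code taken
* from it yields a run length "rl", and every other run (while "toggle"
* is set) marks bits as out of sync. */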
for (have = bits; have > 0; s += rl, toggle = !toggle) {
bits = vli_decode_bits(&rl, look_ahead);
if (bits <= 0)
return -EIO;
if (toggle) {
e = s + rl -1;
if (e >= c->bm_bits) {
drbd_err(peer_device, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
return -EIO;
}
_drbd_bm_set_bits(peer_device->device, s, e);
}
if (have < bits) {
drbd_err(peer_device, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
have, bits, look_ahead,
(unsigned int)(bs.cur.b - p->code),
(unsigned int)bs.buf_len);
return -EIO;
}
/* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */
if (likely(bits < 64))
look_ahead >>= bits;
else
look_ahead = 0;
have -= bits;
bits = bitstream_get_bits(&bs, &tmp, 64 - have);
if (bits < 0)
return -EIO;
look_ahead |= tmp << have;
have += bits;
}
c->bit_offset = s;
bm_xfer_ctx_bit_to_word_offset(c);
return (s != c->bm_bits);
}
/*
* decode_bitmap_c
*
* Return 0 when done, 1 when another iteration is needed, and a negative error
* code upon failure.
*/
static int
decode_bitmap_c(struct drbd_peer_device *peer_device,
struct p_compressed_bm *p,
struct bm_xfer_ctx *c,
unsigned int len)
{
if (dcbp_get_code(p) == RLE_VLI_Bits)
return recv_bm_rle_bits(peer_device, p, c, len - sizeof(*p));
/* other variants had been implemented for evaluation,
* but have been dropped as this one turned out to be "best"
* during all our tests. */
drbd_err(peer_device, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
conn_request_state(peer_device->connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
return -EIO;
}
void INFO_bm_xfer_stats(struct drbd_peer_device *peer_device,
const char *direction, struct bm_xfer_ctx *c)
{
/* what would it take to transfer it "plaintext" */
unsigned int header_size = drbd_header_size(peer_device->connection);
unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
unsigned int plain =
header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
c->bm_words * sizeof(unsigned long);
unsigned int total = c->bytes[0] + c->bytes[1];
unsigned int r;
/* total cannot be zero, but just in case: */
if (total == 0)
return;
/* don't report if not compressed */
if (total >= plain)
return;
/* total < plain. check for overflow, still */
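/* r ends up as the compression savings in per-mille; it is printed below
* as a percentage with one decimal place. */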
r = (total > UINT_MAX/1000) ? (total / (plain/1000))
: (1000 * total / plain);
if (r > 1000)
r = 1000;
r = 1000 - r;
drbd_info(peer_device, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
"total %u; compression: %u.%u%%\n",
direction,
c->bytes[1], c->packets[1],
c->bytes[0], c->packets[0],
total, r/10, r % 10);
}
/* Since we are processing the bitfield from lower addresses to higher,
it does not matter if we process it in 32 bit chunks or 64 bit
chunks, as long as it is little endian. (Understand it as a byte stream,
beginning with the lowest byte...) If we used big endian
we would need to process it from the highest address to the lowest,
in order to be agnostic to the 32 vs 64 bit issue.
Returns 0 on success, a negative error code otherwise. */
static int receive_bitmap(struct drbd_connection *connection, struct packet_info *pi)
{
struct drbd_peer_device *peer_device;
struct drbd_device *device;
struct bm_xfer_ctx c;
int err;
peer_device = conn_peer_device(connection, pi->vnr);
if (!peer_device)
return -EIO;
device = peer_device->device;
drbd_bm_lock(device, "receive bitmap", BM_LOCKED_SET_ALLOWED);
/* you are supposed to send additional out-of-sync information
* if you actually set bits during this phase */
c = (struct bm_xfer_ctx) {
.bm_bits = drbd_bm_bits(device),
.bm_words = drbd_bm_words(device),
};
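/* Keep receiving bitmap packets until a chunk handler reports completion
* (0), needs another round (1), or fails (< 0). */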
for(;;) {
if (pi->cmd == P_BITMAP)
err = receive_bitmap_plain(peer_device, pi->size, pi->data, &c);
else if (pi->cmd == P_COMPRESSED_BITMAP) {
/* MAYBE: sanity check that we speak proto >= 90,
* and the feature is enabled! */
struct p_compressed_bm *p = pi->data;
if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(connection)) {
drbd_err(device, "ReportCBitmap packet too large\n");
err = -EIO;
goto out;
}
if (pi->size <= sizeof(*p)) {
drbd_err(device, "ReportCBitmap packet too small (l:%u)\n", pi->size);
err = -EIO;
goto out;
}
err = drbd_recv_all(peer_device->connection, p, pi->size);
if (err)
goto out;
err = decode_bitmap_c(peer_device, p, &c, pi->size);
} else {
drbd_warn(device, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
err = -EIO;
goto out;
}
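/* Index 1 accumulates plain P_BITMAP traffic, index 0 the compressed
* variant; INFO_bm_xfer_stats() relies on that split. */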
c.packets[pi->cmd == P_BITMAP]++;
c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(connection) + pi->size;
if (err <= 0) {
if (err < 0)
goto out;
break;
}
err = drbd_recv_header(peer_device->connection, pi);
if (err)
goto out;
}
INFO_bm_xfer_stats(peer_device, "receive", &c);
if (device->state.conn == C_WF_BITMAP_T) {
enum drbd_state_rv rv;
err = drbd_send_bitmap(device, peer_device);
if (err)
goto out;
/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
rv = _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
D_ASSERT(device, rv == SS_SUCCESS);
} else if (device->state.conn != C_WF_BITMAP_S) {
/* admin may have requested C_DISCONNECTING,
* other threads may have noticed network errors */
drbd_info(device, "unexpected cstate (%s) in receive_bitmap\n",
drbd_conn_str(device->state.conn));
}
err = 0;
out:
drbd_bm_unlock(device);
if (!err && device->state.conn == C_WF_BITMAP_S)
drbd_start_resync(device, C_SYNC_SOURCE);
return err;
}
static int receive_skip(struct drbd_connection *connection, struct packet_info *pi)
{
drbd_warn(connection, "skipping unknown optional packet type %d, l: %d!\n",
pi->cmd, pi->size);
return ignore_remaining_packet(connection, pi);
}
static int receive_UnplugRemote(struct drbd_connection *connection, struct packet_info *pi)
{
/* Make sure we've acked all the TCP data associated
* with the data requests being unplugged */
tcp_sock_set_quickack(connection->data.socket->sk, 2);
return 0;
}
static int receive_out_of_sync(struct drbd_connection *connection, struct packet_info *pi)
{
struct drbd_peer_device *peer_device;
struct drbd_device *device;
struct p_block_desc *p = pi->data;
peer_device = conn_peer_device(connection, pi->vnr);
if (!peer_device)
return -EIO;
device = peer_device->device;
switch (device->state.conn) {
case C_WF_SYNC_UUID:
case C_WF_BITMAP_T:
case C_BEHIND:
break;
default:
drbd_err(device, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
drbd_conn_str(device->state.conn));
}
drbd_set_out_of_sync(peer_device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
return 0;
}
static int receive_rs_deallocated(struct drbd_connection *connection, struct packet_info *pi)
{
struct drbd_peer_device *peer_device;
struct p_block_desc *p = pi->data;
struct drbd_device *device;
sector_t sector;
int size, err = 0;
peer_device = conn_peer_device(connection, pi->vnr);
if (!peer_device)
return -EIO;
device = peer_device->device;
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
dec_rs_pending(peer_device);
if (get_ldev(device)) {
struct drbd_peer_request *peer_req;
peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector,
size, 0, GFP_NOIO);
if (!peer_req) {
put_ldev(device);
return -ENOMEM;
}
peer_req->w.cb = e_end_resync_block;
peer_req->opf = REQ_OP_DISCARD;
peer_req->submit_jif = jiffies;
peer_req->flags |= EE_TRIM;
spin_lock_irq(&device->resource->req_lock);
list_add_tail(&peer_req->w.list, &device->sync_ee);
spin_unlock_irq(&device->resource->req_lock);
atomic_add(pi->size >> 9, &device->rs_sect_ev);
err = drbd_submit_peer_request(peer_req);
if (err) {
spin_lock_irq(&device->resource->req_lock);
list_del(&peer_req->w.list);
spin_unlock_irq(&device->resource->req_lock);
drbd_free_peer_req(device, peer_req);
put_ldev(device);
err = 0;
goto fail;
}
inc_unacked(device);
/* No put_ldev() here. Gets called in drbd_endio_write_sec_final(),
as well as drbd_rs_complete_io() */
} else {
fail:
drbd_rs_complete_io(device, sector);
drbd_send_ack_ex(peer_device, P_NEG_ACK, sector, size, ID_SYNCER);
}
atomic_add(size >> 9, &device->rs_sect_in);
return err;
}
struct data_cmd {
int expect_payload;
unsigned int pkt_size;
int (*fn)(struct drbd_connection *, struct packet_info *);
};
static struct data_cmd drbd_cmd_handler[] = {
[P_DATA] = { 1, sizeof(struct p_data), receive_Data },
[P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
[P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
[P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
[P_BITMAP] = { 1, 0, receive_bitmap } ,
[P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
[P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote },
[P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
[P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
[P_SYNC_PARAM] = { 1, 0, receive_SyncParam },
[P_SYNC_PARAM89] = { 1, 0, receive_SyncParam },
[P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
[P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
[P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
[P_STATE] = { 0, sizeof(struct p_state), receive_state },
[P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
[P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
[P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
[P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
[P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
[P_RS_THIN_REQ] = { 0, sizeof(struct p_block_req), receive_DataRequest },
[P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
[P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
[P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
[P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
[P_TRIM] = { 0, sizeof(struct p_trim), receive_Data },
[P_ZEROES] = { 0, sizeof(struct p_trim), receive_Data },
[P_RS_DEALLOCATED] = { 0, sizeof(struct p_block_desc), receive_rs_deallocated },
};
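/* Main dispatch loop of the receiver thread: read a header, validate the
* size against the table above, pull in the fixed sub-header, then let the
* per-packet handler consume the remaining payload. */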
static void drbdd(struct drbd_connection *connection)
{
struct packet_info pi;
size_t shs; /* sub header size */
int err;
while (get_t_state(&connection->receiver) == RUNNING) {
struct data_cmd const *cmd;
drbd_thread_current_set_cpu(&connection->receiver);
update_receiver_timing_details(connection, drbd_recv_header_maybe_unplug);
if (drbd_recv_header_maybe_unplug(connection, &pi))
goto err_out;
cmd = &drbd_cmd_handler[pi.cmd];
if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
drbd_err(connection, "Unexpected data packet %s (0x%04x)",
cmdname(pi.cmd), pi.cmd);
goto err_out;
}
shs = cmd->pkt_size;
if (pi.cmd == P_SIZES && connection->agreed_features & DRBD_FF_WSAME)
shs += sizeof(struct o_qlim);
if (pi.size > shs && !cmd->expect_payload) {
drbd_err(connection, "No payload expected %s l:%d\n",
cmdname(pi.cmd), pi.size);
goto err_out;
}
if (pi.size < shs) {
drbd_err(connection, "%s: unexpected packet size, expected:%d received:%d\n",
cmdname(pi.cmd), (int)shs, pi.size);
goto err_out;
}
if (shs) {
update_receiver_timing_details(connection, drbd_recv_all_warn);
err = drbd_recv_all_warn(connection, pi.data, shs);
if (err)
goto err_out;
pi.size -= shs;
}
update_receiver_timing_details(connection, cmd->fn);
err = cmd->fn(connection, &pi);
if (err) {
drbd_err(connection, "error receiving %s, e: %d l: %d!\n",
cmdname(pi.cmd), err, pi.size);
goto err_out;
}
}
return;
err_out:
conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
static void conn_disconnect(struct drbd_connection *connection)
{
struct drbd_peer_device *peer_device;
enum drbd_conns oc;
int vnr;
if (connection->cstate == C_STANDALONE)
return;
/* We are about to start the cleanup after connection loss.
* Make sure drbd_make_request knows about that.
* Usually we should be in some network failure state already,
* but just in case we are not, we fix it up here.
*/
conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
/* ack_receiver does not clean up anything. it must not interfere, either */
drbd_thread_stop(&connection->ack_receiver);
if (connection->ack_sender) {
destroy_workqueue(connection->ack_sender);
connection->ack_sender = NULL;
}
drbd_free_sock(connection);
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
kref_get(&device->kref);
rcu_read_unlock();
drbd_disconnected(peer_device);
kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
}
rcu_read_unlock();
if (!list_empty(&connection->current_epoch->list))
drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
atomic_set(&connection->current_epoch->epoch_size, 0);
connection->send.seen_any_write_yet = false;
drbd_info(connection, "Connection closed\n");
if (conn_highest_role(connection) == R_PRIMARY && conn_highest_pdsk(connection) >= D_UNKNOWN)
conn_try_outdate_peer_async(connection);
spin_lock_irq(&connection->resource->req_lock);
oc = connection->cstate;
if (oc >= C_UNCONNECTED)
_conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
spin_unlock_irq(&connection->resource->req_lock);
if (oc == C_DISCONNECTING)
conn_request_state(connection, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
}
static int drbd_disconnected(struct drbd_peer_device *peer_device)
{
struct drbd_device *device = peer_device->device;
unsigned int i;
/* wait for current activity to cease. */
spin_lock_irq(&device->resource->req_lock);
_drbd_wait_ee_list_empty(device, &device->active_ee);
_drbd_wait_ee_list_empty(device, &device->sync_ee);
_drbd_wait_ee_list_empty(device, &device->read_ee);
spin_unlock_irq(&device->resource->req_lock);
/* We do not have data structures that would allow us to
* get the rs_pending_cnt down to 0 again.
* * On C_SYNC_TARGET we do not have any data structures describing
* the pending RSDataRequest's we have sent.
* * On C_SYNC_SOURCE there is no data structure that tracks
* the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
* And no, it is not the sum of the reference counts in the
* resync_LRU. The resync_LRU tracks the whole operation including
* the disk-IO, while the rs_pending_cnt only tracks the blocks
* on the fly. */
drbd_rs_cancel_all(device);
device->rs_total = 0;
device->rs_failed = 0;
atomic_set(&device->rs_pending_cnt, 0);
wake_up(&device->misc_wait);
del_timer_sync(&device->resync_timer);
resync_timer_fn(&device->resync_timer);
/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
* w_make_resync_request etc. which may still be on the worker queue
* to be "canceled" */
drbd_flush_workqueue(&peer_device->connection->sender_work);
drbd_finish_peer_reqs(device);
/* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
might have issued a work again. The one before drbd_finish_peer_reqs() is
necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
drbd_flush_workqueue(&peer_device->connection->sender_work);
/* need to do it again, drbd_finish_peer_reqs() may have populated it
* again via drbd_try_clear_on_disk_bm(). */
drbd_rs_cancel_all(device);
kfree(device->p_uuid);
device->p_uuid = NULL;
if (!drbd_suspended(device))
tl_clear(peer_device->connection);
drbd_md_sync(device);
if (get_ldev(device)) {
drbd_bitmap_io(device, &drbd_bm_write_copy_pages,
"write from disconnected", BM_LOCKED_CHANGE_ALLOWED, NULL);
put_ldev(device);
}
/* tcp_close and release of sendpage pages can be deferred. I don't
* want to use SO_LINGER, because apparently it can be deferred for
* more than 20 seconds (longest time I checked).
*
* Actually we don't care for exactly when the network stack does its
* put_page(), but release our reference on these pages right here.
*/
i = drbd_free_peer_reqs(device, &device->net_ee);
if (i)
drbd_info(device, "net_ee not empty, killed %u entries\n", i);
i = atomic_read(&device->pp_in_use_by_net);
if (i)
drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);
i = atomic_read(&device->pp_in_use);
if (i)
drbd_info(device, "pp_in_use = %d, expected 0\n", i);
D_ASSERT(device, list_empty(&device->read_ee));
D_ASSERT(device, list_empty(&device->active_ee));
D_ASSERT(device, list_empty(&device->sync_ee));
D_ASSERT(device, list_empty(&device->done_ee));
return 0;
}
/*
* We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
* we can agree on is stored in agreed_pro_version.
*
* feature flags and the reserved array should be enough room for future
* enhancements of the handshake protocol, and possible plugins...
*
* for now, they are expected to be zero, but ignored.
*/
static int drbd_send_features(struct drbd_connection *connection)
{
struct drbd_socket *sock;
struct p_connection_features *p;
sock = &connection->data;
p = conn_prepare_command(connection, sock);
if (!p)
return -EIO;
memset(p, 0, sizeof(*p));
p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
p->feature_flags = cpu_to_be32(PRO_FEATURES);
return conn_send_command(connection, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
}
/*
* return values:
* 1 yes, we have a valid connection
* 0 oops, did not work out, please try again
* -1 peer talks different language,
* no point in trying again, please go standalone.
*/
static int drbd_do_features(struct drbd_connection *connection)
{
/* ASSERT current == connection->receiver ... */
struct p_connection_features *p;
const int expect = sizeof(struct p_connection_features);
struct packet_info pi;
int err;
err = drbd_send_features(connection);
if (err)
return 0;
err = drbd_recv_header(connection, &pi);
if (err)
return 0;
if (pi.cmd != P_CONNECTION_FEATURES) {
drbd_err(connection, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
cmdname(pi.cmd), pi.cmd);
return -1;
}
if (pi.size != expect) {
drbd_err(connection, "expected ConnectionFeatures length: %u, received: %u\n",
expect, pi.size);
return -1;
}
p = pi.data;
err = drbd_recv_all_warn(connection, p, expect);
if (err)
return 0;
p->protocol_min = be32_to_cpu(p->protocol_min);
p->protocol_max = be32_to_cpu(p->protocol_max);
if (p->protocol_max == 0)
p->protocol_max = p->protocol_min;
if (PRO_VERSION_MAX < p->protocol_min ||
PRO_VERSION_MIN > p->protocol_max)
goto incompat;
connection->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
connection->agreed_features = PRO_FEATURES & be32_to_cpu(p->feature_flags);
drbd_info(connection, "Handshake successful: "
"Agreed network protocol version %d\n", connection->agreed_pro_version);
drbd_info(connection, "Feature flags enabled on protocol level: 0x%x%s%s%s%s.\n",
connection->agreed_features,
connection->agreed_features & DRBD_FF_TRIM ? " TRIM" : "",
connection->agreed_features & DRBD_FF_THIN_RESYNC ? " THIN_RESYNC" : "",
connection->agreed_features & DRBD_FF_WSAME ? " WRITE_SAME" : "",
connection->agreed_features & DRBD_FF_WZEROES ? " WRITE_ZEROES" :
connection->agreed_features ? "" : " none");
return 1;
incompat:
drbd_err(connection, "incompatible DRBD dialects: "
"I support %d-%d, peer supports %d-%d\n",
PRO_VERSION_MIN, PRO_VERSION_MAX,
p->protocol_min, p->protocol_max);
return -1;
}
#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
static int drbd_do_auth(struct drbd_connection *connection)
{
drbd_err(connection, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
drbd_err(connection, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
return -1;
}
#else
#define CHALLENGE_LEN 64
/* Return value:
1 - auth succeeded,
0 - failed, try again (network error),
-1 - auth failed, don't try again.
*/
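/* CRAM-HMAC style exchange: send our random challenge, receive the peer's,
* answer it with an HMAC keyed by the shared secret, and finally check the
* peer's response against the HMAC of our own challenge. */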
static int drbd_do_auth(struct drbd_connection *connection)
{
struct drbd_socket *sock;
char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
char *response = NULL;
char *right_response = NULL;
char *peers_ch = NULL;
unsigned int key_len;
char secret[SHARED_SECRET_MAX]; /* 64 byte */
unsigned int resp_size;
struct shash_desc *desc;
struct packet_info pi;
struct net_conf *nc;
int err, rv;
/* FIXME: Put the challenge/response into the preallocated socket buffer. */
rcu_read_lock();
nc = rcu_dereference(connection->net_conf);
key_len = strlen(nc->shared_secret);
memcpy(secret, nc->shared_secret, key_len);
rcu_read_unlock();
desc = kmalloc(sizeof(struct shash_desc) +
crypto_shash_descsize(connection->cram_hmac_tfm),
GFP_KERNEL);
if (!desc) {
rv = -1;
goto fail;
}
desc->tfm = connection->cram_hmac_tfm;
rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
if (rv) {
drbd_err(connection, "crypto_shash_setkey() failed with %d\n", rv);
rv = -1;
goto fail;
}
get_random_bytes(my_challenge, CHALLENGE_LEN);
sock = &connection->data;
if (!conn_prepare_command(connection, sock)) {
rv = 0;
goto fail;
}
rv = !conn_send_command(connection, sock, P_AUTH_CHALLENGE, 0,
my_challenge, CHALLENGE_LEN);
if (!rv)
goto fail;
err = drbd_recv_header(connection, &pi);
if (err) {
rv = 0;
goto fail;
}
if (pi.cmd != P_AUTH_CHALLENGE) {
drbd_err(connection, "expected AuthChallenge packet, received: %s (0x%04x)\n",
cmdname(pi.cmd), pi.cmd);
rv = -1;
goto fail;
}
if (pi.size > CHALLENGE_LEN * 2) {
drbd_err(connection, "expected AuthChallenge payload too big.\n");
rv = -1;
goto fail;
}
if (pi.size < CHALLENGE_LEN) {
drbd_err(connection, "AuthChallenge payload too small.\n");
rv = -1;
goto fail;
}
peers_ch = kmalloc(pi.size, GFP_NOIO);
if (!peers_ch) {
rv = -1;
goto fail;
}
err = drbd_recv_all_warn(connection, peers_ch, pi.size);
if (err) {
rv = 0;
goto fail;
}
if (!memcmp(my_challenge, peers_ch, CHALLENGE_LEN)) {
drbd_err(connection, "Peer presented the same challenge!\n");
rv = -1;
goto fail;
}
resp_size = crypto_shash_digestsize(connection->cram_hmac_tfm);
response = kmalloc(resp_size, GFP_NOIO);
if (!response) {
rv = -1;
goto fail;
}
rv = crypto_shash_digest(desc, peers_ch, pi.size, response);
if (rv) {
drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
rv = -1;
goto fail;
}
if (!conn_prepare_command(connection, sock)) {
rv = 0;
goto fail;
}
rv = !conn_send_command(connection, sock, P_AUTH_RESPONSE, 0,
response, resp_size);
if (!rv)
goto fail;
err = drbd_recv_header(connection, &pi);
if (err) {
rv = 0;
goto fail;
}
if (pi.cmd != P_AUTH_RESPONSE) {
drbd_err(connection, "expected AuthResponse packet, received: %s (0x%04x)\n",
cmdname(pi.cmd), pi.cmd);
rv = 0;
goto fail;
}
if (pi.size != resp_size) {
drbd_err(connection, "expected AuthResponse payload of wrong size\n");
rv = 0;
goto fail;
}
err = drbd_recv_all_warn(connection, response , resp_size);
if (err) {
rv = 0;
goto fail;
}
right_response = kmalloc(resp_size, GFP_NOIO);
if (!right_response) {
rv = -1;
goto fail;
}
rv = crypto_shash_digest(desc, my_challenge, CHALLENGE_LEN,
right_response);
if (rv) {
drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
rv = -1;
goto fail;
}
rv = !memcmp(response, right_response, resp_size);
if (rv)
drbd_info(connection, "Peer authenticated using %d bytes HMAC\n",
resp_size);
else
rv = -1;
fail:
kfree(peers_ch);
kfree(response);
kfree(right_response);
if (desc) {
shash_desc_zero(desc);
kfree(desc);
}
return rv;
}
#endif
int drbd_receiver(struct drbd_thread *thi)
{
struct drbd_connection *connection = thi->connection;
int h;
drbd_info(connection, "receiver (re)started\n");
do {
h = conn_connect(connection);
if (h == 0) {
conn_disconnect(connection);
schedule_timeout_interruptible(HZ);
}
if (h == -1) {
drbd_warn(connection, "Discarding network configuration.\n");
conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
}
} while (h == 0);
if (h > 0) {
blk_start_plug(&connection->receiver_plug);
drbdd(connection);
blk_finish_plug(&connection->receiver_plug);
}
conn_disconnect(connection);
drbd_info(connection, "receiver terminated\n");
return 0;
}
/* ********* acknowledge sender ******** */
static int got_conn_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
{
struct p_req_state_reply *p = pi->data;
int retcode = be32_to_cpu(p->retcode);
if (retcode >= SS_SUCCESS) {
set_bit(CONN_WD_ST_CHG_OKAY, &connection->flags);
} else {
set_bit(CONN_WD_ST_CHG_FAIL, &connection->flags);
drbd_err(connection, "Requested state change failed by peer: %s (%d)\n",
drbd_set_st_err_str(retcode), retcode);
}
wake_up(&connection->ping_wait);
return 0;
}
static int got_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
{
struct drbd_peer_device *peer_device;
struct drbd_device *device;
struct p_req_state_reply *p = pi->data;
int retcode = be32_to_cpu(p->retcode);
peer_device = conn_peer_device(connection, pi->vnr);
if (!peer_device)
return -EIO;
device = peer_device->device;
if (test_bit(CONN_WD_ST_CHG_REQ, &connection->flags)) {
D_ASSERT(device, connection->agreed_pro_version < 100);
return got_conn_RqSReply(connection, pi);
}
if (retcode >= SS_SUCCESS) {
set_bit(CL_ST_CHG_SUCCESS, &device->flags);
} else {
set_bit(CL_ST_CHG_FAIL, &device->flags);
drbd_err(device, "Requested state change failed by peer: %s (%d)\n",
drbd_set_st_err_str(retcode), retcode);
}
wake_up(&device->state_wait);
return 0;
}
static int got_Ping(struct drbd_connection *connection, struct packet_info *pi)
{
return drbd_send_ping_ack(connection);
}
static int got_PingAck(struct drbd_connection *connection, struct packet_info *pi)
{
/* restore idle timeout */
connection->meta.socket->sk->sk_rcvtimeo = connection->net_conf->ping_int*HZ;
if (!test_and_set_bit(GOT_PING_ACK, &connection->flags))
wake_up(&connection->ping_wait);
return 0;
}
static int got_IsInSync(struct drbd_connection *connection, struct packet_info *pi)
{
struct drbd_peer_device *peer_device;
struct drbd_device *device;
struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
int blksize = be32_to_cpu(p->blksize);
peer_device = conn_peer_device(connection, pi->vnr);
if (!peer_device)
return -EIO;
device = peer_device->device;
D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
if (get_ldev(device)) {
drbd_rs_complete_io(device, sector);
drbd_set_in_sync(peer_device, sector, blksize);
/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
device->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
put_ldev(device);
}
dec_rs_pending(peer_device);
atomic_add(blksize >> 9, &device->rs_sect_in);
return 0;
}
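/* Look up the request identified by block_id/sector in the given interval
* tree, feed it the "what" event via __req_mod(), and complete the master
* bio if that transition finished the request. */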
static int
validate_req_change_req_state(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
struct rb_root *root, const char *func,
enum drbd_req_event what, bool missing_ok)
{
struct drbd_device *device = peer_device->device;
struct drbd_request *req;
struct bio_and_error m;
spin_lock_irq(&device->resource->req_lock);
req = find_request(device, root, id, sector, missing_ok, func);
if (unlikely(!req)) {
spin_unlock_irq(&device->resource->req_lock);
return -EIO;
}
__req_mod(req, what, peer_device, &m);
spin_unlock_irq(&device->resource->req_lock);
if (m.bio)
complete_master_bio(device, &m);
return 0;
}
static int got_BlockAck(struct drbd_connection *connection, struct packet_info *pi)
{
struct drbd_peer_device *peer_device;
struct drbd_device *device;
struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
int blksize = be32_to_cpu(p->blksize);
enum drbd_req_event what;
peer_device = conn_peer_device(connection, pi->vnr);
if (!peer_device)
return -EIO;
device = peer_device->device;
update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
if (p->block_id == ID_SYNCER) {
drbd_set_in_sync(peer_device, sector, blksize);
dec_rs_pending(peer_device);
return 0;
}
switch (pi->cmd) {
case P_RS_WRITE_ACK:
what = WRITE_ACKED_BY_PEER_AND_SIS;
break;
case P_WRITE_ACK:
what = WRITE_ACKED_BY_PEER;
break;
case P_RECV_ACK:
what = RECV_ACKED_BY_PEER;
break;
case P_SUPERSEDED:
what = CONFLICT_RESOLVED;
break;
case P_RETRY_WRITE:
what = POSTPONE_WRITE;
break;
default:
BUG();
}
return validate_req_change_req_state(peer_device, p->block_id, sector,
&device->write_requests, __func__,
what, false);
}
static int got_NegAck(struct drbd_connection *connection, struct packet_info *pi)
{
struct drbd_peer_device *peer_device;
struct drbd_device *device;
struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
int size = be32_to_cpu(p->blksize);
int err;
peer_device = conn_peer_device(connection, pi->vnr);
if (!peer_device)
return -EIO;
device = peer_device->device;
update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
if (p->block_id == ID_SYNCER) {
dec_rs_pending(peer_device);
drbd_rs_failed_io(peer_device, sector, size);
return 0;
}
err = validate_req_change_req_state(peer_device, p->block_id, sector,
&device->write_requests, __func__,
NEG_ACKED, true);
if (err) {
/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
The master bio might already be completed, therefore the
request is no longer in the collision hash. */
/* In Protocol B we might already have got a P_RECV_ACK
but then get a P_NEG_ACK afterwards. */
drbd_set_out_of_sync(peer_device, sector, size);
}
return 0;
}
static int got_NegDReply(struct drbd_connection *connection, struct packet_info *pi)
{
struct drbd_peer_device *peer_device;
struct drbd_device *device;
struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
peer_device = conn_peer_device(connection, pi->vnr);
if (!peer_device)
return -EIO;
device = peer_device->device;
update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
drbd_err(device, "Got NegDReply; Sector %llus, len %u.\n",
(unsigned long long)sector, be32_to_cpu(p->blksize));
return validate_req_change_req_state(peer_device, p->block_id, sector,
&device->read_requests, __func__,
NEG_ACKED, false);
}
static int got_NegRSDReply(struct drbd_connection *connection, struct packet_info *pi)
{
struct drbd_peer_device *peer_device;
struct drbd_device *device;
sector_t sector;
int size;
struct p_block_ack *p = pi->data;
peer_device = conn_peer_device(connection, pi->vnr);
if (!peer_device)
return -EIO;
device = peer_device->device;
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
dec_rs_pending(peer_device);
if (get_ldev_if_state(device, D_FAILED)) {
drbd_rs_complete_io(device, sector);
switch (pi->cmd) {
case P_NEG_RS_DREPLY:
drbd_rs_failed_io(peer_device, sector, size);
break;
case P_RS_CANCEL:
break;
default:
BUG();
}
put_ldev(device);
}
return 0;
}
static int got_BarrierAck(struct drbd_connection *connection, struct packet_info *pi)
{
struct p_barrier_ack *p = pi->data;
struct drbd_peer_device *peer_device;
int vnr;
tl_release(connection, p->barrier, be32_to_cpu(p->set_size));
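/* With this epoch released, arm the resync-start timer for every volume
* that is Ahead and has no application writes in flight anymore. */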
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
if (device->state.conn == C_AHEAD &&
atomic_read(&device->ap_in_flight) == 0 &&
!test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &device->flags)) {
device->start_resync_timer.expires = jiffies + HZ;
add_timer(&device->start_resync_timer);
}
}
rcu_read_unlock();
return 0;
}
static int got_OVResult(struct drbd_connection *connection, struct packet_info *pi)
{
struct drbd_peer_device *peer_device;
struct drbd_device *device;
struct p_block_ack *p = pi->data;
struct drbd_device_work *dw;
sector_t sector;
int size;
peer_device = conn_peer_device(connection, pi->vnr);
if (!peer_device)
return -EIO;
device = peer_device->device;
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
drbd_ov_out_of_sync_found(peer_device, sector, size);
else
ov_out_of_sync_print(peer_device);
if (!get_ldev(device))
return 0;
drbd_rs_complete_io(device, sector);
dec_rs_pending(peer_device);
--device->ov_left;
/* let's advance progress step marks only for every other megabyte */
if ((device->ov_left & 0x200) == 0x200)
drbd_advance_rs_marks(peer_device, device->ov_left);
if (device->ov_left == 0) {
dw = kmalloc(sizeof(*dw), GFP_NOIO);
if (dw) {
dw->w.cb = w_ov_finished;
dw->device = device;
drbd_queue_work(&peer_device->connection->sender_work, &dw->w);
} else {
drbd_err(device, "kmalloc(dw) failed.");
ov_out_of_sync_print(peer_device);
drbd_resync_finished(peer_device);
}
}
put_ldev(device);
return 0;
}
static int got_skip(struct drbd_connection *connection, struct packet_info *pi)
{
return 0;
}
struct meta_sock_cmd {
size_t pkt_size;
int (*fn)(struct drbd_connection *connection, struct packet_info *);
};
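/* ping_timeo is configured in tenths of a second, ping_int in seconds;
* hence the extra division by 10 when arming the ping timeout. */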
static void set_rcvtimeo(struct drbd_connection *connection, bool ping_timeout)
{
long t;
struct net_conf *nc;
rcu_read_lock();
nc = rcu_dereference(connection->net_conf);
t = ping_timeout ? nc->ping_timeo : nc->ping_int;
rcu_read_unlock();
t *= HZ;
if (ping_timeout)
t /= 10;
connection->meta.socket->sk->sk_rcvtimeo = t;
}
static void set_ping_timeout(struct drbd_connection *connection)
{
set_rcvtimeo(connection, 1);
}
static void set_idle_timeout(struct drbd_connection *connection)
{
set_rcvtimeo(connection, 0);
}
static struct meta_sock_cmd ack_receiver_tbl[] = {
[P_PING] = { 0, got_Ping },
[P_PING_ACK] = { 0, got_PingAck },
[P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
[P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
[P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
[P_SUPERSEDED] = { sizeof(struct p_block_ack), got_BlockAck },
[P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
[P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
[P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply },
[P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
[P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
[P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
[P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
[P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply },
[P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_conn_RqSReply },
[P_RETRY_WRITE] = { sizeof(struct p_block_ack), got_BlockAck },
};
int drbd_ack_receiver(struct drbd_thread *thi)
{
struct drbd_connection *connection = thi->connection;
struct meta_sock_cmd *cmd = NULL;
struct packet_info pi;
unsigned long pre_recv_jif;
int rv;
void *buf = connection->meta.rbuf;
int received = 0;
unsigned int header_size = drbd_header_size(connection);
int expect = header_size;
bool ping_timeout_active = false;
sched_set_fifo_low(current);
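/* Two-phase receive: first gather a full header (expect == header_size),
* decode it, then grow "expect" by the command's payload size and dispatch
* the handler once the complete packet has arrived. */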
while (get_t_state(thi) == RUNNING) {
drbd_thread_current_set_cpu(thi);
conn_reclaim_net_peer_reqs(connection);
if (test_and_clear_bit(SEND_PING, &connection->flags)) {
if (drbd_send_ping(connection)) {
drbd_err(connection, "drbd_send_ping has failed\n");
goto reconnect;
}
set_ping_timeout(connection);
ping_timeout_active = true;
}
pre_recv_jif = jiffies;
rv = drbd_recv_short(connection->meta.socket, buf, expect-received, 0);
/* Note:
* -EINTR (on meta) we got a signal
* -EAGAIN (on meta) rcvtimeo expired
* -ECONNRESET other side closed the connection
* -ERESTARTSYS (on data) we got a signal
* rv < 0 other than above: unexpected error!
* rv == expected: full header or command
* rv < expected: "woken" by signal during receive
* rv == 0 : "connection shut down by peer"
*/
if (likely(rv > 0)) {
received += rv;
buf += rv;
} else if (rv == 0) {
if (test_bit(DISCONNECT_SENT, &connection->flags)) {
long t;
rcu_read_lock();
t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
rcu_read_unlock();
t = wait_event_timeout(connection->ping_wait,
connection->cstate < C_WF_REPORT_PARAMS,
t);
if (t)
break;
}
drbd_err(connection, "meta connection shut down by peer.\n");
goto reconnect;
} else if (rv == -EAGAIN) {
/* If the data socket received something meanwhile,
* that is good enough: peer is still alive. */
if (time_after(connection->last_received, pre_recv_jif))
continue;
if (ping_timeout_active) {
drbd_err(connection, "PingAck did not arrive in time.\n");
goto reconnect;
}
set_bit(SEND_PING, &connection->flags);
continue;
} else if (rv == -EINTR) {
/* maybe drbd_thread_stop(): the while condition will notice.
* maybe woken for send_ping: we'll send a ping above,
* and change the rcvtimeo */
flush_signals(current);
continue;
} else {
drbd_err(connection, "sock_recvmsg returned %d\n", rv);
goto reconnect;
}
if (received == expect && cmd == NULL) {
if (decode_header(connection, connection->meta.rbuf, &pi))
goto reconnect;
cmd = &ack_receiver_tbl[pi.cmd];
if (pi.cmd >= ARRAY_SIZE(ack_receiver_tbl) || !cmd->fn) {
drbd_err(connection, "Unexpected meta packet %s (0x%04x)\n",
cmdname(pi.cmd), pi.cmd);
goto disconnect;
}
expect = header_size + cmd->pkt_size;
if (pi.size != expect - header_size) {
drbd_err(connection, "Wrong packet size on meta (c: %d, l: %d)\n",
pi.cmd, pi.size);
goto reconnect;
}
}
if (received == expect) {
bool err;
err = cmd->fn(connection, &pi);
if (err) {
drbd_err(connection, "%ps failed\n", cmd->fn);
goto reconnect;
}
connection->last_received = jiffies;
if (cmd == &ack_receiver_tbl[P_PING_ACK]) {
set_idle_timeout(connection);
ping_timeout_active = false;
}
buf = connection->meta.rbuf;
received = 0;
expect = header_size;
cmd = NULL;
}
}
if (0) {
reconnect:
conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
conn_md_sync(connection);
}
if (0) {
disconnect:
conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
}
drbd_info(connection, "ack_receiver terminated\n");
return 0;
}
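/*
 * Work item (peer_device->send_acks_work) to flush acks for completed peer
 * requests: optionally cork the meta socket, run drbd_finish_peer_reqs(),
 * then uncork so the acks leave in as few TCP segments as possible.
 */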
void drbd_send_acks_wf(struct work_struct *ws)
{
struct drbd_peer_device *peer_device =
container_of(ws, struct drbd_peer_device, send_acks_work);
struct drbd_connection *connection = peer_device->connection;
struct drbd_device *device = peer_device->device;
struct net_conf *nc;
int tcp_cork, err;
rcu_read_lock();
nc = rcu_dereference(connection->net_conf);
tcp_cork = nc->tcp_cork;
rcu_read_unlock();
if (tcp_cork)
tcp_sock_set_cork(connection->meta.socket->sk, true);
err = drbd_finish_peer_reqs(device);
kref_put(&device->kref, drbd_destroy_device);
/* get is in drbd_endio_write_sec_final(). That is necessary to keep the
struct work_struct send_acks_work alive, which is in the peer_device object */
if (err) {
conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
return;
}
if (tcp_cork)
tcp_sock_set_cork(connection->meta.socket->sk, false);
return;
}
| linux-master | drivers/block/drbd/drbd_receiver.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
drbd_req.c
This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
Copyright (C) 1999-2008, Philipp Reisner <[email protected]>.
Copyright (C) 2002-2008, Lars Ellenberg <[email protected]>.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_req.h"
static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);
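/*
 * Allocate and initialize a drbd_request for @bio_src: copy sector and size
 * into the interval, derive RQ_WRITE/RQ_ZEROES/RQ_UNMAP from the bio op, and
 * start with one completion_ref (put by __drbd_make_request) plus one kref.
 */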
static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio *bio_src)
{
struct drbd_request *req;
req = mempool_alloc(&drbd_request_mempool, GFP_NOIO);
if (!req)
return NULL;
memset(req, 0, sizeof(*req));
req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
| (bio_op(bio_src) == REQ_OP_WRITE_ZEROES ? RQ_ZEROES : 0)
| (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0);
req->device = device;
req->master_bio = bio_src;
req->epoch = 0;
drbd_clear_interval(&req->i);
req->i.sector = bio_src->bi_iter.bi_sector;
req->i.size = bio_src->bi_iter.bi_size;
req->i.local = true;
req->i.waiting = false;
INIT_LIST_HEAD(&req->tl_requests);
INIT_LIST_HEAD(&req->w.list);
INIT_LIST_HEAD(&req->req_pending_master_completion);
INIT_LIST_HEAD(&req->req_pending_local);
/* one reference to be put by __drbd_make_request */
atomic_set(&req->completion_ref, 1);
/* one kref as long as completion_ref > 0 */
kref_init(&req->kref);
return req;
}
static void drbd_remove_request_interval(struct rb_root *root,
struct drbd_request *req)
{
struct drbd_device *device = req->device;
struct drbd_interval *i = &req->i;
drbd_remove_interval(root, i);
/* Wake up any processes waiting for this request to complete. */
if (i->waiting)
wake_up(&device->misc_wait);
}
void drbd_req_destroy(struct kref *kref)
{
struct drbd_request *req = container_of(kref, struct drbd_request, kref);
struct drbd_device *device = req->device;
const unsigned s = req->rq_state;
if ((req->master_bio && !(s & RQ_POSTPONED)) ||
atomic_read(&req->completion_ref) ||
(s & RQ_LOCAL_PENDING) ||
((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
drbd_err(device, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
s, atomic_read(&req->completion_ref));
return;
}
/* If called from mod_rq_state (expected normal case) or
* drbd_send_and_submit (the less likely normal path), this holds the
* req_lock, and req->tl_requests will typically be on ->transfer_log,
* though it may be still empty (never added to the transfer log).
*
* If called from do_retry(), we do NOT hold the req_lock, but we are
* still allowed to unconditionally list_del(&req->tl_requests),
* because it will be on a local on-stack list only. */
list_del_init(&req->tl_requests);
/* finally remove the request from the conflict detection
* respective block_id verification interval tree. */
if (!drbd_interval_empty(&req->i)) {
struct rb_root *root;
if (s & RQ_WRITE)
root = &device->write_requests;
else
root = &device->read_requests;
drbd_remove_request_interval(root, req);
} else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0)
drbd_err(device, "drbd_req_destroy: Logic BUG: interval empty, but: rq_state=0x%x, sect=%llu, size=%u\n",
s, (unsigned long long)req->i.sector, req->i.size);
/* if it was a write, we may have to set the corresponding
* bit(s) out-of-sync first. If it had a local part, we need to
* release the reference to the activity log. */
if (s & RQ_WRITE) {
/* Set out-of-sync unless both OK flags are set
* (local only or remote failed).
* Other places where we set out-of-sync:
* READ with local io-error */
/* There is a special case:
* we may notice late that IO was suspended,
* and postpone, or schedule for retry, a write,
* before it even was submitted or sent.
* In that case we do not want to touch the bitmap at all.
*/
struct drbd_peer_device *peer_device = first_peer_device(device);
if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
drbd_set_out_of_sync(peer_device, req->i.sector, req->i.size);
if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
drbd_set_in_sync(peer_device, req->i.sector, req->i.size);
}
/* one might be tempted to move the drbd_al_complete_io
* to the local io completion callback drbd_request_endio.
* but, if this was a mirror write, we may only
* drbd_al_complete_io after this is RQ_NET_DONE,
* otherwise the extent could be dropped from the al
* before it has actually been written on the peer.
* if we crash before our peer knows about the request,
* but after the extent has been dropped from the al,
* we would forget to resync the corresponding extent.
*/
if (s & RQ_IN_ACT_LOG) {
if (get_ldev_if_state(device, D_FAILED)) {
drbd_al_complete_io(device, &req->i);
put_ldev(device);
} else if (drbd_ratelimit()) {
drbd_warn(device, "Should have called drbd_al_complete_io(, %llu, %u), "
"but my Disk seems to have failed :(\n",
(unsigned long long) req->i.sector, req->i.size);
}
}
}
mempool_free(req, &drbd_request_mempool);
}
static void wake_all_senders(struct drbd_connection *connection)
{
wake_up(&connection->sender_work.q_wait);
}
/* must hold resource->req_lock */
void start_new_tl_epoch(struct drbd_connection *connection)
{
/* no point closing an epoch if it is empty, anyway. */
if (connection->current_tle_writes == 0)
return;
connection->current_tle_writes = 0;
atomic_inc(&connection->current_tle_nr);
wake_all_senders(connection);
}
void complete_master_bio(struct drbd_device *device,
struct bio_and_error *m)
{
if (unlikely(m->error))
m->bio->bi_status = errno_to_blk_status(m->error);
bio_endio(m->bio);
dec_ap_bio(device);
}
/* Helper for __req_mod().
* Set m->bio to the master bio, if it is fit to be completed,
* or leave it alone (it is initialized to NULL in __req_mod),
* if it has already been completed, or cannot be completed yet.
* If m->bio is set, the error status to be returned is placed in m->error.
*/
static
void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{
const unsigned s = req->rq_state;
struct drbd_device *device = req->device;
int error, ok;
/* we must not complete the master bio, while it is
* still being processed by _drbd_send_zc_bio (drbd_send_dblock)
* not yet acknowledged by the peer
* not yet completed by the local io subsystem
* these flags may get cleared in any order by
* the worker,
* the receiver,
* the bio_endio completion callbacks.
*/
if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
(s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
(s & RQ_COMPLETION_SUSP)) {
drbd_err(device, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
return;
}
if (!req->master_bio) {
drbd_err(device, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
return;
}
/*
* figure out whether to report success or failure.
*
* report success when at least one of the operations succeeded.
* or, to put it the other way,
* only report failure, when both operations failed.
*
* what to do about the failures is handled elsewhere.
* what we need to do here is just: complete the master_bio.
*
* local completion error, if any, has been stored as ERR_PTR
* in private_bio within drbd_request_endio.
*/
ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
error = PTR_ERR(req->private_bio);
/* Before we can signal completion to the upper layers,
* we may need to close the current transfer log epoch.
* We are within the request lock, so we can simply compare
* the request epoch number with the current transfer log
* epoch number. If they match, increase the current_tle_nr,
* and reset the transfer log epoch write_cnt.
*/
if (op_is_write(bio_op(req->master_bio)) &&
req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
start_new_tl_epoch(first_peer_device(device)->connection);
/* Update disk stats */
bio_end_io_acct(req->master_bio, req->start_jif);
/* If READ failed,
* have it be pushed back to the retry work queue,
* so it will re-enter __drbd_make_request(),
* and be re-assigned to a suitable local or remote path,
* or failed if we do not have access to good data anymore.
*
* Unless it was failed early by __drbd_make_request(),
* because no path was available, in which case
* it was not even added to the transfer_log.
*
* read-ahead may fail, and will not be retried.
*
* WRITE should have used all available paths already.
*/
if (!ok &&
bio_op(req->master_bio) == REQ_OP_READ &&
!(req->master_bio->bi_opf & REQ_RAHEAD) &&
!list_empty(&req->tl_requests))
req->rq_state |= RQ_POSTPONED;
if (!(req->rq_state & RQ_POSTPONED)) {
m->error = ok ? 0 : (error ?: -EIO);
m->bio = req->master_bio;
req->master_bio = NULL;
/* We leave it in the tree, to be able to verify later
* write-acks in protocol != C during resync.
* But we mark it as "complete", so it won't be counted as
* conflict in a multi-primary setup. */
req->i.completed = true;
}
if (req->i.waiting)
wake_up(&device->misc_wait);
/* Either we are about to complete to upper layers,
* or we will restart this request.
* In either case, the request object will be destroyed soon,
* so better remove it from all lists. */
list_del_init(&req->req_pending_master_completion);
}
/* still holds resource->req_lock */
static void drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
{
struct drbd_device *device = req->device;
D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));
if (!put)
return;
if (!atomic_sub_and_test(put, &req->completion_ref))
return;
drbd_req_complete(req, m);
/* local completion may still come in later,
* we need to keep the req object around. */
if (req->rq_state & RQ_LOCAL_ABORTED)
return;
if (req->rq_state & RQ_POSTPONED) {
/* don't destroy the req object just yet,
* but queue it for retry */
drbd_restart_request(req);
return;
}
kref_put(&req->kref, drbd_req_destroy);
}
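/*
 * The connection keeps cached pointers to the oldest request in certain
 * network states (req_next, req_ack_pending, req_not_net_done), so that
 * e.g. request_timer_fn() below does not need to walk the whole transfer
 * log. The set_if_null_* helpers establish such a pointer, the advance_*
 * helpers move it forward once the cached request leaves that state.
 */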
static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
if (!connection)
return;
if (connection->req_next == NULL)
connection->req_next = req;
}
static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
struct drbd_request *iter = req;
if (!connection)
return;
if (connection->req_next != req)
return;
req = NULL;
list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) {
const unsigned int s = iter->rq_state;
if (s & RQ_NET_QUEUED) {
req = iter;
break;
}
}
connection->req_next = req;
}
static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
if (!connection)
return;
if (connection->req_ack_pending == NULL)
connection->req_ack_pending = req;
}
static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
struct drbd_request *iter = req;
if (!connection)
return;
if (connection->req_ack_pending != req)
return;
req = NULL;
list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) {
const unsigned int s = iter->rq_state;
if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING)) {
req = iter;
break;
}
}
connection->req_ack_pending = req;
}
static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
if (!connection)
return;
if (connection->req_not_net_done == NULL)
connection->req_not_net_done = req;
}
static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
struct drbd_request *iter = req;
if (!connection)
return;
if (connection->req_not_net_done != req)
return;
req = NULL;
list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) {
const unsigned int s = iter->rq_state;
if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE)) {
req = iter;
break;
}
}
connection->req_not_net_done = req;
}
/* I'd like this to be the only place that manipulates
* req->completion_ref and req->kref. */
static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
int clear, int set)
{
struct drbd_device *device = req->device;
struct drbd_peer_device *peer_device = first_peer_device(device);
unsigned s = req->rq_state;
int c_put = 0;
if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
set |= RQ_COMPLETION_SUSP;
/* apply */
req->rq_state &= ~clear;
req->rq_state |= set;
/* no change? */
if (req->rq_state == s)
return;
/* intent: get references */
kref_get(&req->kref);
if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
atomic_inc(&req->completion_ref);
if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
inc_ap_pending(device);
atomic_inc(&req->completion_ref);
}
if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED)) {
atomic_inc(&req->completion_ref);
set_if_null_req_next(peer_device, req);
}
if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
kref_get(&req->kref); /* wait for the DONE */
if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT)) {
/* potentially already completed in the ack_receiver thread */
if (!(s & RQ_NET_DONE)) {
atomic_add(req->i.size >> 9, &device->ap_in_flight);
set_if_null_req_not_net_done(peer_device, req);
}
if (req->rq_state & RQ_NET_PENDING)
set_if_null_req_ack_pending(peer_device, req);
}
if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
atomic_inc(&req->completion_ref);
/* progress: put references */
if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP))
++c_put;
if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
++c_put;
}
if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
if (req->rq_state & RQ_LOCAL_ABORTED)
kref_put(&req->kref, drbd_req_destroy);
else
++c_put;
list_del_init(&req->req_pending_local);
}
if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
dec_ap_pending(device);
++c_put;
req->acked_jif = jiffies;
advance_conn_req_ack_pending(peer_device, req);
}
if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED)) {
++c_put;
advance_conn_req_next(peer_device, req);
}
if (!(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
if (s & RQ_NET_SENT)
atomic_sub(req->i.size >> 9, &device->ap_in_flight);
if (s & RQ_EXP_BARR_ACK)
kref_put(&req->kref, drbd_req_destroy);
req->net_done_jif = jiffies;
/* in ahead/behind mode, or just in case,
* before we finally destroy this request,
* the caching pointers must not reference it anymore */
advance_conn_req_next(peer_device, req);
advance_conn_req_ack_pending(peer_device, req);
advance_conn_req_not_net_done(peer_device, req);
}
/* potentially complete and destroy */
/* If we made progress, retry conflicting peer requests, if any. */
if (req->i.waiting)
wake_up(&device->misc_wait);
drbd_req_put_completion_ref(req, m, c_put);
kref_put(&req->kref, drbd_req_destroy);
}
static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
{
if (!drbd_ratelimit())
return;
drbd_warn(device, "local %s IO error sector %llu+%u on %pg\n",
(req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
(unsigned long long)req->i.sector,
req->i.size >> 9,
device->ldev->backing_bdev);
}
/* Helper for HANDED_OVER_TO_NETWORK.
* Is this a protocol A write (neither WRITE_ACK nor RECEIVE_ACK expected)?
* Is it also still "PENDING"?
* --> If so, clear PENDING and set NET_OK below.
* If it is a protocol A write, but not RQ_PENDING anymore, neg-ack was faster
* (and we must not set RQ_NET_OK) */
static inline bool is_pending_write_protocol_A(struct drbd_request *req)
{
return (req->rq_state &
(RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK|RQ_EXP_RECEIVE_ACK))
== (RQ_WRITE|RQ_NET_PENDING);
}
/* obviously this could be coded as many single functions
* instead of one huge switch,
* or by putting the code directly in the respective locations
* (as it has been before).
*
* but having it this way
* enforces that it is all in this one place, where it is easier to audit,
* it makes it obvious that whatever "event" "happens" to a request should
* happen "atomically" within the req_lock,
* and it enforces that we have to think in a very structured manner
* about the "events" that may happen to a request during its life time ...
*
*
* peer_device == NULL means local disk
*/
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
struct drbd_peer_device *peer_device,
struct bio_and_error *m)
{
struct drbd_device *const device = req->device;
struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
struct net_conf *nc;
int p, rv = 0;
if (m)
m->bio = NULL;
switch (what) {
default:
drbd_err(device, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
break;
/* does not happen...
* initialization done in drbd_req_new
case CREATED:
break;
*/
case TO_BE_SENT: /* via network */
/* reached via __drbd_make_request
* and from w_read_retry_remote */
D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
rcu_read_lock();
nc = rcu_dereference(connection->net_conf);
p = nc->wire_protocol;
rcu_read_unlock();
req->rq_state |=
p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
mod_rq_state(req, m, 0, RQ_NET_PENDING);
break;
case TO_BE_SUBMITTED: /* locally */
/* reached via __drbd_make_request */
D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK));
mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
break;
case COMPLETED_OK:
if (req->rq_state & RQ_WRITE)
device->writ_cnt += req->i.size >> 9;
else
device->read_cnt += req->i.size >> 9;
mod_rq_state(req, m, RQ_LOCAL_PENDING,
RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
break;
case ABORT_DISK_IO:
mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED);
break;
case WRITE_COMPLETED_WITH_ERROR:
drbd_report_io_error(device, req);
__drbd_chk_io_error(device, DRBD_WRITE_ERROR);
mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
break;
case READ_COMPLETED_WITH_ERROR:
drbd_set_out_of_sync(peer_device, req->i.sector, req->i.size);
drbd_report_io_error(device, req);
__drbd_chk_io_error(device, DRBD_READ_ERROR);
fallthrough;
case READ_AHEAD_COMPLETED_WITH_ERROR:
/* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */
mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
break;
case DISCARD_COMPLETED_NOTSUPP:
case DISCARD_COMPLETED_WITH_ERROR:
/* I'd rather not detach from local disk just because it
* failed a REQ_OP_DISCARD. */
mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
break;
case QUEUE_FOR_NET_READ:
/* READ, and
* no local disk,
* or target area marked as invalid,
* or just got an io-error. */
/* from __drbd_make_request
* or from bio_endio during read io-error recovery */
/* So we can verify the handle in the answer packet.
* Corresponding drbd_remove_request_interval is in
* drbd_req_complete() */
D_ASSERT(device, drbd_interval_empty(&req->i));
drbd_insert_interval(&device->read_requests, &req->i);
set_bit(UNPLUG_REMOTE, &device->flags);
D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
mod_rq_state(req, m, 0, RQ_NET_QUEUED);
req->w.cb = w_send_read_req;
drbd_queue_work(&connection->sender_work,
&req->w);
break;
case QUEUE_FOR_NET_WRITE:
/* assert something? */
/* from __drbd_make_request only */
/* Corresponding drbd_remove_request_interval is in
* drbd_req_complete() */
D_ASSERT(device, drbd_interval_empty(&req->i));
drbd_insert_interval(&device->write_requests, &req->i);
/* NOTE
* In case the req ended up on the transfer log before being
* queued on the worker, it could lead to this request being
* missed during cleanup after connection loss.
* So we have to do both operations here,
* within the same lock that protects the transfer log.
*
* _req_add_to_epoch(req); this has to be after the
* _maybe_start_new_epoch(req); which happened in
* __drbd_make_request, because we now may set the bit
* again ourselves to close the current epoch.
*
* Add req to the (now) current epoch (barrier). */
/* otherwise we may lose an unplug, which may cause some remote
* io-scheduler timeout to expire, increasing maximum latency,
* hurting performance. */
set_bit(UNPLUG_REMOTE, &device->flags);
/* queue work item to send data */
D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
req->w.cb = w_send_dblock;
drbd_queue_work(&connection->sender_work,
&req->w);
/* close the epoch, in case it outgrew the limit */
rcu_read_lock();
nc = rcu_dereference(connection->net_conf);
p = nc->max_epoch_size;
rcu_read_unlock();
if (connection->current_tle_writes >= p)
start_new_tl_epoch(connection);
break;
case QUEUE_FOR_SEND_OOS:
mod_rq_state(req, m, 0, RQ_NET_QUEUED);
req->w.cb = w_send_out_of_sync;
drbd_queue_work(&connection->sender_work,
&req->w);
break;
case READ_RETRY_REMOTE_CANCELED:
case SEND_CANCELED:
case SEND_FAILED:
/* real cleanup will be done from tl_clear. just update flags
* so it is no longer marked as on the worker queue */
mod_rq_state(req, m, RQ_NET_QUEUED, 0);
break;
case HANDED_OVER_TO_NETWORK:
/* assert something? */
if (is_pending_write_protocol_A(req))
/* this is what is dangerous about protocol A:
* pretend it was successfully written on the peer. */
mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING,
RQ_NET_SENT|RQ_NET_OK);
else
mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
/* It is still not yet RQ_NET_DONE until the
* corresponding epoch barrier got acked as well,
* so we know what to dirty on connection loss. */
break;
case OOS_HANDED_TO_NETWORK:
/* Was not set PENDING, no longer QUEUED, so is now DONE
* as far as this connection is concerned. */
mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE);
break;
case CONNECTION_LOST_WHILE_PENDING:
/* transfer log cleanup after connection loss */
mod_rq_state(req, m,
RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP,
RQ_NET_DONE);
break;
case CONFLICT_RESOLVED:
/* for superseded conflicting writes of multiple primaries,
* there is no need to keep anything in the tl, potential
* node crashes are covered by the activity log.
*
* If this request had been marked as RQ_POSTPONED before,
* it will actually not be completed, but "restarted",
* resubmitted from the retry worker context. */
D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
break;
case WRITE_ACKED_BY_PEER_AND_SIS:
req->rq_state |= RQ_NET_SIS;
fallthrough;
case WRITE_ACKED_BY_PEER:
/* Normal operation protocol C: successfully written on peer.
* During resync, even in protocol != C,
* we requested an explicit write ack anyways.
* Which means we cannot even assert anything here.
* Nothing more to do here.
* We want to keep the tl in place for all protocols, to cater
* for volatile write-back caches on lower level devices. */
goto ack_common;
case RECV_ACKED_BY_PEER:
D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK);
/* protocol B; pretends to be successfully written on peer.
* see also notes above in HANDED_OVER_TO_NETWORK about
* protocol != C */
ack_common:
mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
break;
case POSTPONE_WRITE:
D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
/* If this node has already detected the write conflict, the
* worker will be waiting on misc_wait. Wake it up once this
* request has completed locally.
*/
D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
req->rq_state |= RQ_POSTPONED;
if (req->i.waiting)
wake_up(&device->misc_wait);
/* Do not clear RQ_NET_PENDING. This request will make further
* progress via restart_conflicting_writes() or
* fail_postponed_requests(). Hopefully. */
break;
case NEG_ACKED:
mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0);
break;
case FAIL_FROZEN_DISK_IO:
if (!(req->rq_state & RQ_LOCAL_COMPLETED))
break;
mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
break;
case RESTART_FROZEN_DISK_IO:
if (!(req->rq_state & RQ_LOCAL_COMPLETED))
break;
mod_rq_state(req, m,
RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED,
RQ_LOCAL_PENDING);
rv = MR_READ;
if (bio_data_dir(req->master_bio) == WRITE)
rv = MR_WRITE;
get_ldev(device); /* always succeeds in this call path */
req->w.cb = w_restart_disk_io;
drbd_queue_work(&connection->sender_work,
&req->w);
break;
case RESEND:
/* Simply complete (local only) READs. */
if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
break;
}
/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
before the connection loss (B&C only); only P_BARRIER_ACK
(or the local completion?) was missing when we suspended.
Throwing them out of the TL here by pretending we got a BARRIER_ACK.
During connection handshake, we ensure that the peer was not rebooted. */
if (!(req->rq_state & RQ_NET_OK)) {
/* FIXME could this possibly be a req->dw.cb == w_send_out_of_sync?
* in that case we must not set RQ_NET_PENDING. */
mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
if (req->w.cb) {
/* w.cb expected to be w_send_dblock, or w_send_read_req */
drbd_queue_work(&connection->sender_work,
&req->w);
rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
} /* else: FIXME can this happen? */
break;
}
fallthrough; /* to BARRIER_ACKED */
case BARRIER_ACKED:
/* barrier ack for READ requests does not make sense */
if (!(req->rq_state & RQ_WRITE))
break;
if (req->rq_state & RQ_NET_PENDING) {
/* barrier came in before all requests were acked.
* this is bad, because if the connection is lost now,
* we won't be able to clean them up... */
drbd_err(device, "FIXME (BARRIER_ACKED but pending)\n");
}
/* Allowed to complete requests, even while suspended.
* As this is called for all requests within a matching epoch,
* we need to filter, and only set RQ_NET_DONE for those that
* have actually been on the wire. */
mod_rq_state(req, m, RQ_COMPLETION_SUSP,
(req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0);
break;
case DATA_RECEIVED:
D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
break;
case QUEUE_AS_DRBD_BARRIER:
start_new_tl_epoch(connection);
mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
break;
}
return rv;
}
/* we may do a local read if:
* - we are consistent (of course),
* - or we are generally inconsistent,
* BUT we are still/already IN SYNC for this area.
* since size may be bigger than BM_BLOCK_SIZE,
* we may need to check several bits.
*/
static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size)
{
unsigned long sbnr, ebnr;
sector_t esector, nr_sectors;
if (device->state.disk == D_UP_TO_DATE)
return true;
if (device->state.disk != D_INCONSISTENT)
return false;
esector = sector + (size >> 9) - 1;
nr_sectors = get_capacity(device->vdisk);
D_ASSERT(device, sector < nr_sectors);
D_ASSERT(device, esector < nr_sectors);
sbnr = BM_SECT_TO_BIT(sector);
ebnr = BM_SECT_TO_BIT(esector);
return drbd_bm_count_bits(device, sbnr, ebnr) == 0;
}
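/*
 * Decide whether a READ should be shipped to the peer according to the
 * configured read-balancing policy. For the striping policies, stripe_shift
 * is 15..20 (32K..1M stripes) and (sector >> (stripe_shift - 9)) & 1
 * alternates between local and remote per stripe.
 */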
static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
enum drbd_read_balancing rbm)
{
int stripe_shift;
switch (rbm) {
case RB_CONGESTED_REMOTE:
return false;
case RB_LEAST_PENDING:
return atomic_read(&device->local_cnt) >
atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
case RB_32K_STRIPING: /* stripe_shift = 15 */
case RB_64K_STRIPING:
case RB_128K_STRIPING:
case RB_256K_STRIPING:
case RB_512K_STRIPING:
case RB_1M_STRIPING: /* stripe_shift = 20 */
stripe_shift = (rbm - RB_32K_STRIPING + 15);
return (sector >> (stripe_shift - 9)) & 1;
case RB_ROUND_ROBIN:
return test_and_change_bit(READ_BALANCE_RR, &device->flags);
case RB_PREFER_REMOTE:
return true;
case RB_PREFER_LOCAL:
default:
return false;
}
}
/*
* complete_conflicting_writes - wait for any conflicting write requests
*
* The write_requests tree contains all active write requests which we
* currently know about. Wait for any requests to complete which conflict with
* the new one.
*
* Only way out: remove the conflicting intervals from the tree.
*/
static void complete_conflicting_writes(struct drbd_request *req)
{
DEFINE_WAIT(wait);
struct drbd_device *device = req->device;
struct drbd_interval *i;
sector_t sector = req->i.sector;
int size = req->i.size;
for (;;) {
drbd_for_each_overlap(i, &device->write_requests, sector, size) {
/* Ignore, if already completed to upper layers. */
if (i->completed)
continue;
/* Handle the first found overlap. After the schedule
* we have to restart the tree walk. */
break;
}
if (!i) /* if any */
break;
/* Indicate to wake up device->misc_wait on progress. */
prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
i->waiting = true;
spin_unlock_irq(&device->resource->req_lock);
schedule();
spin_lock_irq(&device->resource->req_lock);
}
finish_wait(&device->misc_wait, &wait);
}
/* called within req_lock */
static void maybe_pull_ahead(struct drbd_device *device)
{
struct drbd_connection *connection = first_peer_device(device)->connection;
struct net_conf *nc;
bool congested = false;
enum drbd_on_congestion on_congestion;
rcu_read_lock();
nc = rcu_dereference(connection->net_conf);
on_congestion = nc ? nc->on_congestion : OC_BLOCK;
rcu_read_unlock();
if (on_congestion == OC_BLOCK ||
connection->agreed_pro_version < 96)
return;
if (on_congestion == OC_PULL_AHEAD && device->state.conn == C_AHEAD)
return; /* nothing to do ... */
/* If I don't even have good local storage, we can not reasonably try
* to pull ahead of the peer. We also need the local reference to make
* sure device->act_log is there.
*/
if (!get_ldev_if_state(device, D_UP_TO_DATE))
return;
if (nc->cong_fill &&
atomic_read(&device->ap_in_flight) >= nc->cong_fill) {
drbd_info(device, "Congestion-fill threshold reached\n");
congested = true;
}
if (device->act_log->used >= nc->cong_extents) {
drbd_info(device, "Congestion-extents threshold reached\n");
congested = true;
}
if (congested) {
/* start a new epoch for non-mirrored writes */
start_new_tl_epoch(first_peer_device(device)->connection);
if (on_congestion == OC_PULL_AHEAD)
_drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL);
else /*nc->on_congestion == OC_DISCONNECT */
_drbd_set_state(_NS(device, conn, C_DISCONNECTING), 0, NULL);
}
put_ldev(device);
}
/* If this returns false, and req->private_bio is still set,
* this should be submitted locally.
*
* If it returns false, but req->private_bio is not set,
* we do not have access to good data :(
*
* Otherwise, this destroys req->private_bio, if any,
* and returns true.
*/
static bool do_remote_read(struct drbd_request *req)
{
struct drbd_device *device = req->device;
enum drbd_read_balancing rbm;
if (req->private_bio) {
if (!drbd_may_do_local_read(device,
req->i.sector, req->i.size)) {
bio_put(req->private_bio);
req->private_bio = NULL;
put_ldev(device);
}
}
if (device->state.pdsk != D_UP_TO_DATE)
return false;
if (req->private_bio == NULL)
return true;
/* TODO: improve read balancing decisions, take into account drbd
* protocol, pending requests etc. */
rcu_read_lock();
rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing;
rcu_read_unlock();
if (rbm == RB_PREFER_LOCAL && req->private_bio)
return false; /* submit locally */
if (remote_due_to_read_balancing(device, req->i.sector, rbm)) {
if (req->private_bio) {
bio_put(req->private_bio);
req->private_bio = NULL;
put_ldev(device);
}
return true;
}
return false;
}
bool drbd_should_do_remote(union drbd_dev_state s)
{
return s.pdsk == D_UP_TO_DATE ||
(s.pdsk >= D_INCONSISTENT &&
s.conn >= C_WF_BITMAP_T &&
s.conn < C_AHEAD);
/* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T.
That is equivalent since before 96 IO was frozen in the C_WF_BITMAP*
states. */
}
static bool drbd_should_send_out_of_sync(union drbd_dev_state s)
{
return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
/* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary
since we enter state C_AHEAD only if proto >= 96 */
}
/* returns number of connections (== 1, for drbd 8.4)
* expected to actually write this data,
* which does NOT include those that we are L_AHEAD for. */
static int drbd_process_write_request(struct drbd_request *req)
{
struct drbd_device *device = req->device;
struct drbd_peer_device *peer_device = first_peer_device(device);
int remote, send_oos;
remote = drbd_should_do_remote(device->state);
send_oos = drbd_should_send_out_of_sync(device->state);
/* Need to replicate writes. Unless it is an empty flush,
* which is better mapped to a DRBD P_BARRIER packet,
* also for drbd wire protocol compatibility reasons.
* If this was a flush, just start a new epoch.
* Unless the current epoch was empty anyways, or we are not currently
* replicating, in which case there is no point. */
if (unlikely(req->i.size == 0)) {
/* The only size==0 bios we expect are empty flushes. */
D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
if (remote)
_req_mod(req, QUEUE_AS_DRBD_BARRIER, peer_device);
return remote;
}
if (!remote && !send_oos)
return 0;
D_ASSERT(device, !(remote && send_oos));
if (remote) {
_req_mod(req, TO_BE_SENT, peer_device);
_req_mod(req, QUEUE_FOR_NET_WRITE, peer_device);
} else if (drbd_set_out_of_sync(peer_device, req->i.sector, req->i.size))
_req_mod(req, QUEUE_FOR_SEND_OOS, peer_device);
return remote;
}
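/*
 * Discards and write-zeroes are not passed down as cloned bios; instead we
 * issue them directly against the local backing device and complete the
 * private bio here, with BLK_STS_IOERR on failure.
 */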
static void drbd_process_discard_or_zeroes_req(struct drbd_request *req, int flags)
{
int err = drbd_issue_discard_or_zero_out(req->device,
req->i.sector, req->i.size >> 9, flags);
if (err)
req->private_bio->bi_status = BLK_STS_IOERR;
bio_endio(req->private_bio);
}
static void
drbd_submit_req_private_bio(struct drbd_request *req)
{
struct drbd_device *device = req->device;
struct bio *bio = req->private_bio;
unsigned int type;
if (bio_op(bio) != REQ_OP_READ)
type = DRBD_FAULT_DT_WR;
else if (bio->bi_opf & REQ_RAHEAD)
type = DRBD_FAULT_DT_RA;
else
type = DRBD_FAULT_DT_RD;
/* State may have changed since we grabbed our reference on the
* ->ldev member. Double check, and short-circuit to endio.
* In case the last activity log transaction failed to get on
* stable storage, and this is a WRITE, we may not even submit
* this bio. */
if (get_ldev(device)) {
if (drbd_insert_fault(device, type))
bio_io_error(bio);
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
drbd_process_discard_or_zeroes_req(req, EE_ZEROOUT |
((bio->bi_opf & REQ_NOUNMAP) ? 0 : EE_TRIM));
else if (bio_op(bio) == REQ_OP_DISCARD)
drbd_process_discard_or_zeroes_req(req, EE_TRIM);
else
submit_bio_noacct(bio);
put_ldev(device);
} else
bio_io_error(bio);
}
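/*
 * Hand a write over to the submitter thread: queue it on submit.writes and
 * on the pending_master_completion list, then kick do_submit(), which takes
 * care of the activity log transaction without blocking the caller.
 */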
static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
{
spin_lock_irq(&device->resource->req_lock);
list_add_tail(&req->tl_requests, &device->submit.writes);
list_add_tail(&req->req_pending_master_completion,
&device->pending_master_completion[1 /* WRITE */]);
spin_unlock_irq(&device->resource->req_lock);
queue_work(device->submit.wq, &device->submit.worker);
/* do_submit() may sleep internally on al_wait, too */
wake_up(&device->al_wait);
}
/* returns the new drbd_request pointer, if the caller is expected to
* drbd_send_and_submit() it (to save latency), or NULL if we queued the
* request on the submitter thread.
* Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
*/
static struct drbd_request *
drbd_request_prepare(struct drbd_device *device, struct bio *bio)
{
const int rw = bio_data_dir(bio);
struct drbd_request *req;
/* allocate outside of all locks; */
req = drbd_req_new(device, bio);
if (!req) {
dec_ap_bio(device);
/* only pass the error to the upper layers.
* if user cannot handle io errors, that's not our business. */
drbd_err(device, "could not kmalloc() req\n");
bio->bi_status = BLK_STS_RESOURCE;
bio_endio(bio);
return ERR_PTR(-ENOMEM);
}
/* Update disk stats */
req->start_jif = bio_start_io_acct(req->master_bio);
if (get_ldev(device)) {
req->private_bio = bio_alloc_clone(device->ldev->backing_bdev,
bio, GFP_NOIO,
&drbd_io_bio_set);
req->private_bio->bi_private = req;
req->private_bio->bi_end_io = drbd_request_endio;
}
/* process discards always from our submitter thread */
if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
bio_op(bio) == REQ_OP_DISCARD)
goto queue_for_submitter_thread;
if (rw == WRITE && req->private_bio && req->i.size
&& !test_bit(AL_SUSPENDED, &device->flags)) {
if (!drbd_al_begin_io_fastpath(device, &req->i))
goto queue_for_submitter_thread;
req->rq_state |= RQ_IN_ACT_LOG;
req->in_actlog_jif = jiffies;
}
return req;
queue_for_submitter_thread:
atomic_inc(&device->ap_actlog_cnt);
drbd_queue_write(device, req);
return NULL;
}
/* Require at least one path to current data.
* We don't want to allow writes on C_STANDALONE D_INCONSISTENT:
* We would not allow to read what was written,
* we would not have bumped the data generation uuids,
* we would cause data divergence for all the wrong reasons.
*
* If we don't see at least one D_UP_TO_DATE, we will fail this request,
* which either returns EIO, or, if OND_SUSPEND_IO is set, suspends IO,
* and queues for retry later.
*/
static bool may_do_writes(struct drbd_device *device)
{
const union drbd_dev_state s = device->state;
return s.disk == D_UP_TO_DATE || s.pdsk == D_UP_TO_DATE;
}
struct drbd_plug_cb {
struct blk_plug_cb cb;
struct drbd_request *most_recent_req;
/* do we need more? */
};
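/*
 * blk_plug callback, run when the block layer unplugs: flag the most
 * recently plugged request with RQ_UNPLUG so the sender follows it with
 * P_UNPLUG_REMOTE, and queue a generic unplug for the device.
 */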
static void drbd_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
struct drbd_plug_cb *plug = container_of(cb, struct drbd_plug_cb, cb);
struct drbd_resource *resource = plug->cb.data;
struct drbd_request *req = plug->most_recent_req;
kfree(cb);
if (!req)
return;
spin_lock_irq(&resource->req_lock);
/* In case the sender did not process it yet, raise the flag to
* have it followed with P_UNPLUG_REMOTE just after. */
req->rq_state |= RQ_UNPLUG;
/* but also queue a generic unplug */
drbd_queue_unplug(req->device);
kref_put(&req->kref, drbd_req_destroy);
spin_unlock_irq(&resource->req_lock);
}
static struct drbd_plug_cb* drbd_check_plugged(struct drbd_resource *resource)
{
/* A lot of text to say
* return (struct drbd_plug_cb*)blk_check_plugged(); */
struct drbd_plug_cb *plug;
struct blk_plug_cb *cb = blk_check_plugged(drbd_unplug, resource, sizeof(*plug));
if (cb)
plug = container_of(cb, struct drbd_plug_cb, cb);
else
plug = NULL;
return plug;
}
static void drbd_update_plug(struct drbd_plug_cb *plug, struct drbd_request *req)
{
struct drbd_request *tmp = plug->most_recent_req;
/* Will be sent to some peer.
* Remember to tag it with UNPLUG_REMOTE on unplug */
kref_get(&req->kref);
plug->most_recent_req = req;
if (tmp)
kref_put(&tmp->kref, drbd_req_destroy);
}
static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
{
struct drbd_resource *resource = device->resource;
struct drbd_peer_device *peer_device = first_peer_device(device);
const int rw = bio_data_dir(req->master_bio);
struct bio_and_error m = { NULL, };
bool no_remote = false;
bool submit_private_bio = false;
spin_lock_irq(&resource->req_lock);
if (rw == WRITE) {
/* This may temporarily give up the req_lock,
* but will re-acquire it before it returns here.
* Needs to be before the check on drbd_suspended() */
complete_conflicting_writes(req);
/* no more giving up req_lock from now on! */
/* check for congestion, and potentially stop sending
* full data updates, but start sending "dirty bits" only. */
maybe_pull_ahead(device);
}
if (drbd_suspended(device)) {
/* push back and retry: */
req->rq_state |= RQ_POSTPONED;
if (req->private_bio) {
bio_put(req->private_bio);
req->private_bio = NULL;
put_ldev(device);
}
goto out;
}
/* We fail READ early, if we can not serve it.
* We must do this before req is registered on any lists.
* Otherwise, drbd_req_complete() will queue failed READ for retry. */
if (rw != WRITE) {
if (!do_remote_read(req) && !req->private_bio)
goto nodata;
}
/* which transfer log epoch does this belong to? */
req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr);
/* no point in adding empty flushes to the transfer log,
* they are mapped to drbd barriers already. */
if (likely(req->i.size!=0)) {
if (rw == WRITE)
first_peer_device(device)->connection->current_tle_writes++;
list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log);
}
if (rw == WRITE) {
if (req->private_bio && !may_do_writes(device)) {
bio_put(req->private_bio);
req->private_bio = NULL;
put_ldev(device);
goto nodata;
}
if (!drbd_process_write_request(req))
no_remote = true;
} else {
/* We either have a private_bio, or we can read from remote.
* Otherwise we had done the goto nodata above. */
if (req->private_bio == NULL) {
_req_mod(req, TO_BE_SENT, peer_device);
_req_mod(req, QUEUE_FOR_NET_READ, peer_device);
} else
no_remote = true;
}
if (no_remote == false) {
struct drbd_plug_cb *plug = drbd_check_plugged(resource);
if (plug)
drbd_update_plug(plug, req);
}
/* If it took the fast path in drbd_request_prepare, add it here.
* The slow path has added it already. */
if (list_empty(&req->req_pending_master_completion))
list_add_tail(&req->req_pending_master_completion,
&device->pending_master_completion[rw == WRITE]);
if (req->private_bio) {
/* needs to be marked within the same spinlock */
req->pre_submit_jif = jiffies;
list_add_tail(&req->req_pending_local,
&device->pending_completion[rw == WRITE]);
_req_mod(req, TO_BE_SUBMITTED, NULL);
/* but we need to give up the spinlock to submit */
submit_private_bio = true;
} else if (no_remote) {
nodata:
if (drbd_ratelimit())
drbd_err(device, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
(unsigned long long)req->i.sector, req->i.size >> 9);
/* A write may have been queued for send_oos, however.
* So we can not simply free it, we must go through drbd_req_put_completion_ref() */
}
out:
drbd_req_put_completion_ref(req, &m, 1);
spin_unlock_irq(&resource->req_lock);
/* Even though above is a kref_put(), this is safe.
* As long as we still need to submit our private bio,
* we hold a completion ref, and the request cannot disappear.
* If however this request did not even have a private bio to submit
* (e.g. remote read), req may already be invalid now.
* That's why we cannot check on req->private_bio. */
if (submit_private_bio)
drbd_submit_req_private_bio(req);
if (m.bio)
complete_master_bio(device, &m);
}
void __drbd_make_request(struct drbd_device *device, struct bio *bio)
{
struct drbd_request *req = drbd_request_prepare(device, bio);
if (IS_ERR_OR_NULL(req))
return;
drbd_send_and_submit(device, req);
}
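/*
 * Submit all incoming requests that do not have to wait for an activity log
 * transaction: either they need no AL reference at all, or their extent is
 * already hot (fastpath). Requests that would block stay on @incoming for
 * the transaction path below.
 */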
static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
{
struct blk_plug plug;
struct drbd_request *req, *tmp;
blk_start_plug(&plug);
list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
const int rw = bio_data_dir(req->master_bio);
if (rw == WRITE /* rw != WRITE should not even end up here! */
&& req->private_bio && req->i.size
&& !test_bit(AL_SUSPENDED, &device->flags)) {
if (!drbd_al_begin_io_fastpath(device, &req->i))
continue;
req->rq_state |= RQ_IN_ACT_LOG;
req->in_actlog_jif = jiffies;
atomic_dec(&device->ap_actlog_cnt);
}
list_del_init(&req->tl_requests);
drbd_send_and_submit(device, req);
}
blk_finish_plug(&plug);
}
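/*
 * Try to reserve activity log slots for the @incoming requests without
 * blocking: on success a request moves to @pending, if its extent is
 * currently locked out (e.g. by resync) it moves to @later, and -ENOBUFS
 * (no free update slots left in this transaction) stops the loop.
 * Returns true if anything was moved to @pending.
 */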
static bool prepare_al_transaction_nonblock(struct drbd_device *device,
struct list_head *incoming,
struct list_head *pending,
struct list_head *later)
{
struct drbd_request *req;
int wake = 0;
int err;
spin_lock_irq(&device->al_lock);
while ((req = list_first_entry_or_null(incoming, struct drbd_request, tl_requests))) {
err = drbd_al_begin_io_nonblock(device, &req->i);
if (err == -ENOBUFS)
break;
if (err == -EBUSY)
wake = 1;
if (err)
list_move_tail(&req->tl_requests, later);
else
list_move_tail(&req->tl_requests, pending);
}
spin_unlock_irq(&device->al_lock);
if (wake)
wake_up(&device->al_wait);
return !list_empty(pending);
}
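/*
 * After the activity log transaction has been committed, mark the now
 * covered requests as RQ_IN_ACT_LOG and send/submit them under one plug.
 */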
static void send_and_submit_pending(struct drbd_device *device, struct list_head *pending)
{
struct blk_plug plug;
struct drbd_request *req;
blk_start_plug(&plug);
while ((req = list_first_entry_or_null(pending, struct drbd_request, tl_requests))) {
req->rq_state |= RQ_IN_ACT_LOG;
req->in_actlog_jif = jiffies;
atomic_dec(&device->ap_actlog_cnt);
list_del_init(&req->tl_requests);
drbd_send_and_submit(device, req);
}
blk_finish_plug(&plug);
}
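/*
 * Submitter work function: pick up writes queued by drbd_queue_write(),
 * submit the fastpath ones directly, and batch the rest into activity log
 * transactions, committing and submitting as progress allows.
 */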
void do_submit(struct work_struct *ws)
{
struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker);
LIST_HEAD(incoming); /* from drbd_make_request() */
LIST_HEAD(pending); /* to be submitted after next AL-transaction commit */
LIST_HEAD(busy); /* blocked by resync requests */
/* grab new incoming requests */
spin_lock_irq(&device->resource->req_lock);
list_splice_tail_init(&device->submit.writes, &incoming);
spin_unlock_irq(&device->resource->req_lock);
for (;;) {
DEFINE_WAIT(wait);
/* move used-to-be-busy back to front of incoming */
list_splice_init(&busy, &incoming);
submit_fast_path(device, &incoming);
if (list_empty(&incoming))
break;
for (;;) {
prepare_to_wait(&device->al_wait, &wait, TASK_UNINTERRUPTIBLE);
list_splice_init(&busy, &incoming);
prepare_al_transaction_nonblock(device, &incoming, &pending, &busy);
if (!list_empty(&pending))
break;
schedule();
/* If all currently "hot" activity log extents are kept busy by
* incoming requests, we still must not totally starve new
* requests to "cold" extents.
* Something left on &incoming means there had not been
* enough update slots available, and the activity log
* has been marked as "starving".
*
* Try again now, without looking for new requests,
* effectively blocking all new requests until we made
* at least _some_ progress with what we currently have.
*/
if (!list_empty(&incoming))
continue;
/* Nothing moved to pending, but nothing left
* on incoming: all moved to busy!
* Grab new and iterate. */
spin_lock_irq(&device->resource->req_lock);
list_splice_tail_init(&device->submit.writes, &incoming);
spin_unlock_irq(&device->resource->req_lock);
}
finish_wait(&device->al_wait, &wait);
/* If the transaction was full, before all incoming requests
* had been processed, skip ahead to commit, and iterate
* without splicing in more incoming requests from upper layers.
*
* Else, if all incoming have been processed,
* they have become either "pending" (to be submitted after
* next transaction commit) or "busy" (blocked by resync).
*
* Maybe more was queued, while we prepared the transaction?
* Try to stuff those into this transaction as well.
* Be strictly non-blocking here,
* we already have something to commit.
*
* Commit if we don't make any more progress.
*/
while (list_empty(&incoming)) {
LIST_HEAD(more_pending);
LIST_HEAD(more_incoming);
bool made_progress;
/* It is ok to look outside the lock,
* it's only an optimization anyways */
if (list_empty(&device->submit.writes))
break;
spin_lock_irq(&device->resource->req_lock);
list_splice_tail_init(&device->submit.writes, &more_incoming);
spin_unlock_irq(&device->resource->req_lock);
if (list_empty(&more_incoming))
break;
made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending, &busy);
list_splice_tail_init(&more_pending, &pending);
list_splice_tail_init(&more_incoming, &incoming);
if (!made_progress)
break;
}
drbd_al_begin_io_commit(device);
send_and_submit_pending(device, &pending);
}
}
void drbd_submit_bio(struct bio *bio)
{
struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;
bio = bio_split_to_limits(bio);
if (!bio)
return;
/*
* what we "blindly" assume:
*/
D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));
inc_ap_bio(device);
__drbd_make_request(device, bio);
}
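/*
 * Helper for request_timer_fn() below: decide whether the peer exceeded the
 * effective network timeout (ko-count * timeout) for @net_req, while
 * ignoring the interval right after a reconnect and the case where we have
 * not even sent the epoch-closing P_BARRIER yet.
 */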
static bool net_timeout_reached(struct drbd_request *net_req,
struct drbd_connection *connection,
unsigned long now, unsigned long ent,
unsigned int ko_count, unsigned int timeout)
{
struct drbd_device *device = net_req->device;
if (!time_after(now, net_req->pre_send_jif + ent))
return false;
if (time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent))
return false;
if (net_req->rq_state & RQ_NET_PENDING) {
drbd_warn(device, "Remote failed to finish a request within %ums > ko-count (%u) * timeout (%u * 0.1s)\n",
jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout);
return true;
}
/* We received an ACK already (or are using protocol A),
* but are waiting for the epoch closing barrier ack.
* Check if we sent the barrier already. We should not blame the peer
* for being unresponsive, if we did not even ask it yet. */
if (net_req->epoch == connection->send.current_epoch_nr) {
drbd_warn(device,
"We did not send a P_BARRIER for %ums > ko-count (%u) * timeout (%u * 0.1s); drbd kernel thread blocked?\n",
jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout);
return false;
}
/* Worst case: we may have been blocked for whatever reason, then
* suddenly are able to send a lot of requests (and epoch separating
* barriers) in quick succession.
* The timestamp of the net_req may be much too old and not correspond
* to the sending time of the relevant unack'ed barrier packet, so
* would trigger a spurious timeout. The latest barrier packet may
* have a timestamp too recent to trigger the timeout, so we could miss
* a timeout. Right now we don't have a place to conveniently store
* these timestamps.
* But in this particular situation, the application requests are still
* completed to upper layers, DRBD should still "feel" responsive.
* No need yet to kill this connection, it may still recover.
* If not, eventually we will have queued enough into the network for
* us to block. From that point of view, the timestamp of the last sent
* barrier packet is relevant enough.
*/
if (time_after(now, connection->send.last_sent_barrier_jif + ent)) {
drbd_warn(device, "Remote failed to answer a P_BARRIER (sent at %lu jif; now=%lu jif) within %ums > ko-count (%u) * timeout (%u * 0.1s)\n",
connection->send.last_sent_barrier_jif, now,
jiffies_to_msecs(now - connection->send.last_sent_barrier_jif), ko_count, timeout);
return true;
}
return false;
}
/* A request is considered timed out, if
* - we have some effective timeout from the configuration,
* with some state restrictions applied,
* - the oldest request is waiting for a response from the network
* resp. the local disk,
* - the oldest request is in fact older than the effective timeout,
* - the connection was established (resp. disk was attached)
* for longer than the timeout already.
* Note that for 32bit jiffies and very stable connections/disks,
* we may have a wrap around, which is caught by
* !time_in_range(now, last_..._jif, last_..._jif + timeout).
*
* Side effect: once per 32bit wrap-around interval, which means every
* ~198 days with 250 HZ, we have a window where the timeout would need
* to expire twice (worst case) to become effective. Good enough.
*/
void request_timer_fn(struct timer_list *t)
{
struct drbd_device *device = from_timer(device, t, request_timer);
struct drbd_connection *connection = first_peer_device(device)->connection;
struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */
struct net_conf *nc;
unsigned long oldest_submit_jif;
unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
unsigned long now;
unsigned int ko_count = 0, timeout = 0;
rcu_read_lock();
nc = rcu_dereference(connection->net_conf);
if (nc && device->state.conn >= C_WF_REPORT_PARAMS) {
ko_count = nc->ko_count;
timeout = nc->timeout;
}
if (get_ldev(device)) { /* implicit state.disk >= D_INCONSISTENT */
dt = rcu_dereference(device->ldev->disk_conf)->disk_timeout * HZ / 10;
put_ldev(device);
}
rcu_read_unlock();
ent = timeout * HZ/10 * ko_count;
et = min_not_zero(dt, ent);
if (!et)
return; /* Recurring timer stopped */
now = jiffies;
nt = now + et;
spin_lock_irq(&device->resource->req_lock);
req_read = list_first_entry_or_null(&device->pending_completion[0], struct drbd_request, req_pending_local);
req_write = list_first_entry_or_null(&device->pending_completion[1], struct drbd_request, req_pending_local);
/* maybe the oldest request waiting for the peer is in fact still
* blocking in tcp sendmsg. That's ok, though, that's handled via the
* socket send timeout, requesting a ping, and bumping ko-count in
* we_should_drop_the_connection().
*/
/* check the oldest request we did successfully sent,
* but which is still waiting for an ACK. */
req_peer = connection->req_ack_pending;
/* if we don't have such a request (e.g. protocol A),
* check the oldest request which is still waiting on its epoch
* closing barrier ack. */
if (!req_peer)
req_peer = connection->req_not_net_done;
/* evaluate the oldest peer request only in one timer! */
if (req_peer && req_peer->device != device)
req_peer = NULL;
/* do we have something to evaluate? */
if (req_peer == NULL && req_write == NULL && req_read == NULL)
goto out;
oldest_submit_jif =
(req_write && req_read)
? ( time_before(req_write->pre_submit_jif, req_read->pre_submit_jif)
? req_write->pre_submit_jif : req_read->pre_submit_jif )
: req_write ? req_write->pre_submit_jif
: req_read ? req_read->pre_submit_jif : now;
if (ent && req_peer && net_timeout_reached(req_peer, connection, now, ent, ko_count, timeout))
_conn_request_state(connection, NS(conn, C_TIMEOUT), CS_VERBOSE | CS_HARD);
if (dt && oldest_submit_jif != now &&
time_after(now, oldest_submit_jif + dt) &&
!time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
drbd_warn(device, "Local backing device failed to meet the disk-timeout\n");
__drbd_chk_io_error(device, DRBD_FORCE_DETACH);
}
/* Reschedule timer for the nearest not already expired timeout.
* Fallback to now + min(effective network timeout, disk timeout). */
ent = (ent && req_peer && time_before(now, req_peer->pre_send_jif + ent))
? req_peer->pre_send_jif + ent : now + et;
dt = (dt && oldest_submit_jif != now && time_before(now, oldest_submit_jif + dt))
? oldest_submit_jif + dt : now + et;
nt = time_before(ent, dt) ? ent : dt;
out:
spin_unlock_irq(&device->resource->req_lock);
mod_timer(&device->request_timer, nt);
}
| linux-master | drivers/block/drbd/drbd_req.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
drbd_actlog.c
This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
Copyright (C) 2003-2008, Philipp Reisner <[email protected]>.
Copyright (C) 2003-2008, Lars Ellenberg <[email protected]>.
*/
#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/drbd.h>
#include <linux/drbd_limits.h>
#include "drbd_int.h"
enum al_transaction_types {
AL_TR_UPDATE = 0,
AL_TR_INITIALIZED = 0xffff
};
/* all fields on disc in big endian */
struct __packed al_transaction_on_disk {
/* don't we all like magic */
__be32 magic;
/* to identify the most recent transaction block
* in the on disk ring buffer */
__be32 tr_number;
/* checksum on the full 4k block, with this field set to 0. */
__be32 crc32c;
/* type of transaction, special transaction types like:
* purge-all, set-all-idle, set-all-active, ... to-be-defined
* see also enum al_transaction_types */
__be16 transaction_type;
/* we currently allow only a few thousand extents,
* so 16bit will be enough for the slot number. */
/* how many updates in this transaction */
__be16 n_updates;
/* maximum slot number, "al-extents" in drbd.conf speak.
* Having this in each transaction should make reconfiguration
* of that parameter easier. */
__be16 context_size;
/* slot number the context starts with */
__be16 context_start_slot_nr;
/* Some reserved bytes. Expected usage is a 64bit counter of
* sectors-written since device creation, and other data generation tag
* supporting usage */
__be32 __reserved[4];
/* --- 36 byte used --- */
/* Reserve space for up to AL_UPDATES_PER_TRANSACTION changes
* in one transaction, then use the remaining bytes in the 4k block for
* context information. A "flexible" number of updates per transaction
* does not help, as we have to account for the case when all update
* slots are used anyway, so it would only complicate code without
* additional benefit.
*/
__be16 update_slot_nr[AL_UPDATES_PER_TRANSACTION];
/* but the extent number is 32bit, which at an extent size of 4 MiB
* allows covering device sizes of up to 2**54 bytes (16 PiB) */
__be32 update_extent_nr[AL_UPDATES_PER_TRANSACTION];
/* --- 420 bytes used (36 + 64*6) --- */
/* 4096 - 420 = 3676 = 919 * 4 */
__be32 context[AL_CONTEXT_PER_TRANSACTION];
};
void *drbd_md_get_buffer(struct drbd_device *device, const char *intent)
{
int r;
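/* Serialize use of device->md_io.page: wait until we win the in_use
* flag via cmpxchg, or give up if the local disk has failed
* (state <= D_FAILED). */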
wait_event(device->misc_wait,
(r = atomic_cmpxchg(&device->md_io.in_use, 0, 1)) == 0 ||
device->state.disk <= D_FAILED);
if (r)
return NULL;
device->md_io.current_use = intent;
device->md_io.start_jif = jiffies;
device->md_io.submit_jif = device->md_io.start_jif - 1;
return page_address(device->md_io.page);
}
void drbd_md_put_buffer(struct drbd_device *device)
{
if (atomic_dec_and_test(&device->md_io.in_use))
wake_up(&device->misc_wait);
}
void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_backing_dev *bdev,
unsigned int *done)
{
long dt;
rcu_read_lock();
dt = rcu_dereference(bdev->disk_conf)->disk_timeout;
rcu_read_unlock();
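/* disk_timeout is configured in units of 0.1 seconds; convert to jiffies */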
dt = dt * HZ / 10;
if (dt == 0)
dt = MAX_SCHEDULE_TIMEOUT;
dt = wait_event_timeout(device->misc_wait,
*done || test_bit(FORCE_DETACH, &device->flags), dt);
if (dt == 0) {
drbd_err(device, "meta-data IO operation timed out\n");
drbd_chk_io_error(device, 1, DRBD_FORCE_DETACH);
}
}
static int _drbd_md_sync_page_io(struct drbd_device *device,
struct drbd_backing_dev *bdev,
sector_t sector, enum req_op op)
{
struct bio *bio;
/* we do all our meta data IO in aligned 4k blocks. */
const int size = 4096;
int err;
blk_opf_t op_flags = 0;
device->md_io.done = 0;
device->md_io.error = -ENODEV;
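/* Meta data writes should reach stable storage: use FUA + PREFLUSH
* unless MD_NO_FUA is set for this device. */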
if ((op == REQ_OP_WRITE) && !test_bit(MD_NO_FUA, &device->flags))
op_flags |= REQ_FUA | REQ_PREFLUSH;
op_flags |= REQ_SYNC;
bio = bio_alloc_bioset(bdev->md_bdev, 1, op | op_flags, GFP_NOIO,
&drbd_md_io_bio_set);
bio->bi_iter.bi_sector = sector;
err = -EIO;
if (bio_add_page(bio, device->md_io.page, size, 0) != size)
goto out;
bio->bi_private = device;
bio->bi_end_io = drbd_md_endio;
if (op != REQ_OP_WRITE && device->state.disk == D_DISKLESS && device->ldev == NULL)
/* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
;
else if (!get_ldev_if_state(device, D_ATTACHING)) {
/* Corresponding put_ldev in drbd_md_endio() */
drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
err = -ENODEV;
goto out;
}
bio_get(bio); /* one bio_put() is in the completion handler */
atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */
device->md_io.submit_jif = jiffies;
if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
bio_io_error(bio);
else
submit_bio(bio);
wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
if (!bio->bi_status)
err = device->md_io.error;
out:
bio_put(bio);
return err;
}
int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev,
sector_t sector, enum req_op op)
{
int err;
D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1);
BUG_ON(!bdev->md_bdev);
dynamic_drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
current->comm, current->pid, __func__,
(unsigned long long)sector, (op == REQ_OP_WRITE) ? "WRITE" : "READ",
(void*)_RET_IP_ );
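/* meta data IO is done in aligned 4k chunks (8 sectors),
* hence the sector + 7 upper bound check */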
if (sector < drbd_md_first_sector(bdev) ||
sector + 7 > drbd_md_last_sector(bdev))
drbd_alert(device, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
current->comm, current->pid, __func__,
(unsigned long long)sector,
(op == REQ_OP_WRITE) ? "WRITE" : "READ");
err = _drbd_md_sync_page_io(device, bdev, sector, op);
if (err) {
drbd_err(device, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
(unsigned long long)sector,
(op == REQ_OP_WRITE) ? "WRITE" : "READ", err);
}
return err;
}
static struct bm_extent *find_active_resync_extent(struct drbd_device *device, unsigned int enr)
{
struct lc_element *tmp;
tmp = lc_find(device->resync, enr/AL_EXT_PER_BM_SECT);
if (unlikely(tmp != NULL)) {
struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
if (test_bit(BME_NO_WRITES, &bm_ext->flags))
return bm_ext;
}
return NULL;
}
static struct lc_element *_al_get(struct drbd_device *device, unsigned int enr, bool nonblock)
{
struct lc_element *al_ext;
struct bm_extent *bm_ext;
int wake;
spin_lock_irq(&device->al_lock);
bm_ext = find_active_resync_extent(device, enr);
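/* Resync is active on this extent and forbids application writes:
* do not activate it; raise BME_PRIORITY so the resync side steps aside,
* and let the caller retry. */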
if (bm_ext) {
wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
spin_unlock_irq(&device->al_lock);
if (wake)
wake_up(&device->al_wait);
return NULL;
}
if (nonblock)
al_ext = lc_try_get(device->act_log, enr);
else
al_ext = lc_get(device->act_log, enr);
spin_unlock_irq(&device->al_lock);
return al_ext;
}
bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i)
{
/* for bios crossing activity log extent boundaries,
* we may need to activate two extents in one go */
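/* sector >> (AL_EXTENT_SHIFT - 9) converts a 512-byte sector number into
* an activity log extent number (with 4 MiB extents, i.e.
* AL_EXTENT_SHIFT == 22, this is sector >> 13). */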
unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
D_ASSERT(device, first <= last);
D_ASSERT(device, atomic_read(&device->local_cnt) > 0);
/* FIXME figure out a fast path for bios crossing AL extent boundaries */
if (first != last)
return false;
return _al_get(device, first, true);
}
bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i)
{
/* for bios crossing activity log extent boundaries,
* we may need to activate two extents in one go */
unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
unsigned enr;
bool need_transaction = false;
D_ASSERT(device, first <= last);
D_ASSERT(device, atomic_read(&device->local_cnt) > 0);
for (enr = first; enr <= last; enr++) {
struct lc_element *al_ext;
wait_event(device->al_wait,
(al_ext = _al_get(device, enr, false)) != NULL);
if (al_ext->lc_number != enr)
need_transaction = true;
}
return need_transaction;
}
#if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)
/* Currently BM_BLOCK_SHIFT, BM_EXT_SHIFT and AL_EXTENT_SHIFT
* are still coupled, or assume too much about their relation.
* Code below will not work if this is violated.
* Will be cleaned up with some followup patch.
*/
# error FIXME
#endif
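/* Map an activity log extent number to the index of the bitmap page that
* holds its bits: one AL extent covers 2^(AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)
* bitmap bits, one page holds 2^(PAGE_SHIFT + 3) bits. */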
static unsigned int al_extent_to_bm_page(unsigned int al_enr)
{
return al_enr >>
/* bit to page */
((PAGE_SHIFT + 3) -
/* al extent number to bit */
(AL_EXTENT_SHIFT - BM_BLOCK_SHIFT));
}
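/* Map the current transaction number to its 512-byte sector in the on-disk
* activity log ring buffer: consecutive transactions are distributed
* round-robin over the al_stripes stripes, advancing by one 4k block
* within a stripe every al_stripes transactions. */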
static sector_t al_tr_number_to_on_disk_sector(struct drbd_device *device)
{
const unsigned int stripes = device->ldev->md.al_stripes;
const unsigned int stripe_size_4kB = device->ldev->md.al_stripe_size_4k;
/* transaction number, modulo on-disk ring buffer wrap around */
unsigned int t = device->al_tr_number % (device->ldev->md.al_size_4k);
/* ... to aligned 4k on disk block */
t = ((t % stripes) * stripe_size_4kB) + t/stripes;
/* ... to 512 byte sector in activity log */
t *= 8;
/* ... plus offset to the on disk position */
return device->ldev->md.md_offset + device->ldev->md.al_offset + t;
}
static int __al_write_transaction(struct drbd_device *device, struct al_transaction_on_disk *buffer)
{
struct lc_element *e;
sector_t sector;
int i, mx;
unsigned extent_nr;
unsigned crc = 0;
int err = 0;
memset(buffer, 0, sizeof(*buffer));
buffer->magic = cpu_to_be32(DRBD_AL_MAGIC);
buffer->tr_number = cpu_to_be32(device->al_tr_number);
i = 0;
drbd_bm_reset_al_hints(device);
/* Even though no one can start to change this list
* once we set the LC_LOCKED -- from drbd_al_begin_io(),
* lc_try_lock_for_transaction() --, someone may still
* be in the process of changing it. */
spin_lock_irq(&device->al_lock);
list_for_each_entry(e, &device->act_log->to_be_changed, list) {
if (i == AL_UPDATES_PER_TRANSACTION) {
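/* More pending changes than fit into one transaction;
* bump i past the limit so the BUG_ON below catches it. */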
i++;
break;
}
buffer->update_slot_nr[i] = cpu_to_be16(e->lc_index);
buffer->update_extent_nr[i] = cpu_to_be32(e->lc_new_number);
if (e->lc_number != LC_FREE)
drbd_bm_mark_for_writeout(device,
al_extent_to_bm_page(e->lc_number));
i++;
}
spin_unlock_irq(&device->al_lock);
BUG_ON(i > AL_UPDATES_PER_TRANSACTION);
buffer->n_updates = cpu_to_be16(i);
for ( ; i < AL_UPDATES_PER_TRANSACTION; i++) {
buffer->update_slot_nr[i] = cpu_to_be16(-1);
buffer->update_extent_nr[i] = cpu_to_be32(LC_FREE);
}
buffer->context_size = cpu_to_be16(device->act_log->nr_elements);
buffer->context_start_slot_nr = cpu_to_be16(device->al_tr_cycle);
mx = min_t(int, AL_CONTEXT_PER_TRANSACTION,
device->act_log->nr_elements - device->al_tr_cycle);
for (i = 0; i < mx; i++) {
unsigned idx = device->al_tr_cycle + i;
extent_nr = lc_element_by_index(device->act_log, idx)->lc_number;
buffer->context[i] = cpu_to_be32(extent_nr);
}
for (; i < AL_CONTEXT_PER_TRANSACTION; i++)
buffer->context[i] = cpu_to_be32(LC_FREE);
device->al_tr_cycle += AL_CONTEXT_PER_TRANSACTION;
if (device->al_tr_cycle >= device->act_log->nr_elements)
device->al_tr_cycle = 0;
sector = al_tr_number_to_on_disk_sector(device);
crc = crc32c(0, buffer, 4096);
buffer->crc32c = cpu_to_be32(crc);
if (drbd_bm_write_hinted(device))
err = -EIO;
else {
bool write_al_updates;
rcu_read_lock();
write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
rcu_read_unlock();
if (write_al_updates) {
if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
err = -EIO;
drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
} else {
device->al_tr_number++;
device->al_writ_cnt++;
}
}
}
return err;
}
static int al_write_transaction(struct drbd_device *device)
{
struct al_transaction_on_disk *buffer;
int err;
if (!get_ldev(device)) {
drbd_err(device, "disk is %s, cannot start al transaction\n",
drbd_disk_str(device->state.disk));
return -EIO;
}
/* The bitmap write may have failed, causing a state change. */
if (device->state.disk < D_INCONSISTENT) {
drbd_err(device,
"disk is %s, cannot write al transaction\n",
drbd_disk_str(device->state.disk));
put_ldev(device);
return -EIO;
}
/* protects md_io_buffer, al_tr_cycle, ... */
buffer = drbd_md_get_buffer(device, __func__);
if (!buffer) {
drbd_err(device, "disk failed while waiting for md_io buffer\n");
put_ldev(device);
return -ENODEV;
}
err = __al_write_transaction(device, buffer);
drbd_md_put_buffer(device);
put_ldev(device);
return err;
}
void drbd_al_begin_io_commit(struct drbd_device *device)
{
bool locked = false;
/* Serialize multiple transactions.
* This uses test_and_set_bit, memory barrier is implicit.
*/
wait_event(device->al_wait,
device->act_log->pending_changes == 0 ||
(locked = lc_try_lock_for_transaction(device->act_log)));
if (locked) {
/* Double check: it may have been committed by someone else,
* while we have been waiting for the lock. */
if (device->act_log->pending_changes) {
bool write_al_updates;
rcu_read_lock();
write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
rcu_read_unlock();
if (write_al_updates)
al_write_transaction(device);
spin_lock_irq(&device->al_lock);
/* FIXME
if (err)
we need an "lc_cancel" here;
*/
lc_committed(device->act_log);
spin_unlock_irq(&device->al_lock);
}
lc_unlock(device->act_log);
wake_up(&device->al_wait);
}
}
/*
* Activate the activity log extents covered by @i,
* writing an activity log transaction if needed.
*/
void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i)
{
if (drbd_al_begin_io_prepare(device, i))
drbd_al_begin_io_commit(device);
}
int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i)
{
struct lru_cache *al = device->act_log;
/* for bios crossing activity log extent boundaries,
* we may need to activate two extents in one go */
unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
unsigned nr_al_extents;
unsigned available_update_slots;
unsigned enr;
D_ASSERT(device, first <= last);
nr_al_extents = 1 + last - first; /* worst case: all touched extents are cold. */
available_update_slots = min(al->nr_elements - al->used,
al->max_pending_changes - al->pending_changes);
/* We want all necessary updates for a given request within the same transaction.
* We could first check how many updates are *actually* needed,
* and use that instead of the worst-case nr_al_extents. */
if (available_update_slots < nr_al_extents) {
/* Too many activity log extents are currently "hot".
*
* If we have accumulated pending changes already,
* we made progress.
*
* If we cannot get even a single pending change through,
* stop the fast path until we made some progress,
* or requests to "cold" extents could be starved. */
if (!al->pending_changes)
__set_bit(__LC_STARVING, &device->act_log->flags);
return -ENOBUFS;
}
/* Is resync active in this area? */
for (enr = first; enr <= last; enr++) {
struct lc_element *tmp;
tmp = lc_find(device->resync, enr/AL_EXT_PER_BM_SECT);
if (unlikely(tmp != NULL)) {
struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
if (!test_and_set_bit(BME_PRIORITY, &bm_ext->flags))
return -EBUSY;
return -EWOULDBLOCK;
}
}
}
/* Check out the refcounts.
* Given that we checked for available elements and update slots above,
* this has to be successful. */
for (enr = first; enr <= last; enr++) {
struct lc_element *al_ext;
al_ext = lc_get_cumulative(device->act_log, enr);
if (!al_ext)
drbd_info(device, "LOGIC BUG for enr=%u\n", enr);
}
return 0;
}
void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i)
{
/* for bios crossing activity log extent boundaries,
* we may need to activate two extents in one go */
unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
unsigned enr;
struct lc_element *extent;
unsigned long flags;
D_ASSERT(device, first <= last);
spin_lock_irqsave(&device->al_lock, flags);
for (enr = first; enr <= last; enr++) {
extent = lc_find(device->act_log, enr);
if (!extent) {
drbd_err(device, "al_complete_io() called on inactive extent %u\n", enr);
continue;
}
lc_put(device->act_log, extent);
}
spin_unlock_irqrestore(&device->al_lock, flags);
wake_up(&device->al_wait);
}
static int _try_lc_del(struct drbd_device *device, struct lc_element *al_ext)
{
int rv;
spin_lock_irq(&device->al_lock);
rv = (al_ext->refcnt == 0);
if (likely(rv))
lc_del(device->act_log, al_ext);
spin_unlock_irq(&device->al_lock);
return rv;
}
/**
* drbd_al_shrink() - Removes all active extents from the activity log
* @device: DRBD device.
*
* Removes all active extents from the activity log, waiting until
* the reference count of each entry has dropped to 0 first, of course.
*
* You need to lock device->act_log with lc_try_lock() / lc_unlock()
*/
void drbd_al_shrink(struct drbd_device *device)
{
struct lc_element *al_ext;
int i;
D_ASSERT(device, test_bit(__LC_LOCKED, &device->act_log->flags));
for (i = 0; i < device->act_log->nr_elements; i++) {
al_ext = lc_element_by_index(device->act_log, i);
if (al_ext->lc_number == LC_FREE)
continue;
wait_event(device->al_wait, _try_lc_del(device, al_ext));
}
wake_up(&device->al_wait);
}
int drbd_al_initialize(struct drbd_device *device, void *buffer)
{
struct al_transaction_on_disk *al = buffer;
struct drbd_md *md = &device->ldev->md;
int al_size_4k = md->al_stripes * md->al_stripe_size_4k;
int i;
__al_write_transaction(device, al);
/* There may or may not have been a pending transaction. */
spin_lock_irq(&device->al_lock);
lc_committed(device->act_log);
spin_unlock_irq(&device->al_lock);
/* The rest of the transactions will have an empty "updates" list, and
* are written out only to provide the context, and to initialize the
* on-disk ring buffer. */
for (i = 1; i < al_size_4k; i++) {
int err = __al_write_transaction(device, al);
if (err)
return err;
}
return 0;
}
static const char *drbd_change_sync_fname[] = {
[RECORD_RS_FAILED] = "drbd_rs_failed_io",
[SET_IN_SYNC] = "drbd_set_in_sync",
[SET_OUT_OF_SYNC] = "drbd_set_out_of_sync"
};
/* ATTENTION. The AL's extents are 4MB each, while the extents in the
* resync LRU-cache are 16MB each.
* The caller of this function has to hold a get_ldev() reference.
*
* Adjusts the caching members ->rs_left (success) or ->rs_failed (!success),
* potentially pulling in (and recounting the corresponding bits)
* this resync extent into the resync extent lru cache.
*
* Returns whether all bits have been cleared for this resync extent,
* precisely: (rs_left <= rs_failed)
*
* TODO will be obsoleted once we have a caching lru of the on disk bitmap
*/
static bool update_rs_extent(struct drbd_device *device,
unsigned int enr, int count,
enum update_sync_bits_mode mode)
{
struct lc_element *e;
D_ASSERT(device, atomic_read(&device->local_cnt));
/* When setting out-of-sync bits,
* we don't need it cached (lc_find).
* But if it is present in the cache,
* we should update the cached bit count.
* Otherwise, that extent should be in the resync extent lru cache
* already -- or we want to pull it in if necessary -- (lc_get),
* then update and check rs_left and rs_failed. */
if (mode == SET_OUT_OF_SYNC)
e = lc_find(device->resync, enr);
else
e = lc_get(device->resync, enr);
if (e) {
struct bm_extent *ext = lc_entry(e, struct bm_extent, lce);
if (ext->lce.lc_number == enr) {
if (mode == SET_IN_SYNC)
ext->rs_left -= count;
else if (mode == SET_OUT_OF_SYNC)
ext->rs_left += count;
else
ext->rs_failed += count;
if (ext->rs_left < ext->rs_failed) {
drbd_warn(device, "BAD! enr=%u rs_left=%d "
"rs_failed=%d count=%d cstate=%s\n",
ext->lce.lc_number, ext->rs_left,
ext->rs_failed, count,
drbd_conn_str(device->state.conn));
/* We don't expect to be able to clear more bits
* than have been set when we originally counted
* the set bits to cache that value in ext->rs_left.
* Whatever the reason (disconnect during resync,
* delayed local completion of an application write),
* try to fix it up by recounting here. */
ext->rs_left = drbd_bm_e_weight(device, enr);
}
} else {
/* Normally this element should be in the cache,
* since drbd_rs_begin_io() pulled it already in.
*
* But maybe an application write finished, and we set
* something outside the resync lru_cache in sync.
*/
int rs_left = drbd_bm_e_weight(device, enr);
if (ext->flags != 0) {
drbd_warn(device, "changing resync lce: %d[%u;%02lx]"
" -> %d[%u;00]\n",
ext->lce.lc_number, ext->rs_left,
ext->flags, enr, rs_left);
ext->flags = 0;
}
if (ext->rs_failed) {
drbd_warn(device, "Kicking resync_lru element enr=%u "
"out with rs_failed=%d\n",
ext->lce.lc_number, ext->rs_failed);
}
ext->rs_left = rs_left;
ext->rs_failed = (mode == RECORD_RS_FAILED) ? count : 0;
/* we don't keep a persistent log of the resync lru,
* we can commit any change right away. */
lc_committed(device->resync);
}
if (mode != SET_OUT_OF_SYNC)
lc_put(device->resync, &ext->lce);
/* no race, we are within the al_lock! */
if (ext->rs_left <= ext->rs_failed) {
ext->rs_failed = 0;
return true;
}
} else if (mode != SET_OUT_OF_SYNC) {
/* be quiet if lc_find() did not find it. */
drbd_err(device, "lc_get() failed! locked=%d/%d flags=%lu\n",
device->resync_locked,
device->resync->nr_elements,
device->resync->flags);
}
return false;
}
void drbd_advance_rs_marks(struct drbd_peer_device *peer_device, unsigned long still_to_go)
{
struct drbd_device *device = peer_device->device;
unsigned long now = jiffies;
unsigned long last = device->rs_mark_time[device->rs_last_mark];
int next = (device->rs_last_mark + 1) % DRBD_SYNC_MARKS;
if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
if (device->rs_mark_left[device->rs_last_mark] != still_to_go &&
device->state.conn != C_PAUSED_SYNC_T &&
device->state.conn != C_PAUSED_SYNC_S) {
device->rs_mark_time[next] = now;
device->rs_mark_left[next] = still_to_go;
device->rs_last_mark = next;
}
}
}
/* This is called a lazy update, so don't do write-out too often. */
static bool lazy_bitmap_update_due(struct drbd_device *device)
{
return time_after(jiffies, device->rs_last_bcast + 2*HZ);
}
static void maybe_schedule_on_disk_bitmap_update(struct drbd_device *device, bool rs_done)
{
if (rs_done) {
struct drbd_connection *connection = first_peer_device(device)->connection;
if (connection->agreed_pro_version <= 95 ||
is_sync_target_state(device->state.conn))
set_bit(RS_DONE, &device->flags);
/* and also set RS_PROGRESS below */
/* Else: rather wait for explicit notification via receive_state,
* to avoid uuids-rotated-too-fast causing full resync
* in next handshake, in case the replication link breaks
* at the most unfortunate time... */
} else if (!lazy_bitmap_update_due(device))
return;
drbd_device_post_work(device, RS_PROGRESS);
}
static int update_sync_bits(struct drbd_device *device,
unsigned long sbnr, unsigned long ebnr,
enum update_sync_bits_mode mode)
{
/*
* We keep a count of set bits per resync-extent in the ->rs_left
* caching member, so we need to loop and work within the resync extent
* alignment. Typically this loop will execute exactly once.
*/
unsigned long flags;
unsigned long count = 0;
unsigned int cleared = 0;
while (sbnr <= ebnr) {
/* set temporary boundary bit number to last bit number within
* the resync extent of the current start bit number,
* but cap at provided end bit number */
unsigned long tbnr = min(ebnr, sbnr | BM_BLOCKS_PER_BM_EXT_MASK);
unsigned long c;
if (mode == RECORD_RS_FAILED)
/* Only called from drbd_rs_failed_io(), bits
* supposedly still set. Recount, maybe some
* of the bits have been successfully cleared
* by application IO meanwhile.
*/
c = drbd_bm_count_bits(device, sbnr, tbnr);
else if (mode == SET_IN_SYNC)
c = drbd_bm_clear_bits(device, sbnr, tbnr);
else /* if (mode == SET_OUT_OF_SYNC) */
c = drbd_bm_set_bits(device, sbnr, tbnr);
if (c) {
spin_lock_irqsave(&device->al_lock, flags);
cleared += update_rs_extent(device, BM_BIT_TO_EXT(sbnr), c, mode);
spin_unlock_irqrestore(&device->al_lock, flags);
count += c;
}
sbnr = tbnr + 1;
}
if (count) {
if (mode == SET_IN_SYNC) {
unsigned long still_to_go = drbd_bm_total_weight(device);
bool rs_is_done = (still_to_go <= device->rs_failed);
drbd_advance_rs_marks(first_peer_device(device), still_to_go);
if (cleared || rs_is_done)
maybe_schedule_on_disk_bitmap_update(device, rs_is_done);
} else if (mode == RECORD_RS_FAILED)
device->rs_failed += count;
wake_up(&device->al_wait);
}
return count;
}
static bool plausible_request_size(int size)
{
return size > 0
&& size <= DRBD_MAX_BATCH_BIO_SIZE
&& IS_ALIGNED(size, 512);
}
/* Clear the bits corresponding to the piece of storage in question:
* size bytes of data starting from sector. Only clear bits of the affected
* one or more _aligned_ BM_BLOCK_SIZE blocks.
*
* Called by the worker on C_SYNC_TARGET and by the receiver on SyncSource.
*/
int __drbd_change_sync(struct drbd_peer_device *peer_device, sector_t sector, int size,
enum update_sync_bits_mode mode)
{
/* Is called from worker and receiver context _only_ */
struct drbd_device *device = peer_device->device;
unsigned long sbnr, ebnr, lbnr;
unsigned long count = 0;
sector_t esector, nr_sectors;
/* This would be an empty REQ_PREFLUSH, be silent. */
if ((mode == SET_OUT_OF_SYNC) && size == 0)
return 0;
if (!plausible_request_size(size)) {
drbd_err(device, "%s: sector=%llus size=%d nonsense!\n",
drbd_change_sync_fname[mode],
(unsigned long long)sector, size);
return 0;
}
if (!get_ldev(device))
return 0; /* no disk, no metadata, no bitmap to manipulate bits in */
nr_sectors = get_capacity(device->vdisk);
esector = sector + (size >> 9) - 1;
if (!expect(device, sector < nr_sectors))
goto out;
if (!expect(device, esector < nr_sectors))
esector = nr_sectors - 1;
lbnr = BM_SECT_TO_BIT(nr_sectors-1);
if (mode == SET_IN_SYNC) {
/* Round up start sector, round down end sector. We make sure
* we only clear full, aligned, BM_BLOCK_SIZE blocks. */
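/* A partially covered BM_BLOCK_SIZE block may still contain
* out-of-sync data, so its bit must stay set. */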
if (unlikely(esector < BM_SECT_PER_BIT-1))
goto out;
if (unlikely(esector == (nr_sectors-1)))
ebnr = lbnr;
else
ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);
} else {
/* We set it out of sync, or record resync failure.
* Should not round anything here. */
sbnr = BM_SECT_TO_BIT(sector);
ebnr = BM_SECT_TO_BIT(esector);
}
count = update_sync_bits(device, sbnr, ebnr, mode);
out:
put_ldev(device);
return count;
}
static
struct bm_extent *_bme_get(struct drbd_device *device, unsigned int enr)
{
struct lc_element *e;
struct bm_extent *bm_ext;
int wakeup = 0;
unsigned long rs_flags;
spin_lock_irq(&device->al_lock);
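/* Do not let resync lock more than half of the resync LRU elements;
* the caller waits on al_wait and retries. */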
if (device->resync_locked > device->resync->nr_elements/2) {
spin_unlock_irq(&device->al_lock);
return NULL;
}
e = lc_get(device->resync, enr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
if (bm_ext) {
if (bm_ext->lce.lc_number != enr) {
bm_ext->rs_left = drbd_bm_e_weight(device, enr);
bm_ext->rs_failed = 0;
lc_committed(device->resync);
wakeup = 1;
}
if (bm_ext->lce.refcnt == 1)
device->resync_locked++;
set_bit(BME_NO_WRITES, &bm_ext->flags);
}
rs_flags = device->resync->flags;
spin_unlock_irq(&device->al_lock);
if (wakeup)
wake_up(&device->al_wait);
if (!bm_ext) {
if (rs_flags & LC_STARVING)
drbd_warn(device, "Have to wait for element"
" (resync LRU too small?)\n");
BUG_ON(rs_flags & LC_LOCKED);
}
return bm_ext;
}
static int _is_in_al(struct drbd_device *device, unsigned int enr)
{
int rv;
spin_lock_irq(&device->al_lock);
rv = lc_is_used(device->act_log, enr);
spin_unlock_irq(&device->al_lock);
return rv;
}
/**
* drbd_rs_begin_io() - Gets an extent in the resync LRU cache and sets it to BME_LOCKED
* @device: DRBD device.
* @sector: The sector number.
*
* This function sleeps on al_wait. Returns 0 on success, -EINTR if interrupted.
*/
int drbd_rs_begin_io(struct drbd_device *device, sector_t sector)
{
unsigned int enr = BM_SECT_TO_EXT(sector);
struct bm_extent *bm_ext;
int i, sig;
bool sa;
retry:
sig = wait_event_interruptible(device->al_wait,
(bm_ext = _bme_get(device, enr)));
if (sig)
return -EINTR;
if (test_bit(BME_LOCKED, &bm_ext->flags))
return 0;
/* step aside only while we are above c-min-rate; unless disabled. */
sa = drbd_rs_c_min_rate_throttle(device);
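/* One resync extent covers AL_EXT_PER_BM_SECT activity log extents;
* wait until none of them is active with application IO,
* or step aside if application IO signalled BME_PRIORITY. */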
for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
sig = wait_event_interruptible(device->al_wait,
!_is_in_al(device, enr * AL_EXT_PER_BM_SECT + i) ||
(sa && test_bit(BME_PRIORITY, &bm_ext->flags)));
if (sig || (sa && test_bit(BME_PRIORITY, &bm_ext->flags))) {
spin_lock_irq(&device->al_lock);
if (lc_put(device->resync, &bm_ext->lce) == 0) {
bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */
device->resync_locked--;
wake_up(&device->al_wait);
}
spin_unlock_irq(&device->al_lock);
if (sig)
return -EINTR;
if (schedule_timeout_interruptible(HZ/10))
return -EINTR;
goto retry;
}
}
set_bit(BME_LOCKED, &bm_ext->flags);
return 0;
}
/**
* drbd_try_rs_begin_io() - Gets an extent in the resync LRU cache, does not sleep
* @device: DRBD device.
* @sector: The sector number.
*
* Gets an extent in the resync LRU cache, sets it to BME_NO_WRITES, then
* tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
* if there is still application IO going on in this area.
*/
int drbd_try_rs_begin_io(struct drbd_peer_device *peer_device, sector_t sector)
{
struct drbd_device *device = peer_device->device;
unsigned int enr = BM_SECT_TO_EXT(sector);
const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT;
struct lc_element *e;
struct bm_extent *bm_ext;
int i;
bool throttle = drbd_rs_should_slow_down(peer_device, sector, true);
/* If we need to throttle, a half-locked (only marked BME_NO_WRITES,
* not yet BME_LOCKED) extent needs to be kicked out explicitly.
* There is at most one such half-locked extent,
* which is remembered in resync_wenr. */
if (throttle && device->resync_wenr != enr)
return -EAGAIN;
spin_lock_irq(&device->al_lock);
if (device->resync_wenr != LC_FREE && device->resync_wenr != enr) {
/* in case you have very heavily scattered io, it may
* stall the syncer indefinitely if we give up the ref count
* when we try again and requeue.
*
* if we don't give up the refcount, but the next time
* we are scheduled this extent has been "synced" by new
* application writes, we'd miss the lc_put on the
* extent we keep the refcount on.
* so we remembered which extent we had to try again, and
* if the next requested one is something else, we do
* the lc_put here...
* we also have to wake_up
*/
e = lc_find(device->resync, device->resync_wenr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
if (bm_ext) {
D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
clear_bit(BME_NO_WRITES, &bm_ext->flags);
device->resync_wenr = LC_FREE;
if (lc_put(device->resync, &bm_ext->lce) == 0) {
bm_ext->flags = 0;
device->resync_locked--;
}
wake_up(&device->al_wait);
} else {
drbd_alert(device, "LOGIC BUG\n");
}
}
/* TRY. */
e = lc_try_get(device->resync, enr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
if (bm_ext) {
if (test_bit(BME_LOCKED, &bm_ext->flags))
goto proceed;
if (!test_and_set_bit(BME_NO_WRITES, &bm_ext->flags)) {
device->resync_locked++;
} else {
/* we did set the BME_NO_WRITES,
* but then could not set BME_LOCKED,
* so we tried again.
* drop the extra reference. */
bm_ext->lce.refcnt--;
D_ASSERT(device, bm_ext->lce.refcnt > 0);
}
goto check_al;
} else {
/* do we rather want to try later? */
if (device->resync_locked > device->resync->nr_elements-3)
goto try_again;
/* Do or do not. There is no try. -- Yoda */
e = lc_get(device->resync, enr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
if (!bm_ext) {
const unsigned long rs_flags = device->resync->flags;
if (rs_flags & LC_STARVING)
drbd_warn(device, "Have to wait for element"
" (resync LRU too small?)\n");
BUG_ON(rs_flags & LC_LOCKED);
goto try_again;
}
if (bm_ext->lce.lc_number != enr) {
bm_ext->rs_left = drbd_bm_e_weight(device, enr);
bm_ext->rs_failed = 0;
lc_committed(device->resync);
wake_up(&device->al_wait);
D_ASSERT(device, test_bit(BME_LOCKED, &bm_ext->flags) == 0);
}
set_bit(BME_NO_WRITES, &bm_ext->flags);
D_ASSERT(device, bm_ext->lce.refcnt == 1);
device->resync_locked++;
goto check_al;
}
check_al:
for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
if (lc_is_used(device->act_log, al_enr+i))
goto try_again;
}
set_bit(BME_LOCKED, &bm_ext->flags);
proceed:
device->resync_wenr = LC_FREE;
spin_unlock_irq(&device->al_lock);
return 0;
try_again:
if (bm_ext) {
if (throttle) {
D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
clear_bit(BME_NO_WRITES, &bm_ext->flags);
device->resync_wenr = LC_FREE;
if (lc_put(device->resync, &bm_ext->lce) == 0) {
bm_ext->flags = 0;
device->resync_locked--;
}
wake_up(&device->al_wait);
} else
device->resync_wenr = enr;
}
spin_unlock_irq(&device->al_lock);
return -EAGAIN;
}
void drbd_rs_complete_io(struct drbd_device *device, sector_t sector)
{
unsigned int enr = BM_SECT_TO_EXT(sector);
struct lc_element *e;
struct bm_extent *bm_ext;
unsigned long flags;
spin_lock_irqsave(&device->al_lock, flags);
e = lc_find(device->resync, enr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
if (!bm_ext) {
spin_unlock_irqrestore(&device->al_lock, flags);
if (drbd_ratelimit())
drbd_err(device, "drbd_rs_complete_io() called, but extent not found\n");
return;
}
if (bm_ext->lce.refcnt == 0) {
spin_unlock_irqrestore(&device->al_lock, flags);
drbd_err(device, "drbd_rs_complete_io(,%llu [=%u]) called, "
"but refcnt is 0!?\n",
(unsigned long long)sector, enr);
return;
}
if (lc_put(device->resync, &bm_ext->lce) == 0) {
bm_ext->flags = 0; /* clear BME_LOCKED, BME_NO_WRITES and BME_PRIORITY */
device->resync_locked--;
wake_up(&device->al_wait);
}
spin_unlock_irqrestore(&device->al_lock, flags);
}
/**
* drbd_rs_cancel_all() - Removes all extents from the resync LRU (even BME_LOCKED)
* @device: DRBD device.
*/
void drbd_rs_cancel_all(struct drbd_device *device)
{
spin_lock_irq(&device->al_lock);
if (get_ldev_if_state(device, D_FAILED)) { /* Makes sure ->resync is there. */
lc_reset(device->resync);
put_ldev(device);
}
device->resync_locked = 0;
device->resync_wenr = LC_FREE;
spin_unlock_irq(&device->al_lock);
wake_up(&device->al_wait);
}
/**
* drbd_rs_del_all() - Gracefully remove all extents from the resync LRU
* @device: DRBD device.
*
* Returns 0 upon success, -EAGAIN if at least one reference count was
* not zero.
*/
int drbd_rs_del_all(struct drbd_device *device)
{
struct lc_element *e;
struct bm_extent *bm_ext;
int i;
spin_lock_irq(&device->al_lock);
if (get_ldev_if_state(device, D_FAILED)) {
/* ok, ->resync is there. */
for (i = 0; i < device->resync->nr_elements; i++) {
e = lc_element_by_index(device->resync, i);
bm_ext = lc_entry(e, struct bm_extent, lce);
if (bm_ext->lce.lc_number == LC_FREE)
continue;
if (bm_ext->lce.lc_number == device->resync_wenr) {
drbd_info(device, "dropping %u in drbd_rs_del_all, apparently"
" got 'synced' by application io\n",
device->resync_wenr);
D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
clear_bit(BME_NO_WRITES, &bm_ext->flags);
device->resync_wenr = LC_FREE;
lc_put(device->resync, &bm_ext->lce);
}
if (bm_ext->lce.refcnt != 0) {
drbd_info(device, "Retrying drbd_rs_del_all() later. "
"refcnt=%d\n", bm_ext->lce.refcnt);
put_ldev(device);
spin_unlock_irq(&device->al_lock);
return -EAGAIN;
}
D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
D_ASSERT(device, !test_bit(BME_NO_WRITES, &bm_ext->flags));
lc_del(device->resync, &bm_ext->lce);
}
D_ASSERT(device, device->resync->used == 0);
put_ldev(device);
}
spin_unlock_irq(&device->al_lock);
wake_up(&device->al_wait);
return 0;
}
| linux-master | drivers/block/drbd/drbd_actlog.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <net/netlink.h>
#include <linux/drbd_genl_api.h>
#include "drbd_nla.h"
static int drbd_nla_check_mandatory(int maxtype, struct nlattr *nla)
{
struct nlattr *head = nla_data(nla);
int len = nla_len(nla);
int rem;
/*
* validate_nla (called from nla_parse_nested) ignores attributes
* beyond maxtype, and does not understand the DRBD_GENLA_F_MANDATORY flag.
* In order to have it validate attributes with the DRBD_GENLA_F_MANDATORY
* flag set also, check and remove that flag before calling
* nla_parse_nested.
*/
nla_for_each_attr(nla, head, len, rem) {
if (nla->nla_type & DRBD_GENLA_F_MANDATORY) {
nla->nla_type &= ~DRBD_GENLA_F_MANDATORY;
if (nla_type(nla) > maxtype)
return -EOPNOTSUPP;
}
}
return 0;
}
int drbd_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla,
const struct nla_policy *policy)
{
int err;
err = drbd_nla_check_mandatory(maxtype, nla);
if (!err)
err = nla_parse_nested_deprecated(tb, maxtype, nla, policy,
NULL);
return err;
}
struct nlattr *drbd_nla_find_nested(int maxtype, struct nlattr *nla, int attrtype)
{
int err;
/*
* If any nested attribute has the DRBD_GENLA_F_MANDATORY flag set and
* we don't know about that attribute, reject all the nested
* attributes.
*/
err = drbd_nla_check_mandatory(maxtype, nla);
if (err)
return ERR_PTR(err);
return nla_find_nested(nla, attrtype);
}
| linux-master | drivers/block/drbd/drbd_nla.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/drbd_config.h>
#include <linux/module.h>
const char *drbd_buildtag(void)
{
/* DRBD built from external sources carries a reference to the
* git hash of the source code here.
*/
static char buildtag[38] = "\0uilt-in";
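/* buildtag[0] == 0 means "not yet initialized": for a module it is
* overwritten with the srcversion string; when built in, setting the
* first byte to 'b' turns the string into "built-in". */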
if (buildtag[0] == 0) {
#ifdef MODULE
sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
#else
buildtag[0] = 'b';
#endif
}
return buildtag;
}
| linux-master | drivers/block/drbd/drbd_buildtag.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for the Micron P320 SSD
* Copyright (C) 2011 Micron Technology, Inc.
*
* Portions of this code were derived from works subjected to the
* following copyright:
* Copyright (C) 2009 Integrated Device Technology, Inc.
*/
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ata.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <../drivers/ata/ahci.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/prefetch.h>
#include <linux/numa.h>
#include "mtip32xx.h"
#define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
/* DMA region containing RX Fis, Identify, RLE10, and SMART buffers */
#define AHCI_RX_FIS_SZ 0x100
#define AHCI_RX_FIS_OFFSET 0x0
#define AHCI_IDFY_SZ ATA_SECT_SIZE
#define AHCI_IDFY_OFFSET 0x400
#define AHCI_SECTBUF_SZ ATA_SECT_SIZE
#define AHCI_SECTBUF_OFFSET 0x800
#define AHCI_SMARTBUF_SZ ATA_SECT_SIZE
#define AHCI_SMARTBUF_OFFSET 0xC00
/* 0x100 + 0x200 + 0x200 + 0x200 is smaller than 4k but we pad it out */
#define BLOCK_DMA_ALLOC_SZ 4096
/* DMA region containing command table (should be 8192 bytes) */
#define AHCI_CMD_SLOT_SZ sizeof(struct mtip_cmd_hdr)
#define AHCI_CMD_TBL_SZ (MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
#define AHCI_CMD_TBL_OFFSET 0x0
/* DMA region per command (contains header and SGL) */
#define AHCI_CMD_TBL_HDR_SZ 0x80
#define AHCI_CMD_TBL_HDR_OFFSET 0x0
#define AHCI_CMD_TBL_SGL_SZ (MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
#define AHCI_CMD_TBL_SGL_OFFSET AHCI_CMD_TBL_HDR_SZ
#define CMD_DMA_ALLOC_SZ (AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)
#define HOST_CAP_NZDMA (1 << 19)
#define HOST_HSORG 0xFC
#define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
#define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
#define HSORG_HWREV 0xFF00
#define HSORG_STYLE 0x8
#define HSORG_SLOTGROUPS 0x7
#define PORT_COMMAND_ISSUE 0x38
#define PORT_SDBV 0x7C
#define PORT_OFFSET 0x100
#define PORT_MEM_SIZE 0x80
#define PORT_IRQ_ERR \
(PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
PORT_IRQ_OVERFLOW)
#define PORT_IRQ_LEGACY \
(PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
#define PORT_IRQ_HANDLED \
(PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
#define DEF_PORT_IRQ \
(PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)
/* product numbers */
#define MTIP_PRODUCT_UNKNOWN 0x00
#define MTIP_PRODUCT_ASICFPGA 0x11
/* Device instance number, incremented each time a device is probed. */
static int instance;
/*
* Global variable used to hold the major block device number
* allocated in mtip_init().
*/
static int mtip_major;
static struct dentry *dfs_parent;
static u32 cpu_use[NR_CPUS];
static DEFINE_IDA(rssd_index_ida);
static int mtip_block_initialize(struct driver_data *dd);
#ifdef CONFIG_COMPAT
struct mtip_compat_ide_task_request_s {
__u8 io_ports[8];
__u8 hob_ports[8];
ide_reg_valid_t out_flags;
ide_reg_valid_t in_flags;
int data_phase;
int req_cmd;
compat_ulong_t out_size;
compat_ulong_t in_size;
};
#endif
/*
* Check whether the card has been surprise-removed from the system
* by reading the vendor ID from PCI configuration space.
*
* @dd Pointer to the driver data structure.
*
* return value
* true if device removed, else false
*/
static bool mtip_check_surprise_removal(struct driver_data *dd)
{
u16 vendor_id = 0;
if (dd->sr)
return true;
/* Read the vendorID from the configuration space */
pci_read_config_word(dd->pdev, 0x00, &vendor_id);
if (vendor_id == 0xFFFF) {
dd->sr = true;
if (dd->disk)
blk_mark_disk_dead(dd->disk);
return true; /* device removed */
}
return false; /* device present */
}
static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
unsigned int tag)
{
return blk_mq_rq_to_pdu(blk_mq_tag_to_rq(dd->tags.tags[0], tag));
}
/*
* Reset the HBA (without sleeping)
*
* @dd Pointer to the driver data structure.
*
* return value
* 0 The reset was successful.
* -1 The HBA Reset bit did not clear.
*/
static int mtip_hba_reset(struct driver_data *dd)
{
unsigned long timeout;
/* Set the reset bit */
writel(HOST_RESET, dd->mmio + HOST_CTL);
/* Flush */
readl(dd->mmio + HOST_CTL);
/*
* Spin for up to 10 seconds waiting for reset acknowledgement. Spec
* is 1 sec but in LUN failure conditions, up to 10 secs are required
*/
timeout = jiffies + msecs_to_jiffies(10000);
do {
mdelay(10);
if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
return -1;
} while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
&& time_before(jiffies, timeout));
if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
return -1;
return 0;
}
/*
* Issue a command to the hardware.
*
* Set the appropriate bit in the s_active and Command Issue hardware
* registers, causing hardware command processing to begin.
*
* @port Pointer to the port structure.
* @tag The tag of the command to be issued.
*
* return value
* None
*/
static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
{
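/* each slot group covers 32 command slots */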
int group = tag >> 5;
/* guard SACT and CI registers */
spin_lock(&port->cmd_issue_lock[group]);
writel((1 << MTIP_TAG_BIT(tag)),
port->s_active[MTIP_TAG_INDEX(tag)]);
writel((1 << MTIP_TAG_BIT(tag)),
port->cmd_issue[MTIP_TAG_INDEX(tag)]);
spin_unlock(&port->cmd_issue_lock[group]);
}
/*
* Enable/disable the reception of FIS
*
* @port Pointer to the port data structure
* @enable 1 to enable, 0 to disable
*
* return value
* Previous state: 1 enabled, 0 disabled
*/
static int mtip_enable_fis(struct mtip_port *port, int enable)
{
u32 tmp;
/* enable FIS reception */
tmp = readl(port->mmio + PORT_CMD);
if (enable)
writel(tmp | PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
else
writel(tmp & ~PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
/* Flush */
readl(port->mmio + PORT_CMD);
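/* tmp was read before the update, so this reports the previous state */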
return (((tmp & PORT_CMD_FIS_RX) == PORT_CMD_FIS_RX));
}
/*
* Enable/disable the DMA engine
*
* @port Pointer to the port data structure
* @enable 1 to enable, 0 to disable
*
* return value
* Previous state: 1 enabled, 0 disabled.
*/
static int mtip_enable_engine(struct mtip_port *port, int enable)
{
u32 tmp;
/* enable/disable the DMA engine */
tmp = readl(port->mmio + PORT_CMD);
if (enable)
writel(tmp | PORT_CMD_START, port->mmio + PORT_CMD);
else
writel(tmp & ~PORT_CMD_START, port->mmio + PORT_CMD);
readl(port->mmio + PORT_CMD);
return (((tmp & PORT_CMD_START) == PORT_CMD_START));
}
/*
* Enables the port DMA engine and FIS reception.
*
* return value
* None
*/
static inline void mtip_start_port(struct mtip_port *port)
{
/* Enable FIS reception */
mtip_enable_fis(port, 1);
/* Enable the DMA engine */
mtip_enable_engine(port, 1);
}
/*
* Deinitialize a port by disabling port interrupts, the DMA engine,
* and FIS reception.
*
* @port Pointer to the port structure
*
* return value
* None
*/
static inline void mtip_deinit_port(struct mtip_port *port)
{
/* Disable interrupts on this port */
writel(0, port->mmio + PORT_IRQ_MASK);
/* Disable the DMA engine */
mtip_enable_engine(port, 0);
/* Disable FIS reception */
mtip_enable_fis(port, 0);
}
/*
* Initialize a port.
*
* This function deinitializes the port by calling mtip_deinit_port() and
* then initializes it by setting the command header and RX FIS addresses,
* clearing the SError register and any pending port interrupts before
* re-enabling the default set of port interrupts.
*
* @port Pointer to the port structure.
*
* return value
* None
*/
static void mtip_init_port(struct mtip_port *port)
{
int i;
mtip_deinit_port(port);
/* Program the command list base and FIS base addresses */
if (readl(port->dd->mmio + HOST_CAP) & HOST_CAP_64) {
writel((port->command_list_dma >> 16) >> 16,
port->mmio + PORT_LST_ADDR_HI);
writel((port->rxfis_dma >> 16) >> 16,
port->mmio + PORT_FIS_ADDR_HI);
set_bit(MTIP_PF_HOST_CAP_64, &port->flags);
}
writel(port->command_list_dma & 0xFFFFFFFF,
port->mmio + PORT_LST_ADDR);
writel(port->rxfis_dma & 0xFFFFFFFF, port->mmio + PORT_FIS_ADDR);
/* Clear SError */
writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);
/* reset the completed registers.*/
for (i = 0; i < port->dd->slot_groups; i++)
writel(0xFFFFFFFF, port->completed[i]);
/* Clear any pending interrupts for this port */
writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);
/* Clear any pending interrupts on the HBA. */
writel(readl(port->dd->mmio + HOST_IRQ_STAT),
port->dd->mmio + HOST_IRQ_STAT);
/* Enable port interrupts */
writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK);
}
/*
* Restart a port
*
* @port Pointer to the port data structure.
*
* return value
* None
*/
static void mtip_restart_port(struct mtip_port *port)
{
unsigned long timeout;
/* Disable the DMA engine */
mtip_enable_engine(port, 0);
/* Chip quirk: wait up to 500ms for PxCMD.CR == 0 */
timeout = jiffies + msecs_to_jiffies(500);
while ((readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON)
&& time_before(jiffies, timeout))
;
if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
return;
/*
* Chip quirk: escalate to hba reset if
* PxCMD.CR not clear after 500 ms
*/
if (readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON) {
dev_warn(&port->dd->pdev->dev,
"PxCMD.CR not clear, escalating reset\n");
if (mtip_hba_reset(port->dd))
dev_err(&port->dd->pdev->dev,
"HBA reset escalation failed.\n");
/* 30 ms delay before com reset to quiesce chip */
mdelay(30);
}
dev_warn(&port->dd->pdev->dev, "Issuing COM reset\n");
/* Set PxSCTL.DET */
writel(readl(port->mmio + PORT_SCR_CTL) |
1, port->mmio + PORT_SCR_CTL);
readl(port->mmio + PORT_SCR_CTL);
/* Wait 1 ms to quiesce chip function */
timeout = jiffies + msecs_to_jiffies(1);
while (time_before(jiffies, timeout))
;
if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
return;
/* Clear PxSCTL.DET */
writel(readl(port->mmio + PORT_SCR_CTL) & ~1,
port->mmio + PORT_SCR_CTL);
readl(port->mmio + PORT_SCR_CTL);
/* Wait 500 ms for bit 0 of PORT_SCR_STS to be set */
timeout = jiffies + msecs_to_jiffies(500);
while (((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
&& time_before(jiffies, timeout))
;
if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
return;
if ((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
dev_warn(&port->dd->pdev->dev,
"COM reset failed\n");
mtip_init_port(port);
mtip_start_port(port);
}
static int mtip_device_reset(struct driver_data *dd)
{
int rv = 0;
if (mtip_check_surprise_removal(dd))
return 0;
if (mtip_hba_reset(dd) < 0)
rv = -EFAULT;
mdelay(1);
mtip_init_port(dd->port);
mtip_start_port(dd->port);
/* Enable interrupts on the HBA. */
writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
dd->mmio + HOST_CTL);
return rv;
}
/*
* Helper function for tag logging
*/
static void print_tags(struct driver_data *dd,
char *msg,
unsigned long *tagbits,
int cnt)
{
unsigned char tagmap[128];
int group, tagmap_len = 0;
memset(tagmap, 0, sizeof(tagmap));
for (group = SLOTBITS_IN_LONGS; group > 0; group--)
tagmap_len += sprintf(tagmap + tagmap_len, "%016lX ",
tagbits[group-1]);
dev_warn(&dd->pdev->dev,
"%d command(s) %s: tagmap [%s]", cnt, msg, tagmap);
}
static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
dma_addr_t buffer_dma, unsigned int sectors);
static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
struct smart_attr *attrib);
static void mtip_complete_command(struct mtip_cmd *cmd, blk_status_t status)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
cmd->status = status;
if (likely(!blk_should_fake_timeout(req->q)))
blk_mq_complete_request(req);
}
/*
* Handle an error.
*
* @dd Pointer to the DRIVER_DATA structure.
*
* return value
* None
*/
static void mtip_handle_tfe(struct driver_data *dd)
{
int group, tag, bit, reissue, rv;
struct mtip_port *port;
struct mtip_cmd *cmd;
u32 completed;
struct host_to_dev_fis *fis;
unsigned long tagaccum[SLOTBITS_IN_LONGS];
unsigned int cmd_cnt = 0;
unsigned char *buf;
char *fail_reason = NULL;
int fail_all_ncq_write = 0, fail_all_ncq_cmds = 0;
dev_warn(&dd->pdev->dev, "Taskfile error\n");
port = dd->port;
if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
mtip_complete_command(cmd, BLK_STS_IOERR);
return;
}
/* clear the tag accumulator */
memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
/* Loop through all the groups */
for (group = 0; group < dd->slot_groups; group++) {
completed = readl(port->completed[group]);
dev_warn(&dd->pdev->dev, "g=%u, comp=%x\n", group, completed);
/* clear completed status register in the hardware.*/
writel(completed, port->completed[group]);
/* Process successfully completed commands */
for (bit = 0; bit < 32 && completed; bit++) {
if (!(completed & (1<<bit)))
continue;
tag = (group << 5) + bit;
/* Skip the internal command slot */
if (tag == MTIP_TAG_INTERNAL)
continue;
cmd = mtip_cmd_from_tag(dd, tag);
mtip_complete_command(cmd, 0);
set_bit(tag, tagaccum);
cmd_cnt++;
}
}
print_tags(dd, "completed (TFE)", tagaccum, cmd_cnt);
/* Restart the port */
mdelay(20);
mtip_restart_port(port);
/* Trying to determine the cause of the error */
rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
dd->port->log_buf,
dd->port->log_buf_dma, 1);
if (rv) {
dev_warn(&dd->pdev->dev,
"Error in READ LOG EXT (10h) command\n");
/* non-critical error, don't fail the load */
} else {
buf = (unsigned char *)dd->port->log_buf;
if (buf[259] & 0x1) {
dev_info(&dd->pdev->dev,
"Write protect bit is set.\n");
set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
fail_all_ncq_write = 1;
fail_reason = "write protect";
}
if (buf[288] == 0xF7) {
dev_info(&dd->pdev->dev,
"Exceeded Tmax, drive in thermal shutdown.\n");
set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
fail_all_ncq_cmds = 1;
fail_reason = "thermal shutdown";
}
if (buf[288] == 0xBF) {
set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
dev_info(&dd->pdev->dev,
"Drive indicates rebuild has failed. Secure erase required.\n");
fail_all_ncq_cmds = 1;
fail_reason = "rebuild failed";
}
}
/* clear the tag accumulator */
memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
/* Loop through all the groups */
for (group = 0; group < dd->slot_groups; group++) {
for (bit = 0; bit < 32; bit++) {
reissue = 1;
tag = (group << 5) + bit;
cmd = mtip_cmd_from_tag(dd, tag);
fis = (struct host_to_dev_fis *)cmd->command;
/* Should re-issue? */
if (tag == MTIP_TAG_INTERNAL ||
fis->command == ATA_CMD_SET_FEATURES)
reissue = 0;
else {
if (fail_all_ncq_cmds ||
(fail_all_ncq_write &&
fis->command == ATA_CMD_FPDMA_WRITE)) {
dev_warn(&dd->pdev->dev,
" Fail: %s w/tag %d [%s].\n",
fis->command == ATA_CMD_FPDMA_WRITE ?
"write" : "read",
tag,
fail_reason != NULL ?
fail_reason : "unknown");
mtip_complete_command(cmd, BLK_STS_MEDIUM);
continue;
}
}
/*
* First check if this command has
* exceeded its retries.
*/
if (reissue && (cmd->retries-- > 0)) {
set_bit(tag, tagaccum);
/* Re-issue the command. */
mtip_issue_ncq_command(port, tag);
continue;
}
/* Retire a command that will not be reissued */
dev_warn(&port->dd->pdev->dev,
"retiring tag %d\n", tag);
mtip_complete_command(cmd, BLK_STS_IOERR);
}
}
print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
}
/*
* Handle a set device bits interrupt
*/
static inline void mtip_workq_sdbfx(struct mtip_port *port, int group,
u32 completed)
{
struct driver_data *dd = port->dd;
int tag, bit;
struct mtip_cmd *command;
if (!completed) {
WARN_ON_ONCE(!completed);
return;
}
/* clear completed status register in the hardware.*/
writel(completed, port->completed[group]);
/* Process completed commands. */
for (bit = 0; (bit < 32) && completed; bit++) {
if (completed & 0x01) {
tag = (group << 5) | bit;
/* skip internal command slot. */
if (unlikely(tag == MTIP_TAG_INTERNAL))
continue;
command = mtip_cmd_from_tag(dd, tag);
mtip_complete_command(command, 0);
}
completed >>= 1;
}
/* If last, re-enable interrupts */
if (atomic_dec_return(&dd->irq_workers_active) == 0)
writel(0xffffffff, dd->mmio + HOST_IRQ_STAT);
}
/*
* Process legacy pio and d2h interrupts
*/
static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
{
struct mtip_port *port = dd->port;
struct mtip_cmd *cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) && cmd) {
int group = MTIP_TAG_INDEX(MTIP_TAG_INTERNAL);
int status = readl(port->cmd_issue[group]);
if (!(status & (1 << MTIP_TAG_BIT(MTIP_TAG_INTERNAL))))
mtip_complete_command(cmd, 0);
}
}
/*
* Demux and handle errors
*/
static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
{
if (unlikely(port_stat & PORT_IRQ_CONNECT)) {
dev_warn(&dd->pdev->dev,
"Clearing PxSERR.DIAG.x\n");
writel((1 << 26), dd->port->mmio + PORT_SCR_ERR);
}
if (unlikely(port_stat & PORT_IRQ_PHYRDY)) {
dev_warn(&dd->pdev->dev,
"Clearing PxSERR.DIAG.n\n");
writel((1 << 16), dd->port->mmio + PORT_SCR_ERR);
}
if (unlikely(port_stat & ~PORT_IRQ_HANDLED)) {
dev_warn(&dd->pdev->dev,
"Port stat errors %x unhandled\n",
(port_stat & ~PORT_IRQ_HANDLED));
if (mtip_check_surprise_removal(dd))
return;
}
if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR))) {
set_bit(MTIP_PF_EH_ACTIVE_BIT, &dd->port->flags);
wake_up_interruptible(&dd->port->svc_wait);
}
}
static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
{
struct driver_data *dd = (struct driver_data *) data;
struct mtip_port *port = dd->port;
u32 hba_stat, port_stat;
int rv = IRQ_NONE;
int do_irq_enable = 1, i, workers;
struct mtip_work *twork;
hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
if (hba_stat) {
rv = IRQ_HANDLED;
/* Acknowledge the interrupt status on the port.*/
port_stat = readl(port->mmio + PORT_IRQ_STAT);
if (unlikely(port_stat == 0xFFFFFFFF)) {
mtip_check_surprise_removal(dd);
return IRQ_HANDLED;
}
writel(port_stat, port->mmio + PORT_IRQ_STAT);
/* Demux port status */
if (likely(port_stat & PORT_IRQ_SDB_FIS)) {
do_irq_enable = 0;
WARN_ON_ONCE(atomic_read(&dd->irq_workers_active) != 0);
/* Start at 1: group zero is always local? */
for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS;
i++) {
twork = &dd->work[i];
twork->completed = readl(port->completed[i]);
if (twork->completed)
workers++;
}
atomic_set(&dd->irq_workers_active, workers);
if (workers) {
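/* Hand off slot groups 1..N to their bound CPU workers;
* group 0 is completed inline below. */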
for (i = 1; i < MTIP_MAX_SLOT_GROUPS; i++) {
twork = &dd->work[i];
if (twork->completed)
queue_work_on(
twork->cpu_binding,
dd->isr_workq,
&twork->work);
}
if (likely(dd->work[0].completed))
mtip_workq_sdbfx(port, 0,
dd->work[0].completed);
} else {
/*
* Chip quirk: SDB interrupt but nothing
* to complete
*/
do_irq_enable = 1;
}
}
if (unlikely(port_stat & PORT_IRQ_ERR)) {
if (unlikely(mtip_check_surprise_removal(dd))) {
/* don't proceed further */
return IRQ_HANDLED;
}
if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
&dd->dd_flag))
return rv;
mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
}
if (unlikely(port_stat & PORT_IRQ_LEGACY))
mtip_process_legacy(dd, port_stat & PORT_IRQ_LEGACY);
}
/* acknowledge interrupt */
if (unlikely(do_irq_enable))
writel(hba_stat, dd->mmio + HOST_IRQ_STAT);
return rv;
}
/*
* HBA interrupt subroutine.
*
* @irq IRQ number.
* @instance Pointer to the driver data structure.
*
* return value
* IRQ_HANDLED A HBA interrupt was pending and handled.
* IRQ_NONE This interrupt was not for the HBA.
*/
static irqreturn_t mtip_irq_handler(int irq, void *instance)
{
struct driver_data *dd = instance;
return mtip_handle_irq(dd);
}
static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
{
writel(1 << MTIP_TAG_BIT(tag), port->cmd_issue[MTIP_TAG_INDEX(tag)]);
}
static bool mtip_pause_ncq(struct mtip_port *port,
struct host_to_dev_fis *fis)
{
unsigned long task_file_data;
task_file_data = readl(port->mmio+PORT_TFDATA);
if ((task_file_data & 1))
return false;
if (fis->command == ATA_CMD_SEC_ERASE_PREP) {
port->ic_pause_timer = jiffies;
return true;
} else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) &&
(fis->features == 0x03)) {
set_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
port->ic_pause_timer = jiffies;
return true;
} else if ((fis->command == ATA_CMD_SEC_ERASE_UNIT) ||
((fis->command == 0xFC) &&
(fis->features == 0x27 || fis->features == 0x72 ||
fis->features == 0x62 || fis->features == 0x26))) {
clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
clear_bit(MTIP_DDF_REBUILD_FAILED_BIT, &port->dd->dd_flag);
/* Com reset after secure erase or lowlevel format */
mtip_restart_port(port);
clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
return false;
}
return false;
}
static bool mtip_commands_active(struct mtip_port *port)
{
unsigned int active;
unsigned int n;
/*
* Ignore s_active bit 0 of array element 0.
* This bit will always be set
*/
active = readl(port->s_active[0]) & 0xFFFFFFFE;
for (n = 1; n < port->dd->slot_groups; n++)
active |= readl(port->s_active[n]);
return active != 0;
}
/*
* Wait for port to quiesce
*
* @port Pointer to port data structure
* @timeout Max duration to wait (ms)
*
* return value
* 0 Success
* -EBUSY Commands still active
*/
static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
{
unsigned long to;
bool active = true;
blk_mq_quiesce_queue(port->dd->queue);
to = jiffies + msecs_to_jiffies(timeout);
do {
if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
msleep(20);
continue; /* svc thd is actively issuing commands */
}
msleep(100);
if (mtip_check_surprise_removal(port->dd))
goto err_fault;
active = mtip_commands_active(port);
if (!active)
break;
} while (time_before(jiffies, to));
blk_mq_unquiesce_queue(port->dd->queue);
return active ? -EBUSY : 0;
err_fault:
blk_mq_unquiesce_queue(port->dd->queue);
return -EFAULT;
}
struct mtip_int_cmd {
int fis_len;
dma_addr_t buffer;
int buf_len;
u32 opts;
};
/*
* Execute an internal command and wait for the completion.
*
* @port Pointer to the port data structure.
* @fis Pointer to the FIS that describes the command.
* @fis_len Length in WORDS of the FIS.
* @buffer DMA accessible for command data.
* @buf_len Length, in bytes, of the data buffer.
* @opts Command header options, excluding the FIS length
* and the number of PRD entries.
* @timeout Time in ms to wait for the command to complete.
*
* return value
* 0 Command completed successfully.
* -EFAULT The buffer address is not correctly aligned.
* -EBUSY Internal command or other IO in progress.
* -EAGAIN Time out waiting for command to complete.
*/
static int mtip_exec_internal_command(struct mtip_port *port,
struct host_to_dev_fis *fis,
int fis_len,
dma_addr_t buffer,
int buf_len,
u32 opts,
unsigned long timeout)
{
struct mtip_cmd *int_cmd;
struct driver_data *dd = port->dd;
struct request *rq;
struct mtip_int_cmd icmd = {
.fis_len = fis_len,
.buffer = buffer,
.buf_len = buf_len,
.opts = opts
};
int rv = 0;
/* Make sure the buffer is 8 byte aligned. This is asic specific. */
if (buffer & 0x00000007) {
dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n");
return -EFAULT;
}
if (mtip_check_surprise_removal(dd))
return -EFAULT;
rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
if (IS_ERR(rq)) {
dbg_printk(MTIP_DRV_NAME "Unable to allocate tag for PIO cmd\n");
return -EFAULT;
}
set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
if (fis->command == ATA_CMD_SEC_ERASE_PREP)
set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
if (fis->command != ATA_CMD_STANDBYNOW1) {
/* wait for io to complete if non atomic */
if (mtip_quiesce_io(port, MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) {
dev_warn(&dd->pdev->dev, "Failed to quiesce IO\n");
blk_mq_free_request(rq);
clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
wake_up_interruptible(&port->svc_wait);
return -EBUSY;
}
}
/* Copy the command to the command table */
int_cmd = blk_mq_rq_to_pdu(rq);
int_cmd->icmd = &icmd;
memcpy(int_cmd->command, fis, fis_len*4);
rq->timeout = timeout;
/* insert request and run queue */
blk_execute_rq(rq, true);
if (int_cmd->status) {
dev_err(&dd->pdev->dev, "Internal command [%02X] failed %d\n",
fis->command, int_cmd->status);
rv = -EIO;
if (mtip_check_surprise_removal(dd) ||
test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
&dd->dd_flag)) {
dev_err(&dd->pdev->dev,
"Internal command [%02X] wait returned due to SR\n",
fis->command);
rv = -ENXIO;
goto exec_ic_exit;
}
mtip_device_reset(dd); /* recover from timeout issue */
rv = -EAGAIN;
goto exec_ic_exit;
}
if (readl(port->cmd_issue[MTIP_TAG_INDEX(MTIP_TAG_INTERNAL)])
& (1 << MTIP_TAG_BIT(MTIP_TAG_INTERNAL))) {
rv = -ENXIO;
if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
mtip_device_reset(dd);
rv = -EAGAIN;
}
}
exec_ic_exit:
/* Clear the allocated and active bits for the internal command. */
blk_mq_free_request(rq);
clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
if (rv >= 0 && mtip_pause_ncq(port, fis)) {
/* NCQ paused */
return rv;
}
wake_up_interruptible(&port->svc_wait);
return rv;
}
/*
* Byte-swap ATA ID strings.
*
* ATA identify data contains strings in byte-swapped 16-bit words.
* They must be swapped (on all architectures) to be usable as C strings.
* This function swaps bytes in-place.
*
* @buf The buffer location of the string
* @len The number of bytes to swap
*
* return value
* None
*/
static inline void ata_swap_string(u16 *buf, unsigned int len)
{
int i;
for (i = 0; i < (len/2); i++)
be16_to_cpus(&buf[i]);
}
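/*
 * Illustrative example (not part of the driver): ATA strings pack two
 * ASCII characters per 16-bit word with the first character in the high
 * byte, so on a little-endian host the raw identify buffer appears
 * byte-swapped. Assuming a hypothetical model string "P320", the words
 * read from the device are 0x5033 ('P','3') and 0x3230 ('2','0'):
 *
 *	u16 words[2] = { 0x5033, 0x3230 };
 *	// little-endian memory before swap: 33 50 30 32  -> "3P02"
 *	ata_swap_string(words, 4);
 *	// little-endian memory after swap:  50 33 32 30  -> "P320"
 *
 * This is why mtip_get_identify() below only calls ata_swap_string() in
 * its __LITTLE_ENDIAN branch.
 */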
static void mtip_set_timeout(struct driver_data *dd,
struct host_to_dev_fis *fis,
unsigned int *timeout, u8 erasemode)
{
switch (fis->command) {
case ATA_CMD_DOWNLOAD_MICRO:
*timeout = 120000; /* 2 minutes */
break;
case ATA_CMD_SEC_ERASE_UNIT:
case 0xFC:
if (erasemode)
*timeout = ((*(dd->port->identify + 90) * 2) * 60000);
else
*timeout = ((*(dd->port->identify + 89) * 2) * 60000);
break;
case ATA_CMD_STANDBYNOW1:
*timeout = 120000; /* 2 minutes */
break;
case 0xF7:
case 0xFA:
*timeout = 60000; /* 60 seconds */
break;
case ATA_CMD_SMART:
*timeout = 15000; /* 15 seconds */
break;
default:
*timeout = MTIP_IOCTL_CMD_TIMEOUT_MS;
break;
}
}
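/*
 * Worked example (illustrative only): for a secure erase, identify words
 * 89 (normal) and 90 (enhanced) report the erase time in 2-minute units,
 * which mtip_set_timeout() converts to milliseconds. Assuming
 * identify[89] == 60 and erasemode == 0:
 *
 *	timeout = (60 * 2) * 60000;	// 120 minutes = 7,200,000 ms
 *
 * All other opcodes fall back to the fixed values above (e.g. 2 minutes
 * for DOWNLOAD MICROCODE and STANDBY IMMEDIATE).
 */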
/*
* Request the device identity information.
*
* If a user space buffer is not specified, i.e. is NULL, the
* identify information is still read from the drive and placed
* into the identify data buffer (@e port->identify) in the
* port data structure.
* When the identify buffer contains valid identify information @e
* port->identify_valid is non-zero.
*
* @port Pointer to the port structure.
* @user_buffer A user space buffer where the identify data should be
* copied.
*
* return value
* 0 Command completed successfully.
 * -EFAULT An error occurred while copying data to the user buffer.
* -1 Command failed.
*/
static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
{
int rv = 0;
struct host_to_dev_fis fis;
if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
return -EFAULT;
/* Build the FIS. */
memset(&fis, 0, sizeof(struct host_to_dev_fis));
fis.type = 0x27;
fis.opts = 1 << 7;
fis.command = ATA_CMD_ID_ATA;
/* Set the identify information as invalid. */
port->identify_valid = 0;
/* Clear the identify information. */
memset(port->identify, 0, sizeof(u16) * ATA_ID_WORDS);
/* Execute the command. */
if (mtip_exec_internal_command(port,
&fis,
5,
port->identify_dma,
sizeof(u16) * ATA_ID_WORDS,
0,
MTIP_INT_CMD_TIMEOUT_MS)
< 0) {
rv = -1;
goto out;
}
/*
* Perform any necessary byte-swapping. Yes, the kernel does in fact
* perform field-sensitive swapping on the string fields.
* See the kernel use of ata_id_string() for proof of this.
*/
#ifdef __LITTLE_ENDIAN
ata_swap_string(port->identify + 27, 40); /* model string*/
ata_swap_string(port->identify + 23, 8); /* firmware string*/
ata_swap_string(port->identify + 10, 20); /* serial# string*/
#else
{
int i;
for (i = 0; i < ATA_ID_WORDS; i++)
port->identify[i] = le16_to_cpu(port->identify[i]);
}
#endif
/* Check security locked state */
if (port->identify[128] & 0x4)
set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
else
clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
/* Set the identify buffer as valid. */
port->identify_valid = 1;
if (user_buffer) {
if (copy_to_user(
user_buffer,
port->identify,
ATA_ID_WORDS * sizeof(u16))) {
rv = -EFAULT;
goto out;
}
}
out:
return rv;
}
/*
* Issue a standby immediate command to the device.
*
* @port Pointer to the port structure.
*
* return value
* 0 Command was executed successfully.
* -1 An error occurred while executing the command.
*/
static int mtip_standby_immediate(struct mtip_port *port)
{
int rv;
struct host_to_dev_fis fis;
unsigned long __maybe_unused start;
unsigned int timeout;
/* Build the FIS. */
memset(&fis, 0, sizeof(struct host_to_dev_fis));
fis.type = 0x27;
fis.opts = 1 << 7;
fis.command = ATA_CMD_STANDBYNOW1;
mtip_set_timeout(port->dd, &fis, &timeout, 0);
start = jiffies;
rv = mtip_exec_internal_command(port,
&fis,
5,
0,
0,
0,
timeout);
dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n",
jiffies_to_msecs(jiffies - start));
if (rv)
dev_warn(&port->dd->pdev->dev,
"STANDBY IMMEDIATE command failed.\n");
return rv;
}
/*
* Issue a READ LOG EXT command to the device.
*
* @port pointer to the port structure.
* @page page number to fetch
* @buffer pointer to buffer
* @buffer_dma dma address corresponding to @buffer
* @sectors page length to fetch, in sectors
*
* return value
* @rv return value from mtip_exec_internal_command()
*/
static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
dma_addr_t buffer_dma, unsigned int sectors)
{
struct host_to_dev_fis fis;
memset(&fis, 0, sizeof(struct host_to_dev_fis));
fis.type = 0x27;
fis.opts = 1 << 7;
fis.command = ATA_CMD_READ_LOG_EXT;
fis.sect_count = sectors & 0xFF;
fis.sect_cnt_ex = (sectors >> 8) & 0xFF;
fis.lba_low = page;
fis.lba_mid = 0;
fis.device = ATA_DEVICE_OBS;
memset(buffer, 0, sectors * ATA_SECT_SIZE);
return mtip_exec_internal_command(port,
&fis,
5,
buffer_dma,
sectors * ATA_SECT_SIZE,
0,
MTIP_INT_CMD_TIMEOUT_MS);
}
/*
* Issue a SMART READ DATA command to the device.
*
* @port pointer to the port structure.
* @buffer pointer to buffer
* @buffer_dma dma address corresponding to @buffer
*
* return value
* @rv return value from mtip_exec_internal_command()
*/
static int mtip_get_smart_data(struct mtip_port *port, u8 *buffer,
dma_addr_t buffer_dma)
{
struct host_to_dev_fis fis;
memset(&fis, 0, sizeof(struct host_to_dev_fis));
fis.type = 0x27;
fis.opts = 1 << 7;
fis.command = ATA_CMD_SMART;
fis.features = 0xD0;
fis.sect_count = 1;
fis.lba_mid = 0x4F;
fis.lba_hi = 0xC2;
fis.device = ATA_DEVICE_OBS;
return mtip_exec_internal_command(port,
&fis,
5,
buffer_dma,
ATA_SECT_SIZE,
0,
15000);
}
/*
* Get the value of a smart attribute
*
* @port pointer to the port structure
* @id attribute number
* @attrib pointer to return attrib information corresponding to @id
*
* return value
* -EINVAL NULL buffer passed or unsupported attribute @id.
* -EPERM Identify data not valid, SMART not supported or not enabled
*/
static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
struct smart_attr *attrib)
{
int rv, i;
struct smart_attr *pattr;
if (!attrib)
return -EINVAL;
if (!port->identify_valid) {
dev_warn(&port->dd->pdev->dev, "IDENTIFY DATA not valid\n");
return -EPERM;
}
if (!(port->identify[82] & 0x1)) {
dev_warn(&port->dd->pdev->dev, "SMART not supported\n");
return -EPERM;
}
if (!(port->identify[85] & 0x1)) {
dev_warn(&port->dd->pdev->dev, "SMART not enabled\n");
return -EPERM;
}
memset(port->smart_buf, 0, ATA_SECT_SIZE);
rv = mtip_get_smart_data(port, port->smart_buf, port->smart_buf_dma);
if (rv) {
dev_warn(&port->dd->pdev->dev, "Failed to ge SMART data\n");
return rv;
}
pattr = (struct smart_attr *)(port->smart_buf + 2);
for (i = 0; i < 29; i++, pattr++)
if (pattr->attr_id == id) {
memcpy(attrib, pattr, sizeof(struct smart_attr));
break;
}
if (i == 29) {
dev_warn(&port->dd->pdev->dev,
"Query for invalid SMART attribute ID\n");
rv = -EINVAL;
}
return rv;
}
/*
* Get the drive capacity.
*
* @dd Pointer to the device data structure.
* @sectors Pointer to the variable that will receive the sector count.
*
* return value
* 1 Capacity was returned successfully.
* 0 The identify information is invalid.
*/
static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
{
struct mtip_port *port = dd->port;
u64 total, raw0, raw1, raw2, raw3;
raw0 = port->identify[100];
raw1 = port->identify[101];
raw2 = port->identify[102];
raw3 = port->identify[103];
total = raw0 | raw1<<16 | raw2<<32 | raw3<<48;
*sectors = total;
return (bool) !!port->identify_valid;
}
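/*
 * Worked example (illustrative only): identify words 100-103 hold the
 * 48-bit count of user-addressable sectors, least-significant word
 * first. Assuming identify[100..103] == { 0x0000, 0x0040, 0x0000, 0x0000 }:
 *
 *	total = 0x0000 | (0x0040ULL << 16) | 0 | 0;	// 0x00400000
 *	// 4,194,304 sectors * 512 bytes = 2 GiB
 */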
/*
* Display the identify command data.
*
* @port Pointer to the port data structure.
*
* return value
* None
*/
static void mtip_dump_identify(struct mtip_port *port)
{
sector_t sectors;
unsigned short revid;
char cbuf[42];
if (!port->identify_valid)
return;
strscpy(cbuf, (char *)(port->identify + 10), 21);
dev_info(&port->dd->pdev->dev,
"Serial No.: %s\n", cbuf);
strscpy(cbuf, (char *)(port->identify + 23), 9);
dev_info(&port->dd->pdev->dev,
"Firmware Ver.: %s\n", cbuf);
strscpy(cbuf, (char *)(port->identify + 27), 41);
dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);
dev_info(&port->dd->pdev->dev, "Security: %04x %s\n",
port->identify[128],
port->identify[128] & 0x4 ? "(LOCKED)" : "");
if (mtip_hw_get_capacity(port->dd, &sectors))
dev_info(&port->dd->pdev->dev,
"Capacity: %llu sectors (%llu MB)\n",
(u64)sectors,
((u64)sectors) * ATA_SECT_SIZE >> 20);
pci_read_config_word(port->dd->pdev, PCI_REVISION_ID, &revid);
switch (revid & 0xFF) {
case 0x1:
strscpy(cbuf, "A0", 3);
break;
case 0x3:
strscpy(cbuf, "A2", 3);
break;
default:
strscpy(cbuf, "?", 2);
break;
}
dev_info(&port->dd->pdev->dev,
"Card Type: %s\n", cbuf);
}
/*
 * Map the command's scatter list into the command table.
 *
 * @dd      Pointer to the driver data structure.
 * @command Pointer to the command.
 * @nents   Number of scatter list entries.
*
* return value
* None
*/
static inline void fill_command_sg(struct driver_data *dd,
struct mtip_cmd *command,
int nents)
{
int n;
unsigned int dma_len;
struct mtip_cmd_sg *command_sg;
struct scatterlist *sg;
command_sg = command->command + AHCI_CMD_TBL_HDR_SZ;
for_each_sg(command->sg, sg, nents, n) {
dma_len = sg_dma_len(sg);
if (dma_len > 0x400000)
dev_err(&dd->pdev->dev,
"DMA segment length truncated\n");
command_sg->info = cpu_to_le32((dma_len-1) & 0x3FFFFF);
command_sg->dba = cpu_to_le32(sg_dma_address(sg));
command_sg->dba_upper =
cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
command_sg++;
}
}
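/*
 * Illustrative example (not part of the driver): each scatter/gather
 * entry in the command table stores the byte count minus one (at most
 * 4 MB per entry, hence the 0x400000 check above) and the 64-bit DMA
 * address split into two 32-bit halves. Assuming one 4 KiB segment at
 * DMA address 0x1_2345_6000:
 *
 *	command_sg->info      = cpu_to_le32(4096 - 1);	// 0x00000FFF
 *	command_sg->dba       = cpu_to_le32(0x23456000);
 *	command_sg->dba_upper = cpu_to_le32(0x00000001);
 */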
/*
* @brief Execute a drive command.
*
* return value 0 The command completed successfully.
* return value -1 An error occurred while executing the command.
*/
static int exec_drive_task(struct mtip_port *port, u8 *command)
{
struct host_to_dev_fis fis;
struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
unsigned int to;
/* Build the FIS. */
memset(&fis, 0, sizeof(struct host_to_dev_fis));
fis.type = 0x27;
fis.opts = 1 << 7;
fis.command = command[0];
fis.features = command[1];
fis.sect_count = command[2];
fis.sector = command[3];
fis.cyl_low = command[4];
fis.cyl_hi = command[5];
fis.device = command[6] & ~0x10; /* Clear the dev bit*/
mtip_set_timeout(port->dd, &fis, &to, 0);
dbg_printk(MTIP_DRV_NAME " %s: User Command: cmd %x, feat %x, nsect %x, sect %x, lcyl %x, hcyl %x, sel %x\n",
__func__,
command[0],
command[1],
command[2],
command[3],
command[4],
command[5],
command[6]);
/* Execute the command. */
if (mtip_exec_internal_command(port,
&fis,
5,
0,
0,
0,
to) < 0) {
return -1;
}
command[0] = reply->command; /* Status*/
command[1] = reply->features; /* Error*/
command[4] = reply->cyl_low;
command[5] = reply->cyl_hi;
dbg_printk(MTIP_DRV_NAME " %s: Completion Status: stat %x, err %x , cyl_lo %x cyl_hi %x\n",
__func__,
command[0],
command[1],
command[4],
command[5]);
return 0;
}
/*
* @brief Execute a drive command.
*
* @param port Pointer to the port data structure.
* @param command Pointer to the user specified command parameters.
* @param user_buffer Pointer to the user space buffer where read sector
* data should be copied.
*
* return value 0 The command completed successfully.
* return value -EFAULT An error occurred while copying the completion
* data to the user space buffer.
* return value -1 An error occurred while executing the command.
*/
static int exec_drive_command(struct mtip_port *port, u8 *command,
void __user *user_buffer)
{
struct host_to_dev_fis fis;
struct host_to_dev_fis *reply;
u8 *buf = NULL;
dma_addr_t dma_addr = 0;
int rv = 0, xfer_sz = command[3];
unsigned int to;
if (xfer_sz) {
if (!user_buffer)
return -EFAULT;
buf = dma_alloc_coherent(&port->dd->pdev->dev,
ATA_SECT_SIZE * xfer_sz,
&dma_addr,
GFP_KERNEL);
if (!buf) {
dev_err(&port->dd->pdev->dev,
"Memory allocation failed (%d bytes)\n",
ATA_SECT_SIZE * xfer_sz);
return -ENOMEM;
}
}
/* Build the FIS. */
memset(&fis, 0, sizeof(struct host_to_dev_fis));
fis.type = 0x27;
fis.opts = 1 << 7;
fis.command = command[0];
fis.features = command[2];
fis.sect_count = command[3];
if (fis.command == ATA_CMD_SMART) {
fis.sector = command[1];
fis.cyl_low = 0x4F;
fis.cyl_hi = 0xC2;
}
mtip_set_timeout(port->dd, &fis, &to, 0);
if (xfer_sz)
reply = (port->rxfis + RX_FIS_PIO_SETUP);
else
reply = (port->rxfis + RX_FIS_D2H_REG);
dbg_printk(MTIP_DRV_NAME
" %s: User Command: cmd %x, sect %x, "
"feat %x, sectcnt %x\n",
__func__,
command[0],
command[1],
command[2],
command[3]);
/* Execute the command. */
if (mtip_exec_internal_command(port,
&fis,
5,
(xfer_sz ? dma_addr : 0),
(xfer_sz ? ATA_SECT_SIZE * xfer_sz : 0),
0,
to)
< 0) {
rv = -EFAULT;
goto exit_drive_command;
}
/* Collect the completion status. */
command[0] = reply->command; /* Status*/
command[1] = reply->features; /* Error*/
command[2] = reply->sect_count;
dbg_printk(MTIP_DRV_NAME
" %s: Completion Status: stat %x, "
"err %x, nsect %x\n",
__func__,
command[0],
command[1],
command[2]);
if (xfer_sz) {
if (copy_to_user(user_buffer,
buf,
ATA_SECT_SIZE * command[3])) {
rv = -EFAULT;
goto exit_drive_command;
}
}
exit_drive_command:
if (buf)
dma_free_coherent(&port->dd->pdev->dev,
ATA_SECT_SIZE * xfer_sz, buf, dma_addr);
return rv;
}
/*
* Indicates whether a command has a single sector payload.
*
 * @command Command opcode sent to the device.
 * @features Features value sent along with the command.
*
* return value
* 1 command is one that always has a single sector payload,
* regardless of the value in the Sector Count field.
* 0 otherwise
*
*/
static unsigned int implicit_sector(unsigned char command,
unsigned char features)
{
unsigned int rv = 0;
/* list of commands that have an implicit sector count of 1 */
switch (command) {
case ATA_CMD_SEC_SET_PASS:
case ATA_CMD_SEC_UNLOCK:
case ATA_CMD_SEC_ERASE_PREP:
case ATA_CMD_SEC_ERASE_UNIT:
case ATA_CMD_SEC_FREEZE_LOCK:
case ATA_CMD_SEC_DISABLE_PASS:
case ATA_CMD_PMP_READ:
case ATA_CMD_PMP_WRITE:
rv = 1;
break;
case ATA_CMD_SET_MAX:
if (features == ATA_SET_MAX_UNLOCK)
rv = 1;
break;
case ATA_CMD_SMART:
if ((features == ATA_SMART_READ_VALUES) ||
(features == ATA_SMART_READ_THRESHOLDS))
rv = 1;
break;
case ATA_CMD_CONF_OVERLAY:
if ((features == ATA_DCO_IDENTIFY) ||
(features == ATA_DCO_SET))
rv = 1;
break;
}
return rv;
}
/*
* Executes a taskfile
* See ide_taskfile_ioctl() for derivation
*/
static int exec_drive_taskfile(struct driver_data *dd,
void __user *buf,
ide_task_request_t *req_task,
int outtotal)
{
struct host_to_dev_fis fis;
struct host_to_dev_fis *reply;
u8 *outbuf = NULL;
u8 *inbuf = NULL;
dma_addr_t outbuf_dma = 0;
dma_addr_t inbuf_dma = 0;
dma_addr_t dma_buffer = 0;
int err = 0;
unsigned int taskin = 0;
unsigned int taskout = 0;
u8 nsect = 0;
unsigned int timeout;
unsigned int force_single_sector;
unsigned int transfer_size;
unsigned long task_file_data;
int intotal = outtotal + req_task->out_size;
int erasemode = 0;
taskout = req_task->out_size;
taskin = req_task->in_size;
/* 130560 = 512 * 0xFF*/
if (taskin > 130560 || taskout > 130560)
return -EINVAL;
if (taskout) {
outbuf = memdup_user(buf + outtotal, taskout);
if (IS_ERR(outbuf))
return PTR_ERR(outbuf);
outbuf_dma = dma_map_single(&dd->pdev->dev, outbuf,
taskout, DMA_TO_DEVICE);
if (dma_mapping_error(&dd->pdev->dev, outbuf_dma)) {
err = -ENOMEM;
goto abort;
}
dma_buffer = outbuf_dma;
}
if (taskin) {
inbuf = memdup_user(buf + intotal, taskin);
if (IS_ERR(inbuf)) {
err = PTR_ERR(inbuf);
inbuf = NULL;
goto abort;
}
inbuf_dma = dma_map_single(&dd->pdev->dev, inbuf,
taskin, DMA_FROM_DEVICE);
if (dma_mapping_error(&dd->pdev->dev, inbuf_dma)) {
err = -ENOMEM;
goto abort;
}
dma_buffer = inbuf_dma;
}
/* only supports PIO and non-data commands from this ioctl. */
switch (req_task->data_phase) {
case TASKFILE_OUT:
nsect = taskout / ATA_SECT_SIZE;
reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
break;
case TASKFILE_IN:
reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
break;
case TASKFILE_NO_DATA:
reply = (dd->port->rxfis + RX_FIS_D2H_REG);
break;
default:
err = -EINVAL;
goto abort;
}
/* Build the FIS. */
memset(&fis, 0, sizeof(struct host_to_dev_fis));
fis.type = 0x27;
fis.opts = 1 << 7;
fis.command = req_task->io_ports[7];
fis.features = req_task->io_ports[1];
fis.sect_count = req_task->io_ports[2];
fis.lba_low = req_task->io_ports[3];
fis.lba_mid = req_task->io_ports[4];
fis.lba_hi = req_task->io_ports[5];
/* Clear the dev bit*/
fis.device = req_task->io_ports[6] & ~0x10;
if ((req_task->in_flags.all == 0) && (req_task->out_flags.all & 1)) {
req_task->in_flags.all =
IDE_TASKFILE_STD_IN_FLAGS |
(IDE_HOB_STD_IN_FLAGS << 8);
fis.lba_low_ex = req_task->hob_ports[3];
fis.lba_mid_ex = req_task->hob_ports[4];
fis.lba_hi_ex = req_task->hob_ports[5];
fis.features_ex = req_task->hob_ports[1];
fis.sect_cnt_ex = req_task->hob_ports[2];
} else {
req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
}
force_single_sector = implicit_sector(fis.command, fis.features);
if ((taskin || taskout) && (!fis.sect_count)) {
if (nsect)
fis.sect_count = nsect;
else {
if (!force_single_sector) {
dev_warn(&dd->pdev->dev,
"data movement but "
"sect_count is 0\n");
err = -EINVAL;
goto abort;
}
}
}
dbg_printk(MTIP_DRV_NAME
" %s: cmd %x, feat %x, nsect %x,"
" sect/lbal %x, lcyl/lbam %x, hcyl/lbah %x,"
" head/dev %x\n",
__func__,
fis.command,
fis.features,
fis.sect_count,
fis.lba_low,
fis.lba_mid,
fis.lba_hi,
fis.device);
/* check for erase mode support during secure erase.*/
if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) && outbuf &&
(outbuf[0] & MTIP_SEC_ERASE_MODE)) {
erasemode = 1;
}
mtip_set_timeout(dd, &fis, &timeout, erasemode);
/* Determine the correct transfer size.*/
if (force_single_sector)
transfer_size = ATA_SECT_SIZE;
else
transfer_size = ATA_SECT_SIZE * fis.sect_count;
/* Execute the command.*/
if (mtip_exec_internal_command(dd->port,
&fis,
5,
dma_buffer,
transfer_size,
0,
timeout) < 0) {
err = -EIO;
goto abort;
}
task_file_data = readl(dd->port->mmio+PORT_TFDATA);
if ((req_task->data_phase == TASKFILE_IN) && !(task_file_data & 1)) {
reply = dd->port->rxfis + RX_FIS_PIO_SETUP;
req_task->io_ports[7] = reply->control;
} else {
reply = dd->port->rxfis + RX_FIS_D2H_REG;
req_task->io_ports[7] = reply->command;
}
/* reclaim the DMA buffers.*/
if (inbuf_dma)
dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin,
DMA_FROM_DEVICE);
if (outbuf_dma)
dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout,
DMA_TO_DEVICE);
inbuf_dma = 0;
outbuf_dma = 0;
/* return the ATA registers to the caller.*/
req_task->io_ports[1] = reply->features;
req_task->io_ports[2] = reply->sect_count;
req_task->io_ports[3] = reply->lba_low;
req_task->io_ports[4] = reply->lba_mid;
req_task->io_ports[5] = reply->lba_hi;
req_task->io_ports[6] = reply->device;
if (req_task->out_flags.all & 1) {
req_task->hob_ports[3] = reply->lba_low_ex;
req_task->hob_ports[4] = reply->lba_mid_ex;
req_task->hob_ports[5] = reply->lba_hi_ex;
req_task->hob_ports[1] = reply->features_ex;
req_task->hob_ports[2] = reply->sect_cnt_ex;
}
dbg_printk(MTIP_DRV_NAME
" %s: Completion: stat %x,"
"err %x, sect_cnt %x, lbalo %x,"
"lbamid %x, lbahi %x, dev %x\n",
__func__,
req_task->io_ports[7],
req_task->io_ports[1],
req_task->io_ports[2],
req_task->io_ports[3],
req_task->io_ports[4],
req_task->io_ports[5],
req_task->io_ports[6]);
if (taskout) {
if (copy_to_user(buf + outtotal, outbuf, taskout)) {
err = -EFAULT;
goto abort;
}
}
if (taskin) {
if (copy_to_user(buf + intotal, inbuf, taskin)) {
err = -EFAULT;
goto abort;
}
}
abort:
if (inbuf_dma)
dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin,
DMA_FROM_DEVICE);
if (outbuf_dma)
dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout,
DMA_TO_DEVICE);
kfree(outbuf);
kfree(inbuf);
return err;
}
/*
* Handle IOCTL calls from the Block Layer.
*
* This function is called by the Block Layer when it receives an IOCTL
 * command that it does not understand. If the IOCTL command is not supported,
 * this function returns -EINVAL.
*
* @dd Pointer to the driver data structure.
* @cmd IOCTL command passed from the Block Layer.
* @arg IOCTL argument passed from the Block Layer.
*
* return value
* 0 The IOCTL completed successfully.
 * -EINVAL The specified command is not supported.
* -EFAULT An error occurred copying data to a user space buffer.
* -EIO An error occurred while executing the command.
*/
static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
unsigned long arg)
{
switch (cmd) {
case HDIO_GET_IDENTITY:
{
if (copy_to_user((void __user *)arg, dd->port->identify,
sizeof(u16) * ATA_ID_WORDS))
return -EFAULT;
break;
}
case HDIO_DRIVE_CMD:
{
u8 drive_command[4];
/* Copy the user command info to our buffer. */
if (copy_from_user(drive_command,
(void __user *) arg,
sizeof(drive_command)))
return -EFAULT;
/* Execute the drive command. */
if (exec_drive_command(dd->port,
drive_command,
(void __user *) (arg+4)))
return -EIO;
/* Copy the status back to the users buffer. */
if (copy_to_user((void __user *) arg,
drive_command,
sizeof(drive_command)))
return -EFAULT;
break;
}
case HDIO_DRIVE_TASK:
{
u8 drive_command[7];
/* Copy the user command info to our buffer. */
if (copy_from_user(drive_command,
(void __user *) arg,
sizeof(drive_command)))
return -EFAULT;
/* Execute the drive command. */
if (exec_drive_task(dd->port, drive_command))
return -EIO;
/* Copy the status back to the users buffer. */
if (copy_to_user((void __user *) arg,
drive_command,
sizeof(drive_command)))
return -EFAULT;
break;
}
case HDIO_DRIVE_TASKFILE: {
ide_task_request_t req_task;
int ret, outtotal;
if (copy_from_user(&req_task, (void __user *) arg,
sizeof(req_task)))
return -EFAULT;
outtotal = sizeof(req_task);
ret = exec_drive_taskfile(dd, (void __user *) arg,
&req_task, outtotal);
if (copy_to_user((void __user *) arg, &req_task,
sizeof(req_task)))
return -EFAULT;
return ret;
}
default:
return -EINVAL;
}
return 0;
}
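/*
 * Minimal userspace sketch (illustrative only, not part of the driver)
 * showing how the HDIO_GET_IDENTITY path above can be exercised. The
 * device node name /dev/rssda is an assumption; substitute the disk
 * created by this driver, and run as root since the block ioctl handler
 * requires CAP_SYS_ADMIN.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/hdreg.h>
 *
 *	int main(void)
 *	{
 *		unsigned short id[256];	// 256 words = 512 bytes
 *		int fd = open("/dev/rssda", O_RDONLY);
 *
 *		if (fd < 0 || ioctl(fd, HDIO_GET_IDENTITY, id) < 0) {
 *			perror("HDIO_GET_IDENTITY");
 *			return 1;
 *		}
 *		printf("identify word 0: 0x%04x\n", id[0]);
 *		return 0;
 *	}
 */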
/*
 * Submit an I/O to the hardware.
 *
 * This function is called by the block layer to issue an I/O request
 * to the device. Completion is reported back through the blk-mq
 * completion path (mtip_softirq_done_fn()) once the hardware finishes
 * the command.
 *
 * @dd      Pointer to the driver data structure.
 * @rq      Pointer to the request being issued.
 * @command Pointer to the per-request mtip_cmd structure.
 * @hctx    Pointer to the hardware queue context the request was
 *          queued on.
*
* return value
* None
*/
static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
struct mtip_cmd *command,
struct blk_mq_hw_ctx *hctx)
{
struct mtip_cmd_hdr *hdr =
dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
struct host_to_dev_fis *fis;
struct mtip_port *port = dd->port;
int dma_dir = rq_data_dir(rq) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
u64 start = blk_rq_pos(rq);
unsigned int nsect = blk_rq_sectors(rq);
unsigned int nents;
/* Map the scatter list for DMA access */
nents = blk_rq_map_sg(hctx->queue, rq, command->sg);
nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
prefetch(&port->flags);
command->scatter_ents = nents;
/*
* The number of retries for this command before it is
* reported as a failure to the upper layers.
*/
command->retries = MTIP_MAX_RETRIES;
/* Fill out fis */
fis = command->command;
fis->type = 0x27;
fis->opts = 1 << 7;
if (dma_dir == DMA_FROM_DEVICE)
fis->command = ATA_CMD_FPDMA_READ;
else
fis->command = ATA_CMD_FPDMA_WRITE;
fis->lba_low = start & 0xFF;
fis->lba_mid = (start >> 8) & 0xFF;
fis->lba_hi = (start >> 16) & 0xFF;
fis->lba_low_ex = (start >> 24) & 0xFF;
fis->lba_mid_ex = (start >> 32) & 0xFF;
fis->lba_hi_ex = (start >> 40) & 0xFF;
fis->device = 1 << 6;
fis->features = nsect & 0xFF;
fis->features_ex = (nsect >> 8) & 0xFF;
fis->sect_count = ((rq->tag << 3) | (rq->tag >> 5));
fis->sect_cnt_ex = 0;
fis->control = 0;
fis->res2 = 0;
fis->res3 = 0;
fill_command_sg(dd, command, nents);
if (unlikely(command->unaligned))
fis->device |= 1 << 7;
/* Populate the command header */
hdr->ctba = cpu_to_le32(command->command_dma & 0xFFFFFFFF);
if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags))
hdr->ctbau = cpu_to_le32((command->command_dma >> 16) >> 16);
hdr->opts = cpu_to_le32((nents << 16) | 5 | AHCI_CMD_PREFETCH);
hdr->byte_count = 0;
command->direction = dma_dir;
/*
 * Hold off issuing this command if an internal command is in progress
 * or error handling is active; the service thread will issue it later.
*/
if (unlikely(port->flags & MTIP_PF_PAUSE_IO)) {
set_bit(rq->tag, port->cmds_to_issue);
set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
return;
}
/* Issue the command to the hardware */
mtip_issue_ncq_command(port, rq->tag);
}
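/*
 * Worked example (illustrative only): for FPDMA READ/WRITE the NCQ tag
 * is carried in bits 7:3 of the count field and the sector count moves
 * into the features registers. Assuming rq->tag == 5, a start LBA of
 * 0x123456789A and nsect == 8, the FIS built above contains:
 *
 *	fis->sect_count  = (5 << 3) | (5 >> 5);		// 0x28
 *	fis->features    = 8 & 0xFF;			// 0x08
 *	fis->features_ex = (8 >> 8) & 0xFF;		// 0x00
 *	fis->lba_low     = 0x9A;  fis->lba_mid    = 0x78;
 *	fis->lba_hi      = 0x56;  fis->lba_low_ex = 0x34;
 *	fis->lba_mid_ex  = 0x12;  fis->lba_hi_ex  = 0x00;
 */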
/*
* Sysfs status dump.
*
 * @dev Pointer to the device structure, passed by the kernel.
* @attr Pointer to the device_attribute structure passed by the kernel.
* @buf Pointer to the char buffer that will receive the stats info.
*
* return value
* The size, in bytes, of the data copied into buf.
*/
static ssize_t mtip_hw_show_status(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct driver_data *dd = dev_to_disk(dev)->private_data;
int size = 0;
if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
size += sprintf(buf, "%s", "thermal_shutdown\n");
else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
size += sprintf(buf, "%s", "write_protect\n");
else
size += sprintf(buf, "%s", "online\n");
return size;
}
static DEVICE_ATTR(status, 0444, mtip_hw_show_status, NULL);
static struct attribute *mtip_disk_attrs[] = {
&dev_attr_status.attr,
NULL,
};
static const struct attribute_group mtip_disk_attr_group = {
.attrs = mtip_disk_attrs,
};
static const struct attribute_group *mtip_disk_attr_groups[] = {
&mtip_disk_attr_group,
NULL,
};
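/*
 * Usage note (illustrative, assuming the attribute group is attached to
 * the gendisk when the disk is added and the disk is named rssda): the
 * current state can be read from sysfs, e.g.
 *
 *	$ cat /sys/block/rssda/status
 *	online
 *
 * "write_protect" and "thermal_shutdown" are reported when the
 * corresponding MTIP_DDF_* flags are set.
 */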
static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
size_t len, loff_t *offset)
{
struct driver_data *dd = (struct driver_data *)f->private_data;
char *buf;
u32 group_allocated;
int size = *offset;
int n, rv = 0;
if (!len || size)
return 0;
buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
size += sprintf(&buf[size], "H/ S ACTive : [ 0x");
for (n = dd->slot_groups-1; n >= 0; n--)
size += sprintf(&buf[size], "%08X ",
readl(dd->port->s_active[n]));
size += sprintf(&buf[size], "]\n");
size += sprintf(&buf[size], "H/ Command Issue : [ 0x");
for (n = dd->slot_groups-1; n >= 0; n--)
size += sprintf(&buf[size], "%08X ",
readl(dd->port->cmd_issue[n]));
size += sprintf(&buf[size], "]\n");
size += sprintf(&buf[size], "H/ Completed : [ 0x");
for (n = dd->slot_groups-1; n >= 0; n--)
size += sprintf(&buf[size], "%08X ",
readl(dd->port->completed[n]));
size += sprintf(&buf[size], "]\n");
size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n",
readl(dd->port->mmio + PORT_IRQ_STAT));
size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n",
readl(dd->mmio + HOST_IRQ_STAT));
size += sprintf(&buf[size], "\n");
size += sprintf(&buf[size], "L/ Commands in Q : [ 0x");
for (n = dd->slot_groups-1; n >= 0; n--) {
if (sizeof(long) > sizeof(u32))
group_allocated =
dd->port->cmds_to_issue[n/2] >> (32*(n&1));
else
group_allocated = dd->port->cmds_to_issue[n];
size += sprintf(&buf[size], "%08X ", group_allocated);
}
size += sprintf(&buf[size], "]\n");
*offset = size <= len ? size : len;
size = copy_to_user(ubuf, buf, *offset);
if (size)
rv = -EFAULT;
kfree(buf);
return rv ? rv : *offset;
}
static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
size_t len, loff_t *offset)
{
struct driver_data *dd = (struct driver_data *)f->private_data;
char *buf;
int size = *offset;
int rv = 0;
if (!len || size)
return 0;
buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
dd->port->flags);
size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n",
dd->dd_flag);
*offset = size <= len ? size : len;
size = copy_to_user(ubuf, buf, *offset);
if (size)
rv = -EFAULT;
kfree(buf);
return rv ? rv : *offset;
}
static const struct file_operations mtip_regs_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = mtip_hw_read_registers,
.llseek = no_llseek,
};
static const struct file_operations mtip_flags_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = mtip_hw_read_flags,
.llseek = no_llseek,
};
static int mtip_hw_debugfs_init(struct driver_data *dd)
{
if (!dfs_parent)
return -1;
dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
if (IS_ERR_OR_NULL(dd->dfs_node)) {
dev_warn(&dd->pdev->dev,
"Error creating node %s under debugfs\n",
dd->disk->disk_name);
dd->dfs_node = NULL;
return -1;
}
debugfs_create_file("flags", 0444, dd->dfs_node, dd, &mtip_flags_fops);
debugfs_create_file("registers", 0444, dd->dfs_node, dd,
&mtip_regs_fops);
return 0;
}
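/*
 * Usage note (illustrative, assuming debugfs is mounted at
 * /sys/kernel/debug and dfs_parent was created there by the driver's
 * module init code, which is not shown in this excerpt):
 *
 *	$ cat /sys/kernel/debug/<dfs_parent>/rssda/flags
 *	Flag-port : [ 00000040 ]
 *	Flag-dd   : [ 00000000 ]
 *
 * The exact values depend on the device state; the format matches
 * mtip_hw_read_flags() above.
 */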
static void mtip_hw_debugfs_exit(struct driver_data *dd)
{
debugfs_remove_recursive(dd->dfs_node);
}
/*
* Perform any init/resume time hardware setup
*
* @dd Pointer to the driver data structure.
*
* return value
* None
*/
static inline void hba_setup(struct driver_data *dd)
{
u32 hwdata;
hwdata = readl(dd->mmio + HOST_HSORG);
/* interrupt bug workaround: use only 1 IS bit.*/
writel(hwdata |
HSORG_DISABLE_SLOTGRP_INTR |
HSORG_DISABLE_SLOTGRP_PXIS,
dd->mmio + HOST_HSORG);
}
static int mtip_device_unaligned_constrained(struct driver_data *dd)
{
return (dd->pdev->device == P420M_DEVICE_ID ? 1 : 0);
}
/*
* Detect the details of the product, and store anything needed
* into the driver data structure. This includes product type and
* version and number of slot groups.
*
* @dd Pointer to the driver data structure.
*
* return value
* None
*/
static void mtip_detect_product(struct driver_data *dd)
{
u32 hwdata;
unsigned int rev, slotgroups;
/*
* HBA base + 0xFC [15:0] - vendor-specific hardware interface
* info register:
* [15:8] hardware/software interface rev#
* [ 3] asic-style interface
* [ 2:0] number of slot groups, minus 1 (only valid for asic-style).
*/
hwdata = readl(dd->mmio + HOST_HSORG);
dd->product_type = MTIP_PRODUCT_UNKNOWN;
dd->slot_groups = 1;
if (hwdata & 0x8) {
dd->product_type = MTIP_PRODUCT_ASICFPGA;
rev = (hwdata & HSORG_HWREV) >> 8;
slotgroups = (hwdata & HSORG_SLOTGROUPS) + 1;
dev_info(&dd->pdev->dev,
"ASIC-FPGA design, HS rev 0x%x, "
"%i slot groups [%i slots]\n",
rev,
slotgroups,
slotgroups * 32);
if (slotgroups > MTIP_MAX_SLOT_GROUPS) {
dev_warn(&dd->pdev->dev,
"Warning: driver only supports "
"%i slot groups.\n", MTIP_MAX_SLOT_GROUPS);
slotgroups = MTIP_MAX_SLOT_GROUPS;
}
dd->slot_groups = slotgroups;
return;
}
dev_warn(&dd->pdev->dev, "Unrecognized product id\n");
}
/*
* Blocking wait for FTL rebuild to complete
*
* @dd Pointer to the DRIVER_DATA structure.
*
* return value
* 0 FTL rebuild completed successfully
* -EFAULT FTL rebuild error/timeout/interruption
*/
static int mtip_ftl_rebuild_poll(struct driver_data *dd)
{
unsigned long timeout, cnt = 0, start;
dev_warn(&dd->pdev->dev,
"FTL rebuild in progress. Polling for completion.\n");
start = jiffies;
timeout = jiffies + msecs_to_jiffies(MTIP_FTL_REBUILD_TIMEOUT_MS);
do {
if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
&dd->dd_flag)))
return -EFAULT;
if (mtip_check_surprise_removal(dd))
return -EFAULT;
if (mtip_get_identify(dd->port, NULL) < 0)
return -EFAULT;
if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
MTIP_FTL_REBUILD_MAGIC) {
ssleep(1);
/* Print message every 3 minutes */
if (cnt++ >= 180) {
dev_warn(&dd->pdev->dev,
"FTL rebuild in progress (%d secs).\n",
jiffies_to_msecs(jiffies - start) / 1000);
cnt = 0;
}
} else {
dev_warn(&dd->pdev->dev,
"FTL rebuild complete (%d secs).\n",
jiffies_to_msecs(jiffies - start) / 1000);
mtip_block_initialize(dd);
return 0;
}
} while (time_before(jiffies, timeout));
/* Check for timeout */
dev_err(&dd->pdev->dev,
"Timed out waiting for FTL rebuild to complete (%d secs).\n",
jiffies_to_msecs(jiffies - start) / 1000);
return -EFAULT;
}
static void mtip_softirq_done_fn(struct request *rq)
{
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
struct driver_data *dd = rq->q->queuedata;
/* Unmap the DMA scatter list entries */
dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents,
cmd->direction);
if (unlikely(cmd->unaligned))
atomic_inc(&dd->port->cmd_slot_unal);
blk_mq_end_request(rq, cmd->status);
}
static bool mtip_abort_cmd(struct request *req, void *data)
{
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
struct driver_data *dd = data;
dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
clear_bit(req->tag, dd->port->cmds_to_issue);
cmd->status = BLK_STS_IOERR;
mtip_softirq_done_fn(req);
return true;
}
static bool mtip_queue_cmd(struct request *req, void *data)
{
struct driver_data *dd = data;
set_bit(req->tag, dd->port->cmds_to_issue);
blk_abort_request(req);
return true;
}
/*
* service thread to issue queued commands
*
* @data Pointer to the driver data structure.
*
* return value
* 0
*/
static int mtip_service_thread(void *data)
{
struct driver_data *dd = (struct driver_data *)data;
unsigned long slot, slot_start, slot_wrap, to;
unsigned int num_cmd_slots = dd->slot_groups * 32;
struct mtip_port *port = dd->port;
while (1) {
if (kthread_should_stop() ||
test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
goto st_out;
clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
/*
 * the condition is to check that neither an internal command is
 * in progress nor error handling is active
*/
wait_event_interruptible(port->svc_wait, (port->flags) &&
(port->flags & MTIP_PF_SVC_THD_WORK));
if (kthread_should_stop() ||
test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
goto st_out;
if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
&dd->dd_flag)))
goto st_out;
set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
restart_eh:
/* Demux bits: start with error handling */
if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) {
mtip_handle_tfe(dd);
clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
}
if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags))
goto restart_eh;
if (test_bit(MTIP_PF_TO_ACTIVE_BIT, &port->flags)) {
to = jiffies + msecs_to_jiffies(5000);
do {
mdelay(100);
} while (atomic_read(&dd->irq_workers_active) != 0 &&
time_before(jiffies, to));
if (atomic_read(&dd->irq_workers_active) != 0)
dev_warn(&dd->pdev->dev,
"Completion workers still active!");
blk_mq_quiesce_queue(dd->queue);
blk_mq_tagset_busy_iter(&dd->tags, mtip_queue_cmd, dd);
set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
if (mtip_device_reset(dd))
blk_mq_tagset_busy_iter(&dd->tags,
mtip_abort_cmd, dd);
clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);
blk_mq_unquiesce_queue(dd->queue);
}
if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
slot = 1;
/* used to restrict the loop to one iteration */
slot_start = num_cmd_slots;
slot_wrap = 0;
while (1) {
slot = find_next_bit(port->cmds_to_issue,
num_cmd_slots, slot);
if (slot_wrap == 1) {
if ((slot_start >= slot) ||
(slot >= num_cmd_slots))
break;
}
if (unlikely(slot_start == num_cmd_slots))
slot_start = slot;
if (unlikely(slot == num_cmd_slots)) {
slot = 1;
slot_wrap = 1;
continue;
}
/* Issue the command to the hardware */
mtip_issue_ncq_command(port, slot);
clear_bit(slot, port->cmds_to_issue);
}
clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
}
if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
if (mtip_ftl_rebuild_poll(dd) == 0)
clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
}
}
st_out:
return 0;
}
/*
* DMA region teardown
*
* @dd Pointer to driver_data structure
*
* return value
* None
*/
static void mtip_dma_free(struct driver_data *dd)
{
struct mtip_port *port = dd->port;
if (port->block1)
dma_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
port->block1, port->block1_dma);
if (port->command_list) {
dma_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
port->command_list, port->command_list_dma);
}
}
/*
* DMA region setup
*
* @dd Pointer to driver_data structure
*
* return value
* -ENOMEM Not enough free DMA region space to initialize driver
*/
static int mtip_dma_alloc(struct driver_data *dd)
{
struct mtip_port *port = dd->port;
/* Allocate dma memory for RX Fis, Identify, and Sector Buffer */
port->block1 =
dma_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
&port->block1_dma, GFP_KERNEL);
if (!port->block1)
return -ENOMEM;
/* Allocate dma memory for command list */
port->command_list =
dma_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
&port->command_list_dma, GFP_KERNEL);
if (!port->command_list) {
dma_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
port->block1, port->block1_dma);
port->block1 = NULL;
port->block1_dma = 0;
return -ENOMEM;
}
/* Setup all pointers into first DMA region */
port->rxfis = port->block1 + AHCI_RX_FIS_OFFSET;
port->rxfis_dma = port->block1_dma + AHCI_RX_FIS_OFFSET;
port->identify = port->block1 + AHCI_IDFY_OFFSET;
port->identify_dma = port->block1_dma + AHCI_IDFY_OFFSET;
port->log_buf = port->block1 + AHCI_SECTBUF_OFFSET;
port->log_buf_dma = port->block1_dma + AHCI_SECTBUF_OFFSET;
port->smart_buf = port->block1 + AHCI_SMARTBUF_OFFSET;
port->smart_buf_dma = port->block1_dma + AHCI_SMARTBUF_OFFSET;
return 0;
}
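/*
 * Layout note (illustrative): block1 is one coherent allocation carved
 * into fixed-offset sub-buffers; the AHCI_*_OFFSET and BLOCK_DMA_ALLOC_SZ
 * values are defined in mtip32xx.h and are not repeated here.
 *
 *	block1 + AHCI_RX_FIS_OFFSET   -> received FIS area  (port->rxfis)
 *	block1 + AHCI_IDFY_OFFSET     -> identify buffer    (port->identify)
 *	block1 + AHCI_SECTBUF_OFFSET  -> log page buffer    (port->log_buf)
 *	block1 + AHCI_SMARTBUF_OFFSET -> SMART data buffer  (port->smart_buf)
 *
 * The command list lives in its own coherent allocation of
 * AHCI_CMD_TBL_SZ bytes.
 */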
static int mtip_hw_get_identify(struct driver_data *dd)
{
struct smart_attr attr242;
unsigned char *buf;
int rv;
if (mtip_get_identify(dd->port, NULL) < 0)
return -EFAULT;
if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
MTIP_FTL_REBUILD_MAGIC) {
set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
return MTIP_FTL_REBUILD_MAGIC;
}
mtip_dump_identify(dd->port);
/* check write protect, over temp and rebuild statuses */
rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
dd->port->log_buf,
dd->port->log_buf_dma, 1);
if (rv) {
dev_warn(&dd->pdev->dev,
"Error in READ LOG EXT (10h) command\n");
/* non-critical error, don't fail the load */
} else {
buf = (unsigned char *)dd->port->log_buf;
if (buf[259] & 0x1) {
dev_info(&dd->pdev->dev,
"Write protect bit is set.\n");
set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
}
if (buf[288] == 0xF7) {
dev_info(&dd->pdev->dev,
"Exceeded Tmax, drive in thermal shutdown.\n");
set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
}
if (buf[288] == 0xBF) {
dev_info(&dd->pdev->dev,
"Drive indicates rebuild has failed.\n");
set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
}
}
/* get write protect progress */
memset(&attr242, 0, sizeof(struct smart_attr));
if (mtip_get_smart_attr(dd->port, 242, &attr242))
dev_warn(&dd->pdev->dev,
"Unable to check write protect progress\n");
else
dev_info(&dd->pdev->dev,
"Write protect progress: %u%% (%u blocks)\n",
attr242.cur, le32_to_cpu(attr242.data));
return rv;
}
/*
* Called once for each card.
*
* @dd Pointer to the driver data structure.
*
* return value
* 0 on success, else an error code.
*/
static int mtip_hw_init(struct driver_data *dd)
{
int i;
int rv;
unsigned long timeout, timetaken;
dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
mtip_detect_product(dd);
if (dd->product_type == MTIP_PRODUCT_UNKNOWN) {
rv = -EIO;
goto out1;
}
hba_setup(dd);
dd->port = kzalloc_node(sizeof(struct mtip_port), GFP_KERNEL,
dd->numa_node);
if (!dd->port)
return -ENOMEM;
/* Continue workqueue setup */
for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
dd->work[i].port = dd->port;
/* Enable unaligned IO constraints for some devices */
if (mtip_device_unaligned_constrained(dd))
dd->unal_qdepth = MTIP_MAX_UNALIGNED_SLOTS;
else
dd->unal_qdepth = 0;
atomic_set(&dd->port->cmd_slot_unal, dd->unal_qdepth);
/* Spinlock to prevent concurrent issue */
for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
spin_lock_init(&dd->port->cmd_issue_lock[i]);
/* Set the port mmio base address. */
dd->port->mmio = dd->mmio + PORT_OFFSET;
dd->port->dd = dd;
/* DMA allocations */
rv = mtip_dma_alloc(dd);
if (rv < 0)
goto out1;
/* Setup the pointers to the extended s_active and CI registers. */
for (i = 0; i < dd->slot_groups; i++) {
dd->port->s_active[i] =
dd->port->mmio + i*0x80 + PORT_SCR_ACT;
dd->port->cmd_issue[i] =
dd->port->mmio + i*0x80 + PORT_COMMAND_ISSUE;
dd->port->completed[i] =
dd->port->mmio + i*0x80 + PORT_SDBV;
}
timetaken = jiffies;
timeout = jiffies + msecs_to_jiffies(30000);
while (((readl(dd->port->mmio + PORT_SCR_STAT) & 0x0F) != 0x03) &&
time_before(jiffies, timeout)) {
mdelay(100);
}
if (unlikely(mtip_check_surprise_removal(dd))) {
timetaken = jiffies - timetaken;
dev_warn(&dd->pdev->dev,
"Surprise removal detected at %u ms\n",
jiffies_to_msecs(timetaken));
rv = -ENODEV;
goto out2;
}
if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
timetaken = jiffies - timetaken;
dev_warn(&dd->pdev->dev,
"Removal detected at %u ms\n",
jiffies_to_msecs(timetaken));
rv = -EFAULT;
goto out2;
}
/* Conditionally reset the HBA. */
if (!(readl(dd->mmio + HOST_CAP) & HOST_CAP_NZDMA)) {
if (mtip_hba_reset(dd) < 0) {
dev_err(&dd->pdev->dev,
"Card did not reset within timeout\n");
rv = -EIO;
goto out2;
}
} else {
/* Clear any pending interrupts on the HBA */
writel(readl(dd->mmio + HOST_IRQ_STAT),
dd->mmio + HOST_IRQ_STAT);
}
mtip_init_port(dd->port);
mtip_start_port(dd->port);
/* Setup the ISR and enable interrupts. */
rv = request_irq(dd->pdev->irq, mtip_irq_handler, IRQF_SHARED,
dev_driver_string(&dd->pdev->dev), dd);
if (rv) {
dev_err(&dd->pdev->dev,
"Unable to allocate IRQ %d\n", dd->pdev->irq);
goto out2;
}
irq_set_affinity_hint(dd->pdev->irq, get_cpu_mask(dd->isr_binding));
/* Enable interrupts on the HBA. */
writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
dd->mmio + HOST_CTL);
init_waitqueue_head(&dd->port->svc_wait);
if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
rv = -EFAULT;
goto out3;
}
return rv;
out3:
/* Disable interrupts on the HBA. */
writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
dd->mmio + HOST_CTL);
/* Release the IRQ. */
irq_set_affinity_hint(dd->pdev->irq, NULL);
free_irq(dd->pdev->irq, dd);
out2:
mtip_deinit_port(dd->port);
mtip_dma_free(dd);
out1:
/* Free the memory allocated for the port structure. */
kfree(dd->port);
return rv;
}
static int mtip_standby_drive(struct driver_data *dd)
{
int rv = 0;
if (dd->sr || !dd->port)
return -ENODEV;
/*
* Send standby immediate (E0h) to the drive so that it
* saves its state.
*/
if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
!test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag) &&
!test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) {
rv = mtip_standby_immediate(dd->port);
if (rv)
dev_warn(&dd->pdev->dev,
"STANDBY IMMEDIATE failed\n");
}
return rv;
}
/*
* Called to deinitialize an interface.
*
* @dd Pointer to the driver data structure.
*
* return value
* 0
*/
static int mtip_hw_exit(struct driver_data *dd)
{
if (!dd->sr) {
/* de-initialize the port. */
mtip_deinit_port(dd->port);
/* Disable interrupts on the HBA. */
writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
dd->mmio + HOST_CTL);
}
/* Release the IRQ. */
irq_set_affinity_hint(dd->pdev->irq, NULL);
free_irq(dd->pdev->irq, dd);
msleep(1000);
/* Free dma regions */
mtip_dma_free(dd);
/* Free the memory allocated for the port structure. */
kfree(dd->port);
dd->port = NULL;
return 0;
}
/*
* Issue a Standby Immediate command to the device.
*
* This function is called by the Block Layer just before the
* system powers off during a shutdown.
*
* @dd Pointer to the driver data structure.
*
* return value
* 0
*/
static int mtip_hw_shutdown(struct driver_data *dd)
{
/*
* Send standby immediate (E0h) to the drive so that it
* saves its state.
*/
mtip_standby_drive(dd);
return 0;
}
/*
* Suspend function
*
* This function is called by the Block Layer just before the
* system hibernates.
*
* @dd Pointer to the driver data structure.
*
* return value
* 0 Suspend was successful
* -EFAULT Suspend was not successful
*/
static int mtip_hw_suspend(struct driver_data *dd)
{
/*
* Send standby immediate (E0h) to the drive
* so that it saves its state.
*/
if (mtip_standby_drive(dd) != 0) {
dev_err(&dd->pdev->dev,
"Failed standby-immediate command\n");
return -EFAULT;
}
/* Disable interrupts on the HBA.*/
writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
dd->mmio + HOST_CTL);
mtip_deinit_port(dd->port);
return 0;
}
/*
* Resume function
*
* This function is called by the Block Layer as the
* system resumes.
*
* @dd Pointer to the driver data structure.
*
* return value
* 0 Resume was successful
* -EFAULT Resume was not successful
*/
static int mtip_hw_resume(struct driver_data *dd)
{
/* Perform any needed hardware setup steps */
hba_setup(dd);
/* Reset the HBA */
if (mtip_hba_reset(dd) != 0) {
dev_err(&dd->pdev->dev,
"Unable to reset the HBA\n");
return -EFAULT;
}
/*
* Enable the port, DMA engine, and FIS reception specific
* h/w in controller.
*/
mtip_init_port(dd->port);
mtip_start_port(dd->port);
/* Enable interrupts on the HBA.*/
writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
dd->mmio + HOST_CTL);
return 0;
}
/*
* Helper function for reusing disk name
* upon hot insertion.
*/
static int rssd_disk_name_format(char *prefix,
int index,
char *buf,
int buflen)
{
const int base = 'z' - 'a' + 1;
char *begin = buf + strlen(prefix);
char *end = buf + buflen;
char *p;
int unit;
p = end - 1;
*p = '\0';
unit = base;
do {
if (p == begin)
return -EINVAL;
*--p = 'a' + (index % unit);
index = (index / unit) - 1;
} while (index >= 0);
memmove(begin, p, end - p);
memcpy(buf, prefix, strlen(prefix));
return 0;
}
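/*
 * Worked examples (illustrative only) of rssd_disk_name_format() with
 * the "rssd" prefix used below:
 *
 *	index 0   -> "rssda"
 *	index 25  -> "rssdz"
 *	index 26  -> "rssdaa"
 *	index 27  -> "rssdab"
 *
 * The index is the value handed out by ida_alloc() in
 * mtip_block_initialize(), so a re-inserted card reuses the lowest
 * free name.
 */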
/*
* Block layer IOCTL handler.
*
* @dev Pointer to the block_device structure.
* @mode ignored
* @cmd IOCTL command passed from the user application.
* @arg Argument passed from the user application.
*
* return value
* 0 IOCTL completed successfully.
* -ENOTTY IOCTL not supported or invalid driver data
* structure pointer.
*/
static int mtip_block_ioctl(struct block_device *dev,
blk_mode_t mode,
unsigned cmd,
unsigned long arg)
{
struct driver_data *dd = dev->bd_disk->private_data;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!dd)
return -ENOTTY;
if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
return -ENOTTY;
switch (cmd) {
case BLKFLSBUF:
return -ENOTTY;
default:
return mtip_hw_ioctl(dd, cmd, arg);
}
}
#ifdef CONFIG_COMPAT
/*
* Block layer compat IOCTL handler.
*
* @dev Pointer to the block_device structure.
* @mode ignored
* @cmd IOCTL command passed from the user application.
* @arg Argument passed from the user application.
*
* return value
* 0 IOCTL completed successfully.
* -ENOTTY IOCTL not supported or invalid driver data
* structure pointer.
*/
static int mtip_block_compat_ioctl(struct block_device *dev,
blk_mode_t mode,
unsigned cmd,
unsigned long arg)
{
struct driver_data *dd = dev->bd_disk->private_data;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!dd)
return -ENOTTY;
if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
return -ENOTTY;
switch (cmd) {
case BLKFLSBUF:
return -ENOTTY;
case HDIO_DRIVE_TASKFILE: {
struct mtip_compat_ide_task_request_s __user *compat_req_task;
ide_task_request_t req_task;
int compat_tasksize, outtotal, ret;
compat_tasksize =
sizeof(struct mtip_compat_ide_task_request_s);
compat_req_task =
(struct mtip_compat_ide_task_request_s __user *) arg;
if (copy_from_user(&req_task, (void __user *) arg,
compat_tasksize - (2 * sizeof(compat_long_t))))
return -EFAULT;
if (get_user(req_task.out_size, &compat_req_task->out_size))
return -EFAULT;
if (get_user(req_task.in_size, &compat_req_task->in_size))
return -EFAULT;
outtotal = sizeof(struct mtip_compat_ide_task_request_s);
ret = exec_drive_taskfile(dd, (void __user *) arg,
&req_task, outtotal);
if (copy_to_user((void __user *) arg, &req_task,
compat_tasksize -
(2 * sizeof(compat_long_t))))
return -EFAULT;
if (put_user(req_task.out_size, &compat_req_task->out_size))
return -EFAULT;
if (put_user(req_task.in_size, &compat_req_task->in_size))
return -EFAULT;
return ret;
}
default:
return mtip_hw_ioctl(dd, cmd, arg);
}
}
#endif
/*
* Obtain the geometry of the device.
*
 * You may think that this function is obsolete, but some applications,
 * fdisk for example, still use CHS values. This function describes the
 * device as having 224 heads and 56 sectors per cylinder. These values are
 * chosen so that each cylinder is aligned on a 4KB boundary. Since a
 * partition is described in terms of a start and end cylinder, this means
 * that each partition is also 4KB aligned. Non-aligned partitions adversely
 * affect performance.
*
 * @dev Pointer to the block_device structure.
* @geo Pointer to a hd_geometry structure.
*
* return value
* 0 Operation completed successfully.
* -ENOTTY An error occurred while reading the drive capacity.
*/
static int mtip_block_getgeo(struct block_device *dev,
struct hd_geometry *geo)
{
struct driver_data *dd = dev->bd_disk->private_data;
sector_t capacity;
if (!dd)
return -ENOTTY;
if (!(mtip_hw_get_capacity(dd, &capacity))) {
dev_warn(&dd->pdev->dev,
"Could not get drive capacity.\n");
return -ENOTTY;
}
geo->heads = 224;
geo->sectors = 56;
sector_div(capacity, (geo->heads * geo->sectors));
geo->cylinders = capacity;
return 0;
}
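/*
 * Worked example (illustrative only): with 224 heads and 56 sectors per
 * cylinder, one cylinder spans 224 * 56 = 12544 sectors, which is a
 * multiple of 8 sectors (4 KiB), so cylinder-aligned partitions are also
 * 4 KiB aligned. Assuming a capacity of 125,440,000 sectors (~64 GB):
 *
 *	geo->cylinders = 125440000 / 12544;	// 10000
 */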
static void mtip_block_free_disk(struct gendisk *disk)
{
struct driver_data *dd = disk->private_data;
ida_free(&rssd_index_ida, dd->index);
kfree(dd);
}
/*
* Block device operation function.
*
* This structure contains pointers to the functions required by the block
* layer.
*/
static const struct block_device_operations mtip_block_ops = {
.ioctl = mtip_block_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mtip_block_compat_ioctl,
#endif
.getgeo = mtip_block_getgeo,
.free_disk = mtip_block_free_disk,
.owner = THIS_MODULE
};
static inline bool is_se_active(struct driver_data *dd)
{
if (unlikely(test_bit(MTIP_PF_SE_ACTIVE_BIT, &dd->port->flags))) {
if (dd->port->ic_pause_timer) {
unsigned long to = dd->port->ic_pause_timer +
msecs_to_jiffies(1000);
if (time_after(jiffies, to)) {
clear_bit(MTIP_PF_SE_ACTIVE_BIT,
&dd->port->flags);
clear_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
dd->port->ic_pause_timer = 0;
wake_up_interruptible(&dd->port->svc_wait);
return false;
}
}
return true;
}
return false;
}
static inline bool is_stopped(struct driver_data *dd, struct request *rq)
{
if (likely(!(dd->dd_flag & MTIP_DDF_STOP_IO)))
return false;
if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
return true;
if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
return true;
if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag) &&
rq_data_dir(rq))
return true;
if (test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
return true;
if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
return true;
return false;
}
static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
struct request *rq)
{
struct driver_data *dd = hctx->queue->queuedata;
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
if (rq_data_dir(rq) == READ || !dd->unal_qdepth)
return false;
/*
* If unaligned depth must be limited on this controller, mark it
 * as unaligned if the IO isn't on a 4k boundary (start or length).
*/
if (blk_rq_sectors(rq) <= 64) {
if ((blk_rq_pos(rq) & 7) || (blk_rq_sectors(rq) & 7))
cmd->unaligned = 1;
}
if (cmd->unaligned && atomic_dec_if_positive(&dd->port->cmd_slot_unal) >= 0)
return true;
return false;
}
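/*
 * Worked example (illustrative only): a write is treated as unaligned
 * when it is at most 64 sectors long and either its start LBA or its
 * length is not a multiple of 8 sectors (8 * 512 B = 4 KiB):
 *
 *	blk_rq_pos(rq) == 16, blk_rq_sectors(rq) == 8  -> aligned
 *	blk_rq_pos(rq) == 17, blk_rq_sectors(rq) == 8  -> unaligned
 *
 * Unaligned writes consume a slot from cmd_slot_unal, limiting how many
 * can be outstanding at once on affected controllers.
 */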
static blk_status_t mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
struct request *rq)
{
struct driver_data *dd = hctx->queue->queuedata;
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
struct mtip_int_cmd *icmd = cmd->icmd;
struct mtip_cmd_hdr *hdr =
dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
struct mtip_cmd_sg *command_sg;
if (mtip_commands_active(dd->port))
return BLK_STS_DEV_RESOURCE;
hdr->ctba = cpu_to_le32(cmd->command_dma & 0xFFFFFFFF);
if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags))
hdr->ctbau = cpu_to_le32((cmd->command_dma >> 16) >> 16);
/* Populate the SG list */
hdr->opts = cpu_to_le32(icmd->opts | icmd->fis_len);
if (icmd->buf_len) {
command_sg = cmd->command + AHCI_CMD_TBL_HDR_SZ;
command_sg->info = cpu_to_le32((icmd->buf_len-1) & 0x3FFFFF);
command_sg->dba = cpu_to_le32(icmd->buffer & 0xFFFFFFFF);
command_sg->dba_upper =
cpu_to_le32((icmd->buffer >> 16) >> 16);
hdr->opts |= cpu_to_le32((1 << 16));
}
/* Populate the command header */
hdr->byte_count = 0;
blk_mq_start_request(rq);
mtip_issue_non_ncq_command(dd->port, rq->tag);
return BLK_STS_OK;
}
static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct driver_data *dd = hctx->queue->queuedata;
struct request *rq = bd->rq;
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
if (blk_rq_is_passthrough(rq))
return mtip_issue_reserved_cmd(hctx, rq);
if (unlikely(mtip_check_unal_depth(hctx, rq)))
return BLK_STS_DEV_RESOURCE;
if (is_se_active(dd) || is_stopped(dd, rq))
return BLK_STS_IOERR;
blk_mq_start_request(rq);
mtip_hw_submit_io(dd, rq, cmd, hctx);
return BLK_STS_OK;
}
static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx)
{
struct driver_data *dd = set->driver_data;
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
if (!cmd->command)
return;
dma_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, cmd->command,
cmd->command_dma);
}
static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
struct driver_data *dd = set->driver_data;
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
cmd->command = dma_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
&cmd->command_dma, GFP_KERNEL);
if (!cmd->command)
return -ENOMEM;
sg_init_table(cmd->sg, MTIP_MAX_SG);
return 0;
}
static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req)
{
struct driver_data *dd = req->q->queuedata;
if (blk_mq_is_reserved_rq(req)) {
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
cmd->status = BLK_STS_TIMEOUT;
blk_mq_complete_request(req);
return BLK_EH_DONE;
}
if (test_bit(req->tag, dd->port->cmds_to_issue))
goto exit_handler;
if (test_and_set_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags))
goto exit_handler;
wake_up_interruptible(&dd->port->svc_wait);
exit_handler:
return BLK_EH_RESET_TIMER;
}
static const struct blk_mq_ops mtip_mq_ops = {
.queue_rq = mtip_queue_rq,
.init_request = mtip_init_cmd,
.exit_request = mtip_free_cmd,
.complete = mtip_softirq_done_fn,
.timeout = mtip_cmd_timeout,
};
/*
* Block layer initialization function.
*
* This function is called once by the PCI layer for each P320
* device that is connected to the system.
*
* @dd Pointer to the driver data structure.
*
* return value
* 0 on success else an error code.
*/
static int mtip_block_initialize(struct driver_data *dd)
{
int rv = 0, wait_for_rebuild = 0;
sector_t capacity;
unsigned int index = 0;
if (dd->disk)
goto skip_create_disk; /* hw init done, before rebuild */
if (mtip_hw_init(dd)) {
rv = -EINVAL;
goto protocol_init_error;
}
memset(&dd->tags, 0, sizeof(dd->tags));
dd->tags.ops = &mtip_mq_ops;
dd->tags.nr_hw_queues = 1;
dd->tags.queue_depth = MTIP_MAX_COMMAND_SLOTS;
dd->tags.reserved_tags = 1;
dd->tags.cmd_size = sizeof(struct mtip_cmd);
dd->tags.numa_node = dd->numa_node;
dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
dd->tags.driver_data = dd;
dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
rv = blk_mq_alloc_tag_set(&dd->tags);
if (rv) {
dev_err(&dd->pdev->dev,
"Unable to allocate request queue\n");
goto block_queue_alloc_tag_error;
}
dd->disk = blk_mq_alloc_disk(&dd->tags, dd);
if (IS_ERR(dd->disk)) {
dev_err(&dd->pdev->dev,
"Unable to allocate request queue\n");
rv = -ENOMEM;
goto block_queue_alloc_init_error;
}
dd->queue = dd->disk->queue;
rv = ida_alloc(&rssd_index_ida, GFP_KERNEL);
if (rv < 0)
goto ida_get_error;
index = rv;
rv = rssd_disk_name_format("rssd",
index,
dd->disk->disk_name,
DISK_NAME_LEN);
if (rv)
goto disk_index_error;
dd->disk->major = dd->major;
dd->disk->first_minor = index * MTIP_MAX_MINORS;
dd->disk->minors = MTIP_MAX_MINORS;
dd->disk->fops = &mtip_block_ops;
dd->disk->private_data = dd;
dd->index = index;
mtip_hw_debugfs_init(dd);
skip_create_disk:
/* Initialize the protocol layer. */
wait_for_rebuild = mtip_hw_get_identify(dd);
if (wait_for_rebuild < 0) {
dev_err(&dd->pdev->dev,
"Protocol layer initialization failed\n");
rv = -EINVAL;
goto init_hw_cmds_error;
}
/*
* if rebuild pending, start the service thread, and delay the block
* queue creation and device_add_disk()
*/
if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
goto start_service_thread;
/* Set device limits. */
blk_queue_flag_set(QUEUE_FLAG_NONROT, dd->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, dd->queue);
blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
blk_queue_physical_block_size(dd->queue, 4096);
blk_queue_max_hw_sectors(dd->queue, 0xffff);
blk_queue_max_segment_size(dd->queue, 0x400000);
dma_set_max_seg_size(&dd->pdev->dev, 0x400000);
blk_queue_io_min(dd->queue, 4096);
/* Set the capacity of the device in 512 byte sectors. */
if (!(mtip_hw_get_capacity(dd, &capacity))) {
dev_warn(&dd->pdev->dev,
"Could not read drive capacity\n");
rv = -EIO;
goto read_capacity_error;
}
set_capacity(dd->disk, capacity);
/* Enable the block device and add it to /dev */
rv = device_add_disk(&dd->pdev->dev, dd->disk, mtip_disk_attr_groups);
if (rv)
goto read_capacity_error;
if (dd->mtip_svc_handler) {
set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
return rv; /* service thread created for handling rebuild */
}
start_service_thread:
dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread,
dd, dd->numa_node,
"mtip_svc_thd_%02d", index);
if (IS_ERR(dd->mtip_svc_handler)) {
dev_err(&dd->pdev->dev, "service thread failed to start\n");
dd->mtip_svc_handler = NULL;
rv = -EFAULT;
goto kthread_run_error;
}
wake_up_process(dd->mtip_svc_handler);
if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
rv = wait_for_rebuild;
return rv;
kthread_run_error:
/* Delete our gendisk. This also removes the device from /dev */
del_gendisk(dd->disk);
read_capacity_error:
init_hw_cmds_error:
mtip_hw_debugfs_exit(dd);
disk_index_error:
ida_free(&rssd_index_ida, index);
ida_get_error:
put_disk(dd->disk);
block_queue_alloc_init_error:
blk_mq_free_tag_set(&dd->tags);
block_queue_alloc_tag_error:
mtip_hw_exit(dd); /* De-initialize the protocol layer. */
protocol_init_error:
return rv;
}
/*
 * Function called by the PCI layer just before the
* machine shuts down.
*
* If a protocol layer shutdown function is present it will be called
* by this function.
*
* @dd Pointer to the driver data structure.
*
* return value
* 0
*/
static int mtip_block_shutdown(struct driver_data *dd)
{
mtip_hw_shutdown(dd);
dev_info(&dd->pdev->dev,
"Shutting down %s ...\n", dd->disk->disk_name);
if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
del_gendisk(dd->disk);
blk_mq_free_tag_set(&dd->tags);
put_disk(dd->disk);
return 0;
}
static int mtip_block_suspend(struct driver_data *dd)
{
dev_info(&dd->pdev->dev,
"Suspending %s ...\n", dd->disk->disk_name);
mtip_hw_suspend(dd);
return 0;
}
static int mtip_block_resume(struct driver_data *dd)
{
dev_info(&dd->pdev->dev, "Resuming %s ...\n",
dd->disk->disk_name);
mtip_hw_resume(dd);
return 0;
}
static void drop_cpu(int cpu)
{
cpu_use[cpu]--;
}
static int get_least_used_cpu_on_node(int node)
{
int cpu, least_used_cpu, least_cnt;
const struct cpumask *node_mask;
node_mask = cpumask_of_node(node);
least_used_cpu = cpumask_first(node_mask);
least_cnt = cpu_use[least_used_cpu];
cpu = least_used_cpu;
for_each_cpu(cpu, node_mask) {
if (cpu_use[cpu] < least_cnt) {
least_used_cpu = cpu;
least_cnt = cpu_use[cpu];
}
}
cpu_use[least_used_cpu]++;
return least_used_cpu;
}
/* Helper for selecting a node in round robin mode */
static inline int mtip_get_next_rr_node(void)
{
static int next_node = NUMA_NO_NODE;
if (next_node == NUMA_NO_NODE) {
next_node = first_online_node;
return next_node;
}
next_node = next_online_node(next_node);
if (next_node == MAX_NUMNODES)
next_node = first_online_node;
return next_node;
}
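/*
 * Illustrative note (not part of the upstream code): with two online NUMA
 * nodes, successive calls to mtip_get_next_rr_node() return 0, 1, 0, 1, ...
 * The static next_node acts as the round-robin cursor across device probes.
 */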
static DEFINE_HANDLER(0);
static DEFINE_HANDLER(1);
static DEFINE_HANDLER(2);
static DEFINE_HANDLER(3);
static DEFINE_HANDLER(4);
static DEFINE_HANDLER(5);
static DEFINE_HANDLER(6);
static DEFINE_HANDLER(7);
static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev)
{
unsigned short pcie_dev_ctrl;
if (pci_is_pcie(pdev)) {
pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &pcie_dev_ctrl);
if (pcie_dev_ctrl & PCI_EXP_DEVCTL_NOSNOOP_EN ||
pcie_dev_ctrl & PCI_EXP_DEVCTL_RELAX_EN) {
dev_info(&dd->pdev->dev,
"Disabling ERO/No-Snoop on bridge device %04x:%04x\n",
pdev->vendor, pdev->device);
pcie_dev_ctrl &= ~(PCI_EXP_DEVCTL_NOSNOOP_EN |
PCI_EXP_DEVCTL_RELAX_EN);
pcie_capability_write_word(pdev, PCI_EXP_DEVCTL,
pcie_dev_ctrl);
}
}
}
static void mtip_fix_ero_nosnoop(struct driver_data *dd, struct pci_dev *pdev)
{
/*
* This workaround is specific to AMD/ATI chipset with a PCI upstream
* device with device id 0x5aXX
*/
if (pdev->bus && pdev->bus->self) {
if (pdev->bus->self->vendor == PCI_VENDOR_ID_ATI &&
((pdev->bus->self->device & 0xff00) == 0x5a00)) {
mtip_disable_link_opts(dd, pdev->bus->self);
} else {
/* Check further up the topology */
struct pci_dev *parent_dev = pdev->bus->self;
if (parent_dev->bus &&
parent_dev->bus->parent &&
parent_dev->bus->parent->self &&
parent_dev->bus->parent->self->vendor ==
PCI_VENDOR_ID_ATI &&
(parent_dev->bus->parent->self->device &
0xff00) == 0x5a00) {
mtip_disable_link_opts(dd,
parent_dev->bus->parent->self);
}
}
}
}
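/*
 * Illustrative note (not part of the upstream code): the workaround above
 * looks at most two bridges up the PCI topology for an ATI device whose ID
 * falls in the 0x5a00-0x5aff range and, if one is found, clears the relaxed
 * ordering and no-snoop enables on that bridge via mtip_disable_link_opts().
 */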
/*
* Called for each supported PCI device detected.
*
* This function allocates the private data structure, enables the
* PCI device and then calls the block layer initialization function.
*
* return value
* 0 on success else an error code.
*/
static int mtip_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int rv = 0;
struct driver_data *dd = NULL;
char cpu_list[256];
const struct cpumask *node_mask;
int cpu, i = 0, j = 0;
int my_node = NUMA_NO_NODE;
	/* Allocate memory for this device's private data. */
my_node = pcibus_to_node(pdev->bus);
if (my_node != NUMA_NO_NODE) {
if (!node_online(my_node))
my_node = mtip_get_next_rr_node();
} else {
dev_info(&pdev->dev, "Kernel not reporting proximity, choosing a node\n");
my_node = mtip_get_next_rr_node();
}
dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n",
my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev),
cpu_to_node(raw_smp_processor_id()), raw_smp_processor_id());
dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
if (!dd)
return -ENOMEM;
/* Attach the private data to this PCI device. */
pci_set_drvdata(pdev, dd);
rv = pcim_enable_device(pdev);
if (rv < 0) {
dev_err(&pdev->dev, "Unable to enable device\n");
goto iomap_err;
}
/* Map BAR5 to memory. */
rv = pcim_iomap_regions(pdev, 1 << MTIP_ABAR, MTIP_DRV_NAME);
if (rv < 0) {
dev_err(&pdev->dev, "Unable to map regions\n");
goto iomap_err;
}
rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rv) {
dev_warn(&pdev->dev, "64-bit DMA enable failed\n");
goto setmask_err;
}
/* Copy the info we may need later into the private data structure. */
dd->major = mtip_major;
dd->instance = instance;
dd->pdev = pdev;
dd->numa_node = my_node;
memset(dd->workq_name, 0, 32);
snprintf(dd->workq_name, 31, "mtipq%d", dd->instance);
dd->isr_workq = create_workqueue(dd->workq_name);
if (!dd->isr_workq) {
dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
rv = -ENOMEM;
goto setmask_err;
}
memset(cpu_list, 0, sizeof(cpu_list));
node_mask = cpumask_of_node(dd->numa_node);
if (!cpumask_empty(node_mask)) {
for_each_cpu(cpu, node_mask)
{
snprintf(&cpu_list[j], 256 - j, "%d ", cpu);
j = strlen(cpu_list);
}
dev_info(&pdev->dev, "Node %d on package %d has %d cpu(s): %s\n",
dd->numa_node,
topology_physical_package_id(cpumask_first(node_mask)),
nr_cpus_node(dd->numa_node),
cpu_list);
} else
dev_dbg(&pdev->dev, "mtip32xx: node_mask empty\n");
dd->isr_binding = get_least_used_cpu_on_node(dd->numa_node);
dev_info(&pdev->dev, "Initial IRQ binding node:cpu %d:%d\n",
cpu_to_node(dd->isr_binding), dd->isr_binding);
/* first worker context always runs in ISR */
dd->work[0].cpu_binding = dd->isr_binding;
dd->work[1].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
dd->work[2].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
dd->work[3].cpu_binding = dd->work[0].cpu_binding;
dd->work[4].cpu_binding = dd->work[1].cpu_binding;
dd->work[5].cpu_binding = dd->work[2].cpu_binding;
dd->work[6].cpu_binding = dd->work[2].cpu_binding;
dd->work[7].cpu_binding = dd->work[1].cpu_binding;
/* Log the bindings */
for_each_present_cpu(cpu) {
memset(cpu_list, 0, sizeof(cpu_list));
for (i = 0, j = 0; i < MTIP_MAX_SLOT_GROUPS; i++) {
if (dd->work[i].cpu_binding == cpu) {
snprintf(&cpu_list[j], 256 - j, "%d ", i);
j = strlen(cpu_list);
}
}
if (j)
dev_info(&pdev->dev, "CPU %d: WQs %s\n", cpu, cpu_list);
}
INIT_WORK(&dd->work[0].work, mtip_workq_sdbf0);
INIT_WORK(&dd->work[1].work, mtip_workq_sdbf1);
INIT_WORK(&dd->work[2].work, mtip_workq_sdbf2);
INIT_WORK(&dd->work[3].work, mtip_workq_sdbf3);
INIT_WORK(&dd->work[4].work, mtip_workq_sdbf4);
INIT_WORK(&dd->work[5].work, mtip_workq_sdbf5);
INIT_WORK(&dd->work[6].work, mtip_workq_sdbf6);
INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7);
pci_set_master(pdev);
rv = pci_enable_msi(pdev);
if (rv) {
dev_warn(&pdev->dev,
"Unable to enable MSI interrupt.\n");
goto msi_initialize_err;
}
mtip_fix_ero_nosnoop(dd, pdev);
/* Initialize the block layer. */
rv = mtip_block_initialize(dd);
if (rv < 0) {
dev_err(&pdev->dev,
"Unable to initialize block layer\n");
goto block_initialize_err;
}
/*
* Increment the instance count so that each device has a unique
* instance number.
*/
instance++;
if (rv != MTIP_FTL_REBUILD_MAGIC)
set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
else
rv = 0; /* device in rebuild state, return 0 from probe */
goto done;
block_initialize_err:
pci_disable_msi(pdev);
msi_initialize_err:
if (dd->isr_workq) {
destroy_workqueue(dd->isr_workq);
drop_cpu(dd->work[0].cpu_binding);
drop_cpu(dd->work[1].cpu_binding);
drop_cpu(dd->work[2].cpu_binding);
}
setmask_err:
pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
iomap_err:
kfree(dd);
pci_set_drvdata(pdev, NULL);
return rv;
done:
return rv;
}
/*
* Called for each probed device when the device is removed or the
* driver is unloaded.
*
* return value
* None
*/
static void mtip_pci_remove(struct pci_dev *pdev)
{
struct driver_data *dd = pci_get_drvdata(pdev);
unsigned long to;
mtip_check_surprise_removal(dd);
synchronize_irq(dd->pdev->irq);
/* Spin until workers are done */
to = jiffies + msecs_to_jiffies(4000);
do {
msleep(20);
} while (atomic_read(&dd->irq_workers_active) != 0 &&
time_before(jiffies, to));
if (atomic_read(&dd->irq_workers_active) != 0) {
dev_warn(&dd->pdev->dev,
"Completion workers still active!\n");
}
set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
del_gendisk(dd->disk);
mtip_hw_debugfs_exit(dd);
if (dd->mtip_svc_handler) {
set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
wake_up_interruptible(&dd->port->svc_wait);
kthread_stop(dd->mtip_svc_handler);
}
if (!dd->sr) {
/*
* Explicitly wait here for IOs to quiesce,
* as mtip_standby_drive usually won't wait for IOs.
*/
if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS))
mtip_standby_drive(dd);
}
else
dev_info(&dd->pdev->dev, "device %s surprise removal\n",
dd->disk->disk_name);
blk_mq_free_tag_set(&dd->tags);
/* De-initialize the protocol layer. */
mtip_hw_exit(dd);
if (dd->isr_workq) {
destroy_workqueue(dd->isr_workq);
drop_cpu(dd->work[0].cpu_binding);
drop_cpu(dd->work[1].cpu_binding);
drop_cpu(dd->work[2].cpu_binding);
}
pci_disable_msi(pdev);
pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
pci_set_drvdata(pdev, NULL);
put_disk(dd->disk);
}
/*
* Called for each probed device when the device is suspended.
*
* return value
* 0 Success
* <0 Error
*/
static int __maybe_unused mtip_pci_suspend(struct device *dev)
{
int rv = 0;
struct driver_data *dd = dev_get_drvdata(dev);
set_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
/* Disable ports & interrupts then send standby immediate */
rv = mtip_block_suspend(dd);
if (rv < 0)
dev_err(dev, "Failed to suspend controller\n");
return rv;
}
/*
* Called for each probed device when the device is resumed.
*
* return value
* 0 Success
* <0 Error
*/
static int __maybe_unused mtip_pci_resume(struct device *dev)
{
int rv = 0;
struct driver_data *dd = dev_get_drvdata(dev);
/*
* Calls hbaReset, initPort, & startPort function
* then enables interrupts
*/
rv = mtip_block_resume(dd);
if (rv < 0)
dev_err(dev, "Unable to resume\n");
clear_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
return rv;
}
/*
* Shutdown routine
*
* return value
* None
*/
static void mtip_pci_shutdown(struct pci_dev *pdev)
{
struct driver_data *dd = pci_get_drvdata(pdev);
if (dd)
mtip_block_shutdown(dd);
}
/* Table of device ids supported by this driver. */
static const struct pci_device_id mtip_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320H_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320M_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320S_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P325M_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420H_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420M_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P425M_DEVICE_ID) },
{ 0 }
};
static SIMPLE_DEV_PM_OPS(mtip_pci_pm_ops, mtip_pci_suspend, mtip_pci_resume);
/* Structure that describes the PCI driver functions. */
static struct pci_driver mtip_pci_driver = {
.name = MTIP_DRV_NAME,
.id_table = mtip_pci_tbl,
.probe = mtip_pci_probe,
.remove = mtip_pci_remove,
.driver.pm = &mtip_pci_pm_ops,
.shutdown = mtip_pci_shutdown,
};
MODULE_DEVICE_TABLE(pci, mtip_pci_tbl);
/*
* Module initialization function.
*
* Called once when the module is loaded. This function allocates a major
* block device number to the Cyclone devices and registers the PCI layer
* of the driver.
*
* Return value
* 0 on success else error code.
*/
static int __init mtip_init(void)
{
int error;
pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
/* Allocate a major block device number to use with this driver. */
error = register_blkdev(0, MTIP_DRV_NAME);
if (error <= 0) {
pr_err("Unable to register block device (%d)\n",
error);
return -EBUSY;
}
mtip_major = error;
dfs_parent = debugfs_create_dir("rssd", NULL);
if (IS_ERR_OR_NULL(dfs_parent)) {
pr_warn("Error creating debugfs parent\n");
dfs_parent = NULL;
}
/* Register our PCI operations. */
error = pci_register_driver(&mtip_pci_driver);
if (error) {
debugfs_remove(dfs_parent);
unregister_blkdev(mtip_major, MTIP_DRV_NAME);
}
return error;
}
/*
* Module de-initialization function.
*
* Called once when the module is unloaded. This function deallocates
* the major block device number allocated by mtip_init() and
* unregisters the PCI layer of the driver.
*
* Return value
* none
*/
static void __exit mtip_exit(void)
{
/* Release the allocated major block device number. */
unregister_blkdev(mtip_major, MTIP_DRV_NAME);
/* Unregister the PCI driver. */
pci_unregister_driver(&mtip_pci_driver);
debugfs_remove_recursive(dfs_parent);
}
MODULE_AUTHOR("Micron Technology, Inc");
MODULE_DESCRIPTION("Micron RealSSD PCIe Block Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(MTIP_DRV_VERSION);
module_init(mtip_init);
module_exit(mtip_exit);
| linux-master | drivers/block/mtip32xx/mtip32xx.c |
/*======================================================================
A driver for PCMCIA parallel port adapters
(specifically, for the Quatech SPP-100 EPP card: other cards will
probably require driver tweaks)
parport_cs.c 1.29 2002/10/11 06:57:41
The contents of this file are subject to the Mozilla Public
License Version 1.1 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of
the License at http://www.mozilla.org/MPL/
Software distributed under the License is distributed on an "AS
IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
implied. See the License for the specific language governing
rights and limitations under the License.
The initial developer of the original code is David A. Hinds
<[email protected]>. Portions created by David A. Hinds
are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
Alternatively, the contents of this file may be used under the
terms of the GNU General Public License version 2 (the "GPL"), in
which case the provisions of the GPL are applicable instead of the
above. If you wish to allow the use of your version of this file
only under the terms of the GPL and not to allow others to use
your version of this file under the MPL, indicate your decision
by deleting the provisions above and replace them with the notice
and other provisions required by the GPL. If you do not delete
the provisions above, a recipient may use your version of this
file under either the MPL or the GPL.
======================================================================*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ioport.h>
#include <linux/major.h>
#include <linux/interrupt.h>
#include <linux/parport.h>
#include <linux/parport_pc.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ciscode.h>
/*====================================================================*/
/* Module parameters */
MODULE_AUTHOR("David Hinds <[email protected]>");
MODULE_DESCRIPTION("PCMCIA parallel port card driver");
MODULE_LICENSE("Dual MPL/GPL");
#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
INT_MODULE_PARM(epp_mode, 1);
/*====================================================================*/
#define FORCE_EPP_MODE 0x08
typedef struct parport_info_t {
struct pcmcia_device *p_dev;
int ndev;
struct parport *port;
} parport_info_t;
static void parport_detach(struct pcmcia_device *p_dev);
static int parport_config(struct pcmcia_device *link);
static void parport_cs_release(struct pcmcia_device *);
static int parport_probe(struct pcmcia_device *link)
{
parport_info_t *info;
dev_dbg(&link->dev, "parport_attach()\n");
/* Create new parport device */
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) return -ENOMEM;
link->priv = info;
info->p_dev = link;
link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
return parport_config(link);
} /* parport_attach */
static void parport_detach(struct pcmcia_device *link)
{
dev_dbg(&link->dev, "parport_detach\n");
parport_cs_release(link);
kfree(link->priv);
} /* parport_detach */
static int parport_config_check(struct pcmcia_device *p_dev, void *priv_data)
{
p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH;
p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
return pcmcia_request_io(p_dev);
}
static int parport_config(struct pcmcia_device *link)
{
parport_info_t *info = link->priv;
struct parport *p;
int ret;
dev_dbg(&link->dev, "parport_config\n");
if (epp_mode)
link->config_index |= FORCE_EPP_MODE;
ret = pcmcia_loop_config(link, parport_config_check, NULL);
if (ret)
goto failed;
if (!link->irq)
goto failed;
ret = pcmcia_enable_device(link);
if (ret)
goto failed;
p = parport_pc_probe_port(link->resource[0]->start,
link->resource[1]->start,
link->irq, PARPORT_DMA_NONE,
&link->dev, IRQF_SHARED);
if (p == NULL) {
pr_notice("parport_cs: parport_pc_probe_port() at 0x%3x, irq %u failed\n",
(unsigned int)link->resource[0]->start, link->irq);
goto failed;
}
p->modes |= PARPORT_MODE_PCSPP;
if (epp_mode)
p->modes |= PARPORT_MODE_TRISTATE | PARPORT_MODE_EPP;
info->ndev = 1;
info->port = p;
return 0;
failed:
parport_cs_release(link);
kfree(link->priv);
return -ENODEV;
} /* parport_config */
static void parport_cs_release(struct pcmcia_device *link)
{
parport_info_t *info = link->priv;
dev_dbg(&link->dev, "parport_release\n");
if (info->ndev) {
struct parport *p = info->port;
parport_pc_unregister_port(p);
}
info->ndev = 0;
pcmcia_disable_device(link);
} /* parport_cs_release */
static const struct pcmcia_device_id parport_ids[] = {
PCMCIA_DEVICE_FUNC_ID(3),
PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial+Parallel Port: SP230",0x3beb8cf2,0xdb9e58bc),
PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0003),
PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, parport_ids);
static struct pcmcia_driver parport_cs_driver = {
.owner = THIS_MODULE,
.name = "parport_cs",
.probe = parport_probe,
.remove = parport_detach,
.id_table = parport_ids,
};
module_pcmcia_driver(parport_cs_driver);
| linux-master | drivers/parport/parport_cs.c |
/*
* IEEE-1284 implementation for parport.
*
* Authors: Phil Blundell <[email protected]>
* Carsten Gross <[email protected]>
* Jose Renau <[email protected]>
* Tim Waugh <[email protected]> (largely rewritten)
*
* This file is responsible for IEEE 1284 negotiation, and for handing
* read/write requests to low-level drivers.
*
* Any part of this program may be used in documents licensed under
* the GNU Free Documentation License, Version 1.1 or any later version
* published by the Free Software Foundation.
*
* Various hacks, Fred Barnes <[email protected]>, 04/2000
*/
#include <linux/module.h>
#include <linux/threads.h>
#include <linux/parport.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>
#undef DEBUG /* undef me for production */
#ifdef CONFIG_LP_CONSOLE
#undef DEBUG /* Don't want a garbled console */
#endif
/* Make parport_wait_peripheral wake up.
* It will be useful to call this from an interrupt handler. */
static void parport_ieee1284_wakeup (struct parport *port)
{
up (&port->physport->ieee1284.irq);
}
static void timeout_waiting_on_port (struct timer_list *t)
{
struct parport *port = from_timer(port, t, timer);
parport_ieee1284_wakeup (port);
}
/**
* parport_wait_event - wait for an event on a parallel port
* @port: port to wait on
* @timeout: time to wait (in jiffies)
*
* This function waits for up to @timeout jiffies for an
* interrupt to occur on a parallel port. If the port timeout is
* set to zero, it returns immediately.
*
* If an interrupt occurs before the timeout period elapses, this
* function returns zero immediately. If it times out, it returns
* one. An error code less than zero indicates an error (most
* likely a pending signal), and the calling code should finish
* what it's doing as soon as it can.
*/
int parport_wait_event (struct parport *port, signed long timeout)
{
int ret;
if (!port->physport->cad->timeout)
/* Zero timeout is special, and we can't down() the
semaphore. */
return 1;
timer_setup(&port->timer, timeout_waiting_on_port, 0);
mod_timer(&port->timer, jiffies + timeout);
ret = down_interruptible (&port->physport->ieee1284.irq);
if (!del_timer_sync(&port->timer) && !ret)
/* Timed out. */
ret = 1;
return ret;
}
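/* Usage sketch (illustrative only; not called from this file).  A driver
 * that has claimed the port might wait up to a second for an interrupt
 * before falling back to polling:
 *
 *	int r = parport_wait_event(port, msecs_to_jiffies(1000));
 *	if (r < 0)
 *		return r;		(a signal is pending - give up)
 *	else if (r == 0)
 *		check_lines(port);	(interrupt arrived in time)
 *	else
 *		poll_lines(port);	(timed out)
 *
 * check_lines() and poll_lines() are hypothetical helpers used purely for
 * illustration.
 */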
/**
* parport_poll_peripheral - poll status lines
* @port: port to watch
* @mask: status lines to watch
* @result: desired values of chosen status lines
* @usec: timeout
*
* This function busy-waits until the masked status lines have
* the desired values, or until the timeout period elapses. The
* @mask and @result parameters are bitmasks, with the bits
* defined by the constants in parport.h: %PARPORT_STATUS_BUSY,
* and so on.
*
* This function does not call schedule(); instead it busy-waits
* using udelay(). It currently has a resolution of 5usec.
*
* If the status lines take on the desired values before the
* timeout period elapses, parport_poll_peripheral() returns zero
* immediately. A return value greater than zero indicates
* a timeout. An error code (less than zero) indicates an error,
* most likely a signal that arrived, and the caller should
* finish what it is doing as soon as possible.
*/
int parport_poll_peripheral(struct parport *port,
unsigned char mask,
unsigned char result,
int usec)
{
/* Zero return code is success, >0 is timeout. */
int count = usec / 5 + 2;
int i;
unsigned char status;
for (i = 0; i < count; i++) {
status = parport_read_status (port);
if ((status & mask) == result)
return 0;
if (signal_pending (current))
return -EINTR;
if (need_resched())
break;
if (i >= 2)
udelay (5);
}
return 1;
}
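/* Usage sketch (illustrative only): busy-wait up to 500 usec for nAck to
 * go high:
 *
 *	if (parport_poll_peripheral(port, PARPORT_STATUS_ACK,
 *				    PARPORT_STATUS_ACK, 500) == 0)
 *		(nAck went high within the polling window)
 *
 * A return of 1 means the line never reached the requested state; a
 * negative return means a signal is pending.
 */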
/**
* parport_wait_peripheral - wait for status lines to change in 35ms
* @port: port to watch
* @mask: status lines to watch
* @result: desired values of chosen status lines
*
* This function waits until the masked status lines have the
* desired values, or until 35ms have elapsed (see IEEE 1284-1994
* page 24 to 25 for why this value in particular is hardcoded).
* The @mask and @result parameters are bitmasks, with the bits
* defined by the constants in parport.h: %PARPORT_STATUS_BUSY,
* and so on.
*
* The port is polled quickly to start off with, in anticipation
* of a fast response from the peripheral. This fast polling
* time is configurable (using /proc), and defaults to 500usec.
* If the timeout for this port (see parport_set_timeout()) is
* zero, the fast polling time is 35ms, and this function does
* not call schedule().
*
* If the timeout for this port is non-zero, after the fast
* polling fails it uses parport_wait_event() to wait for up to
* 10ms, waking up if an interrupt occurs.
*/
int parport_wait_peripheral(struct parport *port,
unsigned char mask,
unsigned char result)
{
int ret;
int usec;
unsigned long deadline;
unsigned char status;
usec = port->physport->spintime; /* usecs of fast polling */
if (!port->physport->cad->timeout)
/* A zero timeout is "special": busy wait for the
entire 35ms. */
usec = 35000;
/* Fast polling.
*
* This should be adjustable.
* How about making a note (in the device structure) of how long
* it takes, so we know for next time?
*/
ret = parport_poll_peripheral (port, mask, result, usec);
if (ret != 1)
return ret;
if (!port->physport->cad->timeout)
/* We may be in an interrupt handler, so we can't poll
* slowly anyway. */
return 1;
/* 40ms of slow polling. */
deadline = jiffies + msecs_to_jiffies(40);
while (time_before (jiffies, deadline)) {
if (signal_pending (current))
return -EINTR;
/* Wait for 10ms (or until an interrupt occurs if
* the handler is set) */
if ((ret = parport_wait_event (port, msecs_to_jiffies(10))) < 0)
return ret;
status = parport_read_status (port);
if ((status & mask) == result)
return 0;
if (!ret) {
/* parport_wait_event didn't time out, but the
* peripheral wasn't actually ready either.
* Wait for another 10ms. */
schedule_timeout_interruptible(msecs_to_jiffies(10));
}
}
return 1;
}
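/* Usage sketch (illustrative only): parport_negotiate() below uses this
 * helper for "Event 2", waiting for PError, Select and nFault to go high
 * while nAck goes low:
 *
 *	r = parport_wait_peripheral(port,
 *				    PARPORT_STATUS_ERROR
 *				    | PARPORT_STATUS_SELECT
 *				    | PARPORT_STATUS_PAPEROUT
 *				    | PARPORT_STATUS_ACK,
 *				    PARPORT_STATUS_ERROR
 *				    | PARPORT_STATUS_SELECT
 *				    | PARPORT_STATUS_PAPEROUT);
 *
 * A zero return means the masked lines reached the requested state within
 * the 35 ms window.
 */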
#ifdef CONFIG_PARPORT_1284
/* Terminate a negotiated mode. */
static void parport_ieee1284_terminate (struct parport *port)
{
int r;
port = port->physport;
/* EPP terminates differently. */
switch (port->ieee1284.mode) {
case IEEE1284_MODE_EPP:
case IEEE1284_MODE_EPPSL:
case IEEE1284_MODE_EPPSWE:
/* Terminate from EPP mode. */
/* Event 68: Set nInit low */
parport_frob_control (port, PARPORT_CONTROL_INIT, 0);
udelay (50);
/* Event 69: Set nInit high, nSelectIn low */
parport_frob_control (port,
PARPORT_CONTROL_SELECT
| PARPORT_CONTROL_INIT,
PARPORT_CONTROL_SELECT
| PARPORT_CONTROL_INIT);
break;
case IEEE1284_MODE_ECP:
case IEEE1284_MODE_ECPRLE:
case IEEE1284_MODE_ECPSWE:
/* In ECP we can only terminate from fwd idle phase. */
if (port->ieee1284.phase != IEEE1284_PH_FWD_IDLE) {
/* Event 47: Set nInit high */
parport_frob_control (port,
PARPORT_CONTROL_INIT
| PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_INIT
| PARPORT_CONTROL_AUTOFD);
/* Event 49: PError goes high */
r = parport_wait_peripheral (port,
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (r)
pr_debug("%s: Timeout at event 49\n",
port->name);
parport_data_forward (port);
pr_debug("%s: ECP direction: forward\n", port->name);
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
}
fallthrough;
default:
/* Terminate from all other modes. */
/* Event 22: Set nSelectIn low, nAutoFd high */
parport_frob_control (port,
PARPORT_CONTROL_SELECT
| PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_SELECT);
/* Event 24: nAck goes low */
r = parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0);
if (r)
pr_debug("%s: Timeout at event 24\n", port->name);
/* Event 25: Set nAutoFd low */
parport_frob_control (port,
PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_AUTOFD);
/* Event 27: nAck goes high */
r = parport_wait_peripheral (port,
PARPORT_STATUS_ACK,
PARPORT_STATUS_ACK);
if (r)
pr_debug("%s: Timeout at event 27\n", port->name);
/* Event 29: Set nAutoFd high */
parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
}
port->ieee1284.mode = IEEE1284_MODE_COMPAT;
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
pr_debug("%s: In compatibility (forward idle) mode\n", port->name);
}
#endif /* IEEE1284 support */
/**
* parport_negotiate - negotiate an IEEE 1284 mode
* @port: port to use
* @mode: mode to negotiate to
*
* Use this to negotiate to a particular IEEE 1284 transfer mode.
* The @mode parameter should be one of the constants in
* parport.h starting %IEEE1284_MODE_xxx.
*
* The return value is 0 if the peripheral has accepted the
* negotiation to the mode specified, -1 if the peripheral is not
* IEEE 1284 compliant (or not present), or 1 if the peripheral
* has rejected the negotiation.
*/
int parport_negotiate (struct parport *port, int mode)
{
#ifndef CONFIG_PARPORT_1284
if (mode == IEEE1284_MODE_COMPAT)
return 0;
pr_err("parport: IEEE1284 not supported in this kernel\n");
return -1;
#else
int m = mode & ~IEEE1284_ADDR;
int r;
unsigned char xflag;
port = port->physport;
/* Is there anything to do? */
if (port->ieee1284.mode == mode)
return 0;
/* Is the difference just an address-or-not bit? */
if ((port->ieee1284.mode & ~IEEE1284_ADDR) == (mode & ~IEEE1284_ADDR)){
port->ieee1284.mode = mode;
return 0;
}
/* Go to compatibility forward idle mode */
if (port->ieee1284.mode != IEEE1284_MODE_COMPAT)
parport_ieee1284_terminate (port);
if (mode == IEEE1284_MODE_COMPAT)
/* Compatibility mode: no negotiation. */
return 0;
switch (mode) {
case IEEE1284_MODE_ECPSWE:
m = IEEE1284_MODE_ECP;
break;
case IEEE1284_MODE_EPPSL:
case IEEE1284_MODE_EPPSWE:
m = IEEE1284_MODE_EPP;
break;
case IEEE1284_MODE_BECP:
return -ENOSYS; /* FIXME (implement BECP) */
}
if (mode & IEEE1284_EXT_LINK)
m = 1<<7; /* request extensibility link */
port->ieee1284.phase = IEEE1284_PH_NEGOTIATION;
/* Start off with nStrobe and nAutoFd high, and nSelectIn low */
parport_frob_control (port,
PARPORT_CONTROL_STROBE
| PARPORT_CONTROL_AUTOFD
| PARPORT_CONTROL_SELECT,
PARPORT_CONTROL_SELECT);
udelay(1);
/* Event 0: Set data */
parport_data_forward (port);
parport_write_data (port, m);
udelay (400); /* Shouldn't need to wait this long. */
/* Event 1: Set nSelectIn high, nAutoFd low */
parport_frob_control (port,
PARPORT_CONTROL_SELECT
| PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_AUTOFD);
/* Event 2: PError, Select, nFault go high, nAck goes low */
if (parport_wait_peripheral (port,
PARPORT_STATUS_ERROR
| PARPORT_STATUS_SELECT
| PARPORT_STATUS_PAPEROUT
| PARPORT_STATUS_ACK,
PARPORT_STATUS_ERROR
| PARPORT_STATUS_SELECT
| PARPORT_STATUS_PAPEROUT)) {
/* Timeout */
parport_frob_control (port,
PARPORT_CONTROL_SELECT
| PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_SELECT);
pr_debug("%s: Peripheral not IEEE1284 compliant (0x%02X)\n",
port->name, parport_read_status (port));
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
return -1; /* Not IEEE1284 compliant */
}
/* Event 3: Set nStrobe low */
parport_frob_control (port,
PARPORT_CONTROL_STROBE,
PARPORT_CONTROL_STROBE);
/* Event 4: Set nStrobe and nAutoFd high */
udelay (5);
parport_frob_control (port,
PARPORT_CONTROL_STROBE
| PARPORT_CONTROL_AUTOFD,
0);
/* Event 6: nAck goes high */
if (parport_wait_peripheral (port,
PARPORT_STATUS_ACK,
PARPORT_STATUS_ACK)) {
/* This shouldn't really happen with a compliant device. */
pr_debug("%s: Mode 0x%02x not supported? (0x%02x)\n",
port->name, mode, port->ops->read_status (port));
parport_ieee1284_terminate (port);
return 1;
}
xflag = parport_read_status (port) & PARPORT_STATUS_SELECT;
/* xflag should be high for all modes other than nibble (0). */
if (mode && !xflag) {
/* Mode not supported. */
pr_debug("%s: Mode 0x%02x rejected by peripheral\n",
port->name, mode);
parport_ieee1284_terminate (port);
return 1;
}
/* More to do if we've requested extensibility link. */
if (mode & IEEE1284_EXT_LINK) {
m = mode & 0x7f;
udelay (1);
parport_write_data (port, m);
udelay (1);
/* Event 51: Set nStrobe low */
parport_frob_control (port,
PARPORT_CONTROL_STROBE,
PARPORT_CONTROL_STROBE);
/* Event 52: nAck goes low */
if (parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0)) {
/* This peripheral is _very_ slow. */
pr_debug("%s: Event 52 didn't happen\n", port->name);
parport_ieee1284_terminate (port);
return 1;
}
/* Event 53: Set nStrobe high */
parport_frob_control (port,
PARPORT_CONTROL_STROBE,
0);
/* Event 55: nAck goes high */
if (parport_wait_peripheral (port,
PARPORT_STATUS_ACK,
PARPORT_STATUS_ACK)) {
/* This shouldn't really happen with a compliant
* device. */
pr_debug("%s: Mode 0x%02x not supported? (0x%02x)\n",
port->name, mode,
port->ops->read_status(port));
parport_ieee1284_terminate (port);
return 1;
}
/* Event 54: Peripheral sets XFlag to reflect support */
xflag = parport_read_status (port) & PARPORT_STATUS_SELECT;
/* xflag should be high. */
if (!xflag) {
/* Extended mode not supported. */
pr_debug("%s: Extended mode 0x%02x not supported\n",
port->name, mode);
parport_ieee1284_terminate (port);
return 1;
}
/* Any further setup is left to the caller. */
}
/* Mode is supported */
pr_debug("%s: In mode 0x%02x\n", port->name, mode);
port->ieee1284.mode = mode;
/* But ECP is special */
if (!(mode & IEEE1284_EXT_LINK) && (m & IEEE1284_MODE_ECP)) {
port->ieee1284.phase = IEEE1284_PH_ECP_SETUP;
/* Event 30: Set nAutoFd low */
parport_frob_control (port,
PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_AUTOFD);
/* Event 31: PError goes high. */
r = parport_wait_peripheral (port,
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (r) {
pr_debug("%s: Timeout at event 31\n", port->name);
}
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
pr_debug("%s: ECP direction: forward\n", port->name);
} else switch (mode) {
case IEEE1284_MODE_NIBBLE:
case IEEE1284_MODE_BYTE:
port->ieee1284.phase = IEEE1284_PH_REV_IDLE;
break;
default:
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
}
return 0;
#endif /* IEEE1284 support */
}
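/* Usage sketch (illustrative only): a caller that prefers ECP but can live
 * with compatibility mode might do:
 *
 *	if (parport_negotiate(port, IEEE1284_MODE_ECP) != 0)
 *		parport_negotiate(port, IEEE1284_MODE_COMPAT);
 *
 * Return values: 0 - mode accepted, 1 - mode rejected by the peripheral,
 * -1 - peripheral absent or not IEEE 1284 compliant.
 */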
/* Acknowledge that the peripheral has data available.
* Events 18-20, in order to get from Reverse Idle phase
* to Host Busy Data Available.
* This will most likely be called from an interrupt.
* Returns zero if data was available.
*/
#ifdef CONFIG_PARPORT_1284
static int parport_ieee1284_ack_data_avail (struct parport *port)
{
if (parport_read_status (port) & PARPORT_STATUS_ERROR)
/* Event 18 didn't happen. */
return -1;
/* Event 20: nAutoFd goes high. */
port->ops->frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
port->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL;
return 0;
}
#endif /* IEEE1284 support */
/* Handle an interrupt. */
void parport_ieee1284_interrupt (void *handle)
{
struct parport *port = handle;
parport_ieee1284_wakeup (port);
#ifdef CONFIG_PARPORT_1284
if (port->ieee1284.phase == IEEE1284_PH_REV_IDLE) {
/* An interrupt in this phase means that data
* is now available. */
pr_debug("%s: Data available\n", port->name);
parport_ieee1284_ack_data_avail (port);
}
#endif /* IEEE1284 support */
}
/**
* parport_write - write a block of data to a parallel port
* @port: port to write to
* @buffer: data buffer (in kernel space)
* @len: number of bytes of data to transfer
*
* This will write up to @len bytes of @buffer to the port
* specified, using the IEEE 1284 transfer mode most recently
* negotiated to (using parport_negotiate()), as long as that
* mode supports forward transfers (host to peripheral).
*
* It is the caller's responsibility to ensure that the first
* @len bytes of @buffer are valid.
*
* This function returns the number of bytes transferred (if zero
* or positive), or else an error code.
*/
ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
{
#ifndef CONFIG_PARPORT_1284
return port->ops->compat_write_data (port, buffer, len, 0);
#else
ssize_t retval;
int mode = port->ieee1284.mode;
int addr = mode & IEEE1284_ADDR;
size_t (*fn) (struct parport *, const void *, size_t, int);
/* Ignore the device-ID-request bit and the address bit. */
mode &= ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
/* Use the mode we're in. */
switch (mode) {
case IEEE1284_MODE_NIBBLE:
case IEEE1284_MODE_BYTE:
parport_negotiate (port, IEEE1284_MODE_COMPAT);
fallthrough;
case IEEE1284_MODE_COMPAT:
pr_debug("%s: Using compatibility mode\n", port->name);
fn = port->ops->compat_write_data;
break;
case IEEE1284_MODE_EPP:
pr_debug("%s: Using EPP mode\n", port->name);
if (addr) {
fn = port->ops->epp_write_addr;
} else {
fn = port->ops->epp_write_data;
}
break;
case IEEE1284_MODE_EPPSWE:
pr_debug("%s: Using software-emulated EPP mode\n", port->name);
if (addr) {
fn = parport_ieee1284_epp_write_addr;
} else {
fn = parport_ieee1284_epp_write_data;
}
break;
case IEEE1284_MODE_ECP:
case IEEE1284_MODE_ECPRLE:
pr_debug("%s: Using ECP mode\n", port->name);
if (addr) {
fn = port->ops->ecp_write_addr;
} else {
fn = port->ops->ecp_write_data;
}
break;
case IEEE1284_MODE_ECPSWE:
pr_debug("%s: Using software-emulated ECP mode\n", port->name);
/* The caller has specified that it must be emulated,
* even if we have ECP hardware! */
if (addr) {
fn = parport_ieee1284_ecp_write_addr;
} else {
fn = parport_ieee1284_ecp_write_data;
}
break;
default:
pr_debug("%s: Unknown mode 0x%02x\n",
port->name, port->ieee1284.mode);
return -ENOSYS;
}
retval = (*fn) (port, buffer, len, 0);
pr_debug("%s: wrote %zd/%zu bytes\n", port->name, retval, len);
return retval;
#endif /* IEEE1284 support */
}
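/* Usage sketch (illustrative only): writing a kernel buffer in whatever
 * forward mode was last negotiated:
 *
 *	ssize_t n = parport_write(port, buf, len);
 *	if (n < 0)
 *		return n;	(error, e.g. -ENOSYS if the mode is not handled)
 *
 * A short (but non-negative) return is possible; callers normally loop
 * until all of len has been transferred.
 */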
/**
* parport_read - read a block of data from a parallel port
* @port: port to read from
* @buffer: data buffer (in kernel space)
* @len: number of bytes of data to transfer
*
 * This will read up to @len bytes into @buffer from the port
* specified, using the IEEE 1284 transfer mode most recently
* negotiated to (using parport_negotiate()), as long as that
* mode supports reverse transfers (peripheral to host).
*
* It is the caller's responsibility to ensure that the first
* @len bytes of @buffer are available to write to.
*
* This function returns the number of bytes transferred (if zero
* or positive), or else an error code.
*/
ssize_t parport_read (struct parport *port, void *buffer, size_t len)
{
#ifndef CONFIG_PARPORT_1284
pr_err("parport: IEEE1284 not supported in this kernel\n");
return -ENODEV;
#else
int mode = port->physport->ieee1284.mode;
int addr = mode & IEEE1284_ADDR;
size_t (*fn) (struct parport *, void *, size_t, int);
/* Ignore the device-ID-request bit and the address bit. */
mode &= ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
/* Use the mode we're in. */
switch (mode) {
case IEEE1284_MODE_COMPAT:
		/* If we can tri-state, use BYTE mode instead of NIBBLE mode;
		 * if that fails, revert to NIBBLE mode.  We ought to store
		 * somewhere the device's ability to do BYTE mode reverse
		 * transfers, so we don't end up needlessly calling
		 * negotiate(BYTE) repeatedly. (fb)
*/
if ((port->physport->modes & PARPORT_MODE_TRISTATE) &&
!parport_negotiate (port, IEEE1284_MODE_BYTE)) {
/* got into BYTE mode OK */
pr_debug("%s: Using byte mode\n", port->name);
fn = port->ops->byte_read_data;
break;
}
if (parport_negotiate (port, IEEE1284_MODE_NIBBLE)) {
return -EIO;
}
fallthrough; /* to NIBBLE */
case IEEE1284_MODE_NIBBLE:
pr_debug("%s: Using nibble mode\n", port->name);
fn = port->ops->nibble_read_data;
break;
case IEEE1284_MODE_BYTE:
pr_debug("%s: Using byte mode\n", port->name);
fn = port->ops->byte_read_data;
break;
case IEEE1284_MODE_EPP:
pr_debug("%s: Using EPP mode\n", port->name);
if (addr) {
fn = port->ops->epp_read_addr;
} else {
fn = port->ops->epp_read_data;
}
break;
case IEEE1284_MODE_EPPSWE:
pr_debug("%s: Using software-emulated EPP mode\n", port->name);
if (addr) {
fn = parport_ieee1284_epp_read_addr;
} else {
fn = parport_ieee1284_epp_read_data;
}
break;
case IEEE1284_MODE_ECP:
case IEEE1284_MODE_ECPRLE:
pr_debug("%s: Using ECP mode\n", port->name);
fn = port->ops->ecp_read_data;
break;
case IEEE1284_MODE_ECPSWE:
pr_debug("%s: Using software-emulated ECP mode\n", port->name);
fn = parport_ieee1284_ecp_read_data;
break;
default:
pr_debug("%s: Unknown mode 0x%02x\n",
port->name, port->physport->ieee1284.mode);
return -ENOSYS;
}
return (*fn) (port, buffer, len, 0);
#endif /* IEEE1284 support */
}
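/* Usage sketch (illustrative only): reading into a kernel buffer.  In
 * compatibility mode the switch above first tries to negotiate BYTE mode
 * (if the port can tri-state) and otherwise falls back to NIBBLE mode:
 *
 *	ssize_t n = parport_read(port, buf, sizeof(buf));
 *	if (n < 0)
 *		return n;	(e.g. -EIO if reverse negotiation failed)
 */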
/**
* parport_set_timeout - set the inactivity timeout for a device
* @dev: device on a port
* @inactivity: inactivity timeout (in jiffies)
*
* This sets the inactivity timeout for a particular device on a
* port. This affects functions like parport_wait_peripheral().
* The special value 0 means not to call schedule() while dealing
* with this device.
*
* The return value is the previous inactivity timeout.
*
* Any callers of parport_wait_event() for this device are woken
* up.
*/
long parport_set_timeout (struct pardevice *dev, long inactivity)
{
long int old = dev->timeout;
dev->timeout = inactivity;
if (dev->port->physport->cad == dev)
parport_ieee1284_wakeup (dev->port);
return old;
}
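/* Usage sketch (illustrative only): run a transfer without sleeping (for
 * example from a context that must not schedule) and then restore the old
 * timeout:
 *
 *	long old = parport_set_timeout(dev, 0);
 *	(... non-blocking transfer ...)
 *	parport_set_timeout(dev, old);
 */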
/* Exported symbols for modules. */
EXPORT_SYMBOL(parport_negotiate);
EXPORT_SYMBOL(parport_write);
EXPORT_SYMBOL(parport_read);
EXPORT_SYMBOL(parport_wait_peripheral);
EXPORT_SYMBOL(parport_wait_event);
EXPORT_SYMBOL(parport_set_timeout);
EXPORT_SYMBOL(parport_ieee1284_interrupt);
| linux-master | drivers/parport/ieee1284.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Support for common PCI multi-I/O cards (which is most of them)
*
* Copyright (C) 2001 Tim Waugh <[email protected]>
*
* Multi-function PCI cards are supposed to present separate logical
* devices on the bus. A common thing to do seems to be to just use
* one logical device with lots of base address registers for both
* parallel ports and serial ports. This driver is for dealing with
* that.
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/parport.h>
#include <linux/parport_pc.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/8250_pci.h>
enum parport_pc_pci_cards {
titan_110l = 0,
titan_210l,
netmos_9xx5_combo,
netmos_9855,
netmos_9855_2p,
netmos_9900,
netmos_9900_2p,
netmos_99xx_1p,
avlab_1s1p,
avlab_1s2p,
avlab_2s1p,
siig_1s1p_10x,
siig_2s1p_10x,
siig_2p1s_20x,
siig_1s1p_20x,
siig_2s1p_20x,
timedia_4078a,
timedia_4079h,
timedia_4085h,
timedia_4088a,
timedia_4089a,
timedia_4095a,
timedia_4096a,
timedia_4078u,
timedia_4079a,
timedia_4085u,
timedia_4079r,
timedia_4079s,
timedia_4079d,
timedia_4079e,
timedia_4079f,
timedia_9079a,
timedia_9079b,
timedia_9079c,
wch_ch353_1s1p,
wch_ch353_2s1p,
wch_ch382_0s1p,
wch_ch382_2s1p,
brainboxes_5s1p,
sunix_4008a,
sunix_5069a,
sunix_5079a,
sunix_5099a,
};
/* each element directly indexed from enum list, above */
struct parport_pc_pci {
int numports;
struct { /* BAR (base address registers) numbers in the config
space header */
int lo;
int hi; /* -1 if not there, >6 for offset-method (max
BAR is 6) */
} addr[4];
/* If set, this is called immediately after pci_enable_device.
* If it returns non-zero, no probing will take place and the
* ports will not be used. */
int (*preinit_hook) (struct pci_dev *pdev, struct parport_pc_pci *card,
int autoirq, int autodma);
/* If set, this is called after probing for ports. If 'failed'
* is non-zero we couldn't use any of the ports. */
void (*postinit_hook) (struct pci_dev *pdev,
struct parport_pc_pci *card, int failed);
};
static int netmos_parallel_init(struct pci_dev *dev, struct parport_pc_pci *par,
int autoirq, int autodma)
{
/* the rule described below doesn't hold for this device */
if (dev->device == PCI_DEVICE_ID_NETMOS_9835 &&
dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
dev->subsystem_device == 0x0299)
return -ENODEV;
if (dev->device == PCI_DEVICE_ID_NETMOS_9912) {
par->numports = 1;
} else {
/*
* Netmos uses the subdevice ID to indicate the number of parallel
* and serial ports. The form is 0x00PS, where <P> is the number of
* parallel ports and <S> is the number of serial ports.
*/
par->numports = (dev->subsystem_device & 0xf0) >> 4;
if (par->numports > ARRAY_SIZE(par->addr))
par->numports = ARRAY_SIZE(par->addr);
}
return 0;
}
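/*
 * Illustrative note (not part of the upstream code): with the 0x00PS
 * encoding described above, a subsystem device ID of 0x0020 decodes to two
 * parallel ports and no serial ports, while 0x0012 decodes to one parallel
 * and two serial ports.  The clamp against ARRAY_SIZE(par->addr) keeps an
 * unexpected ID from indexing past the four-entry BAR table.
 */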
static struct parport_pc_pci cards[] = {
/* titan_110l */ { 1, { { 3, -1 }, } },
/* titan_210l */ { 1, { { 3, -1 }, } },
/* netmos_9xx5_combo */ { 1, { { 2, -1 }, }, netmos_parallel_init },
/* netmos_9855 */ { 1, { { 0, -1 }, }, netmos_parallel_init },
/* netmos_9855_2p */ { 2, { { 0, -1 }, { 2, -1 }, } },
/* netmos_9900 */ {1, { { 3, 4 }, }, netmos_parallel_init },
/* netmos_9900_2p */ {2, { { 0, 1 }, { 3, 4 }, } },
/* netmos_99xx_1p */ {1, { { 0, 1 }, } },
/* avlab_1s1p */ { 1, { { 1, 2}, } },
/* avlab_1s2p */ { 2, { { 1, 2}, { 3, 4 },} },
/* avlab_2s1p */ { 1, { { 2, 3}, } },
/* siig_1s1p_10x */ { 1, { { 3, 4 }, } },
/* siig_2s1p_10x */ { 1, { { 4, 5 }, } },
/* siig_2p1s_20x */ { 2, { { 1, 2 }, { 3, 4 }, } },
/* siig_1s1p_20x */ { 1, { { 1, 2 }, } },
/* siig_2s1p_20x */ { 1, { { 2, 3 }, } },
/* timedia_4078a */ { 1, { { 2, -1 }, } },
/* timedia_4079h */ { 1, { { 2, 3 }, } },
/* timedia_4085h */ { 2, { { 2, -1 }, { 4, -1 }, } },
/* timedia_4088a */ { 2, { { 2, 3 }, { 4, 5 }, } },
/* timedia_4089a */ { 2, { { 2, 3 }, { 4, 5 }, } },
/* timedia_4095a */ { 2, { { 2, 3 }, { 4, 5 }, } },
/* timedia_4096a */ { 2, { { 2, 3 }, { 4, 5 }, } },
/* timedia_4078u */ { 1, { { 2, -1 }, } },
/* timedia_4079a */ { 1, { { 2, 3 }, } },
/* timedia_4085u */ { 2, { { 2, -1 }, { 4, -1 }, } },
/* timedia_4079r */ { 1, { { 2, 3 }, } },
/* timedia_4079s */ { 1, { { 2, 3 }, } },
/* timedia_4079d */ { 1, { { 2, 3 }, } },
/* timedia_4079e */ { 1, { { 2, 3 }, } },
/* timedia_4079f */ { 1, { { 2, 3 }, } },
/* timedia_9079a */ { 1, { { 2, 3 }, } },
/* timedia_9079b */ { 1, { { 2, 3 }, } },
/* timedia_9079c */ { 1, { { 2, 3 }, } },
/* wch_ch353_1s1p*/ { 1, { { 1, -1}, } },
/* wch_ch353_2s1p*/ { 1, { { 2, -1}, } },
/* wch_ch382_0s1p*/ { 1, { { 2, -1}, } },
/* wch_ch382_2s1p*/ { 1, { { 2, -1}, } },
/* brainboxes_5s1p */ { 1, { { 3, -1 }, } },
/* sunix_4008a */ { 1, { { 1, 2 }, } },
/* sunix_5069a */ { 1, { { 1, 2 }, } },
/* sunix_5079a */ { 1, { { 1, 2 }, } },
/* sunix_5099a */ { 1, { { 1, 2 }, } },
};
static struct pci_device_id parport_serial_pci_tbl[] = {
/* PCI cards */
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_110L,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, titan_110l },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_210L,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, titan_210l },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9735,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9xx5_combo },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9745,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9xx5_combo },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9835,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9xx5_combo },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9845,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9xx5_combo },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9855,
0x1000, 0x0020, 0, 0, netmos_9855_2p },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9855,
0x1000, 0x0022, 0, 0, netmos_9855_2p },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9855,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9855 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900,
0xA000, 0x3011, 0, 0, netmos_9900 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900,
0xA000, 0x3012, 0, 0, netmos_9900 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900,
0xA000, 0x3020, 0, 0, netmos_9900_2p },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9912,
0xA000, 0x2000, 0, 0, netmos_99xx_1p },
/* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/
{ PCI_VENDOR_ID_AFAVLAB, 0x2110,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1s1p },
{ PCI_VENDOR_ID_AFAVLAB, 0x2111,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1s1p },
{ PCI_VENDOR_ID_AFAVLAB, 0x2112,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1s1p },
{ PCI_VENDOR_ID_AFAVLAB, 0x2140,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1s2p },
{ PCI_VENDOR_ID_AFAVLAB, 0x2141,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1s2p },
{ PCI_VENDOR_ID_AFAVLAB, 0x2142,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1s2p },
{ PCI_VENDOR_ID_AFAVLAB, 0x2160,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_2s1p },
{ PCI_VENDOR_ID_AFAVLAB, 0x2161,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_2s1p },
{ PCI_VENDOR_ID_AFAVLAB, 0x2162,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_2s1p },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_10x_550,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_1s1p_10x },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_10x_650,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_1s1p_10x },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_10x_850,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_1s1p_10x },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_10x_550,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2s1p_10x },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_10x_650,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2s1p_10x },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_10x_850,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2s1p_10x },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P1S_20x_550,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2p1s_20x },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P1S_20x_650,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2p1s_20x },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P1S_20x_850,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2p1s_20x },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_20x_550,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2s1p_20x },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_20x_650,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_1s1p_20x },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_20x_850,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_1s1p_20x },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_20x_550,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2s1p_20x },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_20x_650,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2s1p_20x },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_20x_850,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2s1p_20x },
/* PCI_VENDOR_ID_TIMEDIA/SUNIX has many differing cards ...*/
{ 0x1409, 0x7168, 0x1409, 0x4078, 0, 0, timedia_4078a },
{ 0x1409, 0x7168, 0x1409, 0x4079, 0, 0, timedia_4079h },
{ 0x1409, 0x7168, 0x1409, 0x4085, 0, 0, timedia_4085h },
{ 0x1409, 0x7168, 0x1409, 0x4088, 0, 0, timedia_4088a },
{ 0x1409, 0x7168, 0x1409, 0x4089, 0, 0, timedia_4089a },
{ 0x1409, 0x7168, 0x1409, 0x4095, 0, 0, timedia_4095a },
{ 0x1409, 0x7168, 0x1409, 0x4096, 0, 0, timedia_4096a },
{ 0x1409, 0x7168, 0x1409, 0x5078, 0, 0, timedia_4078u },
{ 0x1409, 0x7168, 0x1409, 0x5079, 0, 0, timedia_4079a },
{ 0x1409, 0x7168, 0x1409, 0x5085, 0, 0, timedia_4085u },
{ 0x1409, 0x7168, 0x1409, 0x6079, 0, 0, timedia_4079r },
{ 0x1409, 0x7168, 0x1409, 0x7079, 0, 0, timedia_4079s },
{ 0x1409, 0x7168, 0x1409, 0x8079, 0, 0, timedia_4079d },
{ 0x1409, 0x7168, 0x1409, 0x9079, 0, 0, timedia_4079e },
{ 0x1409, 0x7168, 0x1409, 0xa079, 0, 0, timedia_4079f },
{ 0x1409, 0x7168, 0x1409, 0xb079, 0, 0, timedia_9079a },
{ 0x1409, 0x7168, 0x1409, 0xc079, 0, 0, timedia_9079b },
{ 0x1409, 0x7168, 0x1409, 0xd079, 0, 0, timedia_9079c },
/* WCH CARDS */
{ 0x4348, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, wch_ch353_1s1p},
{ 0x4348, 0x7053, 0x4348, 0x3253, 0, 0, wch_ch353_2s1p},
{ 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382_0s1p},
{ 0x1c00, 0x3250, 0x1c00, 0x3250, 0, 0, wch_ch382_2s1p},
/* BrainBoxes PX272/PX306 MIO card */
{ PCI_VENDOR_ID_INTASHIELD, 0x4100,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_5s1p },
/* Sunix boards */
{ PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX,
0x0100, 0, 0, sunix_4008a },
{ PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX,
0x0101, 0, 0, sunix_5069a },
{ PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX,
0x0102, 0, 0, sunix_5079a },
{ PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX,
0x0104, 0, 0, sunix_5099a },
{ 0, } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, parport_serial_pci_tbl);
/*
* This table describes the serial "geometry" of these boards. Any
* quirks for these can be found in drivers/serial/8250_pci.c
*
* Cards not tested are marked n/t
* If you have one of these cards and it works for you, please tell me..
*/
static struct pciserial_board pci_parport_serial_boards[] = {
[titan_110l] = {
.flags = FL_BASE1 | FL_BASE_BARS,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[titan_210l] = {
.flags = FL_BASE1 | FL_BASE_BARS,
.num_ports = 2,
.base_baud = 921600,
.uart_offset = 8,
},
[netmos_9xx5_combo] = {
.flags = FL_BASE0 | FL_BASE_BARS,
.num_ports = 1,
.base_baud = 115200,
.uart_offset = 8,
},
[netmos_9855] = {
.flags = FL_BASE2 | FL_BASE_BARS,
.num_ports = 1,
.base_baud = 115200,
.uart_offset = 8,
},
[netmos_9855_2p] = {
.flags = FL_BASE4 | FL_BASE_BARS,
.num_ports = 1,
.base_baud = 115200,
.uart_offset = 8,
},
[netmos_9900] = { /* n/t */
.flags = FL_BASE0 | FL_BASE_BARS,
.num_ports = 1,
.base_baud = 115200,
.uart_offset = 8,
},
[netmos_9900_2p] = { /* parallel only */ /* n/t */
.flags = FL_BASE0,
.num_ports = 0,
.base_baud = 115200,
.uart_offset = 8,
},
[netmos_99xx_1p] = { /* parallel only */ /* n/t */
.flags = FL_BASE0,
.num_ports = 0,
.base_baud = 115200,
.uart_offset = 8,
},
[avlab_1s1p] = { /* n/t */
.flags = FL_BASE0 | FL_BASE_BARS,
.num_ports = 1,
.base_baud = 115200,
.uart_offset = 8,
},
[avlab_1s2p] = { /* n/t */
.flags = FL_BASE0 | FL_BASE_BARS,
.num_ports = 1,
.base_baud = 115200,
.uart_offset = 8,
},
[avlab_2s1p] = { /* n/t */
.flags = FL_BASE0 | FL_BASE_BARS,
.num_ports = 2,
.base_baud = 115200,
.uart_offset = 8,
},
[siig_1s1p_10x] = {
.flags = FL_BASE2,
.num_ports = 1,
.base_baud = 460800,
.uart_offset = 8,
},
[siig_2s1p_10x] = {
.flags = FL_BASE2,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[siig_2p1s_20x] = {
.flags = FL_BASE0,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[siig_1s1p_20x] = {
.flags = FL_BASE0,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[siig_2s1p_20x] = {
.flags = FL_BASE0,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[timedia_4078a] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[timedia_4079h] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[timedia_4085h] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[timedia_4088a] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[timedia_4089a] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[timedia_4095a] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[timedia_4096a] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[timedia_4078u] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[timedia_4079a] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[timedia_4085u] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[timedia_4079r] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[timedia_4079s] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[timedia_4079d] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[timedia_4079e] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[timedia_4079f] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[timedia_9079a] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[timedia_9079b] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[timedia_9079c] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 8,
},
[wch_ch353_1s1p] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 1,
.base_baud = 115200,
.uart_offset = 8,
},
[wch_ch353_2s1p] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 2,
.base_baud = 115200,
.uart_offset = 8,
},
[wch_ch382_0s1p] = {
.flags = FL_BASE0,
.num_ports = 0,
.base_baud = 115200,
.uart_offset = 8,
},
[wch_ch382_2s1p] = {
.flags = FL_BASE0,
.num_ports = 2,
.base_baud = 115200,
.uart_offset = 8,
.first_offset = 0xC0,
},
[brainboxes_5s1p] = {
.flags = FL_BASE2,
.num_ports = 5,
.base_baud = 921600,
.uart_offset = 8,
},
[sunix_4008a] = {
.num_ports = 0,
},
[sunix_5069a] = {
.num_ports = 1,
.base_baud = 921600,
.uart_offset = 0x8,
},
[sunix_5079a] = {
.num_ports = 2,
.base_baud = 921600,
.uart_offset = 0x8,
},
[sunix_5099a] = {
.num_ports = 4,
.base_baud = 921600,
.uart_offset = 0x8,
},
};
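/*
 * Example (illustrative sketch only, with hypothetical IDs and names):
 * supporting an additional card with two UARTs in BAR0 plus one parallel
 * port would mean adding a value to the card index enum, a line to cards[]
 * and to parport_serial_pci_tbl[] above, and a matching geometry entry in
 * the table above, along the lines of:
 *
 *	{ 0xdead, 0xbeef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, example_2s1p },
 *
 *	[example_2s1p] = {
 *		.flags		= FL_BASE0 | FL_BASE_BARS,
 *		.num_ports	= 2,
 *		.base_baud	= 115200,
 *		.uart_offset	= 8,
 *	},
 */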
struct parport_serial_private {
struct serial_private *serial;
int num_par;
struct parport *port[PARPORT_MAX];
struct parport_pc_pci par;
};
/* Register the serial port(s) of a PCI card. */
static int serial_register(struct pci_dev *dev, const struct pci_device_id *id)
{
struct parport_serial_private *priv = pci_get_drvdata (dev);
struct pciserial_board *board;
struct serial_private *serial;
board = &pci_parport_serial_boards[id->driver_data];
if (board->num_ports == 0)
return 0;
serial = pciserial_init_ports(dev, board);
if (IS_ERR(serial))
return PTR_ERR(serial);
priv->serial = serial;
return 0;
}
/* Register the parallel port(s) of a PCI card. */
static int parport_register(struct pci_dev *dev, const struct pci_device_id *id)
{
struct parport_pc_pci *card;
struct parport_serial_private *priv = pci_get_drvdata (dev);
int n, success = 0;
priv->par = cards[id->driver_data];
card = &priv->par;
if (card->preinit_hook &&
card->preinit_hook (dev, card, PARPORT_IRQ_NONE, PARPORT_DMA_NONE))
return -ENODEV;
for (n = 0; n < card->numports; n++) {
struct parport *port;
int lo = card->addr[n].lo;
int hi = card->addr[n].hi;
unsigned long io_lo, io_hi;
int irq;
if (priv->num_par == ARRAY_SIZE (priv->port)) {
dev_warn(&dev->dev,
"only %zu parallel ports supported (%d reported)\n",
ARRAY_SIZE(priv->port), card->numports);
break;
}
io_lo = pci_resource_start (dev, lo);
io_hi = 0;
if ((hi >= 0) && (hi <= 6))
io_hi = pci_resource_start (dev, hi);
else if (hi > 6)
io_lo += hi; /* Reinterpret the meaning of
"hi" as an offset (see SYBA
def.) */
/* TODO: test if sharing interrupts works */
irq = pci_irq_vector(dev, 0);
if (irq < 0)
return irq;
if (irq == 0)
irq = PARPORT_IRQ_NONE;
if (irq == PARPORT_IRQ_NONE) {
dev_dbg(&dev->dev,
"PCI parallel port detected: I/O at %#lx(%#lx)\n",
io_lo, io_hi);
} else {
dev_dbg(&dev->dev,
"PCI parallel port detected: I/O at %#lx(%#lx), IRQ %d\n",
io_lo, io_hi, irq);
}
port = parport_pc_probe_port (io_lo, io_hi, irq,
PARPORT_DMA_NONE, &dev->dev, IRQF_SHARED);
if (port) {
priv->port[priv->num_par++] = port;
success = 1;
}
}
if (card->postinit_hook)
card->postinit_hook (dev, card, !success);
return 0;
}
static int parport_serial_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct parport_serial_private *priv;
int err;
priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
pci_set_drvdata (dev, priv);
err = pcim_enable_device(dev);
if (err)
return err;
err = parport_register(dev, id);
if (err)
return err;
err = serial_register(dev, id);
if (err) {
int i;
for (i = 0; i < priv->num_par; i++)
parport_pc_unregister_port (priv->port[i]);
return err;
}
return 0;
}
static void parport_serial_pci_remove(struct pci_dev *dev)
{
struct parport_serial_private *priv = pci_get_drvdata (dev);
int i;
// Serial ports
if (priv->serial)
pciserial_remove_ports(priv->serial);
// Parallel ports
for (i = 0; i < priv->num_par; i++)
parport_pc_unregister_port (priv->port[i]);
return;
}
static int __maybe_unused parport_serial_pci_suspend(struct device *dev)
{
struct parport_serial_private *priv = dev_get_drvdata(dev);
if (priv->serial)
pciserial_suspend_ports(priv->serial);
/* FIXME: What about parport? */
return 0;
}
static int __maybe_unused parport_serial_pci_resume(struct device *dev)
{
struct parport_serial_private *priv = dev_get_drvdata(dev);
if (priv->serial)
pciserial_resume_ports(priv->serial);
/* FIXME: What about parport? */
return 0;
}
static SIMPLE_DEV_PM_OPS(parport_serial_pm_ops,
parport_serial_pci_suspend, parport_serial_pci_resume);
static struct pci_driver parport_serial_pci_driver = {
.name = "parport_serial",
.id_table = parport_serial_pci_tbl,
.probe = parport_serial_pci_probe,
.remove = parport_serial_pci_remove,
.driver = {
.pm = &parport_serial_pm_ops,
},
};
module_pci_driver(parport_serial_pci_driver);
MODULE_AUTHOR("Tim Waugh <[email protected]>");
MODULE_DESCRIPTION("Driver for common parallel+serial multi-I/O PCI cards");
MODULE_LICENSE("GPL");
| linux-master | drivers/parport/parport_serial.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Low-level parallel port routines for the Multiface 3 card
*
* Author: Joerg Dorchain <[email protected]>
*
* (C) The elitist m68k Users(TM)
*
* based on the existing parport_amiga and lp_mfc
*
*
* From the MFC3 documentation:
*
* Miscellaneous PIA Details
* -------------------------
*
* The two open-drain interrupt outputs /IRQA and /IRQB are routed to
* /INT2 of the Z2 bus.
*
* The CPU data bus of the PIA (D0-D7) is connected to D8-D15 on the Z2
* bus. This means that any PIA registers are accessed at even addresses.
*
* Centronics Pin Connections for the PIA
* --------------------------------------
*
* The following table shows the connections between the PIA and the
* Centronics interface connector. These connections implement a single, but
* very complete, Centronics type interface. The Pin column gives the pin
* numbers of the PIA. The Centronics pin numbers can be found in the section
* "Parallel Connectors".
*
*
* Pin | PIA | Dir | Centronics Names
* -------+-----+-----+---------------------------------------------------------
* 19 | CB2 | --> | /STROBE (aka /DRDY)
* 10-17 | PBx | <-> | DATA0 - DATA7
* 18 | CB1 | <-- | /ACK
* 40 | CA1 | <-- | BUSY
* 3 | PA1 | <-- | PAPER-OUT (aka POUT)
* 4 | PA2 | <-- | SELECTED (aka SEL)
* 9 | PA7 | --> | /INIT (aka /RESET or /INPUT-PRIME)
* 6 | PA4 | <-- | /ERROR (aka /FAULT)
* 7 | PA5 | --> | DIR (aka /SELECT-IN)
* 8 | PA6 | --> | /AUTO-FEED-XT
* 39 | CA2 | --> | open
* 5 | PA3 | <-- | /ACK (same as CB1!)
* 2 | PA0 | <-- | BUSY (same as CA1!)
* -------+-----+-----+---------------------------------------------------------
*
* Should be enough to understand some of the driver.
*
* Per convention for normal use the port registers are visible.
* If you need the data direction registers, restore the value in the
* control register.
*/
#include "multiface.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/parport.h>
#include <linux/delay.h>
#include <linux/mc6821.h>
#include <linux/zorro.h>
#include <linux/interrupt.h>
#include <asm/setup.h>
#include <asm/amigahw.h>
#include <asm/irq.h>
#include <asm/amigaints.h>
/* Maximum Number of Cards supported */
#define MAX_MFC 5
#undef DEBUG
static struct parport *this_port[MAX_MFC] = {NULL, };
static volatile int dummy; /* for trigger reads */
#define pia(dev) ((struct pia *)(dev->base))
static struct parport_operations pp_mfc3_ops;
static void mfc3_write_data(struct parport *p, unsigned char data)
{
pr_debug("write_data %c\n", data);
dummy = pia(p)->pprb; /* clears irq bit */
/* Triggers also /STROBE.*/
pia(p)->pprb = data;
}
static unsigned char mfc3_read_data(struct parport *p)
{
/* clears interrupt bit. Triggers also /STROBE. */
return pia(p)->pprb;
}
static unsigned char control_pc_to_mfc3(unsigned char control)
{
unsigned char ret = 32|64;
if (control & PARPORT_CONTROL_SELECT) /* XXX: What is SELECP? */
ret &= ~32; /* /SELECT_IN */
if (control & PARPORT_CONTROL_INIT) /* INITP */
ret |= 128;
if (control & PARPORT_CONTROL_AUTOFD) /* AUTOLF */
ret &= ~64;
if (control & PARPORT_CONTROL_STROBE) /* Strobe */
/* Handled directly by hardware */;
return ret;
}
static unsigned char control_mfc3_to_pc(unsigned char control)
{
unsigned char ret = PARPORT_CONTROL_STROBE
| PARPORT_CONTROL_AUTOFD | PARPORT_CONTROL_SELECT;
if (control & 128) /* /INITP */
ret |= PARPORT_CONTROL_INIT;
if (control & 64) /* /AUTOLF */
ret &= ~PARPORT_CONTROL_AUTOFD;
if (control & 32) /* /SELECT_IN */
ret &= ~PARPORT_CONTROL_SELECT;
return ret;
}
static void mfc3_write_control(struct parport *p, unsigned char control)
{
pr_debug("write_control %02x\n", control);
pia(p)->ppra = (pia(p)->ppra & 0x1f) | control_pc_to_mfc3(control);
}
static unsigned char mfc3_read_control( struct parport *p)
{
pr_debug("read_control\n");
return control_mfc3_to_pc(pia(p)->ppra & 0xe0);
}
static unsigned char mfc3_frob_control( struct parport *p, unsigned char mask, unsigned char val)
{
unsigned char old;
pr_debug("frob_control mask %02x, value %02x\n", mask, val);
old = mfc3_read_control(p);
mfc3_write_control(p, (old & ~mask) ^ val);
return old;
}
static unsigned char status_mfc3_to_pc(unsigned char status)
{
unsigned char ret = PARPORT_STATUS_BUSY;
if (status & 1) /* Busy */
ret &= ~PARPORT_STATUS_BUSY;
if (status & 2) /* PaperOut */
ret |= PARPORT_STATUS_PAPEROUT;
if (status & 4) /* Selected */
ret |= PARPORT_STATUS_SELECT;
if (status & 8) /* Ack */
ret |= PARPORT_STATUS_ACK;
if (status & 16) /* /ERROR */
ret |= PARPORT_STATUS_ERROR;
return ret;
}
static unsigned char mfc3_read_status(struct parport *p)
{
unsigned char status;
status = status_mfc3_to_pc(pia(p)->ppra & 0x1f);
pr_debug("read_status %02x\n", status);
return status;
}
static int use_cnt;
static irqreturn_t mfc3_interrupt(int irq, void *dev_id)
{
int i;
for( i = 0; i < MAX_MFC; i++)
if (this_port[i] != NULL)
if (pia(this_port[i])->crb & 128) { /* Board caused interrupt */
dummy = pia(this_port[i])->pprb; /* clear irq bit */
parport_generic_irq(this_port[i]);
}
return IRQ_HANDLED;
}
static void mfc3_enable_irq(struct parport *p)
{
pia(p)->crb |= PIA_C1_ENABLE_IRQ;
}
static void mfc3_disable_irq(struct parport *p)
{
pia(p)->crb &= ~PIA_C1_ENABLE_IRQ;
}
static void mfc3_data_forward(struct parport *p)
{
pr_debug("forward\n");
pia(p)->crb &= ~PIA_DDR; /* make data direction register visible */
pia(p)->pddrb = 255; /* all pins output */
pia(p)->crb |= PIA_DDR; /* make data register visible - default */
}
static void mfc3_data_reverse(struct parport *p)
{
pr_debug("reverse\n");
pia(p)->crb &= ~PIA_DDR; /* make data direction register visible */
pia(p)->pddrb = 0; /* all pins input */
pia(p)->crb |= PIA_DDR; /* make data register visible - default */
}
static void mfc3_init_state(struct pardevice *dev, struct parport_state *s)
{
s->u.amiga.data = 0;
s->u.amiga.datadir = 255;
s->u.amiga.status = 0;
s->u.amiga.statusdir = 0xe0;
}
static void mfc3_save_state(struct parport *p, struct parport_state *s)
{
s->u.amiga.data = pia(p)->pprb;
pia(p)->crb &= ~PIA_DDR;
s->u.amiga.datadir = pia(p)->pddrb;
pia(p)->crb |= PIA_DDR;
s->u.amiga.status = pia(p)->ppra;
pia(p)->cra &= ~PIA_DDR;
s->u.amiga.statusdir = pia(p)->pddrb;
pia(p)->cra |= PIA_DDR;
}
static void mfc3_restore_state(struct parport *p, struct parport_state *s)
{
pia(p)->pprb = s->u.amiga.data;
pia(p)->crb &= ~PIA_DDR;
pia(p)->pddrb = s->u.amiga.datadir;
pia(p)->crb |= PIA_DDR;
pia(p)->ppra = s->u.amiga.status;
pia(p)->cra &= ~PIA_DDR;
pia(p)->pddrb = s->u.amiga.statusdir;
pia(p)->cra |= PIA_DDR;
}
static struct parport_operations pp_mfc3_ops = {
.write_data = mfc3_write_data,
.read_data = mfc3_read_data,
.write_control = mfc3_write_control,
.read_control = mfc3_read_control,
.frob_control = mfc3_frob_control,
.read_status = mfc3_read_status,
.enable_irq = mfc3_enable_irq,
.disable_irq = mfc3_disable_irq,
.data_forward = mfc3_data_forward,
.data_reverse = mfc3_data_reverse,
.init_state = mfc3_init_state,
.save_state = mfc3_save_state,
.restore_state = mfc3_restore_state,
.epp_write_data = parport_ieee1284_epp_write_data,
.epp_read_data = parport_ieee1284_epp_read_data,
.epp_write_addr = parport_ieee1284_epp_write_addr,
.epp_read_addr = parport_ieee1284_epp_read_addr,
.ecp_write_data = parport_ieee1284_ecp_write_data,
.ecp_read_data = parport_ieee1284_ecp_read_data,
.ecp_write_addr = parport_ieee1284_ecp_write_addr,
.compat_write_data = parport_ieee1284_write_compat,
.nibble_read_data = parport_ieee1284_read_nibble,
.byte_read_data = parport_ieee1284_read_byte,
.owner = THIS_MODULE,
};
/* ----------- Initialisation code --------------------------------- */
static int __init parport_mfc3_init(void)
{
struct parport *p;
int pias = 0;
struct pia *pp;
struct zorro_dev *z = NULL;
if (!MACH_IS_AMIGA)
return -ENODEV;
while ((z = zorro_find_device(ZORRO_PROD_BSC_MULTIFACE_III, z))) {
unsigned long piabase = z->resource.start+PIABASE;
if (!request_mem_region(piabase, sizeof(struct pia), "PIA"))
continue;
pp = ZTWO_VADDR(piabase);
pp->crb = 0;
pp->pddrb = 255; /* all data pins output */
pp->crb = PIA_DDR|32|8;
dummy = pp->pddrb; /* reading clears interrupt */
pp->cra = 0;
pp->pddra = 0xe0; /* /RESET, /DIR ,/AUTO-FEED output */
pp->cra = PIA_DDR;
pp->ppra = 0; /* reset printer */
udelay(10);
pp->ppra = 128;
p = parport_register_port((unsigned long)pp, IRQ_AMIGA_PORTS,
PARPORT_DMA_NONE, &pp_mfc3_ops);
if (!p)
goto out_port;
if (p->irq != PARPORT_IRQ_NONE) {
if (use_cnt++ == 0)
if (request_irq(IRQ_AMIGA_PORTS, mfc3_interrupt, IRQF_SHARED, p->name, &pp_mfc3_ops))
goto out_irq;
}
p->dev = &z->dev;
this_port[pias++] = p;
pr_info("%s: Multiface III port using irq\n", p->name);
/* XXX: set operating mode */
p->private_data = (void *)piabase;
parport_announce_port (p);
if (pias >= MAX_MFC)
break;
continue;
out_irq:
parport_put_port(p);
out_port:
release_mem_region(piabase, sizeof(struct pia));
}
return pias ? 0 : -ENODEV;
}
static void __exit parport_mfc3_exit(void)
{
int i;
for (i = 0; i < MAX_MFC; i++) {
if (!this_port[i])
continue;
parport_remove_port(this_port[i]);
if (this_port[i]->irq != PARPORT_IRQ_NONE) {
if (--use_cnt == 0)
free_irq(IRQ_AMIGA_PORTS, &pp_mfc3_ops);
}
release_mem_region(ZTWO_PADDR(this_port[i]->private_data), sizeof(struct pia));
parport_put_port(this_port[i]);
}
}
MODULE_AUTHOR("Joerg Dorchain <[email protected]>");
MODULE_DESCRIPTION("Parport Driver for Multiface 3 expansion cards Parallel Port");
MODULE_LICENSE("GPL");
module_init(parport_mfc3_init)
module_exit(parport_mfc3_exit)
| linux-master | drivers/parport/parport_mfc3.c |
/*
* Parallel-port resource manager code.
*
* Authors: David Campbell <[email protected]>
* Tim Waugh <[email protected]>
* Jose Renau <[email protected]>
* Philip Blundell <[email protected]>
* Andrea Arcangeli
*
* based on work by Grant Guenther <[email protected]>
* and Philip Blundell
*
* Any part of this program may be used in documents licensed under
* the GNU Free Documentation License, Version 1.1 or any later version
* published by the Free Software Foundation.
*/
#undef PARPORT_DEBUG_SHARING /* undef for production */
#include <linux/module.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/parport.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/kmod.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <asm/irq.h>
#undef PARPORT_PARANOID
#define PARPORT_DEFAULT_TIMESLICE (HZ/5)
unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
int parport_default_spintime = DEFAULT_SPIN_TIME;
static LIST_HEAD(portlist);
static DEFINE_SPINLOCK(parportlist_lock);
/* list of all allocated ports, sorted by ->number */
static LIST_HEAD(all_ports);
static DEFINE_SPINLOCK(full_list_lock);
static LIST_HEAD(drivers);
static DEFINE_MUTEX(registration_lock);
/* What you can do to a port that's gone away.. */
static void dead_write_lines(struct parport *p, unsigned char b){}
static unsigned char dead_read_lines(struct parport *p) { return 0; }
static unsigned char dead_frob_lines(struct parport *p, unsigned char b,
unsigned char c) { return 0; }
static void dead_onearg(struct parport *p){}
static void dead_initstate(struct pardevice *d, struct parport_state *s) { }
static void dead_state(struct parport *p, struct parport_state *s) { }
static size_t dead_write(struct parport *p, const void *b, size_t l, int f)
{ return 0; }
static size_t dead_read(struct parport *p, void *b, size_t l, int f)
{ return 0; }
static struct parport_operations dead_ops = {
.write_data = dead_write_lines, /* data */
.read_data = dead_read_lines,
.write_control = dead_write_lines, /* control */
.read_control = dead_read_lines,
.frob_control = dead_frob_lines,
.read_status = dead_read_lines, /* status */
.enable_irq = dead_onearg, /* enable_irq */
.disable_irq = dead_onearg, /* disable_irq */
.data_forward = dead_onearg, /* data_forward */
.data_reverse = dead_onearg, /* data_reverse */
.init_state = dead_initstate, /* init_state */
.save_state = dead_state,
.restore_state = dead_state,
.epp_write_data = dead_write, /* epp */
.epp_read_data = dead_read,
.epp_write_addr = dead_write,
.epp_read_addr = dead_read,
.ecp_write_data = dead_write, /* ecp */
.ecp_read_data = dead_read,
.ecp_write_addr = dead_write,
.compat_write_data = dead_write, /* compat */
.nibble_read_data = dead_read, /* nibble */
.byte_read_data = dead_read, /* byte */
.owner = NULL,
};
static struct device_type parport_device_type = {
.name = "parport",
};
static int is_parport(struct device *dev)
{
return dev->type == &parport_device_type;
}
static int parport_probe(struct device *dev)
{
struct parport_driver *drv;
if (is_parport(dev))
return -ENODEV;
drv = to_parport_driver(dev->driver);
if (!drv->probe) {
/* if driver has not defined a custom probe */
struct pardevice *par_dev = to_pardevice(dev);
if (strcmp(par_dev->name, drv->name))
return -ENODEV;
return 0;
}
/* if driver defined its own probe */
return drv->probe(to_pardevice(dev));
}
static struct bus_type parport_bus_type = {
.name = "parport",
.probe = parport_probe,
};
int parport_bus_init(void)
{
return bus_register(&parport_bus_type);
}
void parport_bus_exit(void)
{
bus_unregister(&parport_bus_type);
}
/*
* Iterates through all the drivers registered with the bus and sends the port
* details to the match_port callback of the driver, so that the driver can
* know about the new port that just registered with the bus and decide if it
* wants to use this new port.
*/
static int driver_check(struct device_driver *dev_drv, void *_port)
{
struct parport *port = _port;
struct parport_driver *drv = to_parport_driver(dev_drv);
if (drv->match_port)
drv->match_port(port);
return 0;
}
/* Call attach(port) for each registered driver. */
static void attach_driver_chain(struct parport *port)
{
/* caller has exclusive registration_lock */
struct parport_driver *drv;
list_for_each_entry(drv, &drivers, list)
drv->attach(port);
/*
* call the driver_check function of the drivers registered in
* new device model
*/
bus_for_each_drv(&parport_bus_type, NULL, port, driver_check);
}
static int driver_detach(struct device_driver *_drv, void *_port)
{
struct parport *port = _port;
struct parport_driver *drv = to_parport_driver(_drv);
if (drv->detach)
drv->detach(port);
return 0;
}
/* Call detach(port) for each registered driver. */
static void detach_driver_chain(struct parport *port)
{
struct parport_driver *drv;
/* caller has exclusive registration_lock */
list_for_each_entry(drv, &drivers, list)
drv->detach(port);
/*
* call the detach function of the drivers registered in
* new device model
*/
bus_for_each_drv(&parport_bus_type, NULL, port, driver_detach);
}
/* Ask kmod for some lowlevel drivers. */
static void get_lowlevel_driver(void)
{
/*
* There is no actual module called this: you should set
* up an alias for modutils.
*/
request_module("parport_lowlevel");
}
/*
* Iterates through all the devices connected to the bus and sends the device
* details to the match_port callback of the driver, so that the driver can
* know which ports are connected to the bus and choose the
* port to which it wants to register its device.
*/
static int port_check(struct device *dev, void *dev_drv)
{
struct parport_driver *drv = dev_drv;
/* only send ports, do not send other devices connected to bus */
if (is_parport(dev))
drv->match_port(to_parport_dev(dev));
return 0;
}
/*
* Iterates through all the devices connected to the bus and returns 1
* if the device is a parallel port.
*/
static int port_detect(struct device *dev, void *dev_drv)
{
if (is_parport(dev))
return 1;
return 0;
}
/**
* __parport_register_driver - register a parallel port device driver
* @drv: structure describing the driver
* @owner: owner module of drv
* @mod_name: module name string
*
* This can be called by a parallel port device driver in order
* to receive notifications about ports being found in the
* system, as well as ports no longer available.
*
* If devmodel is true then the new device model is used
* for registration.
*
* The @drv structure is allocated by the caller and must not be
* deallocated until after calling parport_unregister_driver().
*
* If using the non device model:
* The driver's attach() function may block. The port that
* attach() is given will be valid for the duration of the
* callback, but if the driver wants to take a copy of the
* pointer it must call parport_get_port() to do so. Calling
* parport_register_device() on that port will do this for you.
*
* The driver's detach() function may block. The port that
* detach() is given will be valid for the duration of the
* callback, but if the driver wants to take a copy of the
* pointer it must call parport_get_port() to do so.
*
*
* Returns 0 on success. The non device model always succeeds,
* but the new device model can fail and will return the error code.
**/
int __parport_register_driver(struct parport_driver *drv, struct module *owner,
const char *mod_name)
{
/* using device model */
int ret;
/* initialize common driver fields */
drv->driver.name = drv->name;
drv->driver.bus = &parport_bus_type;
drv->driver.owner = owner;
drv->driver.mod_name = mod_name;
ret = driver_register(&drv->driver);
if (ret)
return ret;
/*
* check if bus has any parallel port registered, if
* none is found then load the lowlevel driver.
*/
ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
port_detect);
if (!ret)
get_lowlevel_driver();
mutex_lock(&registration_lock);
if (drv->match_port)
bus_for_each_dev(&parport_bus_type, NULL, drv,
port_check);
mutex_unlock(&registration_lock);
return 0;
}
EXPORT_SYMBOL(__parport_register_driver);
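/*
 * Example (illustrative sketch, hypothetical example_* names): a minimal
 * client driver using the registration interface documented above. Assumes
 * <linux/module.h> and <linux/parport.h> are included.
 *
 *	static void example_match_port(struct parport *port)
 *	{
 *		pr_info("example: parallel port %s is available\n", port->name);
 *	}
 *
 *	static void example_detach(struct parport *port)
 *	{
 *		pr_info("example: parallel port %s is going away\n", port->name);
 *	}
 *
 *	static struct parport_driver example_driver = {
 *		.name		= "example_pardrv",
 *		.match_port	= example_match_port,
 *		.detach		= example_detach,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return parport_register_driver(&example_driver);
 *	}
 *	module_init(example_init);
 *
 *	static void __exit example_exit(void)
 *	{
 *		parport_unregister_driver(&example_driver);
 *	}
 *	module_exit(example_exit);
 */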
static int port_detach(struct device *dev, void *_drv)
{
struct parport_driver *drv = _drv;
if (is_parport(dev) && drv->detach)
drv->detach(to_parport_dev(dev));
return 0;
}
/**
* parport_unregister_driver - deregister a parallel port device driver
* @drv: structure describing the driver that was given to
* parport_register_driver()
*
* This should be called by a parallel port device driver that
* has registered itself using parport_register_driver() when it
* is about to be unloaded.
*
* When it returns, the driver's attach() routine will no longer
* be called, and for each port that attach() was called for, the
* detach() routine will have been called.
*
* All the driver's attach() and detach() calls are guaranteed to have
* finished by the time this function returns.
**/
void parport_unregister_driver(struct parport_driver *drv)
{
mutex_lock(&registration_lock);
bus_for_each_dev(&parport_bus_type, NULL, drv, port_detach);
driver_unregister(&drv->driver);
mutex_unlock(&registration_lock);
}
EXPORT_SYMBOL(parport_unregister_driver);
static void free_port(struct device *dev)
{
int d;
struct parport *port = to_parport_dev(dev);
spin_lock(&full_list_lock);
list_del(&port->full_list);
spin_unlock(&full_list_lock);
for (d = 0; d < 5; d++) {
kfree(port->probe_info[d].class_name);
kfree(port->probe_info[d].mfr);
kfree(port->probe_info[d].model);
kfree(port->probe_info[d].cmdset);
kfree(port->probe_info[d].description);
}
kfree(port->name);
kfree(port);
}
/**
* parport_get_port - increment a port's reference count
* @port: the port
*
* This ensures that a struct parport pointer remains valid
* until the matching parport_put_port() call.
**/
struct parport *parport_get_port(struct parport *port)
{
struct device *dev = get_device(&port->bus_dev);
return to_parport_dev(dev);
}
EXPORT_SYMBOL(parport_get_port);
void parport_del_port(struct parport *port)
{
device_unregister(&port->bus_dev);
}
EXPORT_SYMBOL(parport_del_port);
/**
* parport_put_port - decrement a port's reference count
* @port: the port
*
* This should be called once for each call to parport_get_port(),
* once the port is no longer needed. When the reference count reaches
* zero (port is no longer used), free_port is called.
**/
void parport_put_port(struct parport *port)
{
put_device(&port->bus_dev);
}
EXPORT_SYMBOL(parport_put_port);
/**
* parport_register_port - register a parallel port
* @base: base I/O address
* @irq: IRQ line
* @dma: DMA channel
* @ops: pointer to the port driver's port operations structure
*
* When a parallel port (lowlevel) driver finds a port that
* should be made available to parallel port device drivers, it
* should call parport_register_port(). The @base, @irq, and
* @dma parameters are for the convenience of port drivers, and
* for ports where they aren't meaningful needn't be set to
* anything special. They can be altered afterwards by adjusting
* the relevant members of the parport structure that is returned
* and represents the port. They should not be tampered with
* after calling parport_announce_port, however.
*
* If there are parallel port device drivers in the system that
* have registered themselves using parport_register_driver(),
* they are not told about the port at this time; that is done by
* parport_announce_port().
*
* The @ops structure is allocated by the caller, and must not be
* deallocated before calling parport_remove_port().
*
* If there is no memory to allocate a new parport structure,
* this function will return %NULL.
**/
struct parport *parport_register_port(unsigned long base, int irq, int dma,
struct parport_operations *ops)
{
struct list_head *l;
struct parport *tmp;
int num;
int device;
char *name;
int ret;
tmp = kzalloc(sizeof(struct parport), GFP_KERNEL);
if (!tmp)
return NULL;
/* Init our structure */
tmp->base = base;
tmp->irq = irq;
tmp->dma = dma;
tmp->muxport = tmp->daisy = tmp->muxsel = -1;
tmp->modes = 0;
INIT_LIST_HEAD(&tmp->list);
tmp->devices = tmp->cad = NULL;
tmp->flags = 0;
tmp->ops = ops;
tmp->physport = tmp;
memset(tmp->probe_info, 0, 5 * sizeof(struct parport_device_info));
rwlock_init(&tmp->cad_lock);
spin_lock_init(&tmp->waitlist_lock);
spin_lock_init(&tmp->pardevice_lock);
tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
sema_init(&tmp->ieee1284.irq, 0);
tmp->spintime = parport_default_spintime;
atomic_set(&tmp->ref_count, 1);
INIT_LIST_HEAD(&tmp->full_list);
name = kmalloc(PARPORT_NAME_MAX_LEN, GFP_KERNEL);
if (!name) {
kfree(tmp);
return NULL;
}
/* Search for the lowest free parport number. */
spin_lock(&full_list_lock);
for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) {
struct parport *p = list_entry(l, struct parport, full_list);
if (p->number != num)
break;
}
tmp->portnum = tmp->number = num;
list_add_tail(&tmp->full_list, l);
spin_unlock(&full_list_lock);
/*
* Now that the portnum is known finish doing the Init.
*/
sprintf(name, "parport%d", tmp->portnum = tmp->number);
tmp->name = name;
tmp->bus_dev.bus = &parport_bus_type;
tmp->bus_dev.release = free_port;
dev_set_name(&tmp->bus_dev, name);
tmp->bus_dev.type = &parport_device_type;
for (device = 0; device < 5; device++)
/* assume the worst */
tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;
tmp->waithead = tmp->waittail = NULL;
ret = device_register(&tmp->bus_dev);
if (ret) {
put_device(&tmp->bus_dev);
return NULL;
}
return tmp;
}
EXPORT_SYMBOL(parport_register_port);
/**
* parport_announce_port - tell device drivers about a parallel port
* @port: parallel port to announce
*
* After a port driver has registered a parallel port with
* parport_register_port, and performed any necessary
* initialisation or adjustments, it should call
* parport_announce_port() in order to notify all device drivers
* that have called parport_register_driver(). Their attach()
* functions will be called, with @port as the parameter.
**/
void parport_announce_port(struct parport *port)
{
int i;
#ifdef CONFIG_PARPORT_1284
/* Analyse the IEEE1284.3 topology of the port. */
parport_daisy_init(port);
#endif
if (!port->dev)
pr_warn("%s: fix this legacy no-device port driver!\n",
port->name);
parport_proc_register(port);
mutex_lock(&registration_lock);
spin_lock_irq(&parportlist_lock);
list_add_tail(&port->list, &portlist);
for (i = 1; i < 3; i++) {
struct parport *slave = port->slaves[i-1];
if (slave)
list_add_tail(&slave->list, &portlist);
}
spin_unlock_irq(&parportlist_lock);
/* Let drivers know that new port(s) have arrived. */
attach_driver_chain(port);
for (i = 1; i < 3; i++) {
struct parport *slave = port->slaves[i-1];
if (slave)
attach_driver_chain(slave);
}
mutex_unlock(&registration_lock);
}
EXPORT_SYMBOL(parport_announce_port);
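/*
 * Illustrative lifecycle for a lowlevel port driver, as described in the
 * comments above (a sketch only; parport_amiga.c and parport_mfc3.c in this
 * directory are complete examples). my_ops stands for a hypothetical
 * struct parport_operations provided by the port driver:
 *
 *	p = parport_register_port(base, irq, PARPORT_DMA_NONE, &my_ops);
 *	if (!p)
 *		return -ENOMEM;
 *	(adjust p->modes, request the IRQ and I/O resources, then:)
 *	parport_announce_port(p);
 *	(port is in use; attach()/detach() notifications happen from here on)
 *	parport_remove_port(p);
 *	parport_put_port(p);
 */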
/**
* parport_remove_port - deregister a parallel port
* @port: parallel port to deregister
*
* When a parallel port driver is forcibly unloaded, or a
* parallel port becomes inaccessible, the port driver must call
* this function in order to deal with device drivers that still
* want to use it.
*
* The parport structure associated with the port has its
* operations structure replaced with one containing 'null'
* operations that return errors or just don't do anything.
*
* Any drivers that have registered themselves using
* parport_register_driver() are notified that the port is no
* longer accessible by having their detach() routines called
* with @port as the parameter.
**/
void parport_remove_port(struct parport *port)
{
int i;
mutex_lock(&registration_lock);
/* Spread the word. */
detach_driver_chain(port);
#ifdef CONFIG_PARPORT_1284
/* Forget the IEEE1284.3 topology of the port. */
parport_daisy_fini(port);
for (i = 1; i < 3; i++) {
struct parport *slave = port->slaves[i-1];
if (!slave)
continue;
detach_driver_chain(slave);
parport_daisy_fini(slave);
}
#endif
port->ops = &dead_ops;
spin_lock(&parportlist_lock);
list_del_init(&port->list);
for (i = 1; i < 3; i++) {
struct parport *slave = port->slaves[i-1];
if (slave)
list_del_init(&slave->list);
}
spin_unlock(&parportlist_lock);
mutex_unlock(&registration_lock);
parport_proc_unregister(port);
for (i = 1; i < 3; i++) {
struct parport *slave = port->slaves[i-1];
if (slave)
parport_put_port(slave);
}
}
EXPORT_SYMBOL(parport_remove_port);
static void free_pardevice(struct device *dev)
{
struct pardevice *par_dev = to_pardevice(dev);
kfree(par_dev->name);
kfree(par_dev);
}
/**
* parport_register_dev_model - register a device on a parallel port
* @port: port to which the device is attached
* @name: a name to refer to the device
* @par_dev_cb: struct containing callbacks
* @id: device number to be given to the device
*
* This function, called by parallel port device drivers,
* declares that a device is connected to a port, and tells the
* system all it needs to know.
*
* The struct pardev_cb contains pointers to the callbacks. The preemption
* callback function, @preempt, is called when this device driver
* has claimed access to the port but another device driver wants
* to use it. It is given, @private, as its parameter, and should
* return zero if it is willing for the system to release the port
* to another driver on its behalf. If it wants to keep control of
* the port it should return non-zero, and no action will be taken.
* It is good manners for the driver to try to release the port at
* the earliest opportunity after its preemption callback rejects a
* preemption attempt. Note that if a preemption callback is happy
* for preemption to go ahead, there is no need to release the
* port; it is done automatically. This function may not block, as
* it may be called from interrupt context. If the device driver
* does not support preemption, @preempt can be %NULL.
*
* The wake-up ("kick") callback function, @wakeup, is called when
* the port is available to be claimed for exclusive access; that
* is, parport_claim() is guaranteed to succeed when called from
* inside the wake-up callback function. If the driver wants to
* claim the port it should do so; otherwise, it need not take
* any action. This function may not block, as it may be called
* from interrupt context. If the device driver does not want to
* be explicitly invited to claim the port in this way, @wakeup can
* be %NULL.
*
* The interrupt handler, @irq_func, is called when an interrupt
* arrives from the parallel port. Note that if a device driver
* wants to use interrupts it should use parport_enable_irq(),
* and can also check the irq member of the parport structure
* representing the port.
*
* The parallel port (lowlevel) driver is the one that has called
* request_irq() and whose interrupt handler is called first.
* This handler does whatever needs to be done to the hardware to
* acknowledge the interrupt (for PC-style ports there is nothing
* special to be done). It then tells the IEEE 1284 code about
* the interrupt, which may involve reacting to an IEEE 1284
* event depending on the current IEEE 1284 phase. After this,
* it calls @irq_func. Needless to say, @irq_func will be called
* from interrupt context, and may not block.
*
* The %PARPORT_DEV_EXCL flag is for preventing port sharing, and
* so should only be used when sharing the port with other device
* drivers is impossible and would lead to incorrect behaviour.
* Use it sparingly! Normally, @flags will be zero.
*
* This function returns a pointer to a structure that represents
* the device on the port, or %NULL if there is not enough memory
* to allocate space for that structure.
**/
struct pardevice *
parport_register_dev_model(struct parport *port, const char *name,
const struct pardev_cb *par_dev_cb, int id)
{
struct pardevice *par_dev;
int ret;
char *devname;
if (port->physport->flags & PARPORT_FLAG_EXCL) {
/* An exclusive device is registered. */
pr_err("%s: no more devices allowed\n", port->name);
return NULL;
}
if (par_dev_cb->flags & PARPORT_DEV_LURK) {
if (!par_dev_cb->preempt || !par_dev_cb->wakeup) {
pr_info("%s: refused to register lurking device (%s) without callbacks\n",
port->name, name);
return NULL;
}
}
if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
if (port->physport->devices) {
/*
* If a device is already registered and this new
* device wants exclusive access, then no need to
* continue as we can not grant exclusive access to
* this device.
*/
pr_err("%s: cannot grant exclusive access for device %s\n",
port->name, name);
return NULL;
}
}
if (!try_module_get(port->ops->owner))
return NULL;
parport_get_port(port);
par_dev = kzalloc(sizeof(*par_dev), GFP_KERNEL);
if (!par_dev)
goto err_put_port;
par_dev->state = kzalloc(sizeof(*par_dev->state), GFP_KERNEL);
if (!par_dev->state)
goto err_put_par_dev;
devname = kstrdup(name, GFP_KERNEL);
if (!devname)
goto err_free_par_dev;
par_dev->name = devname;
par_dev->port = port;
par_dev->daisy = -1;
par_dev->preempt = par_dev_cb->preempt;
par_dev->wakeup = par_dev_cb->wakeup;
par_dev->private = par_dev_cb->private;
par_dev->flags = par_dev_cb->flags;
par_dev->irq_func = par_dev_cb->irq_func;
par_dev->waiting = 0;
par_dev->timeout = 5 * HZ;
par_dev->dev.parent = &port->bus_dev;
par_dev->dev.bus = &parport_bus_type;
ret = dev_set_name(&par_dev->dev, "%s.%d", devname, id);
if (ret)
goto err_free_devname;
par_dev->dev.release = free_pardevice;
par_dev->devmodel = true;
ret = device_register(&par_dev->dev);
if (ret) {
kfree(par_dev->state);
put_device(&par_dev->dev);
goto err_put_port;
}
/* Chain this onto the list */
par_dev->prev = NULL;
/*
* This function must not run from an irq handler so we don't need
* to clear irq on the local CPU. -arca
*/
spin_lock(&port->physport->pardevice_lock);
if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
if (port->physport->devices) {
spin_unlock(&port->physport->pardevice_lock);
pr_debug("%s: cannot grant exclusive access for device %s\n",
port->name, name);
kfree(par_dev->state);
device_unregister(&par_dev->dev);
goto err_put_port;
}
port->flags |= PARPORT_FLAG_EXCL;
}
par_dev->next = port->physport->devices;
wmb(); /*
* Make sure that tmp->next is written before it's
* added to the list; see comments marked 'no locking
* required'
*/
if (port->physport->devices)
port->physport->devices->prev = par_dev;
port->physport->devices = par_dev;
spin_unlock(&port->physport->pardevice_lock);
init_waitqueue_head(&par_dev->wait_q);
par_dev->timeslice = parport_default_timeslice;
par_dev->waitnext = NULL;
par_dev->waitprev = NULL;
/*
* This has to be run as last thing since init_state may need other
* pardevice fields. -arca
*/
port->ops->init_state(par_dev, par_dev->state);
if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
port->proc_device = par_dev;
parport_device_proc_register(par_dev);
}
return par_dev;
err_free_devname:
kfree(devname);
err_free_par_dev:
kfree(par_dev->state);
err_put_par_dev:
if (!par_dev->devmodel)
kfree(par_dev);
err_put_port:
parport_put_port(port);
module_put(port->ops->owner);
return NULL;
}
EXPORT_SYMBOL(parport_register_dev_model);
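/*
 * Example (illustrative sketch, hypothetical example_* names): registering a
 * device from a driver's match_port() callback using the struct pardev_cb
 * callbacks described above. The preempt callback returns 0 to allow another
 * driver to take the port and non-zero to keep it; the wakeup callback may
 * claim the port.
 *
 *	static int example_preempt(void *private)
 *	{
 *		return 0;
 *	}
 *
 *	static void example_wakeup(void *private)
 *	{
 *	}
 *
 *	static void example_found_port(struct parport *port)
 *	{
 *		struct pardev_cb cb = {
 *			.preempt = example_preempt,
 *			.wakeup  = example_wakeup,
 *		};
 *		struct pardevice *pdev;
 *
 *		pdev = parport_register_dev_model(port, "example", &cb, 0);
 *		if (!pdev)
 *			pr_err("example: could not register on %s\n", port->name);
 *	}
 */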
/**
* parport_unregister_device - deregister a device on a parallel port
* @dev: pointer to structure representing device
*
* This undoes the effect of parport_register_device().
**/
void parport_unregister_device(struct pardevice *dev)
{
struct parport *port;
#ifdef PARPORT_PARANOID
if (!dev) {
pr_err("%s: passed NULL\n", __func__);
return;
}
#endif
port = dev->port->physport;
if (port->proc_device == dev) {
port->proc_device = NULL;
clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
parport_device_proc_unregister(dev);
}
if (port->cad == dev) {
printk(KERN_DEBUG "%s: %s forgot to release port\n",
port->name, dev->name);
parport_release(dev);
}
spin_lock(&port->pardevice_lock);
if (dev->next)
dev->next->prev = dev->prev;
if (dev->prev)
dev->prev->next = dev->next;
else
port->devices = dev->next;
if (dev->flags & PARPORT_DEV_EXCL)
port->flags &= ~PARPORT_FLAG_EXCL;
spin_unlock(&port->pardevice_lock);
/*
* Make sure we haven't left any pointers around in the wait
* list.
*/
spin_lock_irq(&port->waitlist_lock);
if (dev->waitprev || dev->waitnext || port->waithead == dev) {
if (dev->waitprev)
dev->waitprev->waitnext = dev->waitnext;
else
port->waithead = dev->waitnext;
if (dev->waitnext)
dev->waitnext->waitprev = dev->waitprev;
else
port->waittail = dev->waitprev;
}
spin_unlock_irq(&port->waitlist_lock);
kfree(dev->state);
device_unregister(&dev->dev);
module_put(port->ops->owner);
parport_put_port(port);
}
EXPORT_SYMBOL(parport_unregister_device);
/**
* parport_find_number - find a parallel port by number
* @number: parallel port number
*
* This returns the parallel port with the specified number, or
* %NULL if there is none.
*
* There is an implicit parport_get_port() done already; to throw
* away the reference to the port that parport_find_number()
* gives you, use parport_put_port().
*/
struct parport *parport_find_number(int number)
{
struct parport *port, *result = NULL;
if (list_empty(&portlist))
get_lowlevel_driver();
spin_lock(&parportlist_lock);
list_for_each_entry(port, &portlist, list) {
if (port->number == number) {
result = parport_get_port(port);
break;
}
}
spin_unlock(&parportlist_lock);
return result;
}
EXPORT_SYMBOL(parport_find_number);
/**
* parport_find_base - find a parallel port by base address
* @base: base I/O address
*
* This returns the parallel port with the specified base
* address, or %NULL if there is none.
*
* There is an implicit parport_get_port() done already; to throw
* away the reference to the port that parport_find_base()
* gives you, use parport_put_port().
*/
struct parport *parport_find_base(unsigned long base)
{
struct parport *port, *result = NULL;
if (list_empty(&portlist))
get_lowlevel_driver();
spin_lock(&parportlist_lock);
list_for_each_entry(port, &portlist, list) {
if (port->base == base) {
result = parport_get_port(port);
break;
}
}
spin_unlock(&parportlist_lock);
return result;
}
EXPORT_SYMBOL(parport_find_base);
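/*
 * Example (illustrative sketch): looking up a port by number and dropping the
 * implicit reference once done, as described above.
 *
 *	static bool example_port0_exists(void)
 *	{
 *		struct parport *port = parport_find_number(0);
 *
 *		if (!port)
 *			return false;
 *		pr_info("found %s at base %#lx\n", port->name, port->base);
 *		parport_put_port(port);
 *		return true;
 *	}
 */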
/**
* parport_claim - claim access to a parallel port device
* @dev: pointer to structure representing a device on the port
*
* This function will not block and so can be used from interrupt
* context. If parport_claim() succeeds in claiming access to
* the port it returns zero and the port is available to use. It
* may fail (returning non-zero) if the port is in use by another
* driver and that driver is not willing to relinquish control of
* the port.
**/
int parport_claim(struct pardevice *dev)
{
struct pardevice *oldcad;
struct parport *port = dev->port->physport;
unsigned long flags;
if (port->cad == dev) {
pr_info("%s: %s already owner\n", dev->port->name, dev->name);
return 0;
}
/* Preempt any current device */
write_lock_irqsave(&port->cad_lock, flags);
oldcad = port->cad;
if (oldcad) {
if (oldcad->preempt) {
if (oldcad->preempt(oldcad->private))
goto blocked;
port->ops->save_state(port, dev->state);
} else
goto blocked;
if (port->cad != oldcad) {
/*
* I think we'll actually deadlock rather than
* get here, but just in case..
*/
pr_warn("%s: %s released port when preempted!\n",
port->name, oldcad->name);
if (port->cad)
goto blocked;
}
}
/* Can't fail from now on, so mark ourselves as no longer waiting. */
if (dev->waiting & 1) {
dev->waiting = 0;
/* Take ourselves out of the wait list again. */
spin_lock_irq(&port->waitlist_lock);
if (dev->waitprev)
dev->waitprev->waitnext = dev->waitnext;
else
port->waithead = dev->waitnext;
if (dev->waitnext)
dev->waitnext->waitprev = dev->waitprev;
else
port->waittail = dev->waitprev;
spin_unlock_irq(&port->waitlist_lock);
dev->waitprev = dev->waitnext = NULL;
}
/* Now we do the change of devices */
port->cad = dev;
#ifdef CONFIG_PARPORT_1284
/* If it's a mux port, select it. */
if (dev->port->muxport >= 0) {
/* FIXME */
port->muxsel = dev->port->muxport;
}
/* If it's a daisy chain device, select it. */
if (dev->daisy >= 0) {
/* This could be lazier. */
if (!parport_daisy_select(port, dev->daisy,
IEEE1284_MODE_COMPAT))
port->daisy = dev->daisy;
}
#endif /* IEEE1284.3 support */
/* Restore control registers */
port->ops->restore_state(port, dev->state);
write_unlock_irqrestore(&port->cad_lock, flags);
dev->time = jiffies;
return 0;
blocked:
/*
* If this is the first time we tried to claim the port, register an
* interest. This is only allowed for devices sleeping in
* parport_claim_or_block(), or those with a wakeup function.
*/
/* The cad_lock is still held for writing here */
if (dev->waiting & 2 || dev->wakeup) {
spin_lock(&port->waitlist_lock);
if (test_and_set_bit(0, &dev->waiting) == 0) {
/* First add ourselves to the end of the wait list. */
dev->waitnext = NULL;
dev->waitprev = port->waittail;
if (port->waittail) {
port->waittail->waitnext = dev;
port->waittail = dev;
} else
port->waithead = port->waittail = dev;
}
spin_unlock(&port->waitlist_lock);
}
write_unlock_irqrestore(&port->cad_lock, flags);
return -EAGAIN;
}
EXPORT_SYMBOL(parport_claim);
/**
* parport_claim_or_block - claim access to a parallel port device
* @dev: pointer to structure representing a device on the port
*
* This behaves like parport_claim(), but will block if necessary
* to wait for the port to be free. A return value of 1
* indicates that it slept; 0 means that it succeeded without
* needing to sleep. A negative error code indicates failure.
**/
int parport_claim_or_block(struct pardevice *dev)
{
int r;
/*
* Signal to parport_claim() that we can wait even without a
* wakeup function.
*/
dev->waiting = 2;
/* Try to claim the port. If this fails, we need to sleep. */
r = parport_claim(dev);
if (r == -EAGAIN) {
#ifdef PARPORT_DEBUG_SHARING
printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n",
dev->name);
#endif
/*
* FIXME!!! Use the proper locking for dev->waiting,
* and make this use the "wait_event_interruptible()"
* interfaces. The cli/sti that used to be here
* did nothing.
*
* See also parport_release()
*/
/*
* If dev->waiting is clear now, an interrupt
* gave us the port and we would deadlock if we slept.
*/
if (dev->waiting) {
wait_event_interruptible(dev->wait_q,
!dev->waiting);
if (signal_pending(current))
return -EINTR;
r = 1;
} else {
r = 0;
#ifdef PARPORT_DEBUG_SHARING
printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
dev->name);
#endif
}
#ifdef PARPORT_DEBUG_SHARING
if (dev->port->physport->cad != dev)
printk(KERN_DEBUG "%s: exiting parport_claim_or_block but %s owns port!\n",
dev->name, dev->port->physport->cad ?
dev->port->physport->cad->name : "nobody");
#endif
}
dev->waiting = 0;
return r;
}
EXPORT_SYMBOL(parport_claim_or_block);
/**
* parport_release - give up access to a parallel port device
* @dev: pointer to structure representing parallel port device
*
* This function cannot fail, but it should not be called without
* the port claimed. Similarly, if the port is already claimed
* you should not try claiming it again.
**/
void parport_release(struct pardevice *dev)
{
struct parport *port = dev->port->physport;
struct pardevice *pd;
unsigned long flags;
/* Make sure that dev is the current device */
write_lock_irqsave(&port->cad_lock, flags);
if (port->cad != dev) {
write_unlock_irqrestore(&port->cad_lock, flags);
pr_warn("%s: %s tried to release parport when not owner\n",
port->name, dev->name);
return;
}
#ifdef CONFIG_PARPORT_1284
/* If this is on a mux port, deselect it. */
if (dev->port->muxport >= 0) {
/* FIXME */
port->muxsel = -1;
}
/* If this is a daisy device, deselect it. */
if (dev->daisy >= 0) {
parport_daisy_deselect_all(port);
port->daisy = -1;
}
#endif
port->cad = NULL;
write_unlock_irqrestore(&port->cad_lock, flags);
/* Save control registers */
port->ops->save_state(port, dev->state);
/*
* If anybody is waiting, find out who's been there longest and
* then wake them up. (Note: no locking required)
*/
/* !!! LOCKING IS NEEDED HERE */
for (pd = port->waithead; pd; pd = pd->waitnext) {
if (pd->waiting & 2) { /* sleeping in claim_or_block */
parport_claim(pd);
if (waitqueue_active(&pd->wait_q))
wake_up_interruptible(&pd->wait_q);
return;
} else if (pd->wakeup) {
pd->wakeup(pd->private);
if (dev->port->cad) /* racy but no matter */
return;
} else {
pr_err("%s: don't know how to wake %s\n",
port->name, pd->name);
}
}
/*
* Nobody was waiting, so walk the list to see if anyone is
* interested in being woken up. (Note: no locking required)
*/
/* !!! LOCKING IS NEEDED HERE */
for (pd = port->devices; !port->cad && pd; pd = pd->next) {
if (pd->wakeup && pd != dev)
pd->wakeup(pd->private);
}
}
EXPORT_SYMBOL(parport_release);
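/*
 * Example (illustrative sketch, hypothetical example_* names): the usual
 * claim/use/release pattern from process context. parport_claim_or_block()
 * may sleep; parport_write() transfers data using the port's current
 * IEEE 1284 mode (compatibility mode by default).
 *
 *	static ssize_t example_send(struct pardevice *pdev,
 *				    const void *buf, size_t len)
 *	{
 *		ssize_t written;
 *		int ret;
 *
 *		ret = parport_claim_or_block(pdev);
 *		if (ret < 0)
 *			return ret;
 *
 *		written = parport_write(pdev->port, buf, len);
 *
 *		parport_release(pdev);
 *		return written;
 *	}
 */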
irqreturn_t parport_irq_handler(int irq, void *dev_id)
{
struct parport *port = dev_id;
parport_generic_irq(port);
return IRQ_HANDLED;
}
EXPORT_SYMBOL(parport_irq_handler);
MODULE_LICENSE("GPL");
| linux-master | drivers/parport/share.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Low-level parallel port routines for the Amiga built-in port
*
* Author: Joerg Dorchain <[email protected]>
*
* This is a complete rewrite of the code, but based heavily upon the old
* lp_intern code.
*
* The built-in Amiga parallel port provides one port at a fixed address
* with 8 bidirectional data lines (D0 - D7) and 3 bidirectional status
* lines (BUSY, POUT, SEL), 1 output control line /STROBE (raised automatically
* in hardware when the data register is accessed), and 1 input control line
* /ACK, which can raise an interrupt; neither control line is directly
* settable by software.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/parport.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <asm/setup.h>
#include <asm/amigahw.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/amigaints.h>
#undef DEBUG
static void amiga_write_data(struct parport *p, unsigned char data)
{
pr_debug("write_data %c\n", data);
/* Triggers also /STROBE. This behavior cannot be changed */
ciaa.prb = data;
mb();
}
static unsigned char amiga_read_data(struct parport *p)
{
/* Triggers also /STROBE. This behavior cannot be changed */
return ciaa.prb;
}
static unsigned char control_amiga_to_pc(unsigned char control)
{
return PARPORT_CONTROL_SELECT |
PARPORT_CONTROL_AUTOFD | PARPORT_CONTROL_STROBE;
/* fake value: interrupt enable, select in, no reset,
no autolf, no strobe - seems to be closest to the wiring diagram */
}
static void amiga_write_control(struct parport *p, unsigned char control)
{
pr_debug("write_control %02x\n", control);
/* No implementation possible */
}
static unsigned char amiga_read_control( struct parport *p)
{
pr_debug("read_control\n");
return control_amiga_to_pc(0);
}
static unsigned char amiga_frob_control( struct parport *p, unsigned char mask, unsigned char val)
{
unsigned char old;
pr_debug("frob_control mask %02x, value %02x\n", mask, val);
old = amiga_read_control(p);
amiga_write_control(p, (old & ~mask) ^ val);
return old;
}
static unsigned char status_amiga_to_pc(unsigned char status)
{
unsigned char ret = PARPORT_STATUS_BUSY | PARPORT_STATUS_ACK | PARPORT_STATUS_ERROR;
if (status & 1) /* Busy */
ret &= ~PARPORT_STATUS_BUSY;
if (status & 2) /* PaperOut */
ret |= PARPORT_STATUS_PAPEROUT;
if (status & 4) /* Selected */
ret |= PARPORT_STATUS_SELECT;
/* the rest is not connected or handled autonomously in hardware */
return ret;
}
static unsigned char amiga_read_status(struct parport *p)
{
unsigned char status;
status = status_amiga_to_pc(ciab.pra & 7);
pr_debug("read_status %02x\n", status);
return status;
}
static void amiga_enable_irq(struct parport *p)
{
enable_irq(IRQ_AMIGA_CIAA_FLG);
}
static void amiga_disable_irq(struct parport *p)
{
disable_irq(IRQ_AMIGA_CIAA_FLG);
}
static void amiga_data_forward(struct parport *p)
{
pr_debug("forward\n");
ciaa.ddrb = 0xff; /* all pins output */
mb();
}
static void amiga_data_reverse(struct parport *p)
{
pr_debug("reverse\n");
ciaa.ddrb = 0; /* all pins input */
mb();
}
static void amiga_init_state(struct pardevice *dev, struct parport_state *s)
{
s->u.amiga.data = 0;
s->u.amiga.datadir = 255;
s->u.amiga.status = 0;
s->u.amiga.statusdir = 0;
}
static void amiga_save_state(struct parport *p, struct parport_state *s)
{
mb();
s->u.amiga.data = ciaa.prb;
s->u.amiga.datadir = ciaa.ddrb;
s->u.amiga.status = ciab.pra & 7;
s->u.amiga.statusdir = ciab.ddra & 7;
mb();
}
static void amiga_restore_state(struct parport *p, struct parport_state *s)
{
mb();
ciaa.prb = s->u.amiga.data;
ciaa.ddrb = s->u.amiga.datadir;
ciab.pra |= (ciab.pra & 0xf8) | s->u.amiga.status;
ciab.ddra |= (ciab.ddra & 0xf8) | s->u.amiga.statusdir;
mb();
}
static struct parport_operations pp_amiga_ops = {
.write_data = amiga_write_data,
.read_data = amiga_read_data,
.write_control = amiga_write_control,
.read_control = amiga_read_control,
.frob_control = amiga_frob_control,
.read_status = amiga_read_status,
.enable_irq = amiga_enable_irq,
.disable_irq = amiga_disable_irq,
.data_forward = amiga_data_forward,
.data_reverse = amiga_data_reverse,
.init_state = amiga_init_state,
.save_state = amiga_save_state,
.restore_state = amiga_restore_state,
.epp_write_data = parport_ieee1284_epp_write_data,
.epp_read_data = parport_ieee1284_epp_read_data,
.epp_write_addr = parport_ieee1284_epp_write_addr,
.epp_read_addr = parport_ieee1284_epp_read_addr,
.ecp_write_data = parport_ieee1284_ecp_write_data,
.ecp_read_data = parport_ieee1284_ecp_read_data,
.ecp_write_addr = parport_ieee1284_ecp_write_addr,
.compat_write_data = parport_ieee1284_write_compat,
.nibble_read_data = parport_ieee1284_read_nibble,
.byte_read_data = parport_ieee1284_read_byte,
.owner = THIS_MODULE,
};
/* ----------- Initialisation code --------------------------------- */
static int __init amiga_parallel_probe(struct platform_device *pdev)
{
struct parport *p;
int err;
ciaa.ddrb = 0xff;
ciab.ddra &= 0xf8;
mb();
p = parport_register_port((unsigned long)&ciaa.prb, IRQ_AMIGA_CIAA_FLG,
PARPORT_DMA_NONE, &pp_amiga_ops);
if (!p)
return -EBUSY;
err = request_irq(IRQ_AMIGA_CIAA_FLG, parport_irq_handler, 0, p->name,
p);
if (err)
goto out_irq;
pr_info("%s: Amiga built-in port using irq\n", p->name);
/* XXX: set operating mode */
parport_announce_port(p);
platform_set_drvdata(pdev, p);
return 0;
out_irq:
parport_put_port(p);
return err;
}
static int __exit amiga_parallel_remove(struct platform_device *pdev)
{
struct parport *port = platform_get_drvdata(pdev);
parport_remove_port(port);
if (port->irq != PARPORT_IRQ_NONE)
free_irq(IRQ_AMIGA_CIAA_FLG, port);
parport_put_port(port);
return 0;
}
static struct platform_driver amiga_parallel_driver = {
.remove = __exit_p(amiga_parallel_remove),
.driver = {
.name = "amiga-parallel",
},
};
module_platform_driver_probe(amiga_parallel_driver, amiga_parallel_probe);
MODULE_AUTHOR("Joerg Dorchain <[email protected]>");
MODULE_DESCRIPTION("Parport Driver for Amiga builtin Port");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:amiga-parallel");
| linux-master | drivers/parport/parport_amiga.c |
// SPDX-License-Identifier: GPL-2.0-only
/* parport_sunbpp.c: Parallel-port routines for SBUS
*
* Author: Derrick J. Brashear <[email protected]>
*
* based on work by:
* Phil Blundell <[email protected]>
* Tim Waugh <[email protected]>
* Jose Renau <[email protected]>
* David Campbell <[email protected]>
* Grant Guenther <[email protected]>
* Eddie C. Dost <[email protected]>
* Stephen Williams ([email protected])
* Gus Baldauf ([email protected])
* Peter Zaitcev
* Tom Dyas
*
* Updated to new SBUS device framework: David S. Miller <[email protected]>
*
*/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/parport.h>
#include <asm/ptrace.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/oplib.h> /* OpenProm Library */
#include <asm/dma.h> /* BPP uses LSI 64854 for DMA */
#include <asm/irq.h>
#include <asm/sunbpp.h>
#undef __SUNBPP_DEBUG
#ifdef __SUNBPP_DEBUG
#define dprintk(x) printk x
#else
#define dprintk(x)
#endif
static void parport_sunbpp_disable_irq(struct parport *p)
{
struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
u32 tmp;
tmp = sbus_readl(&regs->p_csr);
tmp &= ~DMA_INT_ENAB;
sbus_writel(tmp, &regs->p_csr);
}
static void parport_sunbpp_enable_irq(struct parport *p)
{
struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
u32 tmp;
tmp = sbus_readl(&regs->p_csr);
tmp |= DMA_INT_ENAB;
sbus_writel(tmp, &regs->p_csr);
}
static void parport_sunbpp_write_data(struct parport *p, unsigned char d)
{
struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
sbus_writeb(d, &regs->p_dr);
dprintk((KERN_DEBUG "wrote 0x%x\n", d));
}
static unsigned char parport_sunbpp_read_data(struct parport *p)
{
struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
return sbus_readb(&regs->p_dr);
}
static unsigned char status_sunbpp_to_pc(struct parport *p)
{
struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
unsigned char bits = 0;
unsigned char value_tcr = sbus_readb(&regs->p_tcr);
unsigned char value_ir = sbus_readb(&regs->p_ir);
if (!(value_ir & P_IR_ERR))
bits |= PARPORT_STATUS_ERROR;
if (!(value_ir & P_IR_SLCT))
bits |= PARPORT_STATUS_SELECT;
if (!(value_ir & P_IR_PE))
bits |= PARPORT_STATUS_PAPEROUT;
if (value_tcr & P_TCR_ACK)
bits |= PARPORT_STATUS_ACK;
if (!(value_tcr & P_TCR_BUSY))
bits |= PARPORT_STATUS_BUSY;
dprintk((KERN_DEBUG "tcr 0x%x ir 0x%x\n", value_tcr, value_ir));
dprintk((KERN_DEBUG "read status 0x%x\n", bits));
return bits;
}
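/* Note on status_sunbpp_to_pc() above: only the ACK bit is carried over
 * unmodified; every other sampled register bit is inverted before being
 * mapped onto the PC-style PARPORT_STATUS_* flags. */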
static unsigned char control_sunbpp_to_pc(struct parport *p)
{
struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
unsigned char bits = 0;
unsigned char value_tcr = sbus_readb(&regs->p_tcr);
unsigned char value_or = sbus_readb(&regs->p_or);
if (!(value_tcr & P_TCR_DS))
bits |= PARPORT_CONTROL_STROBE;
if (!(value_or & P_OR_AFXN))
bits |= PARPORT_CONTROL_AUTOFD;
if (!(value_or & P_OR_INIT))
bits |= PARPORT_CONTROL_INIT;
if (value_or & P_OR_SLCT_IN)
bits |= PARPORT_CONTROL_SELECT;
dprintk((KERN_DEBUG "tcr 0x%x or 0x%x\n", value_tcr, value_or));
dprintk((KERN_DEBUG "read control 0x%x\n", bits));
return bits;
}
static unsigned char parport_sunbpp_read_control(struct parport *p)
{
return control_sunbpp_to_pc(p);
}
static unsigned char parport_sunbpp_frob_control(struct parport *p,
unsigned char mask,
unsigned char val)
{
struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
unsigned char value_tcr = sbus_readb(&regs->p_tcr);
unsigned char value_or = sbus_readb(&regs->p_or);
dprintk((KERN_DEBUG "frob1: tcr 0x%x or 0x%x\n",
value_tcr, value_or));
if (mask & PARPORT_CONTROL_STROBE) {
if (val & PARPORT_CONTROL_STROBE) {
value_tcr &= ~P_TCR_DS;
} else {
value_tcr |= P_TCR_DS;
}
}
if (mask & PARPORT_CONTROL_AUTOFD) {
if (val & PARPORT_CONTROL_AUTOFD) {
value_or &= ~P_OR_AFXN;
} else {
value_or |= P_OR_AFXN;
}
}
if (mask & PARPORT_CONTROL_INIT) {
if (val & PARPORT_CONTROL_INIT) {
value_or &= ~P_OR_INIT;
} else {
value_or |= P_OR_INIT;
}
}
if (mask & PARPORT_CONTROL_SELECT) {
if (val & PARPORT_CONTROL_SELECT) {
value_or |= P_OR_SLCT_IN;
} else {
value_or &= ~P_OR_SLCT_IN;
}
}
sbus_writeb(value_or, &regs->p_or);
sbus_writeb(value_tcr, &regs->p_tcr);
dprintk((KERN_DEBUG "frob2: tcr 0x%x or 0x%x\n",
value_tcr, value_or));
return parport_sunbpp_read_control(p);
}
static void parport_sunbpp_write_control(struct parport *p, unsigned char d)
{
const unsigned char wm = (PARPORT_CONTROL_STROBE |
PARPORT_CONTROL_AUTOFD |
PARPORT_CONTROL_INIT |
PARPORT_CONTROL_SELECT);
parport_sunbpp_frob_control (p, wm, d & wm);
}
static unsigned char parport_sunbpp_read_status(struct parport *p)
{
return status_sunbpp_to_pc(p);
}
static void parport_sunbpp_data_forward (struct parport *p)
{
struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
unsigned char value_tcr = sbus_readb(&regs->p_tcr);
dprintk((KERN_DEBUG "forward\n"));
value_tcr &= ~P_TCR_DIR;
sbus_writeb(value_tcr, &regs->p_tcr);
}
static void parport_sunbpp_data_reverse (struct parport *p)
{
struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
u8 val = sbus_readb(&regs->p_tcr);
dprintk((KERN_DEBUG "reverse\n"));
val |= P_TCR_DIR;
sbus_writeb(val, &regs->p_tcr);
}
static void parport_sunbpp_init_state(struct pardevice *dev, struct parport_state *s)
{
s->u.pc.ctr = 0xc;
s->u.pc.ecr = 0x0;
}
static void parport_sunbpp_save_state(struct parport *p, struct parport_state *s)
{
s->u.pc.ctr = parport_sunbpp_read_control(p);
}
static void parport_sunbpp_restore_state(struct parport *p, struct parport_state *s)
{
parport_sunbpp_write_control(p, s->u.pc.ctr);
}
static struct parport_operations parport_sunbpp_ops =
{
.write_data = parport_sunbpp_write_data,
.read_data = parport_sunbpp_read_data,
.write_control = parport_sunbpp_write_control,
.read_control = parport_sunbpp_read_control,
.frob_control = parport_sunbpp_frob_control,
.read_status = parport_sunbpp_read_status,
.enable_irq = parport_sunbpp_enable_irq,
.disable_irq = parport_sunbpp_disable_irq,
.data_forward = parport_sunbpp_data_forward,
.data_reverse = parport_sunbpp_data_reverse,
.init_state = parport_sunbpp_init_state,
.save_state = parport_sunbpp_save_state,
.restore_state = parport_sunbpp_restore_state,
.epp_write_data = parport_ieee1284_epp_write_data,
.epp_read_data = parport_ieee1284_epp_read_data,
.epp_write_addr = parport_ieee1284_epp_write_addr,
.epp_read_addr = parport_ieee1284_epp_read_addr,
.ecp_write_data = parport_ieee1284_ecp_write_data,
.ecp_read_data = parport_ieee1284_ecp_read_data,
.ecp_write_addr = parport_ieee1284_ecp_write_addr,
.compat_write_data = parport_ieee1284_write_compat,
.nibble_read_data = parport_ieee1284_read_nibble,
.byte_read_data = parport_ieee1284_read_byte,
.owner = THIS_MODULE,
};
static int bpp_probe(struct platform_device *op)
{
struct parport_operations *ops;
struct bpp_regs __iomem *regs;
int irq, dma, err = 0, size;
unsigned char value_tcr;
void __iomem *base;
struct parport *p;
irq = op->archdata.irqs[0];
base = of_ioremap(&op->resource[0], 0,
resource_size(&op->resource[0]),
"sunbpp");
if (!base)
return -ENODEV;
size = resource_size(&op->resource[0]);
dma = PARPORT_DMA_NONE;
ops = kmemdup(&parport_sunbpp_ops, sizeof(struct parport_operations),
GFP_KERNEL);
if (!ops) {
err = -ENOMEM;
goto out_unmap;
}
dprintk(("register_port\n"));
if (!(p = parport_register_port((unsigned long)base, irq, dma, ops))) {
err = -ENOMEM;
goto out_free_ops;
}
p->size = size;
p->dev = &op->dev;
if ((err = request_irq(p->irq, parport_irq_handler,
IRQF_SHARED, p->name, p)) != 0) {
goto out_put_port;
}
parport_sunbpp_enable_irq(p);
regs = (struct bpp_regs __iomem *)p->base;
value_tcr = sbus_readb(&regs->p_tcr);
value_tcr &= ~P_TCR_DIR;
sbus_writeb(value_tcr, &regs->p_tcr);
pr_info("%s: sunbpp at 0x%lx\n", p->name, p->base);
dev_set_drvdata(&op->dev, p);
parport_announce_port(p);
return 0;
out_put_port:
parport_put_port(p);
out_free_ops:
kfree(ops);
out_unmap:
of_iounmap(&op->resource[0], base, size);
return err;
}
static int bpp_remove(struct platform_device *op)
{
struct parport *p = dev_get_drvdata(&op->dev);
struct parport_operations *ops = p->ops;
parport_remove_port(p);
if (p->irq != PARPORT_IRQ_NONE) {
parport_sunbpp_disable_irq(p);
free_irq(p->irq, p);
}
of_iounmap(&op->resource[0], (void __iomem *) p->base, p->size);
parport_put_port(p);
kfree(ops);
dev_set_drvdata(&op->dev, NULL);
return 0;
}
static const struct of_device_id bpp_match[] = {
{
.name = "SUNW,bpp",
},
{},
};
MODULE_DEVICE_TABLE(of, bpp_match);
static struct platform_driver bpp_sbus_driver = {
.driver = {
.name = "bpp",
.of_match_table = bpp_match,
},
.probe = bpp_probe,
.remove = bpp_remove,
};
module_platform_driver(bpp_sbus_driver);
MODULE_AUTHOR("Derrick J Brashear");
MODULE_DESCRIPTION("Parport Driver for Sparc bidirectional Port");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
| linux-master | drivers/parport/parport_sunbpp.c |
// SPDX-License-Identifier: GPL-2.0
/* Sysctl interface for parport devices.
*
* Authors: David Campbell
* Tim Waugh <[email protected]>
* Philip Blundell <[email protected]>
* Andrea Arcangeli
* Riccardo Facchetti <[email protected]>
*
* based on work by Grant Guenther <[email protected]>
* and Philip Blundell
*
* Cleaned up include files - Russell King <[email protected]>
*/
#include <linux/string.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/parport.h>
#include <linux/ctype.h>
#include <linux/sysctl.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
#define PARPORT_MIN_TIMESLICE_VALUE 1ul
#define PARPORT_MAX_TIMESLICE_VALUE ((unsigned long) HZ)
#define PARPORT_MIN_SPINTIME_VALUE 1
#define PARPORT_MAX_SPINTIME_VALUE 1000
/*
* PARPORT_BASE_* is the size of the known parts of the sysctl path
 * in dev/parport/%s/devices/%s. "dev/parport/" (12), "/devices/" (9)
 * and the terminating null char (1).
*/
#define PARPORT_BASE_PATH_SIZE 13
#define PARPORT_BASE_DEVICES_PATH_SIZE 22
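/*
 * Worked out from the comment above: "dev/parport/" is 12 characters, so
 * 12 + 1 (null char) == 13 == PARPORT_BASE_PATH_SIZE, and
 * 12 + 9 ("/devices/") + 1 (null char) == 22 == PARPORT_BASE_DEVICES_PATH_SIZE.
 */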
static int do_active_device(struct ctl_table *table, int write,
void *result, size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[256];
struct pardevice *dev;
int len = 0;
if (write) /* can't happen anyway */
return -EACCES;
if (*ppos) {
*lenp = 0;
return 0;
}
for (dev = port->devices; dev ; dev = dev->next) {
if(dev == port->cad) {
len += sprintf(buffer, "%s\n", dev->name);
}
}
if(!len) {
len += sprintf(buffer, "%s\n", "none");
}
if (len > *lenp)
len = *lenp;
else
*lenp = len;
*ppos += len;
memcpy(result, buffer, len);
return 0;
}
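/*
 * The read-only handlers below follow the same pattern as do_active_device()
 * above: format the answer into a small stack buffer, clamp the length to
 * *lenp, advance *ppos and copy the result into the caller's buffer.
 */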
#ifdef CONFIG_PARPORT_1284
static int do_autoprobe(struct ctl_table *table, int write,
void *result, size_t *lenp, loff_t *ppos)
{
struct parport_device_info *info = table->extra2;
const char *str;
char buffer[256];
int len = 0;
if (write) /* permissions stop this */
return -EACCES;
if (*ppos) {
*lenp = 0;
return 0;
}
if ((str = info->class_name) != NULL)
len += sprintf (buffer + len, "CLASS:%s;\n", str);
if ((str = info->model) != NULL)
len += sprintf (buffer + len, "MODEL:%s;\n", str);
if ((str = info->mfr) != NULL)
len += sprintf (buffer + len, "MANUFACTURER:%s;\n", str);
if ((str = info->description) != NULL)
len += sprintf (buffer + len, "DESCRIPTION:%s;\n", str);
if ((str = info->cmdset) != NULL)
len += sprintf (buffer + len, "COMMAND SET:%s;\n", str);
if (len > *lenp)
len = *lenp;
else
*lenp = len;
*ppos += len;
memcpy(result, buffer, len);
return 0;
}
#endif /* IEEE1284.3 support. */
static int do_hardware_base_addr(struct ctl_table *table, int write,
void *result, size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[20];
int len = 0;
if (*ppos) {
*lenp = 0;
return 0;
}
if (write) /* permissions prevent this anyway */
return -EACCES;
len += sprintf (buffer, "%lu\t%lu\n", port->base, port->base_hi);
if (len > *lenp)
len = *lenp;
else
*lenp = len;
*ppos += len;
memcpy(result, buffer, len);
return 0;
}
static int do_hardware_irq(struct ctl_table *table, int write,
void *result, size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[20];
int len = 0;
if (*ppos) {
*lenp = 0;
return 0;
}
if (write) /* permissions prevent this anyway */
return -EACCES;
len += sprintf (buffer, "%d\n", port->irq);
if (len > *lenp)
len = *lenp;
else
*lenp = len;
*ppos += len;
memcpy(result, buffer, len);
return 0;
}
static int do_hardware_dma(struct ctl_table *table, int write,
void *result, size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[20];
int len = 0;
if (*ppos) {
*lenp = 0;
return 0;
}
if (write) /* permissions prevent this anyway */
return -EACCES;
len += sprintf (buffer, "%d\n", port->dma);
if (len > *lenp)
len = *lenp;
else
*lenp = len;
*ppos += len;
memcpy(result, buffer, len);
return 0;
}
static int do_hardware_modes(struct ctl_table *table, int write,
void *result, size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[40];
int len = 0;
if (*ppos) {
*lenp = 0;
return 0;
}
if (write) /* permissions prevent this anyway */
return -EACCES;
{
#define printmode(x) \
do { \
if (port->modes & PARPORT_MODE_##x) \
len += sprintf(buffer + len, "%s%s", f++ ? "," : "", #x); \
} while (0)
int f = 0;
printmode(PCSPP);
printmode(TRISTATE);
printmode(COMPAT);
printmode(EPP);
printmode(ECP);
printmode(DMA);
#undef printmode
}
buffer[len++] = '\n';
if (len > *lenp)
len = *lenp;
else
*lenp = len;
*ppos += len;
memcpy(result, buffer, len);
return 0;
}
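/*
 * Illustration only: a port advertising every mode listed above would report
 * "PCSPP,TRISTATE,COMPAT,EPP,ECP,DMA\n" through this handler.
 */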
static const unsigned long parport_min_timeslice_value =
PARPORT_MIN_TIMESLICE_VALUE;
static const unsigned long parport_max_timeslice_value =
PARPORT_MAX_TIMESLICE_VALUE;
static const int parport_min_spintime_value =
PARPORT_MIN_SPINTIME_VALUE;
static const int parport_max_spintime_value =
PARPORT_MAX_SPINTIME_VALUE;
struct parport_sysctl_table {
struct ctl_table_header *port_header;
struct ctl_table_header *devices_header;
struct ctl_table vars[12];
struct ctl_table device_dir[2];
};
static const struct parport_sysctl_table parport_sysctl_template = {
.port_header = NULL,
.devices_header = NULL,
{
{
.procname = "spintime",
.data = NULL,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = (void*) &parport_min_spintime_value,
.extra2 = (void*) &parport_max_spintime_value
},
{
.procname = "base-addr",
.data = NULL,
.maxlen = 0,
.mode = 0444,
.proc_handler = do_hardware_base_addr
},
{
.procname = "irq",
.data = NULL,
.maxlen = 0,
.mode = 0444,
.proc_handler = do_hardware_irq
},
{
.procname = "dma",
.data = NULL,
.maxlen = 0,
.mode = 0444,
.proc_handler = do_hardware_dma
},
{
.procname = "modes",
.data = NULL,
.maxlen = 0,
.mode = 0444,
.proc_handler = do_hardware_modes
},
#ifdef CONFIG_PARPORT_1284
{
.procname = "autoprobe",
.data = NULL,
.maxlen = 0,
.mode = 0444,
.proc_handler = do_autoprobe
},
{
.procname = "autoprobe0",
.data = NULL,
.maxlen = 0,
.mode = 0444,
.proc_handler = do_autoprobe
},
{
.procname = "autoprobe1",
.data = NULL,
.maxlen = 0,
.mode = 0444,
.proc_handler = do_autoprobe
},
{
.procname = "autoprobe2",
.data = NULL,
.maxlen = 0,
.mode = 0444,
.proc_handler = do_autoprobe
},
{
.procname = "autoprobe3",
.data = NULL,
.maxlen = 0,
.mode = 0444,
.proc_handler = do_autoprobe
},
#endif /* IEEE 1284 support */
{}
},
{
{
.procname = "active",
.data = NULL,
.maxlen = 0,
.mode = 0444,
.proc_handler = do_active_device
},
{}
},
};
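/*
 * Once registered by parport_proc_register() below, the template above is
 * exposed under /proc/sys, e.g. (illustration, actual names depend on the
 * port): /proc/sys/dev/parport/parport0/spintime,
 * /proc/sys/dev/parport/parport0/base-addr and
 * /proc/sys/dev/parport/parport0/devices/active.
 */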
struct parport_device_sysctl_table
{
struct ctl_table_header *sysctl_header;
struct ctl_table vars[2];
struct ctl_table device_dir[2];
struct ctl_table devices_root_dir[2];
struct ctl_table port_dir[2];
struct ctl_table parport_dir[2];
struct ctl_table dev_dir[2];
};
static const struct parport_device_sysctl_table
parport_device_sysctl_template = {
.sysctl_header = NULL,
{
{
.procname = "timeslice",
.data = NULL,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_ms_jiffies_minmax,
.extra1 = (void*) &parport_min_timeslice_value,
.extra2 = (void*) &parport_max_timeslice_value
},
{}
},
{
{
.procname = NULL,
.data = NULL,
.maxlen = 0,
.mode = 0555,
},
{}
}
};
struct parport_default_sysctl_table
{
struct ctl_table_header *sysctl_header;
struct ctl_table vars[3];
struct ctl_table default_dir[2];
struct ctl_table parport_dir[2];
struct ctl_table dev_dir[2];
};
static struct parport_default_sysctl_table
parport_default_sysctl_table = {
.sysctl_header = NULL,
{
{
.procname = "timeslice",
.data = &parport_default_timeslice,
.maxlen = sizeof(parport_default_timeslice),
.mode = 0644,
.proc_handler = proc_doulongvec_ms_jiffies_minmax,
.extra1 = (void*) &parport_min_timeslice_value,
.extra2 = (void*) &parport_max_timeslice_value
},
{
.procname = "spintime",
.data = &parport_default_spintime,
.maxlen = sizeof(parport_default_spintime),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = (void*) &parport_min_spintime_value,
.extra2 = (void*) &parport_max_spintime_value
},
{}
}
};
int parport_proc_register(struct parport *port)
{
struct parport_sysctl_table *t;
char *tmp_dir_path;
size_t tmp_path_len, port_name_len;
int bytes_written, i, err = 0;
t = kmemdup(&parport_sysctl_template, sizeof(*t), GFP_KERNEL);
if (t == NULL)
return -ENOMEM;
t->device_dir[0].extra1 = port;
t->vars[0].data = &port->spintime;
for (i = 0; i < 5; i++) {
t->vars[i].extra1 = port;
t->vars[5 + i].extra2 = &port->probe_info[i];
}
port_name_len = strnlen(port->name, PARPORT_NAME_MAX_LEN);
/*
* Allocate a buffer for two paths: dev/parport/PORT and dev/parport/PORT/devices.
 * We size it for the second path, which is also long enough for the first.
*/
tmp_path_len = PARPORT_BASE_DEVICES_PATH_SIZE + port_name_len;
tmp_dir_path = kzalloc(tmp_path_len, GFP_KERNEL);
if (!tmp_dir_path) {
err = -ENOMEM;
goto exit_free_t;
}
bytes_written = snprintf(tmp_dir_path, tmp_path_len,
"dev/parport/%s/devices", port->name);
if (tmp_path_len <= bytes_written) {
err = -ENOENT;
goto exit_free_tmp_dir_path;
}
t->devices_header = register_sysctl(tmp_dir_path, t->device_dir);
if (t->devices_header == NULL) {
err = -ENOENT;
goto exit_free_tmp_dir_path;
}
tmp_path_len = PARPORT_BASE_PATH_SIZE + port_name_len;
bytes_written = snprintf(tmp_dir_path, tmp_path_len,
"dev/parport/%s", port->name);
if (tmp_path_len <= bytes_written) {
err = -ENOENT;
goto unregister_devices_h;
}
t->port_header = register_sysctl(tmp_dir_path, t->vars);
if (t->port_header == NULL) {
err = -ENOENT;
goto unregister_devices_h;
}
port->sysctl_table = t;
kfree(tmp_dir_path);
return 0;
unregister_devices_h:
unregister_sysctl_table(t->devices_header);
exit_free_tmp_dir_path:
kfree(tmp_dir_path);
exit_free_t:
kfree(t);
return err;
}
int parport_proc_unregister(struct parport *port)
{
if (port->sysctl_table) {
struct parport_sysctl_table *t = port->sysctl_table;
port->sysctl_table = NULL;
unregister_sysctl_table(t->devices_header);
unregister_sysctl_table(t->port_header);
kfree(t);
}
return 0;
}
int parport_device_proc_register(struct pardevice *device)
{
int bytes_written, err = 0;
struct parport_device_sysctl_table *t;
struct parport * port = device->port;
size_t port_name_len, device_name_len, tmp_dir_path_len;
char *tmp_dir_path;
t = kmemdup(&parport_device_sysctl_template, sizeof(*t), GFP_KERNEL);
if (t == NULL)
return -ENOMEM;
port_name_len = strnlen(port->name, PARPORT_NAME_MAX_LEN);
device_name_len = strnlen(device->name, PATH_MAX);
/* Allocate a buffer for the path dev/parport/PORT/devices/DEVICE. */
tmp_dir_path_len = PARPORT_BASE_DEVICES_PATH_SIZE + port_name_len + device_name_len;
tmp_dir_path = kzalloc(tmp_dir_path_len, GFP_KERNEL);
if (!tmp_dir_path) {
err = -ENOMEM;
goto exit_free_t;
}
bytes_written = snprintf(tmp_dir_path, tmp_dir_path_len, "dev/parport/%s/devices/%s",
port->name, device->name);
if (tmp_dir_path_len <= bytes_written) {
err = -ENOENT;
goto exit_free_path;
}
t->vars[0].data = &device->timeslice;
t->sysctl_header = register_sysctl(tmp_dir_path, t->vars);
if (t->sysctl_header == NULL) {
kfree(t);
t = NULL;
}
device->sysctl_table = t;
kfree(tmp_dir_path);
return 0;
exit_free_path:
kfree(tmp_dir_path);
exit_free_t:
kfree(t);
return err;
}
int parport_device_proc_unregister(struct pardevice *device)
{
if (device->sysctl_table) {
struct parport_device_sysctl_table *t = device->sysctl_table;
device->sysctl_table = NULL;
unregister_sysctl_table(t->sysctl_header);
kfree(t);
}
return 0;
}
static int __init parport_default_proc_register(void)
{
int ret;
parport_default_sysctl_table.sysctl_header =
register_sysctl("dev/parport/default", parport_default_sysctl_table.vars);
if (!parport_default_sysctl_table.sysctl_header)
return -ENOMEM;
ret = parport_bus_init();
if (ret) {
unregister_sysctl_table(parport_default_sysctl_table.
sysctl_header);
return ret;
}
return 0;
}
static void __exit parport_default_proc_unregister(void)
{
if (parport_default_sysctl_table.sysctl_header) {
unregister_sysctl_table(parport_default_sysctl_table.
sysctl_header);
parport_default_sysctl_table.sysctl_header = NULL;
}
parport_bus_exit();
}
#else /* no sysctl or no procfs*/
int parport_proc_register(struct parport *pp)
{
return 0;
}
int parport_proc_unregister(struct parport *pp)
{
return 0;
}
int parport_device_proc_register(struct pardevice *device)
{
return 0;
}
int parport_device_proc_unregister(struct pardevice *device)
{
return 0;
}
static int __init parport_default_proc_register (void)
{
return parport_bus_init();
}
static void __exit parport_default_proc_unregister (void)
{
parport_bus_exit();
}
#endif
subsys_initcall(parport_default_proc_register)
module_exit(parport_default_proc_unregister)
| linux-master | drivers/parport/procfs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Low-level parallel port routines for built-in port on SGI IP32
*
* Author: Arnaud Giersch <[email protected]>
*
* Based on parport_pc.c by
* Phil Blundell, Tim Waugh, Jose Renau, David Campbell,
* Andrea Arcangeli, et al.
*
* Thanks to Ilya A. Volynets-Evenbakh for his help.
*
* Copyright (C) 2005, 2006 Arnaud Giersch.
*/
/* Current status:
*
* Basic SPP and PS2 modes are supported.
* Support for parallel port IRQ is present.
* Hardware SPP (a.k.a. compatibility), EPP, and ECP modes are
* supported.
* SPP/ECP FIFO can be driven in PIO or DMA mode. PIO mode can work with
* or without interrupt support.
*
* Hardware ECP mode is not fully implemented (ecp_read_data and
* ecp_write_addr are actually missing).
*
* To do:
*
* Fully implement ECP mode.
 * EPP and ECP modes need to be tested. I currently do not own any
 * peripheral supporting these extended modes, and cannot test them.
* If DMA mode works well, decide if support for PIO FIFO modes should be
* dropped.
* Use the io{read,write} family functions when they become available in
* the linux-mips.org tree. Note: the MIPS specific functions readsb()
* and writesb() are to be translated by ioread8_rep() and iowrite8_rep()
* respectively.
*/
/* The built-in parallel port on the SGI O2 workstation (a.k.a. IP32) is an
 * IEEE 1284 parallel port driven by a Texas Instruments TL16PIR552PH chip[1].
* This chip supports SPP, bidirectional, EPP and ECP modes. It has a 16 byte
* FIFO buffer and supports DMA transfers.
*
* [1] http://focus.ti.com/docs/prod/folders/print/tl16pir552.html
*
* Theoretically, we could simply use the parport_pc module. It is however
* not so simple. The parport_pc code assumes that the parallel port
* registers are port-mapped. On the O2, they are memory-mapped.
* Furthermore, each register is replicated on 256 consecutive addresses (as
* it is for the built-in serial ports on the same chip).
*/
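/* Illustration of the layout described above (an assumption about the
 * mapping, not code taken from this driver): with every register replicated
 * over 256 consecutive addresses, successive registers sit 256 bytes apart,
 * so the register block has to be addressed with a byte stride of 256 rather
 * than the stride of 1 used by a port-mapped PC-style port.
 */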
/*--- Some configuration defines ---------------------------------------*/
/* DEBUG_PARPORT_IP32
* 0 disable debug
* 1 standard level: pr_debug1 is enabled
* 2 parport_ip32_dump_state is enabled
* >=3 verbose level: pr_debug is enabled
*/
#if !defined(DEBUG_PARPORT_IP32)
# define DEBUG_PARPORT_IP32 0 /* 0 (disabled) for production */
#endif
/*----------------------------------------------------------------------*/
/* Setup DEBUG macros. This is done before any includes, just in case we
* activate pr_debug() with DEBUG_PARPORT_IP32 >= 3.
*/
#if DEBUG_PARPORT_IP32 == 1
# warning DEBUG_PARPORT_IP32 == 1
#elif DEBUG_PARPORT_IP32 == 2
# warning DEBUG_PARPORT_IP32 == 2
#elif DEBUG_PARPORT_IP32 >= 3
# warning DEBUG_PARPORT_IP32 >= 3
# if !defined(DEBUG)
# define DEBUG /* enable pr_debug() in kernel.h */
# endif
#endif
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/parport.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <asm/io.h>
#include <asm/ip32/ip32_ints.h>
#include <asm/ip32/mace.h>
/*--- Global variables -------------------------------------------------*/
/* Verbose probing on by default for debugging. */
#if DEBUG_PARPORT_IP32 >= 1
# define DEFAULT_VERBOSE_PROBING 1
#else
# define DEFAULT_VERBOSE_PROBING 0
#endif
/* Default prefix for printk */
#define PPIP32 "parport_ip32: "
/*
* These are the module parameters:
* @features: bit mask of features to enable/disable
* (all enabled by default)
* @verbose_probing: log chit-chat during initialization
*/
#define PARPORT_IP32_ENABLE_IRQ (1U << 0)
#define PARPORT_IP32_ENABLE_DMA (1U << 1)
#define PARPORT_IP32_ENABLE_SPP (1U << 2)
#define PARPORT_IP32_ENABLE_EPP (1U << 3)
#define PARPORT_IP32_ENABLE_ECP (1U << 4)
static unsigned int features = ~0U;
static bool verbose_probing = DEFAULT_VERBOSE_PROBING;
/* We do not support more than one port. */
static struct parport *this_port;
/* Timing constants for FIFO modes. */
#define FIFO_NFAULT_TIMEOUT 100 /* milliseconds */
#define FIFO_POLLING_INTERVAL 50 /* microseconds */
/*--- I/O register definitions -----------------------------------------*/
/**
* struct parport_ip32_regs - virtual addresses of parallel port registers
* @data: Data Register
* @dsr: Device Status Register
* @dcr: Device Control Register
* @eppAddr: EPP Address Register
* @eppData0: EPP Data Register 0
* @eppData1: EPP Data Register 1
* @eppData2: EPP Data Register 2
* @eppData3: EPP Data Register 3
* @ecpAFifo: ECP Address FIFO
* @fifo: General FIFO register. The same address is used for:
* - cFifo, the Parallel Port DATA FIFO
* - ecpDFifo, the ECP Data FIFO
* - tFifo, the ECP Test FIFO
* @cnfgA: Configuration Register A
* @cnfgB: Configuration Register B
* @ecr: Extended Control Register
*/
struct parport_ip32_regs {
void __iomem *data;
void __iomem *dsr;
void __iomem *dcr;
void __iomem *eppAddr;
void __iomem *eppData0;
void __iomem *eppData1;
void __iomem *eppData2;
void __iomem *eppData3;
void __iomem *ecpAFifo;
void __iomem *fifo;
void __iomem *cnfgA;
void __iomem *cnfgB;
void __iomem *ecr;
};
/* Device Status Register */
#define DSR_nBUSY (1U << 7) /* PARPORT_STATUS_BUSY */
#define DSR_nACK (1U << 6) /* PARPORT_STATUS_ACK */
#define DSR_PERROR (1U << 5) /* PARPORT_STATUS_PAPEROUT */
#define DSR_SELECT (1U << 4) /* PARPORT_STATUS_SELECT */
#define DSR_nFAULT (1U << 3) /* PARPORT_STATUS_ERROR */
#define DSR_nPRINT (1U << 2) /* specific to TL16PIR552 */
/* #define DSR_reserved (1U << 1) */
#define DSR_TIMEOUT (1U << 0) /* EPP timeout */
/* Device Control Register */
/* #define DCR_reserved (1U << 7) | (1U << 6) */
#define DCR_DIR (1U << 5) /* direction */
#define DCR_IRQ (1U << 4) /* interrupt on nAck */
#define DCR_SELECT (1U << 3) /* PARPORT_CONTROL_SELECT */
#define DCR_nINIT (1U << 2) /* PARPORT_CONTROL_INIT */
#define DCR_AUTOFD (1U << 1) /* PARPORT_CONTROL_AUTOFD */
#define DCR_STROBE (1U << 0) /* PARPORT_CONTROL_STROBE */
/* ECP Configuration Register A */
#define CNFGA_IRQ (1U << 7)
#define CNFGA_ID_MASK ((1U << 6) | (1U << 5) | (1U << 4))
#define CNFGA_ID_SHIFT 4
#define CNFGA_ID_16 (00U << CNFGA_ID_SHIFT)
#define CNFGA_ID_8 (01U << CNFGA_ID_SHIFT)
#define CNFGA_ID_32 (02U << CNFGA_ID_SHIFT)
/* #define CNFGA_reserved (1U << 3) */
#define CNFGA_nBYTEINTRANS (1U << 2)
#define CNFGA_PWORDLEFT ((1U << 1) | (1U << 0))
/* ECP Configuration Register B */
#define CNFGB_COMPRESS (1U << 7)
#define CNFGB_INTRVAL (1U << 6)
#define CNFGB_IRQ_MASK ((1U << 5) | (1U << 4) | (1U << 3))
#define CNFGB_IRQ_SHIFT 3
#define CNFGB_DMA_MASK ((1U << 2) | (1U << 1) | (1U << 0))
#define CNFGB_DMA_SHIFT 0
/* Extended Control Register */
#define ECR_MODE_MASK ((1U << 7) | (1U << 6) | (1U << 5))
#define ECR_MODE_SHIFT 5
#define ECR_MODE_SPP (00U << ECR_MODE_SHIFT)
#define ECR_MODE_PS2 (01U << ECR_MODE_SHIFT)
#define ECR_MODE_PPF (02U << ECR_MODE_SHIFT)
#define ECR_MODE_ECP (03U << ECR_MODE_SHIFT)
#define ECR_MODE_EPP (04U << ECR_MODE_SHIFT)
/* #define ECR_MODE_reserved (05U << ECR_MODE_SHIFT) */
#define ECR_MODE_TST (06U << ECR_MODE_SHIFT)
#define ECR_MODE_CFG (07U << ECR_MODE_SHIFT)
#define ECR_nERRINTR (1U << 4)
#define ECR_DMAEN (1U << 3)
#define ECR_SERVINTR (1U << 2)
#define ECR_F_FULL (1U << 1)
#define ECR_F_EMPTY (1U << 0)
/*--- Private data -----------------------------------------------------*/
/**
* enum parport_ip32_irq_mode - operation mode of interrupt handler
* @PARPORT_IP32_IRQ_FWD: forward interrupt to the upper parport layer
* @PARPORT_IP32_IRQ_HERE: interrupt is handled locally
*/
enum parport_ip32_irq_mode { PARPORT_IP32_IRQ_FWD, PARPORT_IP32_IRQ_HERE };
/**
* struct parport_ip32_private - private stuff for &struct parport
* @regs: register addresses
* @dcr_cache: cached contents of DCR
* @dcr_writable: bit mask of writable DCR bits
* @pword: number of bytes per PWord
* @fifo_depth: number of PWords that FIFO will hold
* @readIntrThreshold: minimum number of PWords we can read
* if we get an interrupt
* @writeIntrThreshold: minimum number of PWords we can write
* if we get an interrupt
* @irq_mode: operation mode of interrupt handler for this port
* @irq_complete: mutex used to wait for an interrupt to occur
*/
struct parport_ip32_private {
struct parport_ip32_regs regs;
unsigned int dcr_cache;
unsigned int dcr_writable;
unsigned int pword;
unsigned int fifo_depth;
unsigned int readIntrThreshold;
unsigned int writeIntrThreshold;
enum parport_ip32_irq_mode irq_mode;
struct completion irq_complete;
};
/*--- Debug code -------------------------------------------------------*/
/*
* pr_debug1 - print debug messages
*
* This is like pr_debug(), but is defined for %DEBUG_PARPORT_IP32 >= 1
*/
#if DEBUG_PARPORT_IP32 >= 1
# define pr_debug1(...) printk(KERN_DEBUG __VA_ARGS__)
#else /* DEBUG_PARPORT_IP32 < 1 */
# define pr_debug1(...) do { } while (0)
#endif
/*
* pr_trace, pr_trace1 - trace function calls
* @p: pointer to &struct parport
* @fmt: printk format string
* @...: parameters for format string
*
 * Macros used to trace function calls. The given string is formatted after
 * the function name. pr_trace() uses pr_debug(), and pr_trace1() uses
* pr_debug1(). __pr_trace() is the low-level macro and is not to be used
* directly.
*/
#define __pr_trace(pr, p, fmt, ...) \
pr("%s: %s" fmt "\n", \
({ const struct parport *__p = (p); \
__p ? __p->name : "parport_ip32"; }), \
__func__ , ##__VA_ARGS__)
#define pr_trace(p, fmt, ...) __pr_trace(pr_debug, p, fmt , ##__VA_ARGS__)
#define pr_trace1(p, fmt, ...) __pr_trace(pr_debug1, p, fmt , ##__VA_ARGS__)
/*
* __pr_probe, pr_probe - print message if @verbose_probing is true
* @p: pointer to &struct parport
* @fmt: printk format string
* @...: parameters for format string
*
* For new lines, use pr_probe(). Use __pr_probe() for continued lines.
*/
#define __pr_probe(...) \
do { if (verbose_probing) printk(__VA_ARGS__); } while (0)
#define pr_probe(p, fmt, ...) \
__pr_probe(KERN_INFO PPIP32 "0x%lx: " fmt, (p)->base , ##__VA_ARGS__)
/*
* parport_ip32_dump_state - print register status of parport
* @p: pointer to &struct parport
* @str: string to add in message
* @show_ecp_config: shall we dump ECP configuration registers too?
*
 * This function is only here for debugging purposes, and should be used with
 * care. Reading the parallel port registers may have undesired side effects.
 * In particular, if @show_ecp_config is true, the parallel port is reset.
* This function is only defined if %DEBUG_PARPORT_IP32 >= 2.
*/
#if DEBUG_PARPORT_IP32 >= 2
static void parport_ip32_dump_state(struct parport *p, char *str,
unsigned int show_ecp_config)
{
struct parport_ip32_private * const priv = p->physport->private_data;
unsigned int i;
printk(KERN_DEBUG PPIP32 "%s: state (%s):\n", p->name, str);
{
static const char ecr_modes[8][4] = {"SPP", "PS2", "PPF",
"ECP", "EPP", "???",
"TST", "CFG"};
unsigned int ecr = readb(priv->regs.ecr);
printk(KERN_DEBUG PPIP32 " ecr=0x%02x", ecr);
pr_cont(" %s",
ecr_modes[(ecr & ECR_MODE_MASK) >> ECR_MODE_SHIFT]);
if (ecr & ECR_nERRINTR)
pr_cont(",nErrIntrEn");
if (ecr & ECR_DMAEN)
pr_cont(",dmaEn");
if (ecr & ECR_SERVINTR)
pr_cont(",serviceIntr");
if (ecr & ECR_F_FULL)
pr_cont(",f_full");
if (ecr & ECR_F_EMPTY)
pr_cont(",f_empty");
pr_cont("\n");
}
if (show_ecp_config) {
unsigned int oecr, cnfgA, cnfgB;
oecr = readb(priv->regs.ecr);
writeb(ECR_MODE_PS2, priv->regs.ecr);
writeb(ECR_MODE_CFG, priv->regs.ecr);
cnfgA = readb(priv->regs.cnfgA);
cnfgB = readb(priv->regs.cnfgB);
writeb(ECR_MODE_PS2, priv->regs.ecr);
writeb(oecr, priv->regs.ecr);
printk(KERN_DEBUG PPIP32 " cnfgA=0x%02x", cnfgA);
pr_cont(" ISA-%s", (cnfgA & CNFGA_IRQ) ? "Level" : "Pulses");
switch (cnfgA & CNFGA_ID_MASK) {
case CNFGA_ID_8:
pr_cont(",8 bits");
break;
case CNFGA_ID_16:
pr_cont(",16 bits");
break;
case CNFGA_ID_32:
pr_cont(",32 bits");
break;
default:
pr_cont(",unknown ID");
break;
}
if (!(cnfgA & CNFGA_nBYTEINTRANS))
pr_cont(",ByteInTrans");
if ((cnfgA & CNFGA_ID_MASK) != CNFGA_ID_8)
pr_cont(",%d byte%s left",
cnfgA & CNFGA_PWORDLEFT,
((cnfgA & CNFGA_PWORDLEFT) > 1) ? "s" : "");
pr_cont("\n");
printk(KERN_DEBUG PPIP32 " cnfgB=0x%02x", cnfgB);
pr_cont(" irq=%u,dma=%u",
(cnfgB & CNFGB_IRQ_MASK) >> CNFGB_IRQ_SHIFT,
(cnfgB & CNFGB_DMA_MASK) >> CNFGB_DMA_SHIFT);
pr_cont(",intrValue=%d", !!(cnfgB & CNFGB_INTRVAL));
if (cnfgB & CNFGB_COMPRESS)
pr_cont(",compress");
pr_cont("\n");
}
for (i = 0; i < 2; i++) {
unsigned int dcr = i ? priv->dcr_cache : readb(priv->regs.dcr);
printk(KERN_DEBUG PPIP32 " dcr(%s)=0x%02x",
i ? "soft" : "hard", dcr);
pr_cont(" %s", (dcr & DCR_DIR) ? "rev" : "fwd");
if (dcr & DCR_IRQ)
pr_cont(",ackIntEn");
if (!(dcr & DCR_SELECT))
pr_cont(",nSelectIn");
if (dcr & DCR_nINIT)
pr_cont(",nInit");
if (!(dcr & DCR_AUTOFD))
pr_cont(",nAutoFD");
if (!(dcr & DCR_STROBE))
pr_cont(",nStrobe");
pr_cont("\n");
}
#define sep (f++ ? ',' : ' ')
{
unsigned int f = 0;
unsigned int dsr = readb(priv->regs.dsr);
printk(KERN_DEBUG PPIP32 " dsr=0x%02x", dsr);
if (!(dsr & DSR_nBUSY))
pr_cont("%cBusy", sep);
if (dsr & DSR_nACK)
pr_cont("%cnAck", sep);
if (dsr & DSR_PERROR)
pr_cont("%cPError", sep);
if (dsr & DSR_SELECT)
pr_cont("%cSelect", sep);
if (dsr & DSR_nFAULT)
pr_cont("%cnFault", sep);
if (!(dsr & DSR_nPRINT))
pr_cont("%c(Print)", sep);
if (dsr & DSR_TIMEOUT)
pr_cont("%cTimeout", sep);
pr_cont("\n");
}
#undef sep
}
#else /* DEBUG_PARPORT_IP32 < 2 */
#define parport_ip32_dump_state(...) do { } while (0)
#endif
/*
* CHECK_EXTRA_BITS - track and log extra bits
* @p: pointer to &struct parport
* @b: byte to inspect
* @m: bit mask of authorized bits
*
* This is used to track and log extra bits that should not be there in
* parport_ip32_write_control() and parport_ip32_frob_control(). It is only
* defined if %DEBUG_PARPORT_IP32 >= 1.
*/
#if DEBUG_PARPORT_IP32 >= 1
#define CHECK_EXTRA_BITS(p, b, m) \
do { \
unsigned int __b = (b), __m = (m); \
if (__b & ~__m) \
pr_debug1(PPIP32 "%s: extra bits in %s(%s): " \
"0x%02x/0x%02x\n", \
(p)->name, __func__, #b, __b, __m); \
} while (0)
#else /* DEBUG_PARPORT_IP32 < 1 */
#define CHECK_EXTRA_BITS(...) do { } while (0)
#endif
/*--- IP32 parallel port DMA operations --------------------------------*/
/**
* struct parport_ip32_dma_data - private data needed for DMA operation
* @dir: DMA direction (from or to device)
* @buf: buffer physical address
* @len: buffer length
* @next: address of next bytes to DMA transfer
* @left: number of bytes remaining
* @ctx: next context to write (0: context_a; 1: context_b)
* @irq_on: are the DMA IRQs currently enabled?
* @lock: spinlock to protect access to the structure
*/
struct parport_ip32_dma_data {
enum dma_data_direction dir;
dma_addr_t buf;
dma_addr_t next;
size_t len;
size_t left;
unsigned int ctx;
unsigned int irq_on;
spinlock_t lock;
};
static struct parport_ip32_dma_data parport_ip32_dma;
/**
* parport_ip32_dma_setup_context - setup next DMA context
* @limit: maximum data size for the context
*
* The alignment constraints must be verified in caller function, and the
* parameter @limit must be set accordingly.
*/
static void parport_ip32_dma_setup_context(unsigned int limit)
{
unsigned long flags;
spin_lock_irqsave(&parport_ip32_dma.lock, flags);
if (parport_ip32_dma.left > 0) {
/* Note: ctxreg is "volatile" here only because
* mace->perif.ctrl.parport.context_a and context_b are
* "volatile". */
volatile u64 __iomem *ctxreg = (parport_ip32_dma.ctx == 0) ?
&mace->perif.ctrl.parport.context_a :
&mace->perif.ctrl.parport.context_b;
u64 count;
u64 ctxval;
if (parport_ip32_dma.left <= limit) {
count = parport_ip32_dma.left;
ctxval = MACEPAR_CONTEXT_LASTFLAG;
} else {
count = limit;
ctxval = 0;
}
pr_trace(NULL,
"(%u): 0x%04x:0x%04x, %u -> %u%s",
limit,
(unsigned int)parport_ip32_dma.buf,
(unsigned int)parport_ip32_dma.next,
(unsigned int)count,
parport_ip32_dma.ctx, ctxval ? "*" : "");
ctxval |= parport_ip32_dma.next &
MACEPAR_CONTEXT_BASEADDR_MASK;
ctxval |= ((count - 1) << MACEPAR_CONTEXT_DATALEN_SHIFT) &
MACEPAR_CONTEXT_DATALEN_MASK;
writeq(ctxval, ctxreg);
parport_ip32_dma.next += count;
parport_ip32_dma.left -= count;
parport_ip32_dma.ctx ^= 1U;
}
/* If there is nothing more to send, disable IRQs to avoid facing an
 * IRQ storm which can lock up the machine. Disable them only once. */
if (parport_ip32_dma.left == 0 && parport_ip32_dma.irq_on) {
pr_debug(PPIP32 "IRQ off (ctx)\n");
disable_irq_nosync(MACEISA_PAR_CTXA_IRQ);
disable_irq_nosync(MACEISA_PAR_CTXB_IRQ);
parport_ip32_dma.irq_on = 0;
}
spin_unlock_irqrestore(&parport_ip32_dma.lock, flags);
}
/**
* parport_ip32_dma_interrupt - DMA interrupt handler
* @irq: interrupt number
* @dev_id: unused
*/
static irqreturn_t parport_ip32_dma_interrupt(int irq, void *dev_id)
{
if (parport_ip32_dma.left)
pr_trace(NULL, "(%d): ctx=%d", irq, parport_ip32_dma.ctx);
parport_ip32_dma_setup_context(MACEPAR_CONTEXT_DATA_BOUND);
return IRQ_HANDLED;
}
#if DEBUG_PARPORT_IP32
static irqreturn_t parport_ip32_merr_interrupt(int irq, void *dev_id)
{
pr_trace1(NULL, "(%d)", irq);
return IRQ_HANDLED;
}
#endif
/**
* parport_ip32_dma_start - begins a DMA transfer
 * @p: parport to work on
* @dir: DMA direction: DMA_TO_DEVICE or DMA_FROM_DEVICE
* @addr: pointer to data buffer
* @count: buffer size
*
* Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be
* correctly balanced.
*/
static int parport_ip32_dma_start(struct parport *p,
enum dma_data_direction dir, void *addr, size_t count)
{
unsigned int limit;
u64 ctrl;
pr_trace(NULL, "(%d, %lu)", dir, (unsigned long)count);
/* FIXME - add support for DMA_FROM_DEVICE. In this case, the buffer
 * must be 64-byte aligned. */
BUG_ON(dir != DMA_TO_DEVICE);
/* Reset DMA controller */
ctrl = MACEPAR_CTLSTAT_RESET;
writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
/* DMA IRQs should normally be enabled */
if (!parport_ip32_dma.irq_on) {
WARN_ON(1);
enable_irq(MACEISA_PAR_CTXA_IRQ);
enable_irq(MACEISA_PAR_CTXB_IRQ);
parport_ip32_dma.irq_on = 1;
}
/* Prepare DMA pointers */
parport_ip32_dma.dir = dir;
parport_ip32_dma.buf = dma_map_single(&p->bus_dev, addr, count, dir);
parport_ip32_dma.len = count;
parport_ip32_dma.next = parport_ip32_dma.buf;
parport_ip32_dma.left = parport_ip32_dma.len;
parport_ip32_dma.ctx = 0;
/* Setup DMA direction and first two contexts */
ctrl = (dir == DMA_TO_DEVICE) ? 0 : MACEPAR_CTLSTAT_DIRECTION;
writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
/* Single transfer should not cross a 4K page boundary */
limit = MACEPAR_CONTEXT_DATA_BOUND -
(parport_ip32_dma.next & (MACEPAR_CONTEXT_DATA_BOUND - 1));
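/* Worked example (assuming a 4 KiB bound, as per the comment above):
 * if parport_ip32_dma.next == 0x1f40, then next & 0xfff == 0xf40 and
 * limit == 0xc0 (192) bytes, so the first context ends exactly on the
 * boundary and later contexts can use the full bound. */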
parport_ip32_dma_setup_context(limit);
parport_ip32_dma_setup_context(MACEPAR_CONTEXT_DATA_BOUND);
/* Real start of DMA transfer */
ctrl |= MACEPAR_CTLSTAT_ENABLE;
writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
return 0;
}
/**
* parport_ip32_dma_stop - ends a running DMA transfer
 * @p: parport to work on
*
* Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be
* correctly balanced.
*/
static void parport_ip32_dma_stop(struct parport *p)
{
u64 ctx_a;
u64 ctx_b;
u64 ctrl;
u64 diag;
size_t res[2]; /* {[0] = res_a, [1] = res_b} */
pr_trace(NULL, "()");
/* Disable IRQs */
spin_lock_irq(&parport_ip32_dma.lock);
if (parport_ip32_dma.irq_on) {
pr_debug(PPIP32 "IRQ off (stop)\n");
disable_irq_nosync(MACEISA_PAR_CTXA_IRQ);
disable_irq_nosync(MACEISA_PAR_CTXB_IRQ);
parport_ip32_dma.irq_on = 0;
}
spin_unlock_irq(&parport_ip32_dma.lock);
/* Force IRQ synchronization, even if the IRQs were disabled
* elsewhere. */
synchronize_irq(MACEISA_PAR_CTXA_IRQ);
synchronize_irq(MACEISA_PAR_CTXB_IRQ);
/* Stop DMA transfer */
ctrl = readq(&mace->perif.ctrl.parport.cntlstat);
ctrl &= ~MACEPAR_CTLSTAT_ENABLE;
writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
/* Adjust residue (parport_ip32_dma.left) */
ctx_a = readq(&mace->perif.ctrl.parport.context_a);
ctx_b = readq(&mace->perif.ctrl.parport.context_b);
ctrl = readq(&mace->perif.ctrl.parport.cntlstat);
diag = readq(&mace->perif.ctrl.parport.diagnostic);
res[0] = (ctrl & MACEPAR_CTLSTAT_CTXA_VALID) ?
1 + ((ctx_a & MACEPAR_CONTEXT_DATALEN_MASK) >>
MACEPAR_CONTEXT_DATALEN_SHIFT) :
0;
res[1] = (ctrl & MACEPAR_CTLSTAT_CTXB_VALID) ?
1 + ((ctx_b & MACEPAR_CONTEXT_DATALEN_MASK) >>
MACEPAR_CONTEXT_DATALEN_SHIFT) :
0;
if (diag & MACEPAR_DIAG_DMACTIVE)
res[(diag & MACEPAR_DIAG_CTXINUSE) != 0] =
1 + ((diag & MACEPAR_DIAG_CTRMASK) >>
MACEPAR_DIAG_CTRSHIFT);
parport_ip32_dma.left += res[0] + res[1];
/* Reset DMA controller, and re-enable IRQs */
ctrl = MACEPAR_CTLSTAT_RESET;
writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
pr_debug(PPIP32 "IRQ on (stop)\n");
enable_irq(MACEISA_PAR_CTXA_IRQ);
enable_irq(MACEISA_PAR_CTXB_IRQ);
parport_ip32_dma.irq_on = 1;
dma_unmap_single(&p->bus_dev, parport_ip32_dma.buf,
parport_ip32_dma.len, parport_ip32_dma.dir);
}
/**
* parport_ip32_dma_get_residue - get residue from last DMA transfer
*
* Returns the number of bytes remaining from last DMA transfer.
*/
static inline size_t parport_ip32_dma_get_residue(void)
{
return parport_ip32_dma.left;
}
/**
* parport_ip32_dma_register - initialize DMA engine
*
* Returns zero for success.
*/
static int parport_ip32_dma_register(void)
{
int err;
spin_lock_init(&parport_ip32_dma.lock);
parport_ip32_dma.irq_on = 1;
/* Reset DMA controller */
writeq(MACEPAR_CTLSTAT_RESET, &mace->perif.ctrl.parport.cntlstat);
/* Request IRQs */
err = request_irq(MACEISA_PAR_CTXA_IRQ, parport_ip32_dma_interrupt,
0, "parport_ip32", NULL);
if (err)
goto fail_a;
err = request_irq(MACEISA_PAR_CTXB_IRQ, parport_ip32_dma_interrupt,
0, "parport_ip32", NULL);
if (err)
goto fail_b;
#if DEBUG_PARPORT_IP32
/* FIXME - what is this IRQ for? */
err = request_irq(MACEISA_PAR_MERR_IRQ, parport_ip32_merr_interrupt,
0, "parport_ip32", NULL);
if (err)
goto fail_merr;
#endif
return 0;
#if DEBUG_PARPORT_IP32
fail_merr:
free_irq(MACEISA_PAR_CTXB_IRQ, NULL);
#endif
fail_b:
free_irq(MACEISA_PAR_CTXA_IRQ, NULL);
fail_a:
return err;
}
/**
* parport_ip32_dma_unregister - release and free resources for DMA engine
*/
static void parport_ip32_dma_unregister(void)
{
#if DEBUG_PARPORT_IP32
free_irq(MACEISA_PAR_MERR_IRQ, NULL);
#endif
free_irq(MACEISA_PAR_CTXB_IRQ, NULL);
free_irq(MACEISA_PAR_CTXA_IRQ, NULL);
}
/*--- Interrupt handlers and associates --------------------------------*/
/**
* parport_ip32_wakeup - wakes up code waiting for an interrupt
* @p: pointer to &struct parport
*/
static inline void parport_ip32_wakeup(struct parport *p)
{
struct parport_ip32_private * const priv = p->physport->private_data;
complete(&priv->irq_complete);
}
/**
* parport_ip32_interrupt - interrupt handler
* @irq: interrupt number
* @dev_id: pointer to &struct parport
*
 * Caught interrupts are forwarded to the upper parport layer if irq_mode is
* %PARPORT_IP32_IRQ_FWD.
*/
static irqreturn_t parport_ip32_interrupt(int irq, void *dev_id)
{
struct parport * const p = dev_id;
struct parport_ip32_private * const priv = p->physport->private_data;
enum parport_ip32_irq_mode irq_mode = priv->irq_mode;
switch (irq_mode) {
case PARPORT_IP32_IRQ_FWD:
return parport_irq_handler(irq, dev_id);
case PARPORT_IP32_IRQ_HERE:
parport_ip32_wakeup(p);
break;
}
return IRQ_HANDLED;
}
/*--- Some utility function to manipulate ECR register -----------------*/
/**
* parport_ip32_read_econtrol - read contents of the ECR register
* @p: pointer to &struct parport
*/
static inline unsigned int parport_ip32_read_econtrol(struct parport *p)
{
struct parport_ip32_private * const priv = p->physport->private_data;
return readb(priv->regs.ecr);
}
/**
* parport_ip32_write_econtrol - write new contents to the ECR register
* @p: pointer to &struct parport
* @c: new value to write
*/
static inline void parport_ip32_write_econtrol(struct parport *p,
unsigned int c)
{
struct parport_ip32_private * const priv = p->physport->private_data;
writeb(c, priv->regs.ecr);
}
/**
* parport_ip32_frob_econtrol - change bits from the ECR register
* @p: pointer to &struct parport
* @mask: bit mask of bits to change
* @val: new value for changed bits
*
* Read from the ECR, mask out the bits in @mask, exclusive-or with the bits
* in @val, and write the result to the ECR.
*/
static inline void parport_ip32_frob_econtrol(struct parport *p,
unsigned int mask,
unsigned int val)
{
unsigned int c;
c = (parport_ip32_read_econtrol(p) & ~mask) ^ val;
parport_ip32_write_econtrol(p, c);
}
/**
* parport_ip32_set_mode - change mode of ECP port
* @p: pointer to &struct parport
* @mode: new mode to write in ECR
*
* ECR is reset in a sane state (interrupts and DMA disabled), and placed in
* mode @mode. Go through PS2 mode if needed.
*/
static void parport_ip32_set_mode(struct parport *p, unsigned int mode)
{
unsigned int omode;
mode &= ECR_MODE_MASK;
omode = parport_ip32_read_econtrol(p) & ECR_MODE_MASK;
if (!(mode == ECR_MODE_SPP || mode == ECR_MODE_PS2
|| omode == ECR_MODE_SPP || omode == ECR_MODE_PS2)) {
/* We have to go through PS2 mode */
unsigned int ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
parport_ip32_write_econtrol(p, ecr);
}
parport_ip32_write_econtrol(p, mode | ECR_nERRINTR | ECR_SERVINTR);
}
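/* Illustration of the transition above: switching from EPP to ECP writes
 * ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR first and then
 * ECR_MODE_ECP | ECR_nERRINTR | ECR_SERVINTR, while a switch to or from
 * SPP/PS2 is written directly. */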
/*--- Basic functions needed for parport -------------------------------*/
/**
* parport_ip32_read_data - return current contents of the DATA register
* @p: pointer to &struct parport
*/
static inline unsigned char parport_ip32_read_data(struct parport *p)
{
struct parport_ip32_private * const priv = p->physport->private_data;
return readb(priv->regs.data);
}
/**
* parport_ip32_write_data - set new contents for the DATA register
* @p: pointer to &struct parport
* @d: new value to write
*/
static inline void parport_ip32_write_data(struct parport *p, unsigned char d)
{
struct parport_ip32_private * const priv = p->physport->private_data;
writeb(d, priv->regs.data);
}
/**
* parport_ip32_read_status - return current contents of the DSR register
* @p: pointer to &struct parport
*/
static inline unsigned char parport_ip32_read_status(struct parport *p)
{
struct parport_ip32_private * const priv = p->physport->private_data;
return readb(priv->regs.dsr);
}
/**
* __parport_ip32_read_control - return cached contents of the DCR register
* @p: pointer to &struct parport
*/
static inline unsigned int __parport_ip32_read_control(struct parport *p)
{
struct parport_ip32_private * const priv = p->physport->private_data;
return priv->dcr_cache; /* use soft copy */
}
/**
* __parport_ip32_write_control - set new contents for the DCR register
* @p: pointer to &struct parport
* @c: new value to write
*/
static inline void __parport_ip32_write_control(struct parport *p,
unsigned int c)
{
struct parport_ip32_private * const priv = p->physport->private_data;
CHECK_EXTRA_BITS(p, c, priv->dcr_writable);
c &= priv->dcr_writable; /* only writable bits */
writeb(c, priv->regs.dcr);
priv->dcr_cache = c; /* update soft copy */
}
/**
* __parport_ip32_frob_control - change bits from the DCR register
* @p: pointer to &struct parport
* @mask: bit mask of bits to change
* @val: new value for changed bits
*
 * This is equivalent to reading from the DCR, masking out the bits in @mask,
 * exclusive-oring with the bits in @val, and writing the result to the DCR.
 * In practice, the cached contents of the DCR are used.
*/
static inline void __parport_ip32_frob_control(struct parport *p,
unsigned int mask,
unsigned int val)
{
unsigned int c;
c = (__parport_ip32_read_control(p) & ~mask) ^ val;
__parport_ip32_write_control(p, c);
}
/**
* parport_ip32_read_control - return cached contents of the DCR register
* @p: pointer to &struct parport
*
* The return value is masked so as to only return the value of %DCR_STROBE,
* %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
*/
static inline unsigned char parport_ip32_read_control(struct parport *p)
{
const unsigned int rm =
DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
return __parport_ip32_read_control(p) & rm;
}
/**
* parport_ip32_write_control - set new contents for the DCR register
* @p: pointer to &struct parport
* @c: new value to write
*
* The value is masked so as to only change the value of %DCR_STROBE,
* %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
*/
static inline void parport_ip32_write_control(struct parport *p,
unsigned char c)
{
const unsigned int wm =
DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
CHECK_EXTRA_BITS(p, c, wm);
__parport_ip32_frob_control(p, wm, c & wm);
}
/**
* parport_ip32_frob_control - change bits from the DCR register
* @p: pointer to &struct parport
* @mask: bit mask of bits to change
* @val: new value for changed bits
*
 * This differs from __parport_ip32_frob_control() in that it only allows
 * changing the value of %DCR_STROBE, %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
*/
static inline unsigned char parport_ip32_frob_control(struct parport *p,
unsigned char mask,
unsigned char val)
{
const unsigned int wm =
DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
CHECK_EXTRA_BITS(p, mask, wm);
CHECK_EXTRA_BITS(p, val, wm);
__parport_ip32_frob_control(p, mask & wm, val & wm);
return parport_ip32_read_control(p);
}
/**
* parport_ip32_disable_irq - disable interrupts on the rising edge of nACK
* @p: pointer to &struct parport
*/
static inline void parport_ip32_disable_irq(struct parport *p)
{
__parport_ip32_frob_control(p, DCR_IRQ, 0);
}
/**
* parport_ip32_enable_irq - enable interrupts on the rising edge of nACK
* @p: pointer to &struct parport
*/
static inline void parport_ip32_enable_irq(struct parport *p)
{
__parport_ip32_frob_control(p, DCR_IRQ, DCR_IRQ);
}
/**
* parport_ip32_data_forward - enable host-to-peripheral communications
* @p: pointer to &struct parport
*
* Enable the data line drivers, for 8-bit host-to-peripheral communications.
*/
static inline void parport_ip32_data_forward(struct parport *p)
{
__parport_ip32_frob_control(p, DCR_DIR, 0);
}
/**
* parport_ip32_data_reverse - enable peripheral-to-host communications
* @p: pointer to &struct parport
*
* Place the data bus in a high impedance state, if @p->modes has the
* PARPORT_MODE_TRISTATE bit set.
*/
static inline void parport_ip32_data_reverse(struct parport *p)
{
__parport_ip32_frob_control(p, DCR_DIR, DCR_DIR);
}
/**
* parport_ip32_init_state - for core parport code
* @dev: pointer to &struct pardevice
* @s: pointer to &struct parport_state to initialize
*/
static void parport_ip32_init_state(struct pardevice *dev,
struct parport_state *s)
{
s->u.ip32.dcr = DCR_SELECT | DCR_nINIT;
s->u.ip32.ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
}
/**
* parport_ip32_save_state - for core parport code
* @p: pointer to &struct parport
* @s: pointer to &struct parport_state to save state to
*/
static void parport_ip32_save_state(struct parport *p,
struct parport_state *s)
{
s->u.ip32.dcr = __parport_ip32_read_control(p);
s->u.ip32.ecr = parport_ip32_read_econtrol(p);
}
/**
* parport_ip32_restore_state - for core parport code
* @p: pointer to &struct parport
* @s: pointer to &struct parport_state to restore state from
*/
static void parport_ip32_restore_state(struct parport *p,
struct parport_state *s)
{
parport_ip32_set_mode(p, s->u.ip32.ecr & ECR_MODE_MASK);
parport_ip32_write_econtrol(p, s->u.ip32.ecr);
__parport_ip32_write_control(p, s->u.ip32.dcr);
}
/*--- EPP mode functions -----------------------------------------------*/
/**
* parport_ip32_clear_epp_timeout - clear Timeout bit in EPP mode
* @p: pointer to &struct parport
*
* Returns 1 if the Timeout bit is clear, and 0 otherwise.
*/
static unsigned int parport_ip32_clear_epp_timeout(struct parport *p)
{
struct parport_ip32_private * const priv = p->physport->private_data;
unsigned int cleared;
if (!(parport_ip32_read_status(p) & DSR_TIMEOUT))
cleared = 1;
else {
unsigned int r;
/* To clear timeout some chips require double read */
parport_ip32_read_status(p);
r = parport_ip32_read_status(p);
/* Some reset by writing 1 */
writeb(r | DSR_TIMEOUT, priv->regs.dsr);
/* Others by writing 0 */
writeb(r & ~DSR_TIMEOUT, priv->regs.dsr);
r = parport_ip32_read_status(p);
cleared = !(r & DSR_TIMEOUT);
}
pr_trace(p, "(): %s", cleared ? "cleared" : "failed");
return cleared;
}
/**
* parport_ip32_epp_read - generic EPP read function
* @eppreg: I/O register to read from
* @p: pointer to &struct parport
* @buf: buffer to store read data
* @len: length of buffer @buf
* @flags: may be PARPORT_EPP_FAST
*/
static size_t parport_ip32_epp_read(void __iomem *eppreg,
struct parport *p, void *buf,
size_t len, int flags)
{
struct parport_ip32_private * const priv = p->physport->private_data;
size_t got;
parport_ip32_set_mode(p, ECR_MODE_EPP);
parport_ip32_data_reverse(p);
parport_ip32_write_control(p, DCR_nINIT);
if ((flags & PARPORT_EPP_FAST) && (len > 1)) {
readsb(eppreg, buf, len);
if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
parport_ip32_clear_epp_timeout(p);
return -EIO;
}
got = len;
} else {
u8 *bufp = buf;
for (got = 0; got < len; got++) {
*bufp++ = readb(eppreg);
if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
parport_ip32_clear_epp_timeout(p);
break;
}
}
}
parport_ip32_data_forward(p);
parport_ip32_set_mode(p, ECR_MODE_PS2);
return got;
}
/**
* parport_ip32_epp_write - generic EPP write function
* @eppreg: I/O register to write to
* @p: pointer to &struct parport
* @buf: buffer of data to write
* @len: length of buffer @buf
* @flags: may be PARPORT_EPP_FAST
*/
static size_t parport_ip32_epp_write(void __iomem *eppreg,
struct parport *p, const void *buf,
size_t len, int flags)
{
struct parport_ip32_private * const priv = p->physport->private_data;
size_t written;
parport_ip32_set_mode(p, ECR_MODE_EPP);
parport_ip32_data_forward(p);
parport_ip32_write_control(p, DCR_nINIT);
if ((flags & PARPORT_EPP_FAST) && (len > 1)) {
writesb(eppreg, buf, len);
if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
parport_ip32_clear_epp_timeout(p);
return -EIO;
}
written = len;
} else {
const u8 *bufp = buf;
for (written = 0; written < len; written++) {
writeb(*bufp++, eppreg);
if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
parport_ip32_clear_epp_timeout(p);
break;
}
}
}
parport_ip32_set_mode(p, ECR_MODE_PS2);
return written;
}
/**
* parport_ip32_epp_read_data - read a block of data in EPP mode
* @p: pointer to &struct parport
* @buf: buffer to store read data
* @len: length of buffer @buf
* @flags: may be PARPORT_EPP_FAST
*/
static size_t parport_ip32_epp_read_data(struct parport *p, void *buf,
size_t len, int flags)
{
struct parport_ip32_private * const priv = p->physport->private_data;
return parport_ip32_epp_read(priv->regs.eppData0, p, buf, len, flags);
}
/**
* parport_ip32_epp_write_data - write a block of data in EPP mode
* @p: pointer to &struct parport
* @buf: buffer of data to write
* @len: length of buffer @buf
* @flags: may be PARPORT_EPP_FAST
*/
static size_t parport_ip32_epp_write_data(struct parport *p, const void *buf,
size_t len, int flags)
{
struct parport_ip32_private * const priv = p->physport->private_data;
return parport_ip32_epp_write(priv->regs.eppData0, p, buf, len, flags);
}
/**
* parport_ip32_epp_read_addr - read a block of addresses in EPP mode
* @p: pointer to &struct parport
* @buf: buffer to store read data
* @len: length of buffer @buf
* @flags: may be PARPORT_EPP_FAST
*/
static size_t parport_ip32_epp_read_addr(struct parport *p, void *buf,
size_t len, int flags)
{
struct parport_ip32_private * const priv = p->physport->private_data;
return parport_ip32_epp_read(priv->regs.eppAddr, p, buf, len, flags);
}
/**
* parport_ip32_epp_write_addr - write a block of addresses in EPP mode
* @p: pointer to &struct parport
* @buf: buffer of data to write
* @len: length of buffer @buf
* @flags: may be PARPORT_EPP_FAST
*/
static size_t parport_ip32_epp_write_addr(struct parport *p, const void *buf,
size_t len, int flags)
{
struct parport_ip32_private * const priv = p->physport->private_data;
return parport_ip32_epp_write(priv->regs.eppAddr, p, buf, len, flags);
}
/*--- ECP mode functions (FIFO) ----------------------------------------*/
/**
* parport_ip32_fifo_wait_break - check if the waiting function should return
* @p: pointer to &struct parport
* @expire: timeout expiring date, in jiffies
*
* parport_ip32_fifo_wait_break() checks if the waiting function should return
* immediately or not. The break conditions are:
* - expired timeout;
* - a pending signal;
* - nFault asserted low.
* This function also calls cond_resched().
*/
static unsigned int parport_ip32_fifo_wait_break(struct parport *p,
unsigned long expire)
{
cond_resched();
if (time_after(jiffies, expire)) {
pr_debug1(PPIP32 "%s: FIFO write timed out\n", p->name);
return 1;
}
if (signal_pending(current)) {
pr_debug1(PPIP32 "%s: Signal pending\n", p->name);
return 1;
}
if (!(parport_ip32_read_status(p) & DSR_nFAULT)) {
pr_debug1(PPIP32 "%s: nFault asserted low\n", p->name);
return 1;
}
return 0;
}
/**
* parport_ip32_fwp_wait_polling - wait for FIFO to empty (polling)
* @p: pointer to &struct parport
*
* Returns the number of bytes that can safely be written in the FIFO. A
* return value of zero means that the calling function should terminate as
* fast as possible.
*/
static unsigned int parport_ip32_fwp_wait_polling(struct parport *p)
{
struct parport_ip32_private * const priv = p->physport->private_data;
struct parport * const physport = p->physport;
unsigned long expire;
unsigned int count;
unsigned int ecr;
expire = jiffies + physport->cad->timeout;
count = 0;
while (1) {
if (parport_ip32_fifo_wait_break(p, expire))
break;
/* Check FIFO state. We do nothing when the FIFO is neither
* full nor empty. It appears that the FIFO full bit is not
* always reliable: the FIFO state is sometimes wrongly
* reported, and the chip gets confused if we give it another
* byte. */
ecr = parport_ip32_read_econtrol(p);
if (ecr & ECR_F_EMPTY) {
/* FIFO is empty, fill it up */
count = priv->fifo_depth;
break;
}
/* Wait a moment... */
udelay(FIFO_POLLING_INTERVAL);
} /* while (1) */
return count;
}
/**
* parport_ip32_fwp_wait_interrupt - wait for FIFO to empty (interrupt-driven)
* @p: pointer to &struct parport
*
* Returns the number of bytes that can safely be written to the FIFO. A
* return value of zero means that the calling function should terminate as
* fast as possible.
*/
static unsigned int parport_ip32_fwp_wait_interrupt(struct parport *p)
{
static unsigned int lost_interrupt = 0;
struct parport_ip32_private * const priv = p->physport->private_data;
struct parport * const physport = p->physport;
unsigned long nfault_timeout;
unsigned long expire;
unsigned int count;
unsigned int ecr;
nfault_timeout = min((unsigned long)physport->cad->timeout,
msecs_to_jiffies(FIFO_NFAULT_TIMEOUT));
expire = jiffies + physport->cad->timeout;
count = 0;
while (1) {
if (parport_ip32_fifo_wait_break(p, expire))
break;
/* Initialize the completion used to take interrupts into account */
reinit_completion(&priv->irq_complete);
/* Enable serviceIntr */
parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
/* Enabling serviceIntr while the FIFO is empty does not
* always generate an interrupt, so check for emptiness
* now. */
ecr = parport_ip32_read_econtrol(p);
if (!(ecr & ECR_F_EMPTY)) {
/* FIFO is not empty: wait for an interrupt or a
* timeout to occur */
wait_for_completion_interruptible_timeout(
&priv->irq_complete, nfault_timeout);
ecr = parport_ip32_read_econtrol(p);
if ((ecr & ECR_F_EMPTY) && !(ecr & ECR_SERVINTR)
&& !lost_interrupt) {
pr_warn(PPIP32 "%s: lost interrupt in %s\n",
p->name, __func__);
lost_interrupt = 1;
}
}
/* Disable serviceIntr */
parport_ip32_frob_econtrol(p, ECR_SERVINTR, ECR_SERVINTR);
/* Check FIFO state */
if (ecr & ECR_F_EMPTY) {
/* FIFO is empty, fill it up */
count = priv->fifo_depth;
break;
} else if (ecr & ECR_SERVINTR) {
/* FIFO is not empty, but we know that we can safely push
* writeIntrThreshold bytes into it */
count = priv->writeIntrThreshold;
break;
}
/* FIFO is not empty, and we did not get any interrupt.
* Either it's time to check for nFault, or a signal is
* pending. This is verified in
* parport_ip32_fifo_wait_break(), so we continue the loop. */
} /* while (1) */
return count;
}
/**
* parport_ip32_fifo_write_block_pio - write a block of data (PIO mode)
* @p: pointer to &struct parport
* @buf: buffer of data to write
* @len: length of buffer @buf
*
* Uses PIO to write the contents of the buffer @buf into the parallel port
* FIFO. Returns the number of bytes that were actually written. It can work
* with or without the help of interrupts. The parallel port must be
* correctly initialized before calling parport_ip32_fifo_write_block_pio().
*/
static size_t parport_ip32_fifo_write_block_pio(struct parport *p,
const void *buf, size_t len)
{
struct parport_ip32_private * const priv = p->physport->private_data;
const u8 *bufp = buf;
size_t left = len;
priv->irq_mode = PARPORT_IP32_IRQ_HERE;
while (left > 0) {
unsigned int count;
count = (p->irq == PARPORT_IRQ_NONE) ?
parport_ip32_fwp_wait_polling(p) :
parport_ip32_fwp_wait_interrupt(p);
if (count == 0)
break; /* Transmission should be stopped */
if (count > left)
count = left;
if (count == 1) {
writeb(*bufp, priv->regs.fifo);
bufp++, left--;
} else {
writesb(priv->regs.fifo, bufp, count);
bufp += count, left -= count;
}
}
priv->irq_mode = PARPORT_IP32_IRQ_FWD;
return len - left;
}
/**
* parport_ip32_fifo_write_block_dma - write a block of data (DMA mode)
* @p: pointer to &struct parport
* @buf: buffer of data to write
* @len: length of buffer @buf
*
* Uses DMA to write the contents of the buffer @buf into the parallel port
* FIFO. Returns the number of bytes that were actually written. The
* parallel port must be correctly initialized before calling
* parport_ip32_fifo_write_block_dma().
*/
static size_t parport_ip32_fifo_write_block_dma(struct parport *p,
const void *buf, size_t len)
{
struct parport_ip32_private * const priv = p->physport->private_data;
struct parport * const physport = p->physport;
unsigned long nfault_timeout;
unsigned long expire;
size_t written;
unsigned int ecr;
priv->irq_mode = PARPORT_IP32_IRQ_HERE;
parport_ip32_dma_start(p, DMA_TO_DEVICE, (void *)buf, len);
reinit_completion(&priv->irq_complete);
parport_ip32_frob_econtrol(p, ECR_DMAEN | ECR_SERVINTR, ECR_DMAEN);
nfault_timeout = min((unsigned long)physport->cad->timeout,
msecs_to_jiffies(FIFO_NFAULT_TIMEOUT));
expire = jiffies + physport->cad->timeout;
while (1) {
if (parport_ip32_fifo_wait_break(p, expire))
break;
wait_for_completion_interruptible_timeout(&priv->irq_complete,
nfault_timeout);
ecr = parport_ip32_read_econtrol(p);
if (ecr & ECR_SERVINTR)
break; /* DMA transfer just finished */
}
parport_ip32_dma_stop(p);
written = len - parport_ip32_dma_get_residue();
priv->irq_mode = PARPORT_IP32_IRQ_FWD;
return written;
}
/**
* parport_ip32_fifo_write_block - write a block of data
* @p: pointer to &struct parport
* @buf: buffer of data to write
* @len: length of buffer @buf
*
* Uses PIO or DMA to write the contents of the buffer @buf into the parallel
* port FIFO. Returns the number of bytes that were actually written.
*/
static size_t parport_ip32_fifo_write_block(struct parport *p,
const void *buf, size_t len)
{
size_t written = 0;
if (len)
/* FIXME - Maybe some threshold value should be set for @len
* under which we revert to PIO mode? */
written = (p->modes & PARPORT_MODE_DMA) ?
parport_ip32_fifo_write_block_dma(p, buf, len) :
parport_ip32_fifo_write_block_pio(p, buf, len);
return written;
}
/**
* parport_ip32_drain_fifo - wait for FIFO to empty
* @p: pointer to &struct parport
* @timeout: timeout, in jiffies
*
* This function waits for the FIFO to empty. It returns 1 when the FIFO is
* empty, or 0 if the timeout @timeout expires first or a signal is pending.
*/
static unsigned int parport_ip32_drain_fifo(struct parport *p,
unsigned long timeout)
{
unsigned long expire = jiffies + timeout;
unsigned int polling_interval;
unsigned int counter;
/* Busy wait for approx. 200us */
for (counter = 0; counter < 40; counter++) {
if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY)
break;
if (time_after(jiffies, expire))
break;
if (signal_pending(current))
break;
udelay(5);
}
/* Poll slowly. Polling interval starts with 1 millisecond, and is
* increased exponentially until 128. */
polling_interval = 1; /* msecs */
while (!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY)) {
if (time_after_eq(jiffies, expire))
break;
msleep_interruptible(polling_interval);
if (signal_pending(current))
break;
if (polling_interval < 128)
polling_interval *= 2;
}
return !!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY);
}
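/*
* Illustrative timing for parport_ip32_drain_fifo() (a summary of the
* code above, not a hardware guarantee): it first busy-waits in 5 us
* steps for at most 40 * 5 us = 200 us, then falls back to
* interruptible sleeps of 1, 2, 4, ..., 128 ms (the interval doubles
* up to a 128 ms cap) until the FIFO reports empty, the timeout
* expires, or a signal is pending.
*/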
/**
* parport_ip32_get_fifo_residue - reset FIFO
* @p: pointer to &struct parport
* @mode: current operation mode (ECR_MODE_PPF or ECR_MODE_ECP)
*
* This function resets FIFO, and returns the number of bytes remaining in it.
*/
static unsigned int parport_ip32_get_fifo_residue(struct parport *p,
unsigned int mode)
{
struct parport_ip32_private * const priv = p->physport->private_data;
unsigned int residue;
unsigned int cnfga;
/* FIXME - We are missing one byte if the printer is off-line. I
* don't know how to detect this. It looks like the full bit is not
* always reliable. For the moment, the problem is avoided in most
* cases by testing for BUSY in parport_ip32_compat_write_data().
*/
if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY)
residue = 0;
else {
pr_debug1(PPIP32 "%s: FIFO is stuck\n", p->name);
/* Stop all transfers.
*
* Microsoft's document says to drive DCR_STROBE to 0, but it
* doesn't work (at least in Compatibility mode, not tested in
* ECP mode). Switching directly to Test mode (as in
* parport_pc) is not an option: it confuses the port, and ECP
* service interrupts no longer work after that. A hard reset
* is then needed to revert to a sane state.
*
* Let's hope that the FIFO is really stuck and that the
* peripheral doesn't wake up now.
*/
parport_ip32_frob_control(p, DCR_STROBE, 0);
/* Fill up FIFO */
for (residue = priv->fifo_depth; residue > 0; residue--) {
if (parport_ip32_read_econtrol(p) & ECR_F_FULL)
break;
writeb(0x00, priv->regs.fifo);
}
}
if (residue)
pr_debug1(PPIP32 "%s: %d PWord%s left in FIFO\n",
p->name, residue,
(residue == 1) ? " was" : "s were");
/* Now reset the FIFO */
parport_ip32_set_mode(p, ECR_MODE_PS2);
/* Host recovery for ECP mode */
if (mode == ECR_MODE_ECP) {
parport_ip32_data_reverse(p);
parport_ip32_frob_control(p, DCR_nINIT, 0);
if (parport_wait_peripheral(p, DSR_PERROR, 0))
pr_debug1(PPIP32 "%s: PEerror timeout 1 in %s\n",
p->name, __func__);
parport_ip32_frob_control(p, DCR_STROBE, DCR_STROBE);
parport_ip32_frob_control(p, DCR_nINIT, DCR_nINIT);
if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR))
pr_debug1(PPIP32 "%s: PEerror timeout 2 in %s\n",
p->name, __func__);
}
/* Adjust residue if needed */
parport_ip32_set_mode(p, ECR_MODE_CFG);
cnfga = readb(priv->regs.cnfgA);
if (!(cnfga & CNFGA_nBYTEINTRANS)) {
pr_debug1(PPIP32 "%s: cnfgA contains 0x%02x\n",
p->name, cnfga);
pr_debug1(PPIP32 "%s: Accounting for extra byte\n",
p->name);
residue++;
}
/* Don't care about partial PWords since we do not support
* PWord != 1 byte. */
/* Back to forward PS2 mode. */
parport_ip32_set_mode(p, ECR_MODE_PS2);
parport_ip32_data_forward(p);
return residue;
}
/**
* parport_ip32_compat_write_data - write a block of data in SPP mode
* @p: pointer to &struct parport
* @buf: buffer of data to write
* @len: length of buffer @buf
* @flags: ignored
*/
static size_t parport_ip32_compat_write_data(struct parport *p,
const void *buf, size_t len,
int flags)
{
static unsigned int ready_before = 1;
struct parport_ip32_private * const priv = p->physport->private_data;
struct parport * const physport = p->physport;
size_t written = 0;
/* Special case: a timeout of zero means we cannot call schedule().
* Also if O_NONBLOCK is set then use the default implementation. */
if (physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
return parport_ieee1284_write_compat(p, buf, len, flags);
/* Reset FIFO, go in forward mode, and disable ackIntEn */
parport_ip32_set_mode(p, ECR_MODE_PS2);
parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
parport_ip32_data_forward(p);
parport_ip32_disable_irq(p);
parport_ip32_set_mode(p, ECR_MODE_PPF);
physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
/* Wait for peripheral to become ready */
if (parport_wait_peripheral(p, DSR_nBUSY | DSR_nFAULT,
DSR_nBUSY | DSR_nFAULT)) {
/* Avoid flooding the logs */
if (ready_before)
pr_info(PPIP32 "%s: not ready in %s\n",
p->name, __func__);
ready_before = 0;
goto stop;
}
ready_before = 1;
written = parport_ip32_fifo_write_block(p, buf, len);
/* Wait for the FIFO to empty. Timeout is proportional to fifo_depth. */
parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth);
/* Check for a potential residue */
written -= parport_ip32_get_fifo_residue(p, ECR_MODE_PPF);
/* Then, wait for BUSY to get low. */
if (parport_wait_peripheral(p, DSR_nBUSY, DSR_nBUSY))
printk(KERN_DEBUG PPIP32 "%s: BUSY timeout in %s\n",
p->name, __func__);
stop:
/* Reset FIFO */
parport_ip32_set_mode(p, ECR_MODE_PS2);
physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
return written;
}
/*
* FIXME - Insert here parport_ip32_ecp_read_data().
*/
/**
* parport_ip32_ecp_write_data - write a block of data in ECP mode
* @p: pointer to &struct parport
* @buf: buffer of data to write
* @len: length of buffer @buf
* @flags: ignored
*/
static size_t parport_ip32_ecp_write_data(struct parport *p,
const void *buf, size_t len,
int flags)
{
static unsigned int ready_before = 1;
struct parport_ip32_private * const priv = p->physport->private_data;
struct parport * const physport = p->physport;
size_t written = 0;
/* Special case: a timeout of zero means we cannot call schedule().
* Also if O_NONBLOCK is set then use the default implementation. */
if (physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
return parport_ieee1284_ecp_write_data(p, buf, len, flags);
/* Negotiate to forward mode if necessary. */
if (physport->ieee1284.phase != IEEE1284_PH_FWD_IDLE) {
/* Event 47: Set nInit high. */
parport_ip32_frob_control(p, DCR_nINIT | DCR_AUTOFD,
DCR_nINIT | DCR_AUTOFD);
/* Event 49: PError goes high. */
if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR)) {
printk(KERN_DEBUG PPIP32 "%s: PError timeout in %s\n",
p->name, __func__);
physport->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
return 0;
}
}
/* Reset FIFO, go in forward mode, and disable ackIntEn */
parport_ip32_set_mode(p, ECR_MODE_PS2);
parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
parport_ip32_data_forward(p);
parport_ip32_disable_irq(p);
parport_ip32_set_mode(p, ECR_MODE_ECP);
physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
/* Wait for peripheral to become ready */
if (parport_wait_peripheral(p, DSR_nBUSY | DSR_nFAULT,
DSR_nBUSY | DSR_nFAULT)) {
/* Avoid flooding the logs */
if (ready_before)
pr_info(PPIP32 "%s: not ready in %s\n",
p->name, __func__);
ready_before = 0;
goto stop;
}
ready_before = 1;
written = parport_ip32_fifo_write_block(p, buf, len);
/* Wait for the FIFO to empty. Timeout is proportional to fifo_depth. */
parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth);
/* Check for a potential residue */
written -= parport_ip32_get_fifo_residue(p, ECR_MODE_ECP);
/* Then, wait for BUSY to get low. */
if (parport_wait_peripheral(p, DSR_nBUSY, DSR_nBUSY))
printk(KERN_DEBUG PPIP32 "%s: BUSY timeout in %s\n",
p->name, __func__);
stop:
/* Reset FIFO */
parport_ip32_set_mode(p, ECR_MODE_PS2);
physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
return written;
}
/*
* FIXME - Insert here parport_ip32_ecp_write_addr().
*/
/*--- Default parport operations ---------------------------------------*/
static const struct parport_operations parport_ip32_ops __initconst = {
.write_data = parport_ip32_write_data,
.read_data = parport_ip32_read_data,
.write_control = parport_ip32_write_control,
.read_control = parport_ip32_read_control,
.frob_control = parport_ip32_frob_control,
.read_status = parport_ip32_read_status,
.enable_irq = parport_ip32_enable_irq,
.disable_irq = parport_ip32_disable_irq,
.data_forward = parport_ip32_data_forward,
.data_reverse = parport_ip32_data_reverse,
.init_state = parport_ip32_init_state,
.save_state = parport_ip32_save_state,
.restore_state = parport_ip32_restore_state,
.epp_write_data = parport_ieee1284_epp_write_data,
.epp_read_data = parport_ieee1284_epp_read_data,
.epp_write_addr = parport_ieee1284_epp_write_addr,
.epp_read_addr = parport_ieee1284_epp_read_addr,
.ecp_write_data = parport_ieee1284_ecp_write_data,
.ecp_read_data = parport_ieee1284_ecp_read_data,
.ecp_write_addr = parport_ieee1284_ecp_write_addr,
.compat_write_data = parport_ieee1284_write_compat,
.nibble_read_data = parport_ieee1284_read_nibble,
.byte_read_data = parport_ieee1284_read_byte,
.owner = THIS_MODULE,
};
/*--- Device detection -------------------------------------------------*/
/**
* parport_ip32_ecp_supported - check for an ECP port
* @p: pointer to the &parport structure
*
* Returns 1 if an ECP port is found, and 0 otherwise. This function actually
* checks if an Extended Control Register seems to be present. On successful
* return, the port is placed in SPP mode.
*/
static __init unsigned int parport_ip32_ecp_supported(struct parport *p)
{
struct parport_ip32_private * const priv = p->physport->private_data;
unsigned int ecr;
ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
writeb(ecr, priv->regs.ecr);
if (readb(priv->regs.ecr) != (ecr | ECR_F_EMPTY))
goto fail;
pr_probe(p, "Found working ECR register\n");
parport_ip32_set_mode(p, ECR_MODE_SPP);
parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
return 1;
fail:
pr_probe(p, "ECR register not found\n");
return 0;
}
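/*
* Note on the check above (explanatory, based on the test as written):
* the FIFO empty/full flags in the ECR are read-only status bits, and
* with the port idle the empty flag is expected to read back set, so a
* working ECR reads back as the written value with ECR_F_EMPTY added.
* A port lacking an ECP-style control register typically fails this
* read-back comparison and takes the "fail" path.
*/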
/**
* parport_ip32_fifo_supported - check for FIFO parameters
* @p: pointer to the &parport structure
*
* Check for FIFO parameters of an Extended Capabilities Port. Returns 1 on
* success, and 0 otherwise. Adjust FIFO parameters in the parport structure.
* On return, the port is placed in SPP mode.
*/
static __init unsigned int parport_ip32_fifo_supported(struct parport *p)
{
struct parport_ip32_private * const priv = p->physport->private_data;
unsigned int configa, configb;
unsigned int pword;
unsigned int i;
/* Configuration mode */
parport_ip32_set_mode(p, ECR_MODE_CFG);
configa = readb(priv->regs.cnfgA);
configb = readb(priv->regs.cnfgB);
/* Find out PWord size */
switch (configa & CNFGA_ID_MASK) {
case CNFGA_ID_8:
pword = 1;
break;
case CNFGA_ID_16:
pword = 2;
break;
case CNFGA_ID_32:
pword = 4;
break;
default:
pr_probe(p, "Unknown implementation ID: 0x%0x\n",
(configa & CNFGA_ID_MASK) >> CNFGA_ID_SHIFT);
goto fail;
break;
}
if (pword != 1) {
pr_probe(p, "Unsupported PWord size: %u\n", pword);
goto fail;
}
priv->pword = pword;
pr_probe(p, "PWord is %u bits\n", 8 * priv->pword);
/* Check for compression support */
writeb(configb | CNFGB_COMPRESS, priv->regs.cnfgB);
if (readb(priv->regs.cnfgB) & CNFGB_COMPRESS)
pr_probe(p, "Hardware compression detected (unsupported)\n");
writeb(configb & ~CNFGB_COMPRESS, priv->regs.cnfgB);
/* Reset FIFO and go in test mode (no interrupt, no DMA) */
parport_ip32_set_mode(p, ECR_MODE_TST);
/* FIFO must be empty now */
if (!(readb(priv->regs.ecr) & ECR_F_EMPTY)) {
pr_probe(p, "FIFO not reset\n");
goto fail;
}
/* Find out FIFO depth. */
priv->fifo_depth = 0;
for (i = 0; i < 1024; i++) {
if (readb(priv->regs.ecr) & ECR_F_FULL) {
/* FIFO full */
priv->fifo_depth = i;
break;
}
writeb((u8)i, priv->regs.fifo);
}
if (i >= 1024) {
pr_probe(p, "Can't fill FIFO\n");
goto fail;
}
if (!priv->fifo_depth) {
pr_probe(p, "Can't get FIFO depth\n");
goto fail;
}
pr_probe(p, "FIFO is %u PWords deep\n", priv->fifo_depth);
/* Enable interrupts */
parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
/* Find out writeIntrThreshold: number of PWords we know we can write
* if we get an interrupt. */
priv->writeIntrThreshold = 0;
for (i = 0; i < priv->fifo_depth; i++) {
if (readb(priv->regs.fifo) != (u8)i) {
pr_probe(p, "Invalid data in FIFO\n");
goto fail;
}
if (!priv->writeIntrThreshold
&& readb(priv->regs.ecr) & ECR_SERVINTR)
/* writeIntrThreshold reached */
priv->writeIntrThreshold = i + 1;
if (i + 1 < priv->fifo_depth
&& readb(priv->regs.ecr) & ECR_F_EMPTY) {
/* FIFO empty before the last byte? */
pr_probe(p, "Data lost in FIFO\n");
goto fail;
}
}
if (!priv->writeIntrThreshold) {
pr_probe(p, "Can't get writeIntrThreshold\n");
goto fail;
}
pr_probe(p, "writeIntrThreshold is %u\n", priv->writeIntrThreshold);
/* FIFO must be empty now */
if (!(readb(priv->regs.ecr) & ECR_F_EMPTY)) {
pr_probe(p, "Can't empty FIFO\n");
goto fail;
}
/* Reset FIFO */
parport_ip32_set_mode(p, ECR_MODE_PS2);
/* Set reverse direction (must be in PS2 mode) */
parport_ip32_data_reverse(p);
/* Test FIFO, no interrupt, no DMA */
parport_ip32_set_mode(p, ECR_MODE_TST);
/* Enable interrupts */
parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
/* Find out readIntrThreshold: number of PWords we can read if we get
* an interrupt. */
priv->readIntrThreshold = 0;
for (i = 0; i < priv->fifo_depth; i++) {
writeb(0xaa, priv->regs.fifo);
if (readb(priv->regs.ecr) & ECR_SERVINTR) {
/* readIntrThreshold reached */
priv->readIntrThreshold = i + 1;
break;
}
}
if (!priv->readIntrThreshold) {
pr_probe(p, "Can't get readIntrThreshold\n");
goto fail;
}
pr_probe(p, "readIntrThreshold is %u\n", priv->readIntrThreshold);
/* Reset ECR */
parport_ip32_set_mode(p, ECR_MODE_PS2);
parport_ip32_data_forward(p);
parport_ip32_set_mode(p, ECR_MODE_SPP);
return 1;
fail:
priv->fifo_depth = 0;
parport_ip32_set_mode(p, ECR_MODE_SPP);
return 0;
}
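/*
* Worked example for the probing above (the numbers are hypothetical,
* actual values depend on the chip): if filling the FIFO in Test mode
* raises ECR_F_FULL after 16 writes, fifo_depth is 16; if, while
* draining it back, ECR_SERVINTR is first seen set once 9 bytes have
* been read (i == 8), writeIntrThreshold becomes 9, i.e. a service
* interrupt guarantees room for at least 9 more PWords.
* readIntrThreshold is probed the same way in the reverse direction.
*/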
/*--- Initialization code ----------------------------------------------*/
/**
* parport_ip32_make_isa_registers - compute (ISA) register addresses
* @regs: pointer to &struct parport_ip32_regs to fill
* @base: base address of standard and EPP registers
* @base_hi: base address of ECP registers
* @regshift: how much to shift register offset by
*
* Compute register addresses, according to the ISA standard. The addresses
* of the standard and EPP registers are computed from address @base. The
* addresses of the ECP registers are computed from address @base_hi.
*/
static void __init
parport_ip32_make_isa_registers(struct parport_ip32_regs *regs,
void __iomem *base, void __iomem *base_hi,
unsigned int regshift)
{
#define r_base(offset) ((u8 __iomem *)base + ((offset) << regshift))
#define r_base_hi(offset) ((u8 __iomem *)base_hi + ((offset) << regshift))
*regs = (struct parport_ip32_regs){
.data = r_base(0),
.dsr = r_base(1),
.dcr = r_base(2),
.eppAddr = r_base(3),
.eppData0 = r_base(4),
.eppData1 = r_base(5),
.eppData2 = r_base(6),
.eppData3 = r_base(7),
.ecpAFifo = r_base(0),
.fifo = r_base_hi(0),
.cnfgA = r_base_hi(0),
.cnfgB = r_base_hi(1),
.ecr = r_base_hi(2)
};
#undef r_base_hi
#undef r_base
}
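/*
* Example layout (assuming regshift == 8, as passed by
* parport_ip32_probe_port() below): registers are spaced 1 << 8 = 256
* bytes apart, so data/ecpAFifo sit at base + 0x000, dsr at
* base + 0x100, dcr at base + 0x200, eppAddr at base + 0x300, while on
* the ECP side fifo/cnfgA sit at base_hi + 0x000, cnfgB at
* base_hi + 0x100 and ecr at base_hi + 0x200.
*/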
/**
* parport_ip32_probe_port - probe and register IP32 built-in parallel port
*
* Returns the newly allocated &parport structure. On error, an error code is
* encoded in the return value with the ERR_PTR function.
*/
static __init struct parport *parport_ip32_probe_port(void)
{
struct parport_ip32_regs regs;
struct parport_ip32_private *priv = NULL;
struct parport_operations *ops = NULL;
struct parport *p = NULL;
int err;
parport_ip32_make_isa_registers(&regs, &mace->isa.parallel,
&mace->isa.ecp1284, 8 /* regshift */);
ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
priv = kmalloc(sizeof(struct parport_ip32_private), GFP_KERNEL);
p = parport_register_port(0, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, ops);
if (ops == NULL || priv == NULL || p == NULL) {
err = -ENOMEM;
goto fail;
}
p->base = MACE_BASE + offsetof(struct sgi_mace, isa.parallel);
p->base_hi = MACE_BASE + offsetof(struct sgi_mace, isa.ecp1284);
p->private_data = priv;
*ops = parport_ip32_ops;
*priv = (struct parport_ip32_private){
.regs = regs,
.dcr_writable = DCR_DIR | DCR_SELECT | DCR_nINIT |
DCR_AUTOFD | DCR_STROBE,
.irq_mode = PARPORT_IP32_IRQ_FWD,
};
init_completion(&priv->irq_complete);
/* Probe port. */
if (!parport_ip32_ecp_supported(p)) {
err = -ENODEV;
goto fail;
}
parport_ip32_dump_state(p, "begin init", 0);
/* We found what looks like a working ECR register. Simply assume
* that all modes are correctly supported. Enable basic modes. */
p->modes = PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT;
p->modes |= PARPORT_MODE_TRISTATE;
if (!parport_ip32_fifo_supported(p)) {
pr_warn(PPIP32 "%s: error: FIFO disabled\n", p->name);
/* Disable hardware modes depending on a working FIFO. */
features &= ~PARPORT_IP32_ENABLE_SPP;
features &= ~PARPORT_IP32_ENABLE_ECP;
/* DMA is not needed if FIFO is not supported. */
features &= ~PARPORT_IP32_ENABLE_DMA;
}
/* Request IRQ */
if (features & PARPORT_IP32_ENABLE_IRQ) {
int irq = MACEISA_PARALLEL_IRQ;
if (request_irq(irq, parport_ip32_interrupt, 0, p->name, p)) {
pr_warn(PPIP32 "%s: error: IRQ disabled\n", p->name);
/* DMA cannot work without interrupts. */
features &= ~PARPORT_IP32_ENABLE_DMA;
} else {
pr_probe(p, "Interrupt support enabled\n");
p->irq = irq;
priv->dcr_writable |= DCR_IRQ;
}
}
/* Allocate DMA resources */
if (features & PARPORT_IP32_ENABLE_DMA) {
if (parport_ip32_dma_register())
pr_warn(PPIP32 "%s: error: DMA disabled\n", p->name);
else {
pr_probe(p, "DMA support enabled\n");
p->dma = 0; /* arbitrary value != PARPORT_DMA_NONE */
p->modes |= PARPORT_MODE_DMA;
}
}
if (features & PARPORT_IP32_ENABLE_SPP) {
/* Enable compatibility FIFO mode */
p->ops->compat_write_data = parport_ip32_compat_write_data;
p->modes |= PARPORT_MODE_COMPAT;
pr_probe(p, "Hardware support for SPP mode enabled\n");
}
if (features & PARPORT_IP32_ENABLE_EPP) {
/* Set up access functions to use EPP hardware. */
p->ops->epp_read_data = parport_ip32_epp_read_data;
p->ops->epp_write_data = parport_ip32_epp_write_data;
p->ops->epp_read_addr = parport_ip32_epp_read_addr;
p->ops->epp_write_addr = parport_ip32_epp_write_addr;
p->modes |= PARPORT_MODE_EPP;
pr_probe(p, "Hardware support for EPP mode enabled\n");
}
if (features & PARPORT_IP32_ENABLE_ECP) {
/* Enable ECP FIFO mode */
p->ops->ecp_write_data = parport_ip32_ecp_write_data;
/* FIXME - not implemented */
/* p->ops->ecp_read_data = parport_ip32_ecp_read_data; */
/* p->ops->ecp_write_addr = parport_ip32_ecp_write_addr; */
p->modes |= PARPORT_MODE_ECP;
pr_probe(p, "Hardware support for ECP mode enabled\n");
}
/* Initialize the port with sensible values */
parport_ip32_set_mode(p, ECR_MODE_PS2);
parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
parport_ip32_data_forward(p);
parport_ip32_disable_irq(p);
parport_ip32_write_data(p, 0x00);
parport_ip32_dump_state(p, "end init", 0);
/* Print out what we found */
pr_info("%s: SGI IP32 at 0x%lx (0x%lx)", p->name, p->base, p->base_hi);
if (p->irq != PARPORT_IRQ_NONE)
pr_cont(", irq %d", p->irq);
pr_cont(" [");
#define printmode(x) \
do { \
if (p->modes & PARPORT_MODE_##x) \
pr_cont("%s%s", f++ ? "," : "", #x); \
} while (0)
{
unsigned int f = 0;
printmode(PCSPP);
printmode(TRISTATE);
printmode(COMPAT);
printmode(EPP);
printmode(ECP);
printmode(DMA);
}
#undef printmode
pr_cont("]\n");
parport_announce_port(p);
return p;
fail:
if (p)
parport_put_port(p);
kfree(priv);
kfree(ops);
return ERR_PTR(err);
}
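/*
* For reference, the pr_info()/pr_cont() sequence above produces a
* boot line of the following shape (placeholders instead of real
* addresses, which come from the MACE register block; the mode list
* depends on the detected features):
*
*   parportN: SGI IP32 at 0x<base> (0x<base_hi>), irq <n> [PCSPP,TRISTATE,COMPAT,EPP,ECP,DMA]
*/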
/**
* parport_ip32_unregister_port - unregister a parallel port
* @p: pointer to the &struct parport
*
* Unregisters a parallel port and frees previously allocated resources
* (memory, IRQ, ...).
*/
static __exit void parport_ip32_unregister_port(struct parport *p)
{
struct parport_ip32_private * const priv = p->physport->private_data;
struct parport_operations *ops = p->ops;
parport_remove_port(p);
if (p->modes & PARPORT_MODE_DMA)
parport_ip32_dma_unregister();
if (p->irq != PARPORT_IRQ_NONE)
free_irq(p->irq, p);
parport_put_port(p);
kfree(priv);
kfree(ops);
}
/**
* parport_ip32_init - module initialization function
*/
static int __init parport_ip32_init(void)
{
pr_info(PPIP32 "SGI IP32 built-in parallel port driver v0.6\n");
this_port = parport_ip32_probe_port();
return PTR_ERR_OR_ZERO(this_port);
}
/**
* parport_ip32_exit - module termination function
*/
static void __exit parport_ip32_exit(void)
{
parport_ip32_unregister_port(this_port);
}
/*--- Module stuff -----------------------------------------------------*/
MODULE_AUTHOR("Arnaud Giersch <[email protected]>");
MODULE_DESCRIPTION("SGI IP32 built-in parallel port driver");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.6"); /* update in parport_ip32_init() too */
module_init(parport_ip32_init);
module_exit(parport_ip32_exit);
module_param(verbose_probing, bool, S_IRUGO);
MODULE_PARM_DESC(verbose_probing, "Log chit-chat during initialization");
module_param(features, uint, S_IRUGO);
MODULE_PARM_DESC(features,
"Bit mask of features to enable"
", bit 0: IRQ support"
", bit 1: DMA support"
", bit 2: hardware SPP mode"
", bit 3: hardware EPP mode"
", bit 4: hardware ECP mode");
| linux-master | drivers/parport/parport_ip32.c |
// SPDX-License-Identifier: GPL-2.0
/* IEEE-1284 operations for parport.
*
* This file is for generic IEEE 1284 operations. The idea is that
* they are used by the low-level drivers. If they have a special way
* of doing something, they can provide their own routines (and put
* the function pointers in port->ops); if not, they can just use these
* as a fallback.
*
* Note: Make no assumptions about hardware or architecture in this file!
*
* Author: Tim Waugh <[email protected]>
* Fixed AUTOFD polarity in ecp_forward_to_reverse(). Fred Barnes, 1999
* Software emulated EPP fixes, Fred Barnes, 04/2001.
*/
#include <linux/module.h>
#include <linux/parport.h>
#include <linux/delay.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#undef DEBUG /* undef me for production */
#ifdef CONFIG_LP_CONSOLE
#undef DEBUG /* Don't want a garbled console */
#endif
/***                                *
* One-way data transfer functions. *
*                                ***/
/* Compatibility mode. */
size_t parport_ieee1284_write_compat (struct parport *port,
const void *buffer, size_t len,
int flags)
{
int no_irq = 1;
ssize_t count = 0;
const unsigned char *addr = buffer;
unsigned char byte;
struct pardevice *dev = port->physport->cad;
unsigned char ctl = (PARPORT_CONTROL_SELECT
| PARPORT_CONTROL_INIT);
if (port->irq != PARPORT_IRQ_NONE) {
parport_enable_irq (port);
no_irq = 0;
}
port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
parport_write_control (port, ctl);
parport_data_forward (port);
while (count < len) {
unsigned long expire = jiffies + dev->timeout;
long wait = msecs_to_jiffies(10);
unsigned char mask = (PARPORT_STATUS_ERROR
| PARPORT_STATUS_BUSY);
unsigned char val = (PARPORT_STATUS_ERROR
| PARPORT_STATUS_BUSY);
/* Wait until the peripheral's ready */
do {
/* Is the peripheral ready yet? */
if (!parport_wait_peripheral (port, mask, val))
/* Skip the loop */
goto ready;
/* Is the peripheral upset? */
if ((parport_read_status (port) &
(PARPORT_STATUS_PAPEROUT |
PARPORT_STATUS_SELECT |
PARPORT_STATUS_ERROR))
!= (PARPORT_STATUS_SELECT |
PARPORT_STATUS_ERROR))
/* If nFault is asserted (i.e. no
* error) and PAPEROUT and SELECT are
* just red herrings, give the driver
* a chance to check it's happy with
* that before continuing. */
goto stop;
/* Have we run out of time? */
if (!time_before (jiffies, expire))
break;
/* Yield the port for a while. If this is the
first time around the loop, don't let go of
the port. This way, we find out whether our
interrupt handler gets called. */
if (count && no_irq) {
parport_release (dev);
schedule_timeout_interruptible(wait);
parport_claim_or_block (dev);
}
else
/* We must have the device claimed here */
parport_wait_event (port, wait);
/* Is there a signal pending? */
if (signal_pending (current))
break;
/* Wait longer next time. */
wait *= 2;
} while (time_before (jiffies, expire));
if (signal_pending (current))
break;
pr_debug("%s: Timed out\n", port->name);
break;
ready:
/* Write the character to the data lines. */
byte = *addr++;
parport_write_data (port, byte);
udelay (1);
/* Pulse strobe. */
parport_write_control (port, ctl | PARPORT_CONTROL_STROBE);
udelay (1); /* strobe */
parport_write_control (port, ctl);
udelay (1); /* hold */
/* Assume the peripheral received it. */
count++;
/* Let another process run if it needs to. */
if (time_before (jiffies, expire))
if (!parport_yield_blocking (dev)
&& need_resched())
schedule ();
}
stop:
port->physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
return count;
}
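/*
* Note (drawn from callers elsewhere in this tree): hardware-assisted
* drivers such as parport_ip32 install their own compat_write_data in
* port->ops and only fall back to this software implementation for
* non-blocking callers, i.e. when cad->timeout is at most
* PARPORT_INACTIVITY_O_NONBLOCK.
*/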
/* Nibble mode. */
size_t parport_ieee1284_read_nibble (struct parport *port,
void *buffer, size_t len,
int flags)
{
#ifndef CONFIG_PARPORT_1284
return 0;
#else
unsigned char *buf = buffer;
int i;
unsigned char byte = 0;
len *= 2; /* in nibbles */
for (i=0; i < len; i++) {
unsigned char nibble;
/* Does the error line indicate end of data? */
if (((i & 1) == 0) &&
(parport_read_status(port) & PARPORT_STATUS_ERROR)) {
goto end_of_data;
}
/* Event 7: Set nAutoFd low. */
parport_frob_control (port,
PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_AUTOFD);
/* Event 9: nAck goes low. */
port->ieee1284.phase = IEEE1284_PH_REV_DATA;
if (parport_wait_peripheral (port,
PARPORT_STATUS_ACK, 0)) {
/* Timeout -- no more data? */
pr_debug("%s: Nibble timeout at event 9 (%d bytes)\n",
port->name, i / 2);
parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
break;
}
/* Read a nibble. */
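/* Explanatory note (mapping per IEEE 1284 nibble mode): the
* peripheral presents data bits 0-2 on nFault, Select and
* PError, and bit 3 on Busy. The status register reads Busy
* inverted, so the shift below moves the lines into bits 0-4,
* the nAck bit (bit 3 after the shift) is dropped, and data
* bit 3 is rebuilt from the inverted Busy reading before
* masking down to a nibble. */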
nibble = parport_read_status (port) >> 3;
nibble &= ~8;
if ((nibble & 0x10) == 0)
nibble |= 8;
nibble &= 0xf;
/* Event 10: Set nAutoFd high. */
parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
/* Event 11: nAck goes high. */
if (parport_wait_peripheral (port,
PARPORT_STATUS_ACK,
PARPORT_STATUS_ACK)) {
/* Timeout -- no more data? */
pr_debug("%s: Nibble timeout at event 11\n",
port->name);
break;
}
if (i & 1) {
/* Second nibble */
byte |= nibble << 4;
*buf++ = byte;
} else
byte = nibble;
}
if (i == len) {
/* Read the last nibble without checking data avail. */
if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
end_of_data:
pr_debug("%s: No more nibble data (%d bytes)\n",
port->name, i / 2);
/* Go to reverse idle phase. */
parport_frob_control (port,
PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_AUTOFD);
port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE;
}
else
port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL;
}
return i/2;
#endif /* IEEE1284 support */
}
/* Byte mode. */
size_t parport_ieee1284_read_byte (struct parport *port,
void *buffer, size_t len,
int flags)
{
#ifndef CONFIG_PARPORT_1284
return 0;
#else
unsigned char *buf = buffer;
ssize_t count = 0;
for (count = 0; count < len; count++) {
unsigned char byte;
/* Data available? */
if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
goto end_of_data;
}
/* Event 14: Place data bus in high impedance state. */
parport_data_reverse (port);
/* Event 7: Set nAutoFd low. */
parport_frob_control (port,
PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_AUTOFD);
/* Event 9: nAck goes low. */
port->physport->ieee1284.phase = IEEE1284_PH_REV_DATA;
if (parport_wait_peripheral (port,
PARPORT_STATUS_ACK,
0)) {
/* Timeout -- no more data? */
parport_frob_control (port, PARPORT_CONTROL_AUTOFD,
0);
pr_debug("%s: Byte timeout at event 9\n", port->name);
break;
}
byte = parport_read_data (port);
*buf++ = byte;
/* Event 10: Set nAutoFd high */
parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
/* Event 11: nAck goes high. */
if (parport_wait_peripheral (port,
PARPORT_STATUS_ACK,
PARPORT_STATUS_ACK)) {
/* Timeout -- no more data? */
pr_debug("%s: Byte timeout at event 11\n", port->name);
break;
}
/* Event 16: Set nStrobe low. */
parport_frob_control (port,
PARPORT_CONTROL_STROBE,
PARPORT_CONTROL_STROBE);
udelay (5);
/* Event 17: Set nStrobe high. */
parport_frob_control (port, PARPORT_CONTROL_STROBE, 0);
}
if (count == len) {
/* Read the last byte without checking data avail. */
if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
end_of_data:
pr_debug("%s: No more byte data (%zd bytes)\n",
port->name, count);
/* Go to reverse idle phase. */
parport_frob_control (port,
PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_AUTOFD);
port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE;
}
else
port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL;
}
return count;
#endif /* IEEE1284 support */
}
/***              *
* ECP Functions. *
*              ***/
#ifdef CONFIG_PARPORT_1284
static inline
int ecp_forward_to_reverse (struct parport *port)
{
int retval;
/* Event 38: Set nAutoFd low */
parport_frob_control (port,
PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_AUTOFD);
parport_data_reverse (port);
udelay (5);
/* Event 39: Set nInit low to initiate bus reversal */
parport_frob_control (port,
PARPORT_CONTROL_INIT,
0);
/* Event 40: PError goes low */
retval = parport_wait_peripheral (port,
PARPORT_STATUS_PAPEROUT, 0);
if (!retval) {
pr_debug("%s: ECP direction: reverse\n", port->name);
port->ieee1284.phase = IEEE1284_PH_REV_IDLE;
} else {
pr_debug("%s: ECP direction: failed to reverse\n", port->name);
port->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
}
return retval;
}
static inline
int ecp_reverse_to_forward (struct parport *port)
{
int retval;
/* Event 47: Set nInit high */
parport_frob_control (port,
PARPORT_CONTROL_INIT
| PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_INIT
| PARPORT_CONTROL_AUTOFD);
/* Event 49: PError goes high */
retval = parport_wait_peripheral (port,
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (!retval) {
parport_data_forward (port);
pr_debug("%s: ECP direction: forward\n", port->name);
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
} else {
pr_debug("%s: ECP direction: failed to switch forward\n",
port->name);
port->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
}
return retval;
}
#endif /* IEEE1284 support */
/* ECP mode, forward channel, data. */
size_t parport_ieee1284_ecp_write_data (struct parport *port,
const void *buffer, size_t len,
int flags)
{
#ifndef CONFIG_PARPORT_1284
return 0;
#else
const unsigned char *buf = buffer;
size_t written;
int retry;
port = port->physport;
if (port->ieee1284.phase != IEEE1284_PH_FWD_IDLE)
if (ecp_reverse_to_forward (port))
return 0;
port->ieee1284.phase = IEEE1284_PH_FWD_DATA;
/* HostAck high (data, not command) */
parport_frob_control (port,
PARPORT_CONTROL_AUTOFD
| PARPORT_CONTROL_STROBE
| PARPORT_CONTROL_INIT,
PARPORT_CONTROL_INIT);
for (written = 0; written < len; written++, buf++) {
unsigned long expire = jiffies + port->cad->timeout;
unsigned char byte;
byte = *buf;
try_again:
parport_write_data (port, byte);
parport_frob_control (port, PARPORT_CONTROL_STROBE,
PARPORT_CONTROL_STROBE);
udelay (5);
for (retry = 0; retry < 100; retry++) {
if (!parport_wait_peripheral (port,
PARPORT_STATUS_BUSY, 0))
goto success;
if (signal_pending (current)) {
parport_frob_control (port,
PARPORT_CONTROL_STROBE,
0);
break;
}
}
/* Time for Host Transfer Recovery (page 41 of IEEE1284) */
pr_debug("%s: ECP transfer stalled!\n", port->name);
parport_frob_control (port, PARPORT_CONTROL_INIT,
PARPORT_CONTROL_INIT);
udelay (50);
if (parport_read_status (port) & PARPORT_STATUS_PAPEROUT) {
/* It's buggered. */
parport_frob_control (port, PARPORT_CONTROL_INIT, 0);
break;
}
parport_frob_control (port, PARPORT_CONTROL_INIT, 0);
udelay (50);
if (!(parport_read_status (port) & PARPORT_STATUS_PAPEROUT))
break;
pr_debug("%s: Host transfer recovered\n", port->name);
if (time_after_eq (jiffies, expire)) break;
goto try_again;
success:
parport_frob_control (port, PARPORT_CONTROL_STROBE, 0);
udelay (5);
if (parport_wait_peripheral (port,
PARPORT_STATUS_BUSY,
PARPORT_STATUS_BUSY))
/* Peripheral hasn't accepted the data. */
break;
}
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
return written;
#endif /* IEEE1284 support */
}
/* ECP mode, reverse channel, data. */
size_t parport_ieee1284_ecp_read_data (struct parport *port,
void *buffer, size_t len, int flags)
{
#ifndef CONFIG_PARPORT_1284
return 0;
#else
struct pardevice *dev = port->cad;
unsigned char *buf = buffer;
int rle_count = 0; /* shut gcc up */
unsigned char ctl;
int rle = 0;
ssize_t count = 0;
port = port->physport;
if (port->ieee1284.phase != IEEE1284_PH_REV_IDLE)
if (ecp_forward_to_reverse (port))
return 0;
port->ieee1284.phase = IEEE1284_PH_REV_DATA;
/* Set HostAck low to start accepting data. */
ctl = parport_read_control (port);
ctl &= ~(PARPORT_CONTROL_STROBE | PARPORT_CONTROL_INIT |
PARPORT_CONTROL_AUTOFD);
parport_write_control (port,
ctl | PARPORT_CONTROL_AUTOFD);
while (count < len) {
unsigned long expire = jiffies + dev->timeout;
unsigned char byte;
int command;
/* Event 43: Peripheral sets nAck low. It can take as
long as it wants. */
while (parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0)) {
/* The peripheral hasn't given us data in
35ms. If we have data to give back to the
caller, do it now. */
if (count)
goto out;
/* If we've used up all the time we were allowed,
give up altogether. */
if (!time_before (jiffies, expire))
goto out;
/* Yield the port for a while. */
if (dev->port->irq != PARPORT_IRQ_NONE) {
parport_release (dev);
schedule_timeout_interruptible(msecs_to_jiffies(40));
parport_claim_or_block (dev);
}
else
/* We must have the device claimed here. */
parport_wait_event (port, msecs_to_jiffies(40));
/* Is there a signal pending? */
if (signal_pending (current))
goto out;
}
/* Is this a command? */
if (rle)
/* The last byte was a run-length count, so
this can't be one as well. */
command = 0;
else
command = (parport_read_status (port) &
PARPORT_STATUS_BUSY) ? 1 : 0;
/* Read the data. */
byte = parport_read_data (port);
/* If this is a channel command, rather than an RLE
command or a normal data byte, don't accept it. */
if (command) {
if (byte & 0x80) {
pr_debug("%s: stopping short at channel command (%02x)\n",
port->name, byte);
goto out;
}
else if (port->ieee1284.mode != IEEE1284_MODE_ECPRLE)
pr_debug("%s: device illegally using RLE; accepting anyway\n",
port->name);
rle_count = byte + 1;
/* Are we allowed to read that many bytes? */
if (rle_count > (len - count)) {
pr_debug("%s: leaving %d RLE bytes for next time\n",
port->name, rle_count);
break;
}
rle = 1;
}
/* Event 44: Set HostAck high, acknowledging handshake. */
parport_write_control (port, ctl);
/* Event 45: The peripheral has 35ms to set nAck high. */
if (parport_wait_peripheral (port, PARPORT_STATUS_ACK,
PARPORT_STATUS_ACK)) {
/* It's gone wrong. Return what data we have
to the caller. */
pr_debug("ECP read timed out at 45\n");
if (command)
pr_warn("%s: command ignored (%02x)\n",
port->name, byte);
break;
}
/* Event 46: Set HostAck low and accept the data. */
parport_write_control (port,
ctl | PARPORT_CONTROL_AUTOFD);
/* If we just read a run-length count, fetch the data. */
if (command)
continue;
/* If this is the byte after a run-length count, decompress. */
if (rle) {
rle = 0;
memset (buf, byte, rle_count);
buf += rle_count;
count += rle_count;
pr_debug("%s: decompressed to %d bytes\n",
port->name, rle_count);
} else {
/* Normal data byte. */
*buf = byte;
buf++, count++;
}
}
out:
port->ieee1284.phase = IEEE1284_PH_REV_IDLE;
return count;
#endif /* IEEE1284 support */
}
/* ECP mode, forward channel, commands. */
size_t parport_ieee1284_ecp_write_addr (struct parport *port,
const void *buffer, size_t len,
int flags)
{
#ifndef CONFIG_PARPORT_1284
return 0;
#else
const unsigned char *buf = buffer;
size_t written;
int retry;
port = port->physport;
if (port->ieee1284.phase != IEEE1284_PH_FWD_IDLE)
if (ecp_reverse_to_forward (port))
return 0;
port->ieee1284.phase = IEEE1284_PH_FWD_DATA;
/* HostAck low (command, not data) */
parport_frob_control (port,
PARPORT_CONTROL_AUTOFD
| PARPORT_CONTROL_STROBE
| PARPORT_CONTROL_INIT,
PARPORT_CONTROL_AUTOFD
| PARPORT_CONTROL_INIT);
for (written = 0; written < len; written++, buf++) {
unsigned long expire = jiffies + port->cad->timeout;
unsigned char byte;
byte = *buf;
try_again:
parport_write_data (port, byte);
parport_frob_control (port, PARPORT_CONTROL_STROBE,
PARPORT_CONTROL_STROBE);
udelay (5);
for (retry = 0; retry < 100; retry++) {
if (!parport_wait_peripheral (port,
PARPORT_STATUS_BUSY, 0))
goto success;
if (signal_pending (current)) {
parport_frob_control (port,
PARPORT_CONTROL_STROBE,
0);
break;
}
}
/* Time for Host Transfer Recovery (page 41 of IEEE1284) */
pr_debug("%s: ECP transfer stalled!\n", port->name);
parport_frob_control (port, PARPORT_CONTROL_INIT,
PARPORT_CONTROL_INIT);
udelay (50);
if (parport_read_status (port) & PARPORT_STATUS_PAPEROUT) {
/* It's buggered. */
parport_frob_control (port, PARPORT_CONTROL_INIT, 0);
break;
}
parport_frob_control (port, PARPORT_CONTROL_INIT, 0);
udelay (50);
if (!(parport_read_status (port) & PARPORT_STATUS_PAPEROUT))
break;
pr_debug("%s: Host transfer recovered\n", port->name);
if (time_after_eq (jiffies, expire)) break;
goto try_again;
success:
parport_frob_control (port, PARPORT_CONTROL_STROBE, 0);
udelay (5);
if (parport_wait_peripheral (port,
PARPORT_STATUS_BUSY,
PARPORT_STATUS_BUSY))
/* Peripheral hasn't accepted the data. */
break;
}
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
return written;
#endif /* IEEE1284 support */
}
/***              *
* EPP functions. *
*              ***/
/* EPP mode, forward channel, data. */
size_t parport_ieee1284_epp_write_data (struct parport *port,
const void *buffer, size_t len,
int flags)
{
unsigned char *bp = (unsigned char *) buffer;
size_t ret = 0;
/* set EPP idle state (just to make sure) with strobe low */
parport_frob_control (port,
PARPORT_CONTROL_STROBE |
PARPORT_CONTROL_AUTOFD |
PARPORT_CONTROL_SELECT |
PARPORT_CONTROL_INIT,
PARPORT_CONTROL_STROBE |
PARPORT_CONTROL_INIT);
port->ops->data_forward (port);
for (; len > 0; len--, bp++) {
/* Event 62: Write data and set autofd low */
parport_write_data (port, *bp);
parport_frob_control (port, PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_AUTOFD);
/* Event 58: wait for busy (nWait) to go high */
if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY, 0, 10))
break;
/* Event 63: set nAutoFd (nDStrb) high */
parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
/* Event 60: wait for busy (nWait) to go low */
if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY,
PARPORT_STATUS_BUSY, 5))
break;
ret++;
}
/* Event 61: set strobe (nWrite) high */
parport_frob_control (port, PARPORT_CONTROL_STROBE, 0);
return ret;
}
/* EPP mode, reverse channel, data. */
size_t parport_ieee1284_epp_read_data (struct parport *port,
void *buffer, size_t len,
int flags)
{
unsigned char *bp = (unsigned char *) buffer;
unsigned ret = 0;
/* set EPP idle state (just to make sure) with strobe high */
parport_frob_control (port,
PARPORT_CONTROL_STROBE |
PARPORT_CONTROL_AUTOFD |
PARPORT_CONTROL_SELECT |
PARPORT_CONTROL_INIT,
PARPORT_CONTROL_INIT);
port->ops->data_reverse (port);
for (; len > 0; len--, bp++) {
/* Event 67: set nAutoFd (nDStrb) low */
parport_frob_control (port,
PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_AUTOFD);
/* Event 58: wait for Busy to go high */
if (parport_wait_peripheral (port, PARPORT_STATUS_BUSY, 0)) {
break;
}
*bp = parport_read_data (port);
/* Event 63: set nAutoFd (nDStrb) high */
parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
/* Event 60: wait for Busy to go low */
if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY,
PARPORT_STATUS_BUSY, 5)) {
break;
}
ret++;
}
port->ops->data_forward (port);
return ret;
}
/* EPP mode, forward channel, addresses. */
size_t parport_ieee1284_epp_write_addr (struct parport *port,
const void *buffer, size_t len,
int flags)
{
unsigned char *bp = (unsigned char *) buffer;
size_t ret = 0;
/* set EPP idle state (just to make sure) with strobe low */
parport_frob_control (port,
PARPORT_CONTROL_STROBE |
PARPORT_CONTROL_AUTOFD |
PARPORT_CONTROL_SELECT |
PARPORT_CONTROL_INIT,
PARPORT_CONTROL_STROBE |
PARPORT_CONTROL_INIT);
port->ops->data_forward (port);
for (; len > 0; len--, bp++) {
/* Event 56: Write data and set nAStrb low. */
parport_write_data (port, *bp);
parport_frob_control (port, PARPORT_CONTROL_SELECT,
PARPORT_CONTROL_SELECT);
/* Event 58: wait for busy (nWait) to go high */
if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY, 0, 10))
break;
/* Event 59: set nAStrb high */
parport_frob_control (port, PARPORT_CONTROL_SELECT, 0);
/* Event 60: wait for busy (nWait) to go low */
if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY,
PARPORT_STATUS_BUSY, 5))
break;
ret++;
}
/* Event 61: set strobe (nWrite) high */
parport_frob_control (port, PARPORT_CONTROL_STROBE, 0);
return ret;
}
/* EPP mode, reverse channel, addresses. */
size_t parport_ieee1284_epp_read_addr (struct parport *port,
void *buffer, size_t len,
int flags)
{
unsigned char *bp = (unsigned char *) buffer;
unsigned ret = 0;
/* Set EPP idle state (just to make sure) with strobe high */
parport_frob_control (port,
PARPORT_CONTROL_STROBE |
PARPORT_CONTROL_AUTOFD |
PARPORT_CONTROL_SELECT |
PARPORT_CONTROL_INIT,
PARPORT_CONTROL_INIT);
port->ops->data_reverse (port);
for (; len > 0; len--, bp++) {
/* Event 64: set nSelectIn (nAStrb) low */
parport_frob_control (port, PARPORT_CONTROL_SELECT,
PARPORT_CONTROL_SELECT);
/* Event 58: wait for Busy to go high */
if (parport_wait_peripheral (port, PARPORT_STATUS_BUSY, 0)) {
break;
}
*bp = parport_read_data (port);
/* Event 59: set nSelectIn (nAStrb) high */
parport_frob_control (port, PARPORT_CONTROL_SELECT,
0);
/* Event 60: wait for Busy to go low */
if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY,
PARPORT_STATUS_BUSY, 5))
break;
ret++;
}
port->ops->data_forward (port);
return ret;
}
EXPORT_SYMBOL(parport_ieee1284_ecp_write_data);
EXPORT_SYMBOL(parport_ieee1284_ecp_read_data);
EXPORT_SYMBOL(parport_ieee1284_ecp_write_addr);
EXPORT_SYMBOL(parport_ieee1284_write_compat);
EXPORT_SYMBOL(parport_ieee1284_read_nibble);
EXPORT_SYMBOL(parport_ieee1284_read_byte);
EXPORT_SYMBOL(parport_ieee1284_epp_write_data);
EXPORT_SYMBOL(parport_ieee1284_epp_read_data);
EXPORT_SYMBOL(parport_ieee1284_epp_write_addr);
EXPORT_SYMBOL(parport_ieee1284_epp_read_addr);
| linux-master | drivers/parport/ieee1284_ops.c |
/*
* IEEE 1284.3 Parallel port daisy chain and multiplexor code
*
* Copyright (C) 1999, 2000 Tim Waugh <[email protected]>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* ??-12-1998: Initial implementation.
* 31-01-1999: Make port-cloning transparent.
* 13-02-1999: Move DeviceID technique from parport_probe.
* 13-03-1999: Get DeviceID from non-IEEE 1284.3 devices too.
* 22-02-2000: Count devices that are actually detected.
*
* Any part of this program may be used in documents licensed under
* the GNU Free Documentation License, Version 1.1 or any later version
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/parport.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <asm/current.h>
#include <linux/uaccess.h>
#undef DEBUG
static struct daisydev {
struct daisydev *next;
struct parport *port;
int daisy;
int devnum;
} *topology = NULL;
static DEFINE_SPINLOCK(topology_lock);
static int numdevs;
static bool daisy_init_done;
/* Forward-declaration of lower-level functions. */
static int mux_present(struct parport *port);
static int num_mux_ports(struct parport *port);
static int select_port(struct parport *port);
static int assign_addrs(struct parport *port);
/* Add a device to the discovered topology. */
static void add_dev(int devnum, struct parport *port, int daisy)
{
struct daisydev *newdev, **p;
newdev = kmalloc(sizeof(struct daisydev), GFP_KERNEL);
if (newdev) {
newdev->port = port;
newdev->daisy = daisy;
newdev->devnum = devnum;
spin_lock(&topology_lock);
for (p = &topology; *p && (*p)->devnum<devnum; p = &(*p)->next)
;
newdev->next = *p;
*p = newdev;
spin_unlock(&topology_lock);
}
}
/* Clone a parport (actually, make an alias). */
static struct parport *clone_parport(struct parport *real, int muxport)
{
struct parport *extra = parport_register_port(real->base,
real->irq,
real->dma,
real->ops);
if (extra) {
extra->portnum = real->portnum;
extra->physport = real;
extra->muxport = muxport;
real->slaves[muxport-1] = extra;
}
return extra;
}
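/*
* Illustrative result (example numbers, not detected values): for a
* 4-way multiplexor found on a physical port, parport_daisy_init()
* keeps the real port as mux port 0 and creates three aliases with
* clone_parport(), stored in real->slaves[0..2] with muxport set to
* 1, 2 and 3; each alias shares base, irq, dma and ops with the
* physical port.
*/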
static int daisy_drv_probe(struct pardevice *par_dev)
{
struct device_driver *drv = par_dev->dev.driver;
if (strcmp(drv->name, "daisy_drv"))
return -ENODEV;
if (strcmp(par_dev->name, daisy_dev_name))
return -ENODEV;
return 0;
}
static struct parport_driver daisy_driver = {
.name = "daisy_drv",
.probe = daisy_drv_probe,
.devmodel = true,
};
/* Discover the IEEE1284.3 topology on a port -- muxes and daisy chains.
* Return value is number of devices actually detected. */
int parport_daisy_init(struct parport *port)
{
int detected = 0;
char *deviceid;
static const char *th[] = { /*0*/"th", "st", "nd", "rd", "th" };
int num_ports;
int i;
int last_try = 0;
if (!daisy_init_done) {
/*
* flag should be marked true first as
* parport_register_driver() might try to load the low
* level driver which will lead to announcing new ports
* and which will again come back here at
* parport_daisy_init()
*/
daisy_init_done = true;
i = parport_register_driver(&daisy_driver);
if (i) {
pr_err("daisy registration failed\n");
daisy_init_done = false;
return i;
}
}
again:
/* Because this is called before any other devices exist,
* we don't have to claim exclusive access. */
/* If mux present on normal port, need to create new
* parports for each extra port. */
if (port->muxport < 0 && mux_present(port) &&
/* don't be fooled: a mux must have 2 or 4 ports. */
((num_ports = num_mux_ports(port)) == 2 || num_ports == 4)) {
/* Leave original as port zero. */
port->muxport = 0;
pr_info("%s: 1st (default) port of %d-way multiplexor\n",
port->name, num_ports);
for (i = 1; i < num_ports; i++) {
/* Clone the port. */
struct parport *extra = clone_parport(port, i);
if (!extra) {
if (signal_pending(current))
break;
schedule();
continue;
}
pr_info("%s: %d%s port of %d-way multiplexor on %s\n",
extra->name, i + 1, th[i + 1], num_ports,
port->name);
/* Analyse that port too. We won't recurse
forever because of the 'port->muxport < 0'
test above. */
parport_daisy_init(extra);
}
}
if (port->muxport >= 0)
select_port(port);
parport_daisy_deselect_all(port);
detected += assign_addrs(port);
/* Count the potential legacy device at the end. */
add_dev(numdevs++, port, -1);
/* Find out the legacy device's IEEE 1284 device ID. */
deviceid = kmalloc(1024, GFP_KERNEL);
if (deviceid) {
if (parport_device_id(numdevs - 1, deviceid, 1024) > 2)
detected++;
kfree(deviceid);
}
if (!detected && !last_try) {
/* No devices were detected. Perhaps they are in some
funny state; let's try to reset them and see if
they wake up. */
parport_daisy_fini(port);
parport_write_control(port, PARPORT_CONTROL_SELECT);
udelay(50);
parport_write_control(port,
PARPORT_CONTROL_SELECT |
PARPORT_CONTROL_INIT);
udelay(50);
last_try = 1;
goto again;
}
return detected;
}
/* Forget about devices on a physical port. */
void parport_daisy_fini(struct parport *port)
{
struct daisydev **p;
spin_lock(&topology_lock);
p = &topology;
while (*p) {
struct daisydev *dev = *p;
if (dev->port != port) {
p = &dev->next;
continue;
}
*p = dev->next;
kfree(dev);
}
/* Gaps in the numbering could be handled better. How should
someone enumerate through all IEEE1284.3 devices in the
topology? */
if (!topology) numdevs = 0;
spin_unlock(&topology_lock);
return;
}
/**
* parport_open - find a device by canonical device number
* @devnum: canonical device number
* @name: name to associate with the device
*
* This function is similar to parport_register_device(), except
* that it locates a device by its number rather than by the port
* it is attached to.
*
* All parameters except for @devnum are the same as for
* parport_register_device(). The return value is the same as
* for parport_register_device().
**/
struct pardevice *parport_open(int devnum, const char *name)
{
struct daisydev *p = topology;
struct pardev_cb par_cb;
struct parport *port;
struct pardevice *dev;
int daisy;
memset(&par_cb, 0, sizeof(par_cb));
spin_lock(&topology_lock);
while (p && p->devnum != devnum)
p = p->next;
if (!p) {
spin_unlock(&topology_lock);
return NULL;
}
daisy = p->daisy;
port = parport_get_port(p->port);
spin_unlock(&topology_lock);
dev = parport_register_dev_model(port, name, &par_cb, devnum);
parport_put_port(port);
if (!dev)
return NULL;
dev->daisy = daisy;
/* Check that there really is a device to select. */
if (daisy >= 0) {
int selected;
parport_claim_or_block(dev);
selected = port->daisy;
parport_release(dev);
if (selected != daisy) {
/* No corresponding device. */
parport_unregister_device(dev);
return NULL;
}
}
return dev;
}
/**
* parport_close - close a device opened with parport_open()
* @dev: device to close
*
* This is to parport_open() as parport_unregister_device() is to
* parport_register_device().
**/
void parport_close(struct pardevice *dev)
{
parport_unregister_device(dev);
}
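/*
* Usage sketch (hypothetical client code, not part of this file): a
* driver that knows a canonical device number can reach the device
* without caring which port or daisy-chain address it sits on:
*
*	struct pardevice *dev = parport_open(devnum, "mydrv");
*
*	if (dev) {
*		parport_claim_or_block(dev);
*		parport_write_data(dev->port, 0x42);
*		parport_release(dev);
*		parport_close(dev);
*	}
*/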
/* Send a daisy-chain-style CPP command packet. */
static int cpp_daisy(struct parport *port, int cmd)
{
unsigned char s;
parport_data_forward(port);
parport_write_data(port, 0xaa); udelay(2);
parport_write_data(port, 0x55); udelay(2);
parport_write_data(port, 0x00); udelay(2);
parport_write_data(port, 0xff); udelay(2);
s = parport_read_status(port) & (PARPORT_STATUS_BUSY
| PARPORT_STATUS_PAPEROUT
| PARPORT_STATUS_SELECT
| PARPORT_STATUS_ERROR);
if (s != (PARPORT_STATUS_BUSY
| PARPORT_STATUS_PAPEROUT
| PARPORT_STATUS_SELECT
| PARPORT_STATUS_ERROR)) {
pr_debug("%s: cpp_daisy: aa5500ff(%02x)\n", port->name, s);
return -ENXIO;
}
parport_write_data(port, 0x87); udelay(2);
s = parport_read_status(port) & (PARPORT_STATUS_BUSY
| PARPORT_STATUS_PAPEROUT
| PARPORT_STATUS_SELECT
| PARPORT_STATUS_ERROR);
if (s != (PARPORT_STATUS_SELECT | PARPORT_STATUS_ERROR)) {
pr_debug("%s: cpp_daisy: aa5500ff87(%02x)\n", port->name, s);
return -ENXIO;
}
parport_write_data(port, 0x78); udelay(2);
parport_write_data(port, cmd); udelay(2);
parport_frob_control(port,
PARPORT_CONTROL_STROBE,
PARPORT_CONTROL_STROBE);
udelay(1);
s = parport_read_status(port);
parport_frob_control(port, PARPORT_CONTROL_STROBE, 0);
udelay(1);
parport_write_data(port, 0xff); udelay(2);
return s;
}
/* Send a mux-style CPP command packet. */
static int cpp_mux(struct parport *port, int cmd)
{
unsigned char s;
int rc;
parport_data_forward(port);
parport_write_data(port, 0xaa); udelay(2);
parport_write_data(port, 0x55); udelay(2);
parport_write_data(port, 0xf0); udelay(2);
parport_write_data(port, 0x0f); udelay(2);
parport_write_data(port, 0x52); udelay(2);
parport_write_data(port, 0xad); udelay(2);
parport_write_data(port, cmd); udelay(2);
s = parport_read_status(port);
if (!(s & PARPORT_STATUS_ACK)) {
pr_debug("%s: cpp_mux: aa55f00f52ad%02x(%02x)\n",
port->name, cmd, s);
return -EIO;
}
rc = (((s & PARPORT_STATUS_SELECT ? 1 : 0) << 0) |
((s & PARPORT_STATUS_PAPEROUT ? 1 : 0) << 1) |
((s & PARPORT_STATUS_BUSY ? 0 : 1) << 2) |
((s & PARPORT_STATUS_ERROR ? 0 : 1) << 3));
return rc;
}
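/*
* For reference: the return value above packs the status lines into a
* small integer - bit 0 is SELECT, bit 1 is PAPEROUT, bit 2 is BUSY
* inverted and bit 3 is ERROR inverted. So mux_present() below, which
* expects 3, is asking for SELECT and PAPEROUT to be set while BUSY and
* ERROR are also set (and therefore contribute 0 to the result).
*/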
void parport_daisy_deselect_all(struct parport *port)
{
cpp_daisy(port, 0x30);
}
int parport_daisy_select(struct parport *port, int daisy, int mode)
{
switch (mode)
{
// For these modes we should switch to EPP mode:
case IEEE1284_MODE_EPP:
case IEEE1284_MODE_EPPSL:
case IEEE1284_MODE_EPPSWE:
return !(cpp_daisy(port, 0x20 + daisy) &
PARPORT_STATUS_ERROR);
// For these modes we should switch to ECP mode:
case IEEE1284_MODE_ECP:
case IEEE1284_MODE_ECPRLE:
case IEEE1284_MODE_ECPSWE:
return !(cpp_daisy(port, 0xd0 + daisy) &
PARPORT_STATUS_ERROR);
// The daisy-chain specification says nothing about BECP.
// Maybe it's wise to use ECP?
case IEEE1284_MODE_BECP:
// Others use compat mode
case IEEE1284_MODE_NIBBLE:
case IEEE1284_MODE_BYTE:
case IEEE1284_MODE_COMPAT:
default:
return !(cpp_daisy(port, 0xe0 + daisy) &
PARPORT_STATUS_ERROR);
}
}
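/*
* Quick reference for the CPP select commands used above: 0x20 + addr
* selects a device for EPP transfers, 0xd0 + addr for ECP transfers and
* 0xe0 + addr for compatibility/nibble/byte transfers; 0x30 (see
* parport_daisy_deselect_all()) deselects every device on the chain.
*/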
static int mux_present(struct parport *port)
{
return cpp_mux(port, 0x51) == 3;
}
static int num_mux_ports(struct parport *port)
{
return cpp_mux(port, 0x58);
}
static int select_port(struct parport *port)
{
int muxport = port->muxport;
return cpp_mux(port, 0x60 + muxport) == muxport;
}
static int assign_addrs(struct parport *port)
{
unsigned char s;
unsigned char daisy;
int thisdev = numdevs;
int detected;
char *deviceid;
parport_data_forward(port);
parport_write_data(port, 0xaa); udelay(2);
parport_write_data(port, 0x55); udelay(2);
parport_write_data(port, 0x00); udelay(2);
parport_write_data(port, 0xff); udelay(2);
s = parport_read_status(port) & (PARPORT_STATUS_BUSY
| PARPORT_STATUS_PAPEROUT
| PARPORT_STATUS_SELECT
| PARPORT_STATUS_ERROR);
if (s != (PARPORT_STATUS_BUSY
| PARPORT_STATUS_PAPEROUT
| PARPORT_STATUS_SELECT
| PARPORT_STATUS_ERROR)) {
pr_debug("%s: assign_addrs: aa5500ff(%02x)\n", port->name, s);
return 0;
}
parport_write_data(port, 0x87); udelay(2);
s = parport_read_status(port) & (PARPORT_STATUS_BUSY
| PARPORT_STATUS_PAPEROUT
| PARPORT_STATUS_SELECT
| PARPORT_STATUS_ERROR);
if (s != (PARPORT_STATUS_SELECT | PARPORT_STATUS_ERROR)) {
pr_debug("%s: assign_addrs: aa5500ff87(%02x)\n", port->name, s);
return 0;
}
parport_write_data(port, 0x78); udelay(2);
s = parport_read_status(port);
for (daisy = 0;
(s & (PARPORT_STATUS_PAPEROUT|PARPORT_STATUS_SELECT))
== (PARPORT_STATUS_PAPEROUT|PARPORT_STATUS_SELECT)
&& daisy < 4;
++daisy) {
parport_write_data(port, daisy);
udelay(2);
parport_frob_control(port,
PARPORT_CONTROL_STROBE,
PARPORT_CONTROL_STROBE);
udelay(1);
parport_frob_control(port, PARPORT_CONTROL_STROBE, 0);
udelay(1);
add_dev(numdevs++, port, daisy);
/* See if this device thought it was the last in the
* chain. */
if (!(s & PARPORT_STATUS_BUSY))
break;
/* We are now seeing the pass-through status: either
last_dev from the next device, or, if last_dev is not
implemented, the status lines of some non-daisy-chain
device. */
s = parport_read_status(port);
}
parport_write_data(port, 0xff); udelay(2);
detected = numdevs - thisdev;
pr_debug("%s: Found %d daisy-chained devices\n", port->name, detected);
/* Ask the new devices to introduce themselves. */
deviceid = kmalloc(1024, GFP_KERNEL);
if (!deviceid) return 0;
for (daisy = 0; thisdev < numdevs; thisdev++, daisy++)
parport_device_id(thisdev, deviceid, 1024);
kfree(deviceid);
return detected;
}
| linux-master | drivers/parport/daisy.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Low-level parallel port routines for the Atari builtin port
*
* Author: Andreas Schwab <[email protected]>
*
* Based on parport_amiga.c.
*
* The built-in Atari parallel port provides one port at a fixed address
* with 8 output data lines (D0 - D7), 1 output control line (STROBE)
* and 1 input status line (BUSY) able to cause an interrupt.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/parport.h>
#include <linux/interrupt.h>
#include <asm/setup.h>
#include <asm/atarihw.h>
#include <asm/irq.h>
#include <asm/atariints.h>
static struct parport *this_port;
static unsigned char
parport_atari_read_data(struct parport *p)
{
unsigned long flags;
unsigned char data;
local_irq_save(flags);
sound_ym.rd_data_reg_sel = 15;
data = sound_ym.rd_data_reg_sel;
local_irq_restore(flags);
return data;
}
static void
parport_atari_write_data(struct parport *p, unsigned char data)
{
unsigned long flags;
local_irq_save(flags);
sound_ym.rd_data_reg_sel = 15;
sound_ym.wd_data = data;
local_irq_restore(flags);
}
static unsigned char
parport_atari_read_control(struct parport *p)
{
unsigned long flags;
unsigned char control = 0;
local_irq_save(flags);
sound_ym.rd_data_reg_sel = 14;
if (!(sound_ym.rd_data_reg_sel & (1 << 5)))
control = PARPORT_CONTROL_STROBE;
local_irq_restore(flags);
return control;
}
static void
parport_atari_write_control(struct parport *p, unsigned char control)
{
unsigned long flags;
local_irq_save(flags);
sound_ym.rd_data_reg_sel = 14;
if (control & PARPORT_CONTROL_STROBE)
sound_ym.wd_data = sound_ym.rd_data_reg_sel & ~(1 << 5);
else
sound_ym.wd_data = sound_ym.rd_data_reg_sel | (1 << 5);
local_irq_restore(flags);
}
static unsigned char
parport_atari_frob_control(struct parport *p, unsigned char mask,
unsigned char val)
{
unsigned char old = parport_atari_read_control(p);
parport_atari_write_control(p, (old & ~mask) ^ val);
return old;
}
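/*
* Only BUSY is a real input on this port: PARPORT_STATUS_BUSY is
* reported as set whenever bit 0 of the MFP parallel data register
* reads 0. SELECT and ERROR have no corresponding input lines and are
* always reported as set.
*/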
static unsigned char
parport_atari_read_status(struct parport *p)
{
return ((st_mfp.par_dt_reg & 1 ? 0 : PARPORT_STATUS_BUSY) |
PARPORT_STATUS_SELECT | PARPORT_STATUS_ERROR);
}
static void
parport_atari_init_state(struct pardevice *d, struct parport_state *s)
{
}
static void
parport_atari_save_state(struct parport *p, struct parport_state *s)
{
}
static void
parport_atari_restore_state(struct parport *p, struct parport_state *s)
{
}
static void
parport_atari_enable_irq(struct parport *p)
{
enable_irq(IRQ_MFP_BUSY);
}
static void
parport_atari_disable_irq(struct parport *p)
{
disable_irq(IRQ_MFP_BUSY);
}
static void
parport_atari_data_forward(struct parport *p)
{
unsigned long flags;
local_irq_save(flags);
/* Soundchip port B as output. */
sound_ym.rd_data_reg_sel = 7;
sound_ym.wd_data = sound_ym.rd_data_reg_sel | 0x40;
local_irq_restore(flags);
}
static void
parport_atari_data_reverse(struct parport *p)
{
}
static struct parport_operations parport_atari_ops = {
.write_data = parport_atari_write_data,
.read_data = parport_atari_read_data,
.write_control = parport_atari_write_control,
.read_control = parport_atari_read_control,
.frob_control = parport_atari_frob_control,
.read_status = parport_atari_read_status,
.enable_irq = parport_atari_enable_irq,
.disable_irq = parport_atari_disable_irq,
.data_forward = parport_atari_data_forward,
.data_reverse = parport_atari_data_reverse,
.init_state = parport_atari_init_state,
.save_state = parport_atari_save_state,
.restore_state = parport_atari_restore_state,
.epp_write_data = parport_ieee1284_epp_write_data,
.epp_read_data = parport_ieee1284_epp_read_data,
.epp_write_addr = parport_ieee1284_epp_write_addr,
.epp_read_addr = parport_ieee1284_epp_read_addr,
.ecp_write_data = parport_ieee1284_ecp_write_data,
.ecp_read_data = parport_ieee1284_ecp_read_data,
.ecp_write_addr = parport_ieee1284_ecp_write_addr,
.compat_write_data = parport_ieee1284_write_compat,
.nibble_read_data = parport_ieee1284_read_nibble,
.byte_read_data = parport_ieee1284_read_byte,
.owner = THIS_MODULE,
};
static int __init parport_atari_init(void)
{
struct parport *p;
unsigned long flags;
if (MACH_IS_ATARI) {
local_irq_save(flags);
/* Soundchip port A/B as output. */
sound_ym.rd_data_reg_sel = 7;
sound_ym.wd_data = (sound_ym.rd_data_reg_sel & 0x3f) | 0xc0;
/* STROBE high. */
sound_ym.rd_data_reg_sel = 14;
sound_ym.wd_data = sound_ym.rd_data_reg_sel | (1 << 5);
local_irq_restore(flags);
/* MFP port I0 as input. */
st_mfp.data_dir &= ~1;
/* MFP port I0 interrupt on high->low edge. */
st_mfp.active_edge &= ~1;
p = parport_register_port((unsigned long)&sound_ym.wd_data,
IRQ_MFP_BUSY, PARPORT_DMA_NONE,
&parport_atari_ops);
if (!p)
return -ENODEV;
if (request_irq(IRQ_MFP_BUSY, parport_irq_handler, 0, p->name,
p)) {
parport_put_port (p);
return -ENODEV;
}
this_port = p;
pr_info("%s: Atari built-in port using irq\n", p->name);
parport_announce_port (p);
return 0;
}
return -ENODEV;
}
static void __exit parport_atari_exit(void)
{
parport_remove_port(this_port);
if (this_port->irq != PARPORT_IRQ_NONE)
free_irq(IRQ_MFP_BUSY, this_port);
parport_put_port(this_port);
}
MODULE_AUTHOR("Andreas Schwab");
MODULE_DESCRIPTION("Parport Driver for Atari builtin Port");
MODULE_LICENSE("GPL");
module_init(parport_atari_init)
module_exit(parport_atari_exit)
| linux-master | drivers/parport/parport_atari.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Low-level parallel-port routines for 8255-based PC-style hardware.
*
* Authors: Phil Blundell <[email protected]>
* Tim Waugh <[email protected]>
* Jose Renau <[email protected]>
* David Campbell
* Andrea Arcangeli
*
* based on work by Grant Guenther <[email protected]> and Phil Blundell.
*
* Cleaned up include files - Russell King <[email protected]>
* DMA support - Bert De Jonghe <[email protected]>
* Many ECP bugs fixed. Fred Barnes & Jamie Lokier, 1999
* More PCI support now conditional on CONFIG_PCI, 03/2001, Paul G.
* Various hacks, Fred Barnes, 04/2001
* Updated probing logic - Adam Belay <[email protected]>
*/
/* This driver should work with any hardware that is broadly compatible
* with that in the IBM PC. This applies to the majority of integrated
* I/O chipsets that are commonly available. The expected register
* layout is:
*
* base+0 data
* base+1 status
* base+2 control
*
* In addition, there are some optional registers:
*
* base+3 EPP address
* base+4 EPP data
* base+0x400 ECP config A
* base+0x401 ECP config B
* base+0x402 ECP control
*
* All registers are 8 bits wide and read/write. If your hardware differs
* only in register addresses (eg because your registers are on 32-bit
* word boundaries) then you can alter the constants in parport_pc.h to
* accommodate this.
*
* Note that the ECP registers may not start at offset 0x400 for PCI cards,
* but rather will start at port->base_hi.
*/
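/* For orientation, the register macros used throughout this file
* (DATA, STATUS, CONTROL, EPPADDR, EPPDATA, CONFIGA, CONFIGB, FIFO,
* ECONTROL) are expected to expand roughly to the offsets sketched
* below; the authoritative definitions live in include/linux/parport_pc.h.
*
* DATA(p) -> p->base + 0
* STATUS(p) -> p->base + 1
* CONTROL(p) -> p->base + 2
* EPPADDR(p) -> p->base + 3
* EPPDATA(p) -> p->base + 4
* CONFIGA(p) -> p->base_hi + 0 (FIFO(p) shares this offset)
* CONFIGB(p) -> p->base_hi + 1
* ECONTROL(p) -> p->base_hi + 2
*/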
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/pnp.h>
#include <linux/platform_device.h>
#include <linux/sysctl.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/dma.h>
#include <linux/parport.h>
#include <linux/parport_pc.h>
#include <linux/via.h>
#include <asm/parport.h>
#define PARPORT_PC_MAX_PORTS PARPORT_MAX
#ifdef CONFIG_ISA_DMA_API
#define HAS_DMA
#endif
/* ECR modes */
#define ECR_SPP 00
#define ECR_PS2 01
#define ECR_PPF 02
#define ECR_ECP 03
#define ECR_EPP 04
#define ECR_VND 05
#define ECR_TST 06
#define ECR_CNF 07
#define ECR_MODE_MASK 0xe0
#define ECR_WRITE(p, v) frob_econtrol((p), 0xff, (v))
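/*
* ECR bit layout as used throughout this file: bits 7:5 select the mode
* (the ECR_* values above), bit 4 is nErrIntrEn, bit 3 is dmaEn, bit 2
* is serviceIntr, and bits 1:0 are the read-only FIFO full/empty flags.
*/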
#undef DEBUG
#define NR_SUPERIOS 3
static struct superio_struct { /* For Super-IO chips autodetection */
int io;
int irq;
int dma;
} superios[NR_SUPERIOS] = { {0,},};
static int user_specified;
#if defined(CONFIG_PARPORT_PC_SUPERIO) || \
(defined(CONFIG_PARPORT_1284) && defined(CONFIG_PARPORT_PC_FIFO))
static int verbose_probing;
#endif
static int pci_registered_parport;
static int pnp_registered_parport;
/* frob_control, but for ECR */
static void frob_econtrol(struct parport *pb, unsigned char m,
unsigned char v)
{
const struct parport_pc_private *priv = pb->physport->private_data;
unsigned char ecr_writable = priv->ecr_writable;
unsigned char ectr = 0;
unsigned char new;
if (m != 0xff)
ectr = inb(ECONTROL(pb));
new = (ectr & ~m) ^ v;
if (ecr_writable)
/* All known users of the ECR mask require bit 0 to be set. */
new = (new & ecr_writable) | 1;
pr_debug("frob_econtrol(%02x,%02x): %02x -> %02x\n", m, v, ectr, new);
outb(new, ECONTROL(pb));
}
static inline void frob_set_mode(struct parport *p, int mode)
{
frob_econtrol(p, ECR_MODE_MASK, mode << 5);
}
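/*
* Worked example: frob_set_mode(p, ECR_EPP) shifts mode 4 into bits 7:5
* of the ECR, i.e. it rewrites the mode field to 100b (0x80) while
* leaving the interrupt, DMA and FIFO status bits below it untouched.
*/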
#ifdef CONFIG_PARPORT_PC_FIFO
/* Safely change the mode bits in the ECR
Returns:
0 : Success
-EBUSY: Could not drain FIFO in some finite amount of time,
mode not changed!
*/
static int change_mode(struct parport *p, int m)
{
const struct parport_pc_private *priv = p->physport->private_data;
unsigned char oecr;
int mode;
pr_debug("parport change_mode ECP-ISA to mode 0x%02x\n", m);
if (!priv->ecr) {
printk(KERN_DEBUG "change_mode: but there's no ECR!\n");
return 0;
}
/* Bits <7:5> contain the mode. */
oecr = inb(ECONTROL(p));
mode = (oecr >> 5) & 0x7;
if (mode == m)
return 0;
if (mode >= 2 && !(priv->ctr & 0x20)) {
/* This mode resets the FIFO, so we may
* have to wait for it to drain first. */
unsigned long expire = jiffies + p->physport->cad->timeout;
int counter;
switch (mode) {
case ECR_PPF: /* Parallel Port FIFO mode */
case ECR_ECP: /* ECP Parallel Port mode */
/* Busy wait for 200us */
for (counter = 0; counter < 40; counter++) {
if (inb(ECONTROL(p)) & 0x01)
break;
if (signal_pending(current))
break;
udelay(5);
}
/* Poll slowly. */
while (!(inb(ECONTROL(p)) & 0x01)) {
if (time_after_eq(jiffies, expire))
/* The FIFO is stuck. */
return -EBUSY;
schedule_timeout_interruptible(
msecs_to_jiffies(10));
if (signal_pending(current))
break;
}
}
}
if (mode >= 2 && m >= 2) {
/* We have to go through mode 001 */
oecr &= ~(7 << 5);
oecr |= ECR_PS2 << 5;
ECR_WRITE(p, oecr);
}
/* Set the mode. */
oecr &= ~(7 << 5);
oecr |= m << 5;
ECR_WRITE(p, oecr);
return 0;
}
#endif /* FIFO support */
/*
* Clear TIMEOUT BIT in EPP MODE
*
* This is also used in SPP detection.
*/
static int clear_epp_timeout(struct parport *pb)
{
unsigned char r;
if (!(parport_pc_read_status(pb) & 0x01))
return 1;
/* To clear timeout some chips require double read */
parport_pc_read_status(pb);
r = parport_pc_read_status(pb);
outb(r | 0x01, STATUS(pb)); /* Some reset by writing 1 */
outb(r & 0xfe, STATUS(pb)); /* Others by writing 0 */
r = parport_pc_read_status(pb);
return !(r & 0x01);
}
/*
* Access functions.
*
* Most of these aren't static because they may be used by the
* parport_xxx_yyy macros. extern __inline__ versions of several
* of these are in parport_pc.h.
*/
static void parport_pc_init_state(struct pardevice *dev,
struct parport_state *s)
{
s->u.pc.ctr = 0xc;
if (dev->irq_func &&
dev->port->irq != PARPORT_IRQ_NONE)
/* Set ackIntEn */
s->u.pc.ctr |= 0x10;
s->u.pc.ecr = 0x34; /* NetMos chip can cause problems 0x24;
* D.Gruszka VScom */
}
static void parport_pc_save_state(struct parport *p, struct parport_state *s)
{
const struct parport_pc_private *priv = p->physport->private_data;
s->u.pc.ctr = priv->ctr;
if (priv->ecr)
s->u.pc.ecr = inb(ECONTROL(p));
}
static void parport_pc_restore_state(struct parport *p,
struct parport_state *s)
{
struct parport_pc_private *priv = p->physport->private_data;
register unsigned char c = s->u.pc.ctr & priv->ctr_writable;
outb(c, CONTROL(p));
priv->ctr = c;
if (priv->ecr)
ECR_WRITE(p, s->u.pc.ecr);
}
#ifdef CONFIG_PARPORT_1284
static size_t parport_pc_epp_read_data(struct parport *port, void *buf,
size_t length, int flags)
{
size_t got = 0;
if (flags & PARPORT_W91284PIC) {
unsigned char status;
size_t left = length;
/* Use what we know about the status lines:
* nFault is 0 if there is at least 1 byte in the Warp's FIFO
* pError is 1 if there are 16 bytes in the Warp's FIFO
*/
status = inb(STATUS(port));
while (!(status & 0x08) && got < length) {
if (left >= 16 && (status & 0x20) && !(status & 0x08)) {
/* can grab 16 bytes from warp fifo */
if (!((long)buf & 0x03))
insl(EPPDATA(port), buf, 4);
else
insb(EPPDATA(port), buf, 16);
buf += 16;
got += 16;
left -= 16;
} else {
/* grab single byte from the warp fifo */
*((char *)buf) = inb(EPPDATA(port));
buf++;
got++;
left--;
}
status = inb(STATUS(port));
if (status & 0x01) {
/* EPP timeout should never occur... */
printk(KERN_DEBUG "%s: EPP timeout occurred while talking to w91284pic (should not have done)\n",
port->name);
clear_epp_timeout(port);
}
}
return got;
}
if ((length > 1) && ((flags & PARPORT_EPP_FAST_32)
|| flags & PARPORT_EPP_FAST_16
|| flags & PARPORT_EPP_FAST_8)) {
if ((flags & PARPORT_EPP_FAST_32)
&& !(((long)buf | length) & 0x03))
insl(EPPDATA(port), buf, (length >> 2));
else if ((flags & PARPORT_EPP_FAST_16)
&& !(((long)buf | length) & 0x01))
insw(EPPDATA(port), buf, length >> 1);
else
insb(EPPDATA(port), buf, length);
if (inb(STATUS(port)) & 0x01) {
clear_epp_timeout(port);
return -EIO;
}
return length;
}
for (; got < length; got++) {
*((char *)buf) = inb(EPPDATA(port));
buf++;
if (inb(STATUS(port)) & 0x01) {
/* EPP timeout */
clear_epp_timeout(port);
break;
}
}
return got;
}
static size_t parport_pc_epp_write_data(struct parport *port, const void *buf,
size_t length, int flags)
{
size_t written = 0;
if ((length > 1) && ((flags & PARPORT_EPP_FAST_32)
|| flags & PARPORT_EPP_FAST_16
|| flags & PARPORT_EPP_FAST_8)) {
if ((flags & PARPORT_EPP_FAST_32)
&& !(((long)buf | length) & 0x03))
outsl(EPPDATA(port), buf, (length >> 2));
else if ((flags & PARPORT_EPP_FAST_16)
&& !(((long)buf | length) & 0x01))
outsw(EPPDATA(port), buf, length >> 1);
else
outsb(EPPDATA(port), buf, length);
if (inb(STATUS(port)) & 0x01) {
clear_epp_timeout(port);
return -EIO;
}
return length;
}
for (; written < length; written++) {
outb(*((char *)buf), EPPDATA(port));
buf++;
if (inb(STATUS(port)) & 0x01) {
clear_epp_timeout(port);
break;
}
}
return written;
}
static size_t parport_pc_epp_read_addr(struct parport *port, void *buf,
size_t length, int flags)
{
size_t got = 0;
if ((flags & PARPORT_EPP_FAST) && (length > 1)) {
insb(EPPADDR(port), buf, length);
if (inb(STATUS(port)) & 0x01) {
clear_epp_timeout(port);
return -EIO;
}
return length;
}
for (; got < length; got++) {
*((char *)buf) = inb(EPPADDR(port));
buf++;
if (inb(STATUS(port)) & 0x01) {
clear_epp_timeout(port);
break;
}
}
return got;
}
static size_t parport_pc_epp_write_addr(struct parport *port,
const void *buf, size_t length,
int flags)
{
size_t written = 0;
if ((flags & PARPORT_EPP_FAST) && (length > 1)) {
outsb(EPPADDR(port), buf, length);
if (inb(STATUS(port)) & 0x01) {
clear_epp_timeout(port);
return -EIO;
}
return length;
}
for (; written < length; written++) {
outb(*((char *)buf), EPPADDR(port));
buf++;
if (inb(STATUS(port)) & 0x01) {
clear_epp_timeout(port);
break;
}
}
return written;
}
static size_t parport_pc_ecpepp_read_data(struct parport *port, void *buf,
size_t length, int flags)
{
size_t got;
frob_set_mode(port, ECR_EPP);
parport_pc_data_reverse(port);
parport_pc_write_control(port, 0x4);
got = parport_pc_epp_read_data(port, buf, length, flags);
frob_set_mode(port, ECR_PS2);
return got;
}
static size_t parport_pc_ecpepp_write_data(struct parport *port,
const void *buf, size_t length,
int flags)
{
size_t written;
frob_set_mode(port, ECR_EPP);
parport_pc_write_control(port, 0x4);
parport_pc_data_forward(port);
written = parport_pc_epp_write_data(port, buf, length, flags);
frob_set_mode(port, ECR_PS2);
return written;
}
static size_t parport_pc_ecpepp_read_addr(struct parport *port, void *buf,
size_t length, int flags)
{
size_t got;
frob_set_mode(port, ECR_EPP);
parport_pc_data_reverse(port);
parport_pc_write_control(port, 0x4);
got = parport_pc_epp_read_addr(port, buf, length, flags);
frob_set_mode(port, ECR_PS2);
return got;
}
static size_t parport_pc_ecpepp_write_addr(struct parport *port,
const void *buf, size_t length,
int flags)
{
size_t written;
frob_set_mode(port, ECR_EPP);
parport_pc_write_control(port, 0x4);
parport_pc_data_forward(port);
written = parport_pc_epp_write_addr(port, buf, length, flags);
frob_set_mode(port, ECR_PS2);
return written;
}
#endif /* IEEE 1284 support */
#ifdef CONFIG_PARPORT_PC_FIFO
static size_t parport_pc_fifo_write_block_pio(struct parport *port,
const void *buf, size_t length)
{
int ret = 0;
const unsigned char *bufp = buf;
size_t left = length;
unsigned long expire = jiffies + port->physport->cad->timeout;
const unsigned long fifo = FIFO(port);
int poll_for = 8; /* 80 usecs */
const struct parport_pc_private *priv = port->physport->private_data;
const int fifo_depth = priv->fifo_depth;
port = port->physport;
/* We don't want to be interrupted every character. */
parport_pc_disable_irq(port);
/* set nErrIntrEn and serviceIntr */
frob_econtrol(port, (1<<4) | (1<<2), (1<<4) | (1<<2));
/* Forward mode. */
parport_pc_data_forward(port); /* Must be in PS2 mode */
while (left) {
unsigned char byte;
unsigned char ecrval = inb(ECONTROL(port));
int i = 0;
if (need_resched() && time_before(jiffies, expire))
/* Can't yield the port. */
schedule();
/* Anyone else waiting for the port? */
if (port->waithead) {
printk(KERN_DEBUG "Somebody wants the port\n");
break;
}
if (ecrval & 0x02) {
/* FIFO is full. Wait for interrupt. */
/* Clear serviceIntr */
ECR_WRITE(port, ecrval & ~(1<<2));
false_alarm:
ret = parport_wait_event(port, HZ);
if (ret < 0)
break;
ret = 0;
if (!time_before(jiffies, expire)) {
/* Timed out. */
printk(KERN_DEBUG "FIFO write timed out\n");
break;
}
ecrval = inb(ECONTROL(port));
if (!(ecrval & (1<<2))) {
if (need_resched() &&
time_before(jiffies, expire))
schedule();
goto false_alarm;
}
continue;
}
/* Can't fail now. */
expire = jiffies + port->cad->timeout;
poll:
if (signal_pending(current))
break;
if (ecrval & 0x01) {
/* FIFO is empty. Blast it full. */
const int n = left < fifo_depth ? left : fifo_depth;
outsb(fifo, bufp, n);
bufp += n;
left -= n;
/* Adjust the poll time. */
if (i < (poll_for - 2))
poll_for--;
continue;
} else if (i++ < poll_for) {
udelay(10);
ecrval = inb(ECONTROL(port));
goto poll;
}
/* Half-full (call me an optimist) */
byte = *bufp++;
outb(byte, fifo);
left--;
}
dump_parport_state("leave fifo_write_block_pio", port);
return length - left;
}
#ifdef HAS_DMA
static size_t parport_pc_fifo_write_block_dma(struct parport *port,
const void *buf, size_t length)
{
int ret = 0;
unsigned long dmaflag;
size_t left = length;
const struct parport_pc_private *priv = port->physport->private_data;
struct device *dev = port->physport->dev;
dma_addr_t dma_addr, dma_handle;
size_t maxlen = 0x10000; /* max 64k per DMA transfer */
unsigned long start = (unsigned long) buf;
unsigned long end = (unsigned long) buf + length - 1;
dump_parport_state("enter fifo_write_block_dma", port);
if (end < MAX_DMA_ADDRESS) {
/* If it would cross a 64k boundary, cap it at the end. */
if ((start ^ end) & ~0xffffUL)
maxlen = 0x10000 - (start & 0xffff);
dma_addr = dma_handle = dma_map_single(dev, (void *)buf, length,
DMA_TO_DEVICE);
} else {
/* above 16 MB we use a bounce buffer as ISA-DMA
is not possible */
maxlen = PAGE_SIZE; /* sizeof(priv->dma_buf) */
dma_addr = priv->dma_handle;
dma_handle = 0;
}
port = port->physport;
/* We don't want to be interrupted every character. */
parport_pc_disable_irq(port);
/* set nErrIntrEn and serviceIntr */
frob_econtrol(port, (1<<4) | (1<<2), (1<<4) | (1<<2));
/* Forward mode. */
parport_pc_data_forward(port); /* Must be in PS2 mode */
while (left) {
unsigned long expire = jiffies + port->physport->cad->timeout;
size_t count = left;
if (count > maxlen)
count = maxlen;
if (!dma_handle) /* bounce buffer ! */
memcpy(priv->dma_buf, buf, count);
dmaflag = claim_dma_lock();
disable_dma(port->dma);
clear_dma_ff(port->dma);
set_dma_mode(port->dma, DMA_MODE_WRITE);
set_dma_addr(port->dma, dma_addr);
set_dma_count(port->dma, count);
/* Set DMA mode */
frob_econtrol(port, 1<<3, 1<<3);
/* Clear serviceIntr */
frob_econtrol(port, 1<<2, 0);
enable_dma(port->dma);
release_dma_lock(dmaflag);
/* assume DMA will be successful */
left -= count;
buf += count;
if (dma_handle)
dma_addr += count;
/* Wait for interrupt. */
false_alarm:
ret = parport_wait_event(port, HZ);
if (ret < 0)
break;
ret = 0;
if (!time_before(jiffies, expire)) {
/* Timed out. */
printk(KERN_DEBUG "DMA write timed out\n");
break;
}
/* Is serviceIntr set? */
if (!(inb(ECONTROL(port)) & (1<<2))) {
cond_resched();
goto false_alarm;
}
dmaflag = claim_dma_lock();
disable_dma(port->dma);
clear_dma_ff(port->dma);
count = get_dma_residue(port->dma);
release_dma_lock(dmaflag);
cond_resched(); /* Can't yield the port. */
/* Anyone else waiting for the port? */
if (port->waithead) {
printk(KERN_DEBUG "Somebody wants the port\n");
break;
}
/* update for possible DMA residue ! */
buf -= count;
left += count;
if (dma_handle)
dma_addr -= count;
}
/* Maybe got here through break, so adjust for DMA residue! */
dmaflag = claim_dma_lock();
disable_dma(port->dma);
clear_dma_ff(port->dma);
left += get_dma_residue(port->dma);
release_dma_lock(dmaflag);
/* Turn off DMA mode */
frob_econtrol(port, 1<<3, 0);
if (dma_handle)
dma_unmap_single(dev, dma_handle, length, DMA_TO_DEVICE);
dump_parport_state("leave fifo_write_block_dma", port);
return length - left;
}
#endif
static inline size_t parport_pc_fifo_write_block(struct parport *port,
const void *buf, size_t length)
{
#ifdef HAS_DMA
if (port->dma != PARPORT_DMA_NONE)
return parport_pc_fifo_write_block_dma(port, buf, length);
#endif
return parport_pc_fifo_write_block_pio(port, buf, length);
}
/* Parallel Port FIFO mode (ECP chipsets) */
static size_t parport_pc_compat_write_block_pio(struct parport *port,
const void *buf, size_t length,
int flags)
{
size_t written;
int r;
unsigned long expire;
const struct parport_pc_private *priv = port->physport->private_data;
/* Special case: a timeout of zero means we cannot call schedule().
* Also if O_NONBLOCK is set then use the default implementation. */
if (port->physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
return parport_ieee1284_write_compat(port, buf,
length, flags);
/* Set up parallel port FIFO mode.*/
parport_pc_data_forward(port); /* Must be in PS2 mode */
parport_pc_frob_control(port, PARPORT_CONTROL_STROBE, 0);
r = change_mode(port, ECR_PPF); /* Parallel port FIFO */
if (r)
printk(KERN_DEBUG "%s: Warning change_mode ECR_PPF failed\n",
port->name);
port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
/* Write the data to the FIFO. */
written = parport_pc_fifo_write_block(port, buf, length);
/* Finish up. */
/* For some hardware we don't want to touch the mode until
* the FIFO is empty, so allow 4 seconds for each position
* in the fifo.
*/
expire = jiffies + (priv->fifo_depth * HZ * 4);
do {
/* Wait for the FIFO to empty */
r = change_mode(port, ECR_PS2);
if (r != -EBUSY)
break;
} while (time_before(jiffies, expire));
if (r == -EBUSY) {
printk(KERN_DEBUG "%s: FIFO is stuck\n", port->name);
/* Prevent further data transfer. */
frob_set_mode(port, ECR_TST);
/* Adjust for the contents of the FIFO. */
for (written -= priv->fifo_depth; ; written++) {
if (inb(ECONTROL(port)) & 0x2) {
/* Full up. */
break;
}
outb(0, FIFO(port));
}
/* Reset the FIFO and return to PS2 mode. */
frob_set_mode(port, ECR_PS2);
}
r = parport_wait_peripheral(port,
PARPORT_STATUS_BUSY,
PARPORT_STATUS_BUSY);
if (r)
printk(KERN_DEBUG "%s: BUSY timeout (%d) in compat_write_block_pio\n",
port->name, r);
port->physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
return written;
}
/* ECP */
#ifdef CONFIG_PARPORT_1284
static size_t parport_pc_ecp_write_block_pio(struct parport *port,
const void *buf, size_t length,
int flags)
{
size_t written;
int r;
unsigned long expire;
const struct parport_pc_private *priv = port->physport->private_data;
/* Special case: a timeout of zero means we cannot call schedule().
* Also if O_NONBLOCK is set then use the default implementation. */
if (port->physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
return parport_ieee1284_ecp_write_data(port, buf,
length, flags);
/* Switch to forward mode if necessary. */
if (port->physport->ieee1284.phase != IEEE1284_PH_FWD_IDLE) {
/* Event 47: Set nInit high. */
parport_frob_control(port,
PARPORT_CONTROL_INIT
| PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_INIT
| PARPORT_CONTROL_AUTOFD);
/* Event 49: PError goes high. */
r = parport_wait_peripheral(port,
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (r) {
printk(KERN_DEBUG "%s: PError timeout (%d) in ecp_write_block_pio\n",
port->name, r);
}
}
/* Set up ECP parallel port mode.*/
parport_pc_data_forward(port); /* Must be in PS2 mode */
parport_pc_frob_control(port,
PARPORT_CONTROL_STROBE |
PARPORT_CONTROL_AUTOFD,
0);
r = change_mode(port, ECR_ECP); /* ECP FIFO */
if (r)
printk(KERN_DEBUG "%s: Warning change_mode ECR_ECP failed\n",
port->name);
port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
/* Write the data to the FIFO. */
written = parport_pc_fifo_write_block(port, buf, length);
/* Finish up. */
/* For some hardware we don't want to touch the mode until
* the FIFO is empty, so allow 4 seconds for each position
* in the fifo.
*/
expire = jiffies + (priv->fifo_depth * (HZ * 4));
do {
/* Wait for the FIFO to empty */
r = change_mode(port, ECR_PS2);
if (r != -EBUSY)
break;
} while (time_before(jiffies, expire));
if (r == -EBUSY) {
printk(KERN_DEBUG "%s: FIFO is stuck\n", port->name);
/* Prevent further data transfer. */
frob_set_mode(port, ECR_TST);
/* Adjust for the contents of the FIFO. */
for (written -= priv->fifo_depth; ; written++) {
if (inb(ECONTROL(port)) & 0x2) {
/* Full up. */
break;
}
outb(0, FIFO(port));
}
/* Reset the FIFO and return to PS2 mode. */
frob_set_mode(port, ECR_PS2);
/* Host transfer recovery. */
parport_pc_data_reverse(port); /* Must be in PS2 mode */
udelay(5);
parport_frob_control(port, PARPORT_CONTROL_INIT, 0);
r = parport_wait_peripheral(port, PARPORT_STATUS_PAPEROUT, 0);
if (r)
printk(KERN_DEBUG "%s: PE,1 timeout (%d) in ecp_write_block_pio\n",
port->name, r);
parport_frob_control(port,
PARPORT_CONTROL_INIT,
PARPORT_CONTROL_INIT);
r = parport_wait_peripheral(port,
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (r)
printk(KERN_DEBUG "%s: PE,2 timeout (%d) in ecp_write_block_pio\n",
port->name, r);
}
r = parport_wait_peripheral(port,
PARPORT_STATUS_BUSY,
PARPORT_STATUS_BUSY);
if (r)
printk(KERN_DEBUG "%s: BUSY timeout (%d) in ecp_write_block_pio\n",
port->name, r);
port->physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
return written;
}
#endif /* IEEE 1284 support */
#endif /* Allowed to use FIFO/DMA */
/*
* ******************************************
* INITIALISATION AND MODULE STUFF BELOW HERE
* ******************************************
*/
/* GCC will not inline an extern inline function that is later overridden
by a non-inline definition, so we use the outlined_ variants here. */
static const struct parport_operations parport_pc_ops = {
.write_data = parport_pc_write_data,
.read_data = parport_pc_read_data,
.write_control = parport_pc_write_control,
.read_control = parport_pc_read_control,
.frob_control = parport_pc_frob_control,
.read_status = parport_pc_read_status,
.enable_irq = parport_pc_enable_irq,
.disable_irq = parport_pc_disable_irq,
.data_forward = parport_pc_data_forward,
.data_reverse = parport_pc_data_reverse,
.init_state = parport_pc_init_state,
.save_state = parport_pc_save_state,
.restore_state = parport_pc_restore_state,
.epp_write_data = parport_ieee1284_epp_write_data,
.epp_read_data = parport_ieee1284_epp_read_data,
.epp_write_addr = parport_ieee1284_epp_write_addr,
.epp_read_addr = parport_ieee1284_epp_read_addr,
.ecp_write_data = parport_ieee1284_ecp_write_data,
.ecp_read_data = parport_ieee1284_ecp_read_data,
.ecp_write_addr = parport_ieee1284_ecp_write_addr,
.compat_write_data = parport_ieee1284_write_compat,
.nibble_read_data = parport_ieee1284_read_nibble,
.byte_read_data = parport_ieee1284_read_byte,
.owner = THIS_MODULE,
};
#ifdef CONFIG_PARPORT_PC_SUPERIO
static struct superio_struct *find_free_superio(void)
{
int i;
for (i = 0; i < NR_SUPERIOS; i++)
if (superios[i].io == 0)
return &superios[i];
return NULL;
}
/* Super-IO chipset detection, Winbond, SMSC */
static void show_parconfig_smsc37c669(int io, int key)
{
int cr1, cr4, cra, cr23, cr26, cr27;
struct superio_struct *s;
static const char *const modes[] = {
"SPP and Bidirectional (PS/2)",
"EPP and SPP",
"ECP",
"ECP and EPP" };
outb(key, io);
outb(key, io);
outb(1, io);
cr1 = inb(io + 1);
outb(4, io);
cr4 = inb(io + 1);
outb(0x0a, io);
cra = inb(io + 1);
outb(0x23, io);
cr23 = inb(io + 1);
outb(0x26, io);
cr26 = inb(io + 1);
outb(0x27, io);
cr27 = inb(io + 1);
outb(0xaa, io);
if (verbose_probing) {
pr_info("SMSC 37c669 LPT Config: cr_1=0x%02x, 4=0x%02x, A=0x%2x, 23=0x%02x, 26=0x%02x, 27=0x%02x\n",
cr1, cr4, cra, cr23, cr26, cr27);
/* The documentation calls DMA and IRQ-Lines by letters, so
the board maker can/will wire them
appropriately/randomly... G=reserved H=IDE-irq, */
pr_info("SMSC LPT Config: io=0x%04x, irq=%c, dma=%c, fifo threshold=%d\n",
cr23 * 4,
(cr27 & 0x0f) ? 'A' - 1 + (cr27 & 0x0f) : '-',
(cr26 & 0x0f) ? 'A' - 1 + (cr26 & 0x0f) : '-',
cra & 0x0f);
pr_info("SMSC LPT Config: enabled=%s power=%s\n",
(cr23 * 4 >= 0x100) ? "yes" : "no",
(cr1 & 4) ? "yes" : "no");
pr_info("SMSC LPT Config: Port mode=%s, EPP version =%s\n",
(cr1 & 0x08) ? "Standard mode only (SPP)"
: modes[cr4 & 0x03],
(cr4 & 0x40) ? "1.7" : "1.9");
}
/* Heuristics ! BIOS setup for this mainboard device limits
the choices to standard settings, i.e. io-address and IRQ
are related, however DMA can be 1 or 3, assume DMA_A=DMA1,
DMA_C=DMA3 (this is true e.g. for TYAN 1564D Tomcat IV) */
if (cr23 * 4 >= 0x100) { /* if active */
s = find_free_superio();
if (s == NULL)
pr_info("Super-IO: too many chips!\n");
else {
int d;
switch (cr23 * 4) {
case 0x3bc:
s->io = 0x3bc;
s->irq = 7;
break;
case 0x378:
s->io = 0x378;
s->irq = 7;
break;
case 0x278:
s->io = 0x278;
s->irq = 5;
}
d = (cr26 & 0x0f);
if (d == 1 || d == 3)
s->dma = d;
else
s->dma = PARPORT_DMA_NONE;
}
}
}
static void show_parconfig_winbond(int io, int key)
{
int cr30, cr60, cr61, cr70, cr74, crf0;
struct superio_struct *s;
static const char *const modes[] = {
"Standard (SPP) and Bidirectional(PS/2)", /* 0 */
"EPP-1.9 and SPP",
"ECP",
"ECP and EPP-1.9",
"Standard (SPP)",
"EPP-1.7 and SPP", /* 5 */
"undefined!",
"ECP and EPP-1.7" };
static char *const irqtypes[] = {
"pulsed low, high-Z",
"follows nACK" };
/* The registers are called compatible-PnP because the
register layout is modelled after ISA-PnP, the access
method is just another ... */
outb(key, io);
outb(key, io);
outb(0x07, io); /* Register 7: Select Logical Device */
outb(0x01, io + 1); /* LD1 is Parallel Port */
outb(0x30, io);
cr30 = inb(io + 1);
outb(0x60, io);
cr60 = inb(io + 1);
outb(0x61, io);
cr61 = inb(io + 1);
outb(0x70, io);
cr70 = inb(io + 1);
outb(0x74, io);
cr74 = inb(io + 1);
outb(0xf0, io);
crf0 = inb(io + 1);
outb(0xaa, io);
if (verbose_probing) {
pr_info("Winbond LPT Config: cr_30=%02x 60,61=%02x%02x 70=%02x 74=%02x, f0=%02x\n",
cr30, cr60, cr61, cr70, cr74, crf0);
pr_info("Winbond LPT Config: active=%s, io=0x%02x%02x irq=%d, ",
(cr30 & 0x01) ? "yes" : "no", cr60, cr61, cr70 & 0x0f);
if ((cr74 & 0x07) > 3)
pr_cont("dma=none\n");
else
pr_cont("dma=%d\n", cr74 & 0x07);
pr_info("Winbond LPT Config: irqtype=%s, ECP fifo threshold=%d\n",
irqtypes[crf0 >> 7], (crf0 >> 3) & 0x0f);
pr_info("Winbond LPT Config: Port mode=%s\n",
modes[crf0 & 0x07]);
}
if (cr30 & 0x01) { /* the settings can be interrogated later ... */
s = find_free_superio();
if (s == NULL)
pr_info("Super-IO: too many chips!\n");
else {
s->io = (cr60 << 8) | cr61;
s->irq = cr70 & 0x0f;
s->dma = (((cr74 & 0x07) > 3) ?
PARPORT_DMA_NONE : (cr74 & 0x07));
}
}
}
static void decode_winbond(int efer, int key, int devid, int devrev, int oldid)
{
const char *type = "unknown";
int id, progif = 2;
if (devid == devrev)
/* simple heuristics, we happened to read some
non-winbond register */
return;
id = (devid << 8) | devrev;
/* Values are from public data sheets pdf files, I can just
confirm 83977TF is correct :-) */
if (id == 0x9771)
type = "83977F/AF";
else if (id == 0x9773)
type = "83977TF / SMSC 97w33x/97w34x";
else if (id == 0x9774)
type = "83977ATF";
else if ((id & ~0x0f) == 0x5270)
type = "83977CTF / SMSC 97w36x";
else if ((id & ~0x0f) == 0x52f0)
type = "83977EF / SMSC 97w35x";
else if ((id & ~0x0f) == 0x5210)
type = "83627";
else if ((id & ~0x0f) == 0x6010)
type = "83697HF";
else if ((oldid & 0x0f) == 0x0a) {
type = "83877F";
progif = 1;
} else if ((oldid & 0x0f) == 0x0b) {
type = "83877AF";
progif = 1;
} else if ((oldid & 0x0f) == 0x0c) {
type = "83877TF";
progif = 1;
} else if ((oldid & 0x0f) == 0x0d) {
type = "83877ATF";
progif = 1;
} else
progif = 0;
if (verbose_probing)
pr_info("Winbond chip at EFER=0x%x key=0x%02x devid=%02x devrev=%02x oldid=%02x type=%s\n",
efer, key, devid, devrev, oldid, type);
if (progif == 2)
show_parconfig_winbond(efer, key);
}
static void decode_smsc(int efer, int key, int devid, int devrev)
{
const char *type = "unknown";
void (*func)(int io, int key);
int id;
if (devid == devrev)
/* simple heuristics, we happened to read some
non-smsc register */
return;
func = NULL;
id = (devid << 8) | devrev;
if (id == 0x0302) {
type = "37c669";
func = show_parconfig_smsc37c669;
} else if (id == 0x6582)
type = "37c665IR";
else if (devid == 0x65)
type = "37c665GT";
else if (devid == 0x66)
type = "37c666GT";
if (verbose_probing)
pr_info("SMSC chip at EFER=0x%x key=0x%02x devid=%02x devrev=%02x type=%s\n",
efer, key, devid, devrev, type);
if (func)
func(efer, key);
}
static void winbond_check(int io, int key)
{
int origval, devid, devrev, oldid, x_devid, x_devrev, x_oldid;
if (!request_region(io, 3, __func__))
return;
origval = inb(io); /* Save original value */
/* First probe without key */
outb(0x20, io);
x_devid = inb(io + 1);
outb(0x21, io);
x_devrev = inb(io + 1);
outb(0x09, io);
x_oldid = inb(io + 1);
outb(key, io);
outb(key, io); /* Write Magic Sequence to EFER, extended
function enable register */
outb(0x20, io); /* Write EFIR, extended function index register */
devid = inb(io + 1); /* Read EFDR, extended function data register */
outb(0x21, io);
devrev = inb(io + 1);
outb(0x09, io);
oldid = inb(io + 1);
outb(0xaa, io); /* Magic Seal */
outb(origval, io); /* in case we poked some entirely different hardware */
if ((x_devid == devid) && (x_devrev == devrev) && (x_oldid == oldid))
goto out; /* protection against false positives */
decode_winbond(io, key, devid, devrev, oldid);
out:
release_region(io, 3);
}
static void winbond_check2(int io, int key)
{
int origval[3], devid, devrev, oldid, x_devid, x_devrev, x_oldid;
if (!request_region(io, 3, __func__))
return;
origval[0] = inb(io); /* Save original values */
origval[1] = inb(io + 1);
origval[2] = inb(io + 2);
/* First probe without the key */
outb(0x20, io + 2);
x_devid = inb(io + 2);
outb(0x21, io + 1);
x_devrev = inb(io + 2);
outb(0x09, io + 1);
x_oldid = inb(io + 2);
outb(key, io); /* Write Magic Byte to EFER, extended
function enable register */
outb(0x20, io + 2); /* Write EFIR, extended function index register */
devid = inb(io + 2); /* Read EFDR, extended function data register */
outb(0x21, io + 1);
devrev = inb(io + 2);
outb(0x09, io + 1);
oldid = inb(io + 2);
outb(0xaa, io); /* Magic Seal */
outb(origval[0], io); /* in case we poked some entirely different hardware */
outb(origval[1], io + 1);
outb(origval[2], io + 2);
if (x_devid == devid && x_devrev == devrev && x_oldid == oldid)
goto out; /* protection against false positives */
decode_winbond(io, key, devid, devrev, oldid);
out:
release_region(io, 3);
}
static void smsc_check(int io, int key)
{
int origval, id, rev, oldid, oldrev, x_id, x_rev, x_oldid, x_oldrev;
if (!request_region(io, 3, __func__))
return;
origval = inb(io); /* Save original value */
/* First probe without the key */
outb(0x0d, io);
x_oldid = inb(io + 1);
outb(0x0e, io);
x_oldrev = inb(io + 1);
outb(0x20, io);
x_id = inb(io + 1);
outb(0x21, io);
x_rev = inb(io + 1);
outb(key, io);
outb(key, io); /* Write Magic Sequence to EFER, extended
function enable register */
outb(0x0d, io); /* Write EFIR, extended function index register */
oldid = inb(io + 1); /* Read EFDR, extended function data register */
outb(0x0e, io);
oldrev = inb(io + 1);
outb(0x20, io);
id = inb(io + 1);
outb(0x21, io);
rev = inb(io + 1);
outb(0xaa, io); /* Magic Seal */
outb(origval, io); /* in case we poked some entirely different hardware */
if (x_id == id && x_oldrev == oldrev &&
x_oldid == oldid && x_rev == rev)
goto out; /* protection against false positives */
decode_smsc(io, key, oldid, oldrev);
out:
release_region(io, 3);
}
static void detect_and_report_winbond(void)
{
if (verbose_probing)
printk(KERN_DEBUG "Winbond Super-IO detection, now testing ports 3F0,370,250,4E,2E ...\n");
winbond_check(0x3f0, 0x87);
winbond_check(0x370, 0x87);
winbond_check(0x2e , 0x87);
winbond_check(0x4e , 0x87);
winbond_check(0x3f0, 0x86);
winbond_check2(0x250, 0x88);
winbond_check2(0x250, 0x89);
}
static void detect_and_report_smsc(void)
{
if (verbose_probing)
printk(KERN_DEBUG "SMSC Super-IO detection, now testing Ports 2F0, 370 ...\n");
smsc_check(0x3f0, 0x55);
smsc_check(0x370, 0x55);
smsc_check(0x3f0, 0x44);
smsc_check(0x370, 0x44);
}
static void detect_and_report_it87(void)
{
u16 dev;
u8 origval, r;
if (verbose_probing)
printk(KERN_DEBUG "IT8705 Super-IO detection, now testing port 2E ...\n");
if (!request_muxed_region(0x2e, 2, __func__))
return;
origval = inb(0x2e); /* Save original value */
outb(0x87, 0x2e);
outb(0x01, 0x2e);
outb(0x55, 0x2e);
outb(0x55, 0x2e);
outb(0x20, 0x2e);
dev = inb(0x2f) << 8;
outb(0x21, 0x2e);
dev |= inb(0x2f);
if (dev == 0x8712 || dev == 0x8705 || dev == 0x8715 ||
dev == 0x8716 || dev == 0x8718 || dev == 0x8726) {
pr_info("IT%04X SuperIO detected\n", dev);
outb(0x07, 0x2E); /* Parallel Port */
outb(0x03, 0x2F);
outb(0xF0, 0x2E); /* BOOT 0x80 off */
r = inb(0x2f);
outb(0xF0, 0x2E);
outb(r | 8, 0x2F);
outb(0x02, 0x2E); /* Lock */
outb(0x02, 0x2F);
} else {
outb(origval, 0x2e); /* Oops, sorry to disturb */
}
release_region(0x2e, 2);
}
#endif /* CONFIG_PARPORT_PC_SUPERIO */
static struct superio_struct *find_superio(struct parport *p)
{
int i;
for (i = 0; i < NR_SUPERIOS; i++)
if (superios[i].io == p->base)
return &superios[i];
return NULL;
}
static int get_superio_dma(struct parport *p)
{
struct superio_struct *s = find_superio(p);
if (s)
return s->dma;
return PARPORT_DMA_NONE;
}
static int get_superio_irq(struct parport *p)
{
struct superio_struct *s = find_superio(p);
if (s)
return s->irq;
return PARPORT_IRQ_NONE;
}
/* --- Mode detection ------------------------------------- */
/*
* Checks for port existence; all ports support SPP mode.
* Returns:
* 0 : No parallel port at this address
* PARPORT_MODE_PCSPP : SPP port detected
* (if the user specified an ioport himself,
* this shall always be the case!)
*
*/
static int parport_SPP_supported(struct parport *pb)
{
unsigned char r, w;
/*
* first clear any possibly pending EPP timeout
* I ([email protected]) have an SMSC chipset
* that does not even respond to SPP cycles if an EPP
* timeout is pending
*/
clear_epp_timeout(pb);
/* Do a simple read-write test to make sure the port exists. */
w = 0xc;
outb(w, CONTROL(pb));
/* Is there a control register that we can read from? Some
* ports don't allow reads, so read_control just returns a
* software copy. Some ports _do_ allow reads, so bypass the
* software copy here. In addition, some bits aren't
* writable. */
r = inb(CONTROL(pb));
if ((r & 0xf) == w) {
w = 0xe;
outb(w, CONTROL(pb));
r = inb(CONTROL(pb));
outb(0xc, CONTROL(pb));
if ((r & 0xf) == w)
return PARPORT_MODE_PCSPP;
}
if (user_specified)
/* That didn't work, but the user thinks there's a
* port here. */
pr_info("parport 0x%lx (WARNING): CTR: wrote 0x%02x, read 0x%02x\n",
pb->base, w, r);
/* Try the data register. The data lines aren't tri-stated at
* this stage, so we expect back what we wrote. */
w = 0xaa;
parport_pc_write_data(pb, w);
r = parport_pc_read_data(pb);
if (r == w) {
w = 0x55;
parport_pc_write_data(pb, w);
r = parport_pc_read_data(pb);
if (r == w)
return PARPORT_MODE_PCSPP;
}
if (user_specified) {
/* Didn't work, but the user is convinced this is the
* place. */
pr_info("parport 0x%lx (WARNING): DATA: wrote 0x%02x, read 0x%02x\n",
pb->base, w, r);
pr_info("parport 0x%lx: You gave this address, but there is probably no parallel port there!\n",
pb->base);
}
/* It's possible that we can't read the control register or
* the data register. In that case just believe the user. */
if (user_specified)
return PARPORT_MODE_PCSPP;
return 0;
}
/* Check for ECR
*
* Old style XT ports alias io ports every 0x400, hence accessing ECR
* on these cards actually accesses the CTR.
*
* Modern cards don't do this but reading from ECR will return 0xff
* regardless of what is written here if the card does NOT support
* ECP.
*
* We first check to see if ECR is the same as CTR. If not, the low
* two bits of ECR aren't writable, so we check by writing ECR and
* reading it back to see if it's what we expect.
*/
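/*
* Worked example of the final test below: on a real ECR, bit 0 (FIFO
* empty) is read-only and normally reads as 1 while the FIFO is empty,
* so the 0x34 we write is expected to read back as 0x35; a port that
* merely aliases CTR will not behave this way.
*/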
static int parport_ECR_present(struct parport *pb)
{
struct parport_pc_private *priv = pb->private_data;
unsigned char r = 0xc;
if (!priv->ecr_writable) {
outb(r, CONTROL(pb));
if ((inb(ECONTROL(pb)) & 0x3) == (r & 0x3)) {
outb(r ^ 0x2, CONTROL(pb)); /* Toggle bit 1 */
r = inb(CONTROL(pb));
if ((inb(ECONTROL(pb)) & 0x2) == (r & 0x2))
/* Sure that no ECR register exists */
goto no_reg;
}
if ((inb(ECONTROL(pb)) & 0x3) != 0x1)
goto no_reg;
ECR_WRITE(pb, 0x34);
if (inb(ECONTROL(pb)) != 0x35)
goto no_reg;
}
priv->ecr = 1;
outb(0xc, CONTROL(pb));
/* Go to mode 000 */
frob_set_mode(pb, ECR_SPP);
return 1;
no_reg:
outb(0xc, CONTROL(pb));
return 0;
}
#ifdef CONFIG_PARPORT_1284
/* Detect PS/2 support.
*
* Bit 5 (0x20) sets the PS/2 data direction; setting this high
* allows us to read data from the data lines. In theory we would get back
* 0xff but any peripheral attached to the port may drag some or all of the
* lines down to zero. So if we get back anything that isn't the contents
* of the data register we deem PS/2 support to be present.
*
* Some SPP ports have "half PS/2" ability - you can't turn off the line
* drivers, but an external peripheral with sufficiently beefy drivers of
* its own can overpower them and assert its own levels onto the bus, from
* where they can then be read back as normal. Ports with this property
* and the right type of device attached are likely to fail the SPP test,
* (as they will appear to have stuck bits) and so the fact that they might
* be misdetected here is rather academic.
*/
static int parport_PS2_supported(struct parport *pb)
{
int ok = 0;
clear_epp_timeout(pb);
/* try to tri-state the buffer */
parport_pc_data_reverse(pb);
parport_pc_write_data(pb, 0x55);
if (parport_pc_read_data(pb) != 0x55)
ok++;
parport_pc_write_data(pb, 0xaa);
if (parport_pc_read_data(pb) != 0xaa)
ok++;
/* cancel input mode */
parport_pc_data_forward(pb);
if (ok) {
pb->modes |= PARPORT_MODE_TRISTATE;
} else {
struct parport_pc_private *priv = pb->private_data;
priv->ctr_writable &= ~0x20;
}
return ok;
}
#ifdef CONFIG_PARPORT_PC_FIFO
static int parport_ECP_supported(struct parport *pb)
{
int i;
int config, configb;
int pword;
struct parport_pc_private *priv = pb->private_data;
/* Translate ECP intrLine to ISA irq value */
static const int intrline[] = { 0, 7, 9, 10, 11, 14, 15, 5 };
/* If there is no ECR, we have no hope of supporting ECP. */
if (!priv->ecr)
return 0;
/* Find out FIFO depth */
ECR_WRITE(pb, ECR_SPP << 5); /* Reset FIFO */
ECR_WRITE(pb, ECR_TST << 5); /* TEST FIFO */
for (i = 0; i < 1024 && !(inb(ECONTROL(pb)) & 0x02); i++)
outb(0xaa, FIFO(pb));
/*
* Some LGS chipsets have an ECR register but do not
* support ECP or FIFO mode.
*/
if (i == 1024) {
ECR_WRITE(pb, ECR_SPP << 5);
return 0;
}
priv->fifo_depth = i;
if (verbose_probing)
printk(KERN_DEBUG "0x%lx: FIFO is %d bytes\n", pb->base, i);
/* Find out writeIntrThreshold */
frob_econtrol(pb, 1<<2, 1<<2);
frob_econtrol(pb, 1<<2, 0);
for (i = 1; i <= priv->fifo_depth; i++) {
inb(FIFO(pb));
udelay(50);
if (inb(ECONTROL(pb)) & (1<<2))
break;
}
if (i <= priv->fifo_depth) {
if (verbose_probing)
printk(KERN_DEBUG "0x%lx: writeIntrThreshold is %d\n",
pb->base, i);
} else
/* Number of bytes we know we can write if we get an
interrupt. */
i = 0;
priv->writeIntrThreshold = i;
/* Find out readIntrThreshold */
frob_set_mode(pb, ECR_PS2); /* Reset FIFO and enable PS2 */
parport_pc_data_reverse(pb); /* Must be in PS2 mode */
frob_set_mode(pb, ECR_TST); /* Test FIFO */
frob_econtrol(pb, 1<<2, 1<<2);
frob_econtrol(pb, 1<<2, 0);
for (i = 1; i <= priv->fifo_depth; i++) {
outb(0xaa, FIFO(pb));
if (inb(ECONTROL(pb)) & (1<<2))
break;
}
if (i <= priv->fifo_depth) {
if (verbose_probing)
pr_info("0x%lx: readIntrThreshold is %d\n",
pb->base, i);
} else
/* Number of bytes we can read if we get an interrupt. */
i = 0;
priv->readIntrThreshold = i;
ECR_WRITE(pb, ECR_SPP << 5); /* Reset FIFO */
ECR_WRITE(pb, 0xf4); /* Configuration mode */
config = inb(CONFIGA(pb));
pword = (config >> 4) & 0x7;
switch (pword) {
case 0:
pword = 2;
pr_warn("0x%lx: Unsupported pword size!\n", pb->base);
break;
case 2:
pword = 4;
pr_warn("0x%lx: Unsupported pword size!\n", pb->base);
break;
default:
pr_warn("0x%lx: Unknown implementation ID\n", pb->base);
fallthrough; /* Assume 1 */
case 1:
pword = 1;
}
priv->pword = pword;
if (verbose_probing) {
printk(KERN_DEBUG "0x%lx: PWord is %d bits\n",
pb->base, 8 * pword);
printk(KERN_DEBUG "0x%lx: Interrupts are ISA-%s\n",
pb->base, config & 0x80 ? "Level" : "Pulses");
configb = inb(CONFIGB(pb));
printk(KERN_DEBUG "0x%lx: ECP port cfgA=0x%02x cfgB=0x%02x\n",
pb->base, config, configb);
printk(KERN_DEBUG "0x%lx: ECP settings irq=", pb->base);
if ((configb >> 3) & 0x07)
pr_cont("%d", intrline[(configb >> 3) & 0x07]);
else
pr_cont("<none or set by other means>");
pr_cont(" dma=");
if ((configb & 0x03) == 0x00)
pr_cont("<none or set by other means>\n");
else
pr_cont("%d\n", configb & 0x07);
}
/* Go back to mode 000 */
frob_set_mode(pb, ECR_SPP);
return 1;
}
#endif
#ifdef CONFIG_X86_32
static int intel_bug_present_check_epp(struct parport *pb)
{
const struct parport_pc_private *priv = pb->private_data;
int bug_present = 0;
if (priv->ecr) {
/* store value of ECR */
unsigned char ecr = inb(ECONTROL(pb));
unsigned char i;
for (i = 0x00; i < 0x80; i += 0x20) {
ECR_WRITE(pb, i);
if (clear_epp_timeout(pb)) {
/* Phony EPP in ECP. */
bug_present = 1;
break;
}
}
/* restore ECR to its initial state */
ECR_WRITE(pb, ecr);
}
return bug_present;
}
static int intel_bug_present(struct parport *pb)
{
/* Check whether the device is legacy, not PCI or PCMCIA. Only legacy is known to be affected. */
if (pb->dev != NULL) {
return 0;
}
return intel_bug_present_check_epp(pb);
}
#else
static int intel_bug_present(struct parport *pb)
{
return 0;
}
#endif /* CONFIG_X86_32 */
static int parport_ECPPS2_supported(struct parport *pb)
{
const struct parport_pc_private *priv = pb->private_data;
int result;
unsigned char oecr;
if (!priv->ecr)
return 0;
oecr = inb(ECONTROL(pb));
ECR_WRITE(pb, ECR_PS2 << 5);
result = parport_PS2_supported(pb);
ECR_WRITE(pb, oecr);
return result;
}
/* EPP mode detection */
static int parport_EPP_supported(struct parport *pb)
{
/*
* Theory:
* Bit 0 of STR is the EPP timeout bit, this bit is 0
* when EPP is possible and is set high when an EPP timeout
* occurs (EPP uses the HALT line to stop the CPU while it does
* the byte transfer, an EPP timeout occurs if the attached
* device fails to respond after 10 micro seconds).
*
* This bit is cleared by either reading it (National Semi)
* or writing a 1 to the bit (SMC, UMC, WinBond), others ???
* This bit is always high in non EPP modes.
*/
/* If EPP timeout bit clear then EPP available */
if (!clear_epp_timeout(pb))
return 0; /* No way to clear timeout */
/* Check for Intel bug. */
if (intel_bug_present(pb))
return 0;
pb->modes |= PARPORT_MODE_EPP;
/* Set up access functions to use EPP hardware. */
pb->ops->epp_read_data = parport_pc_epp_read_data;
pb->ops->epp_write_data = parport_pc_epp_write_data;
pb->ops->epp_read_addr = parport_pc_epp_read_addr;
pb->ops->epp_write_addr = parport_pc_epp_write_addr;
return 1;
}
static int parport_ECPEPP_supported(struct parport *pb)
{
struct parport_pc_private *priv = pb->private_data;
int result;
unsigned char oecr;
if (!priv->ecr)
return 0;
oecr = inb(ECONTROL(pb));
/* Search for SMC style EPP+ECP mode */
ECR_WRITE(pb, 0x80);
outb(0x04, CONTROL(pb));
result = parport_EPP_supported(pb);
ECR_WRITE(pb, oecr);
if (result) {
/* Set up access functions to use ECP+EPP hardware. */
pb->ops->epp_read_data = parport_pc_ecpepp_read_data;
pb->ops->epp_write_data = parport_pc_ecpepp_write_data;
pb->ops->epp_read_addr = parport_pc_ecpepp_read_addr;
pb->ops->epp_write_addr = parport_pc_ecpepp_write_addr;
}
return result;
}
#else /* No IEEE 1284 support */
/* Don't bother probing for modes we know we won't use. */
static int parport_PS2_supported(struct parport *pb) { return 0; }
#ifdef CONFIG_PARPORT_PC_FIFO
static int parport_ECP_supported(struct parport *pb)
{
return 0;
}
#endif
static int parport_EPP_supported(struct parport *pb)
{
return 0;
}
static int parport_ECPEPP_supported(struct parport *pb)
{
return 0;
}
static int parport_ECPPS2_supported(struct parport *pb)
{
return 0;
}
#endif /* No IEEE 1284 support */
/* --- IRQ detection -------------------------------------- */
/* Only if supports ECP mode */
static int programmable_irq_support(struct parport *pb)
{
int irq, intrLine;
unsigned char oecr = inb(ECONTROL(pb));
static const int lookup[8] = {
PARPORT_IRQ_NONE, 7, 9, 10, 11, 14, 15, 5
};
ECR_WRITE(pb, ECR_CNF << 5); /* Configuration MODE */
intrLine = (inb(CONFIGB(pb)) >> 3) & 0x07;
irq = lookup[intrLine];
ECR_WRITE(pb, oecr);
return irq;
}
static int irq_probe_ECP(struct parport *pb)
{
int i;
unsigned long irqs;
irqs = probe_irq_on();
ECR_WRITE(pb, ECR_SPP << 5); /* Reset FIFO */
ECR_WRITE(pb, (ECR_TST << 5) | 0x04);
ECR_WRITE(pb, ECR_TST << 5);
/* If Full FIFO sure that writeIntrThreshold is generated */
for (i = 0; i < 1024 && !(inb(ECONTROL(pb)) & 0x02) ; i++)
outb(0xaa, FIFO(pb));
pb->irq = probe_irq_off(irqs);
ECR_WRITE(pb, ECR_SPP << 5);
if (pb->irq <= 0)
pb->irq = PARPORT_IRQ_NONE;
return pb->irq;
}
/*
* This detection seems to work only on National Semiconductor chips;
* it does not work on SMC, LGS, or Winbond.
*/
static int irq_probe_EPP(struct parport *pb)
{
#ifndef ADVANCED_DETECT
return PARPORT_IRQ_NONE;
#else
int irqs;
unsigned char oecr;
if (pb->modes & PARPORT_MODE_PCECR)
oecr = inb(ECONTROL(pb));
irqs = probe_irq_on();
if (pb->modes & PARPORT_MODE_PCECR)
frob_econtrol(pb, 0x10, 0x10);
clear_epp_timeout(pb);
parport_pc_frob_control(pb, 0x20, 0x20);
parport_pc_frob_control(pb, 0x10, 0x10);
clear_epp_timeout(pb);
/* Device isn't expecting an EPP read
* and generates an IRQ.
*/
parport_pc_read_epp(pb);
udelay(20);
pb->irq = probe_irq_off(irqs);
if (pb->modes & PARPORT_MODE_PCECR)
ECR_WRITE(pb, oecr);
parport_pc_write_control(pb, 0xc);
if (pb->irq <= 0)
pb->irq = PARPORT_IRQ_NONE;
return pb->irq;
#endif /* Advanced detection */
}
static int irq_probe_SPP(struct parport *pb)
{
/* Don't even try to do this. */
return PARPORT_IRQ_NONE;
}
/* We will attempt to share interrupt requests since other devices
* such as sound cards and network cards seem to like using the
* printer IRQs.
*
* When ECP is available we can autoprobe for IRQs.
* NOTE: If we can autoprobe it, we can register the IRQ.
*/
static int parport_irq_probe(struct parport *pb)
{
struct parport_pc_private *priv = pb->private_data;
if (priv->ecr) {
pb->irq = programmable_irq_support(pb);
if (pb->irq == PARPORT_IRQ_NONE)
pb->irq = irq_probe_ECP(pb);
}
if ((pb->irq == PARPORT_IRQ_NONE) && priv->ecr &&
(pb->modes & PARPORT_MODE_EPP))
pb->irq = irq_probe_EPP(pb);
clear_epp_timeout(pb);
if (pb->irq == PARPORT_IRQ_NONE && (pb->modes & PARPORT_MODE_EPP))
pb->irq = irq_probe_EPP(pb);
clear_epp_timeout(pb);
if (pb->irq == PARPORT_IRQ_NONE)
pb->irq = irq_probe_SPP(pb);
if (pb->irq == PARPORT_IRQ_NONE)
pb->irq = get_superio_irq(pb);
return pb->irq;
}
/* --- DMA detection -------------------------------------- */
/* Only if chipset conforms to ECP ISA Interface Standard */
static int programmable_dma_support(struct parport *p)
{
unsigned char oecr = inb(ECONTROL(p));
int dma;
frob_set_mode(p, ECR_CNF);
dma = inb(CONFIGB(p)) & 0x07;
/* 000: Indicates jumpered 8-bit DMA if read-only.
100: Indicates jumpered 16-bit DMA if read-only. */
if ((dma & 0x03) == 0)
dma = PARPORT_DMA_NONE;
ECR_WRITE(p, oecr);
return dma;
}
static int parport_dma_probe(struct parport *p)
{
const struct parport_pc_private *priv = p->private_data;
if (priv->ecr) /* ask ECP chipset first */
p->dma = programmable_dma_support(p);
if (p->dma == PARPORT_DMA_NONE) {
/* ask known Super-IO chips proper, although these
claim ECP compatible, some don't report their DMA
conforming to ECP standards */
p->dma = get_superio_dma(p);
}
return p->dma;
}
/* --- Initialisation code -------------------------------- */
static LIST_HEAD(ports_list);
static DEFINE_SPINLOCK(ports_lock);
static struct parport *__parport_pc_probe_port(unsigned long int base,
unsigned long int base_hi,
int irq, int dma,
struct device *dev,
int irqflags,
unsigned int mode_mask,
unsigned char ecr_writable)
{
struct parport_pc_private *priv;
struct parport_operations *ops;
struct parport *p;
int probedirq = PARPORT_IRQ_NONE;
struct resource *base_res;
struct resource *ECR_res = NULL;
struct resource *EPP_res = NULL;
struct platform_device *pdev = NULL;
int ret;
if (!dev) {
/* We need a physical device to attach to, but none was
* provided. Create our own. */
pdev = platform_device_register_simple("parport_pc",
base, NULL, 0);
if (IS_ERR(pdev))
return NULL;
dev = &pdev->dev;
ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(24));
if (ret) {
dev_err(dev, "Unable to set coherent dma mask: disabling DMA\n");
dma = PARPORT_DMA_NONE;
}
}
ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
if (!ops)
goto out1;
priv = kmalloc(sizeof(struct parport_pc_private), GFP_KERNEL);
if (!priv)
goto out2;
	/* a misnomer, actually - it allocates and reserves the parport number */
p = parport_register_port(base, irq, dma, ops);
if (!p)
goto out3;
base_res = request_region(base, 3, p->name);
if (!base_res)
goto out4;
memcpy(ops, &parport_pc_ops, sizeof(struct parport_operations));
priv->ctr = 0xc;
priv->ctr_writable = ~0x10;
priv->ecr = 0;
priv->ecr_writable = ecr_writable;
priv->fifo_depth = 0;
priv->dma_buf = NULL;
priv->dma_handle = 0;
INIT_LIST_HEAD(&priv->list);
priv->port = p;
p->dev = dev;
p->base_hi = base_hi;
p->modes = PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT;
p->private_data = priv;
if (base_hi) {
ECR_res = request_region(base_hi, 3, p->name);
if (ECR_res)
parport_ECR_present(p);
}
if (base != 0x3bc) {
EPP_res = request_region(base+0x3, 5, p->name);
if (EPP_res)
if (!parport_EPP_supported(p))
parport_ECPEPP_supported(p);
}
if (!parport_SPP_supported(p))
/* No port. */
goto out5;
if (priv->ecr)
parport_ECPPS2_supported(p);
else
parport_PS2_supported(p);
p->size = (p->modes & PARPORT_MODE_EPP) ? 8 : 3;
pr_info("%s: PC-style at 0x%lx", p->name, p->base);
if (p->base_hi && priv->ecr)
pr_cont(" (0x%lx)", p->base_hi);
if (p->irq == PARPORT_IRQ_AUTO) {
p->irq = PARPORT_IRQ_NONE;
parport_irq_probe(p);
} else if (p->irq == PARPORT_IRQ_PROBEONLY) {
p->irq = PARPORT_IRQ_NONE;
parport_irq_probe(p);
probedirq = p->irq;
p->irq = PARPORT_IRQ_NONE;
}
if (p->irq != PARPORT_IRQ_NONE) {
pr_cont(", irq %d", p->irq);
priv->ctr_writable |= 0x10;
if (p->dma == PARPORT_DMA_AUTO) {
p->dma = PARPORT_DMA_NONE;
parport_dma_probe(p);
}
}
if (p->dma == PARPORT_DMA_AUTO) /* To use DMA, giving the irq
is mandatory (see above) */
p->dma = PARPORT_DMA_NONE;
#ifdef CONFIG_PARPORT_PC_FIFO
if (parport_ECP_supported(p) &&
p->dma != PARPORT_DMA_NOFIFO &&
priv->fifo_depth > 0 && p->irq != PARPORT_IRQ_NONE) {
p->modes |= PARPORT_MODE_ECP | PARPORT_MODE_COMPAT;
if (p->dma != PARPORT_DMA_NONE)
p->modes |= PARPORT_MODE_DMA;
} else
/* We can't use the DMA channel after all. */
p->dma = PARPORT_DMA_NONE;
#endif /* Allowed to use FIFO/DMA */
p->modes &= ~mode_mask;
#ifdef CONFIG_PARPORT_PC_FIFO
if ((p->modes & PARPORT_MODE_COMPAT) != 0)
p->ops->compat_write_data = parport_pc_compat_write_block_pio;
#ifdef CONFIG_PARPORT_1284
if ((p->modes & PARPORT_MODE_ECP) != 0)
p->ops->ecp_write_data = parport_pc_ecp_write_block_pio;
#endif
if ((p->modes & (PARPORT_MODE_ECP | PARPORT_MODE_COMPAT)) != 0) {
if ((p->modes & PARPORT_MODE_DMA) != 0)
pr_cont(", dma %d", p->dma);
else
pr_cont(", using FIFO");
}
#endif /* Allowed to use FIFO/DMA */
pr_cont(" [");
#define printmode(x) \
do { \
if (p->modes & PARPORT_MODE_##x) \
pr_cont("%s%s", f++ ? "," : "", #x); \
} while (0)
{
int f = 0;
printmode(PCSPP);
printmode(TRISTATE);
printmode(COMPAT);
printmode(EPP);
printmode(ECP);
printmode(DMA);
}
#undef printmode
#ifndef CONFIG_PARPORT_1284
pr_cont("(,...)");
#endif /* CONFIG_PARPORT_1284 */
pr_cont("]\n");
if (probedirq != PARPORT_IRQ_NONE)
pr_info("%s: irq %d detected\n", p->name, probedirq);
	/* If there is no ECP, release the ports grabbed above. */
if (ECR_res && (p->modes & PARPORT_MODE_ECP) == 0) {
release_region(base_hi, 3);
ECR_res = NULL;
}
	/* Likewise for EPP ports */
if (EPP_res && (p->modes & PARPORT_MODE_EPP) == 0) {
release_region(base+3, 5);
EPP_res = NULL;
}
if (p->irq != PARPORT_IRQ_NONE) {
if (request_irq(p->irq, parport_irq_handler,
irqflags, p->name, p)) {
pr_warn("%s: irq %d in use, resorting to polled operation\n",
p->name, p->irq);
p->irq = PARPORT_IRQ_NONE;
p->dma = PARPORT_DMA_NONE;
}
#ifdef CONFIG_PARPORT_PC_FIFO
#ifdef HAS_DMA
if (p->dma != PARPORT_DMA_NONE) {
if (request_dma(p->dma, p->name)) {
pr_warn("%s: dma %d in use, resorting to PIO operation\n",
p->name, p->dma);
p->dma = PARPORT_DMA_NONE;
} else {
priv->dma_buf =
dma_alloc_coherent(dev,
PAGE_SIZE,
&priv->dma_handle,
GFP_KERNEL);
if (!priv->dma_buf) {
pr_warn("%s: cannot get buffer for DMA, resorting to PIO operation\n",
p->name);
free_dma(p->dma);
p->dma = PARPORT_DMA_NONE;
}
}
}
#endif
#endif
}
/* Done probing. Now put the port into a sensible start-up state. */
if (priv->ecr)
/*
* Put the ECP detected port in PS2 mode.
* Do this also for ports that have ECR but don't do ECP.
*/
ECR_WRITE(p, 0x34);
parport_pc_write_data(p, 0);
parport_pc_data_forward(p);
/* Now that we've told the sharing engine about the port, and
found out its characteristics, let the high-level drivers
know about it. */
spin_lock(&ports_lock);
list_add(&priv->list, &ports_list);
spin_unlock(&ports_lock);
parport_announce_port(p);
return p;
out5:
if (ECR_res)
release_region(base_hi, 3);
if (EPP_res)
release_region(base+0x3, 5);
release_region(base, 3);
out4:
parport_del_port(p);
out3:
kfree(priv);
out2:
kfree(ops);
out1:
if (pdev)
platform_device_unregister(pdev);
return NULL;
}
struct parport *parport_pc_probe_port(unsigned long int base,
unsigned long int base_hi,
int irq, int dma,
struct device *dev,
int irqflags)
{
return __parport_pc_probe_port(base, base_hi, irq, dma,
dev, irqflags, 0, 0);
}
EXPORT_SYMBOL(parport_pc_probe_port);
void parport_pc_unregister_port(struct parport *p)
{
struct parport_pc_private *priv = p->private_data;
struct parport_operations *ops = p->ops;
parport_remove_port(p);
spin_lock(&ports_lock);
list_del_init(&priv->list);
spin_unlock(&ports_lock);
#if defined(CONFIG_PARPORT_PC_FIFO) && defined(HAS_DMA)
if (p->dma != PARPORT_DMA_NONE)
free_dma(p->dma);
#endif
if (p->irq != PARPORT_IRQ_NONE)
free_irq(p->irq, p);
release_region(p->base, 3);
if (p->size > 3)
release_region(p->base + 3, p->size - 3);
if (p->modes & PARPORT_MODE_ECP)
release_region(p->base_hi, 3);
#if defined(CONFIG_PARPORT_PC_FIFO) && defined(HAS_DMA)
if (priv->dma_buf)
dma_free_coherent(p->physport->dev, PAGE_SIZE,
priv->dma_buf,
priv->dma_handle);
#endif
kfree(p->private_data);
parport_del_port(p);
kfree(ops); /* hope no-one cached it */
}
EXPORT_SYMBOL(parport_pc_unregister_port);
#ifdef CONFIG_PCI
/* ITE support maintained by Rich Liu <[email protected]> */
static int sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, int autodma,
const struct parport_pc_via_data *via)
{
short inta_addr[6] = { 0x2A0, 0x2C0, 0x220, 0x240, 0x1E0 };
u32 ite8872set;
u32 ite8872_lpt, ite8872_lpthi;
u8 ite8872_irq, type;
int irq;
int i;
pr_debug("sio_ite_8872_probe()\n");
	/* figure out which chip this is */
for (i = 0; i < 5; i++) {
if (request_region(inta_addr[i], 32, "it887x")) {
int test;
pci_write_config_dword(pdev, 0x60,
0xe5000000 | inta_addr[i]);
pci_write_config_dword(pdev, 0x78,
0x00000000 | inta_addr[i]);
test = inb(inta_addr[i]);
if (test != 0xff)
break;
release_region(inta_addr[i], 32);
}
}
if (i >= 5) {
pr_info("parport_pc: cannot find ITE8872 INTA\n");
return 0;
}
type = inb(inta_addr[i] + 0x18);
type &= 0x0f;
switch (type) {
case 0x2:
pr_info("parport_pc: ITE8871 found (1P)\n");
ite8872set = 0x64200000;
break;
case 0xa:
pr_info("parport_pc: ITE8875 found (1P)\n");
ite8872set = 0x64200000;
break;
case 0xe:
pr_info("parport_pc: ITE8872 found (2S1P)\n");
ite8872set = 0x64e00000;
break;
case 0x6:
pr_info("parport_pc: ITE8873 found (1S)\n");
release_region(inta_addr[i], 32);
return 0;
case 0x8:
pr_info("parport_pc: ITE8874 found (2S)\n");
release_region(inta_addr[i], 32);
return 0;
default:
pr_info("parport_pc: unknown ITE887x\n");
pr_info("parport_pc: please mail 'lspci -nvv' output to [email protected]\n");
release_region(inta_addr[i], 32);
return 0;
}
pci_read_config_byte(pdev, 0x3c, &ite8872_irq);
pci_read_config_dword(pdev, 0x1c, &ite8872_lpt);
ite8872_lpt &= 0x0000ff00;
pci_read_config_dword(pdev, 0x20, &ite8872_lpthi);
ite8872_lpthi &= 0x0000ff00;
pci_write_config_dword(pdev, 0x6c, 0xe3000000 | ite8872_lpt);
pci_write_config_dword(pdev, 0x70, 0xe3000000 | ite8872_lpthi);
pci_write_config_dword(pdev, 0x80, (ite8872_lpthi<<16) | ite8872_lpt);
	/* Set SPP & EPP modes, parallel port with no DMA, enable all functions */
	/* Set the parallel port IRQ */
pci_write_config_dword(pdev, 0x9c,
ite8872set | (ite8872_irq * 0x11111));
pr_debug("ITE887x: The IRQ is %d\n", ite8872_irq);
pr_debug("ITE887x: The PARALLEL I/O port is 0x%x\n", ite8872_lpt);
pr_debug("ITE887x: The PARALLEL I/O porthi is 0x%x\n", ite8872_lpthi);
/* Let the user (or defaults) steer us away from interrupts */
irq = ite8872_irq;
if (autoirq != PARPORT_IRQ_AUTO)
irq = PARPORT_IRQ_NONE;
/*
* Release the resource so that parport_pc_probe_port can get it.
*/
release_region(inta_addr[i], 32);
if (parport_pc_probe_port(ite8872_lpt, ite8872_lpthi,
irq, PARPORT_DMA_NONE, &pdev->dev, 0)) {
pr_info("parport_pc: ITE 8872 parallel port: io=0x%X",
ite8872_lpt);
if (irq != PARPORT_IRQ_NONE)
pr_cont(", irq=%d", irq);
pr_cont("\n");
return 1;
}
return 0;
}
/* VIA 8231 support by Pavel Fedin <[email protected]>
based on VIA 686a support code by Jeff Garzik <[email protected]> */
static int parport_init_mode;
/* Data for two known VIA chips */
static struct parport_pc_via_data via_686a_data = {
0x51,
0x50,
0x85,
0x02,
0xE2,
0xF0,
0xE6
};
static struct parport_pc_via_data via_8231_data = {
0x45,
0x44,
0x50,
0x04,
0xF2,
0xFA,
0xF6
};
static int sio_via_probe(struct pci_dev *pdev, int autoirq, int autodma,
const struct parport_pc_via_data *via)
{
u8 tmp, tmp2, siofunc;
u8 ppcontrol = 0;
int dma, irq;
unsigned port1, port2;
unsigned have_epp = 0;
printk(KERN_DEBUG "parport_pc: VIA 686A/8231 detected\n");
switch (parport_init_mode) {
case 1:
printk(KERN_DEBUG "parport_pc: setting SPP mode\n");
siofunc = VIA_FUNCTION_PARPORT_SPP;
break;
case 2:
printk(KERN_DEBUG "parport_pc: setting PS/2 mode\n");
siofunc = VIA_FUNCTION_PARPORT_SPP;
ppcontrol = VIA_PARPORT_BIDIR;
break;
case 3:
printk(KERN_DEBUG "parport_pc: setting EPP mode\n");
siofunc = VIA_FUNCTION_PARPORT_EPP;
ppcontrol = VIA_PARPORT_BIDIR;
have_epp = 1;
break;
case 4:
printk(KERN_DEBUG "parport_pc: setting ECP mode\n");
siofunc = VIA_FUNCTION_PARPORT_ECP;
ppcontrol = VIA_PARPORT_BIDIR;
break;
case 5:
printk(KERN_DEBUG "parport_pc: setting EPP+ECP mode\n");
siofunc = VIA_FUNCTION_PARPORT_ECP;
ppcontrol = VIA_PARPORT_BIDIR|VIA_PARPORT_ECPEPP;
have_epp = 1;
break;
default:
printk(KERN_DEBUG "parport_pc: probing current configuration\n");
siofunc = VIA_FUNCTION_PROBE;
break;
}
/*
* unlock super i/o configuration
*/
pci_read_config_byte(pdev, via->via_pci_superio_config_reg, &tmp);
tmp |= via->via_pci_superio_config_data;
pci_write_config_byte(pdev, via->via_pci_superio_config_reg, tmp);
/* Bits 1-0: Parallel Port Mode / Enable */
outb(via->viacfg_function, VIA_CONFIG_INDEX);
tmp = inb(VIA_CONFIG_DATA);
/* Bit 5: EPP+ECP enable; bit 7: PS/2 bidirectional port enable */
outb(via->viacfg_parport_control, VIA_CONFIG_INDEX);
tmp2 = inb(VIA_CONFIG_DATA);
if (siofunc == VIA_FUNCTION_PROBE) {
siofunc = tmp & VIA_FUNCTION_PARPORT_DISABLE;
ppcontrol = tmp2;
} else {
tmp &= ~VIA_FUNCTION_PARPORT_DISABLE;
tmp |= siofunc;
outb(via->viacfg_function, VIA_CONFIG_INDEX);
outb(tmp, VIA_CONFIG_DATA);
tmp2 &= ~(VIA_PARPORT_BIDIR|VIA_PARPORT_ECPEPP);
tmp2 |= ppcontrol;
outb(via->viacfg_parport_control, VIA_CONFIG_INDEX);
outb(tmp2, VIA_CONFIG_DATA);
}
/* Parallel Port I/O Base Address, bits 9-2 */
outb(via->viacfg_parport_base, VIA_CONFIG_INDEX);
port1 = inb(VIA_CONFIG_DATA) << 2;
printk(KERN_DEBUG "parport_pc: Current parallel port base: 0x%X\n",
port1);
if (port1 == 0x3BC && have_epp) {
outb(via->viacfg_parport_base, VIA_CONFIG_INDEX);
outb((0x378 >> 2), VIA_CONFIG_DATA);
printk(KERN_DEBUG "parport_pc: Parallel port base changed to 0x378\n");
port1 = 0x378;
}
/*
* lock super i/o configuration
*/
pci_read_config_byte(pdev, via->via_pci_superio_config_reg, &tmp);
tmp &= ~via->via_pci_superio_config_data;
pci_write_config_byte(pdev, via->via_pci_superio_config_reg, tmp);
if (siofunc == VIA_FUNCTION_PARPORT_DISABLE) {
pr_info("parport_pc: VIA parallel port disabled in BIOS\n");
return 0;
}
/* Bits 7-4: PnP Routing for Parallel Port IRQ */
pci_read_config_byte(pdev, via->via_pci_parport_irq_reg, &tmp);
irq = ((tmp & VIA_IRQCONTROL_PARALLEL) >> 4);
if (siofunc == VIA_FUNCTION_PARPORT_ECP) {
/* Bits 3-2: PnP Routing for Parallel Port DMA */
pci_read_config_byte(pdev, via->via_pci_parport_dma_reg, &tmp);
dma = ((tmp & VIA_DMACONTROL_PARALLEL) >> 2);
} else
		/* If ECP is not enabled, DMA is not enabled either, so
		   any 'dma' value read would be bogus. */
dma = PARPORT_DMA_NONE;
/* Let the user (or defaults) steer us away from interrupts and DMA */
if (autoirq == PARPORT_IRQ_NONE) {
irq = PARPORT_IRQ_NONE;
dma = PARPORT_DMA_NONE;
}
if (autodma == PARPORT_DMA_NONE)
dma = PARPORT_DMA_NONE;
switch (port1) {
case 0x3bc:
port2 = 0x7bc; break;
case 0x378:
port2 = 0x778; break;
case 0x278:
port2 = 0x678; break;
default:
pr_info("parport_pc: Weird VIA parport base 0x%X, ignoring\n",
port1);
return 0;
}
/* filter bogus IRQs */
switch (irq) {
case 0:
case 2:
case 8:
case 13:
irq = PARPORT_IRQ_NONE;
break;
default: /* do nothing */
break;
}
/* finally, do the probe with values obtained */
if (parport_pc_probe_port(port1, port2, irq, dma, &pdev->dev, 0)) {
pr_info("parport_pc: VIA parallel port: io=0x%X", port1);
if (irq != PARPORT_IRQ_NONE)
pr_cont(", irq=%d", irq);
if (dma != PARPORT_DMA_NONE)
pr_cont(", dma=%d", dma);
pr_cont("\n");
return 1;
}
pr_warn("parport_pc: Strange, can't probe VIA parallel port: io=0x%X, irq=%d, dma=%d\n",
port1, irq, dma);
return 0;
}
enum parport_pc_sio_types {
sio_via_686a = 0, /* Via VT82C686A motherboard Super I/O */
sio_via_8231, /* Via VT8231 south bridge integrated Super IO */
sio_ite_8872,
last_sio
};
/* each element directly indexed from enum list, above */
static struct parport_pc_superio {
int (*probe) (struct pci_dev *pdev, int autoirq, int autodma,
const struct parport_pc_via_data *via);
const struct parport_pc_via_data *via;
} parport_pc_superio_info[] = {
{ sio_via_probe, &via_686a_data, },
{ sio_via_probe, &via_8231_data, },
{ sio_ite_8872_probe, NULL, },
};
enum parport_pc_pci_cards {
siig_1p_10x = last_sio,
siig_2p_10x,
siig_1p_20x,
siig_2p_20x,
lava_parallel,
lava_parallel_dual_a,
lava_parallel_dual_b,
boca_ioppar,
plx_9050,
timedia_4006a,
timedia_4014,
timedia_4008a,
timedia_4018,
timedia_9018a,
syba_2p_epp,
syba_1p_ecp,
titan_010l,
avlab_1p,
avlab_2p,
oxsemi_952,
oxsemi_954,
oxsemi_840,
oxsemi_pcie_pport,
aks_0100,
mobility_pp,
netmos_9900,
netmos_9705,
netmos_9715,
netmos_9755,
netmos_9805,
netmos_9815,
netmos_9901,
netmos_9865,
asix_ax99100,
quatech_sppxp100,
wch_ch382l,
};
/* each element directly indexed from enum list, above
* (but offset by last_sio) */
static struct parport_pc_pci {
int numports;
struct { /* BAR (base address registers) numbers in the config
space header */
int lo;
int hi;
/* -1 if not there, >6 for offset-method (max BAR is 6) */
} addr[2];
/* Bit field of parport modes to exclude. */
unsigned int mode_mask;
/* If non-zero, sets the bitmask of writable ECR bits. In that
* case additionally bit 0 will be forcibly set on writes. */
unsigned char ecr_writable;
/* If set, this is called immediately after pci_enable_device.
* If it returns non-zero, no probing will take place and the
* ports will not be used. */
int (*preinit_hook) (struct pci_dev *pdev, int autoirq, int autodma);
/* If set, this is called after probing for ports. If 'failed'
* is non-zero we couldn't use any of the ports. */
void (*postinit_hook) (struct pci_dev *pdev, int failed);
} cards[] = {
/* siig_1p_10x */ { 1, { { 2, 3 }, } },
/* siig_2p_10x */ { 2, { { 2, 3 }, { 4, 5 }, } },
/* siig_1p_20x */ { 1, { { 0, 1 }, } },
/* siig_2p_20x */ { 2, { { 0, 1 }, { 2, 3 }, } },
/* lava_parallel */ { 1, { { 0, -1 }, } },
/* lava_parallel_dual_a */ { 1, { { 0, -1 }, } },
/* lava_parallel_dual_b */ { 1, { { 0, -1 }, } },
/* boca_ioppar */ { 1, { { 0, -1 }, } },
/* plx_9050 */ { 2, { { 4, -1 }, { 5, -1 }, } },
/* timedia_4006a */ { 1, { { 0, -1 }, } },
/* timedia_4014 */ { 2, { { 0, -1 }, { 2, -1 }, } },
/* timedia_4008a */ { 1, { { 0, 1 }, } },
/* timedia_4018 */ { 2, { { 0, 1 }, { 2, 3 }, } },
/* timedia_9018a */ { 2, { { 0, 1 }, { 2, 3 }, } },
/* SYBA uses fixed offsets in
a 1K io window */
/* syba_2p_epp AP138B */ { 2, { { 0, 0x078 }, { 0, 0x178 }, } },
/* syba_1p_ecp W83787 */ { 1, { { 0, 0x078 }, } },
/* titan_010l */ { 1, { { 3, -1 }, } },
/* avlab_1p */ { 1, { { 0, 1}, } },
/* avlab_2p */ { 2, { { 0, 1}, { 2, 3 },} },
/* The Oxford Semi cards are unusual: older variants of 954 don't
* support ECP, and 840 locks up if you write 1 to bit 2! None
* implement nFault or service interrupts and all require 00001
* bit pattern to be used for bits 4:0 with ECR writes. */
/* oxsemi_952 */ { 1, { { 0, 1 }, },
PARPORT_MODE_COMPAT, ECR_MODE_MASK },
/* oxsemi_954 */ { 1, { { 0, 1 }, },
PARPORT_MODE_ECP |
PARPORT_MODE_COMPAT, ECR_MODE_MASK },
/* oxsemi_840 */ { 1, { { 0, 1 }, },
PARPORT_MODE_COMPAT, ECR_MODE_MASK },
/* oxsemi_pcie_pport */ { 1, { { 0, 1 }, },
PARPORT_MODE_COMPAT, ECR_MODE_MASK },
/* aks_0100 */ { 1, { { 0, -1 }, } },
/* mobility_pp */ { 1, { { 0, 1 }, } },
/* netmos_9900 */ { 1, { { 0, -1 }, } },
/* The netmos entries below are untested */
/* netmos_9705 */ { 1, { { 0, -1 }, } },
/* netmos_9715 */ { 2, { { 0, 1 }, { 2, 3 },} },
/* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} },
/* netmos_9805 */ { 1, { { 0, 1 }, } },
/* netmos_9815 */ { 2, { { 0, 1 }, { 2, 3 }, } },
/* netmos_9901 */ { 1, { { 0, -1 }, } },
/* netmos_9865 */ { 1, { { 0, -1 }, } },
/* asix_ax99100 */ { 1, { { 0, 1 }, } },
/* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
/* wch_ch382l */ { 1, { { 2, -1 }, } },
};
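/*
 * In the PCI device table below, driver_data indexes either
 * parport_pc_superio_info[] (values below last_sio) or, after subtracting
 * last_sio, the cards[] table above; see parport_pc_pci_probe().
 */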
static const struct pci_device_id parport_pc_pci_tbl[] = {
/* Super-IO onboard chips */
{ 0x1106, 0x0686, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sio_via_686a },
{ 0x1106, 0x8231, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sio_via_8231 },
{ PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8872,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, sio_ite_8872 },
/* PCI cards */
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1P_10x,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_1p_10x },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P_10x,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2p_10x },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1P_20x,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_1p_20x },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P_20x,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2p_20x },
{ PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_PARALLEL,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, lava_parallel },
{ PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_DUAL_PAR_A,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, lava_parallel_dual_a },
{ PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_DUAL_PAR_B,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, lava_parallel_dual_b },
{ PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_BOCA_IOPPAR,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, boca_ioppar },
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
PCI_SUBVENDOR_ID_EXSYS, PCI_SUBDEVICE_ID_EXSYS_4014, 0, 0, plx_9050 },
/* PCI_VENDOR_ID_TIMEDIA/SUNIX has many differing cards ...*/
{ 0x1409, 0x7268, 0x1409, 0x0101, 0, 0, timedia_4006a },
{ 0x1409, 0x7268, 0x1409, 0x0102, 0, 0, timedia_4014 },
{ 0x1409, 0x7268, 0x1409, 0x0103, 0, 0, timedia_4008a },
{ 0x1409, 0x7268, 0x1409, 0x0104, 0, 0, timedia_4018 },
{ 0x1409, 0x7268, 0x1409, 0x9018, 0, 0, timedia_9018a },
{ PCI_VENDOR_ID_SYBA, PCI_DEVICE_ID_SYBA_2P_EPP,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_2p_epp },
{ PCI_VENDOR_ID_SYBA, PCI_DEVICE_ID_SYBA_1P_ECP,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_1p_ecp },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_010L,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, titan_010l },
/* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/
/* AFAVLAB_TK9902 */
{ 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p},
{ 0x14db, 0x2121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_2p},
{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI952PP,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_952 },
{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954PP,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_954 },
{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_12PCI840,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_840 },
{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe840,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe840_G,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_0,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_0_G,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1_G,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1_U,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
{ PCI_VENDOR_ID_AKS, PCI_DEVICE_ID_AKS_ALADDINCARD,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, aks_0100 },
{ 0x14f2, 0x0121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, mobility_pp },
/* NetMos communication controllers */
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900,
0xA000, 0x2000, 0, 0, netmos_9900 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9705,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9705 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9715,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9715 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9755,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9755 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9805,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9805 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9815,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
0xA000, 0x2000, 0, 0, netmos_9901 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
0xA000, 0x1000, 0, 0, netmos_9865 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
0xA000, 0x2000, 0, 0, netmos_9865 },
/* ASIX AX99100 PCIe to Multi I/O Controller */
{ PCI_VENDOR_ID_ASIX, PCI_DEVICE_ID_ASIX_AX99100,
0xA000, 0x2000, 0, 0, asix_ax99100 },
/* Quatech SPPXP-100 Parallel port PCI ExpressCard */
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
/* WCH CH382L PCI-E single parallel port card */
{ 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382l },
{ 0, } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl);
struct pci_parport_data {
int num;
struct parport *ports[2];
};
static int parport_pc_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
int err, count, n, i = id->driver_data;
struct pci_parport_data *data;
if (i < last_sio)
/* This is an onboard Super-IO and has already been probed */
return 0;
/* This is a PCI card */
i -= last_sio;
count = 0;
err = pci_enable_device(dev);
if (err)
return err;
data = kmalloc(sizeof(struct pci_parport_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
if (cards[i].preinit_hook &&
cards[i].preinit_hook(dev, PARPORT_IRQ_NONE, PARPORT_DMA_NONE)) {
kfree(data);
return -ENODEV;
}
for (n = 0; n < cards[i].numports; n++) {
int lo = cards[i].addr[n].lo;
int hi = cards[i].addr[n].hi;
int irq;
unsigned long io_lo, io_hi;
io_lo = pci_resource_start(dev, lo);
io_hi = 0;
if ((hi >= 0) && (hi <= 6))
io_hi = pci_resource_start(dev, hi);
else if (hi > 6)
io_lo += hi; /* Reinterpret the meaning of
"hi" as an offset (see SYBA
def.) */
/* TODO: test if sharing interrupts works */
irq = dev->irq;
if (irq == IRQ_NONE) {
printk(KERN_DEBUG "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx)\n",
id->vendor, id->device, io_lo, io_hi);
irq = PARPORT_IRQ_NONE;
} else {
printk(KERN_DEBUG "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx), IRQ %d\n",
id->vendor, id->device, io_lo, io_hi, irq);
}
data->ports[count] =
__parport_pc_probe_port(io_lo, io_hi, irq,
PARPORT_DMA_NONE, &dev->dev,
IRQF_SHARED,
cards[i].mode_mask,
cards[i].ecr_writable);
if (data->ports[count])
count++;
}
data->num = count;
if (cards[i].postinit_hook)
cards[i].postinit_hook(dev, count == 0);
if (count) {
pci_set_drvdata(dev, data);
return 0;
}
kfree(data);
return -ENODEV;
}
static void parport_pc_pci_remove(struct pci_dev *dev)
{
struct pci_parport_data *data = pci_get_drvdata(dev);
int i;
if (data) {
for (i = data->num - 1; i >= 0; i--)
parport_pc_unregister_port(data->ports[i]);
kfree(data);
}
}
static struct pci_driver parport_pc_pci_driver = {
.name = "parport_pc",
.id_table = parport_pc_pci_tbl,
.probe = parport_pc_pci_probe,
.remove = parport_pc_pci_remove,
};
static int __init parport_pc_init_superio(int autoirq, int autodma)
{
const struct pci_device_id *id;
struct pci_dev *pdev = NULL;
int ret = 0;
for_each_pci_dev(pdev) {
id = pci_match_id(parport_pc_pci_tbl, pdev);
if (id == NULL || id->driver_data >= last_sio)
continue;
if (parport_pc_superio_info[id->driver_data].probe(
pdev, autoirq, autodma,
parport_pc_superio_info[id->driver_data].via)) {
ret++;
}
}
return ret; /* number of devices found */
}
#else
static struct pci_driver parport_pc_pci_driver;
static int __init parport_pc_init_superio(int autoirq, int autodma)
{
return 0;
}
#endif /* CONFIG_PCI */
#ifdef CONFIG_PNP
static const struct pnp_device_id parport_pc_pnp_tbl[] = {
/* Standard LPT Printer Port */
{.id = "PNP0400", .driver_data = 0},
/* ECP Printer Port */
{.id = "PNP0401", .driver_data = 0},
{ }
};
MODULE_DEVICE_TABLE(pnp, parport_pc_pnp_tbl);
static int parport_pc_pnp_probe(struct pnp_dev *dev,
const struct pnp_device_id *id)
{
struct parport *pdata;
unsigned long io_lo, io_hi;
int dma, irq;
if (pnp_port_valid(dev, 0) &&
!(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
io_lo = pnp_port_start(dev, 0);
} else
return -EINVAL;
if (pnp_port_valid(dev, 1) &&
!(pnp_port_flags(dev, 1) & IORESOURCE_DISABLED)) {
io_hi = pnp_port_start(dev, 1);
} else
io_hi = 0;
if (pnp_irq_valid(dev, 0) &&
!(pnp_irq_flags(dev, 0) & IORESOURCE_DISABLED)) {
irq = pnp_irq(dev, 0);
} else
irq = PARPORT_IRQ_NONE;
if (pnp_dma_valid(dev, 0) &&
!(pnp_dma_flags(dev, 0) & IORESOURCE_DISABLED)) {
dma = pnp_dma(dev, 0);
} else
dma = PARPORT_DMA_NONE;
dev_info(&dev->dev, "reported by %s\n", dev->protocol->name);
pdata = parport_pc_probe_port(io_lo, io_hi, irq, dma, &dev->dev, 0);
if (pdata == NULL)
return -ENODEV;
pnp_set_drvdata(dev, pdata);
return 0;
}
static void parport_pc_pnp_remove(struct pnp_dev *dev)
{
struct parport *pdata = (struct parport *)pnp_get_drvdata(dev);
if (!pdata)
return;
parport_pc_unregister_port(pdata);
}
/* we only need the pnp layer to activate the device, at least for now */
static struct pnp_driver parport_pc_pnp_driver = {
.name = "parport_pc",
.id_table = parport_pc_pnp_tbl,
.probe = parport_pc_pnp_probe,
.remove = parport_pc_pnp_remove,
};
#else
static struct pnp_driver parport_pc_pnp_driver;
#endif /* CONFIG_PNP */
static int parport_pc_platform_probe(struct platform_device *pdev)
{
/* Always succeed, the actual probing is done in
* parport_pc_probe_port(). */
return 0;
}
static struct platform_driver parport_pc_platform_driver = {
.driver = {
.name = "parport_pc",
},
.probe = parport_pc_platform_probe,
};
/* This is called by parport_pc_find_nonpci_ports (in asm/parport.h) */
static int __attribute__((unused))
parport_pc_find_isa_ports(int autoirq, int autodma)
{
int count = 0;
if (parport_pc_probe_port(0x3bc, 0x7bc, autoirq, autodma, NULL, 0))
count++;
if (parport_pc_probe_port(0x378, 0x778, autoirq, autodma, NULL, 0))
count++;
if (parport_pc_probe_port(0x278, 0x678, autoirq, autodma, NULL, 0))
count++;
return count;
}
/* This function is called by parport_pc_init if the user didn't
* specify any ports to probe. Its job is to find some ports. Order
* is important here -- we want ISA ports to be registered first,
* followed by PCI cards (for least surprise), but before that we want
* to do chipset-specific tests for some onboard ports that we know
* about.
*
* autoirq is PARPORT_IRQ_NONE, PARPORT_IRQ_AUTO, or PARPORT_IRQ_PROBEONLY
* autodma is PARPORT_DMA_NONE or PARPORT_DMA_AUTO
*/
static void __init parport_pc_find_ports(int autoirq, int autodma)
{
int count = 0, err;
#ifdef CONFIG_PARPORT_PC_SUPERIO
detect_and_report_it87();
detect_and_report_winbond();
detect_and_report_smsc();
#endif
/* Onboard SuperIO chipsets that show themselves on the PCI bus. */
count += parport_pc_init_superio(autoirq, autodma);
/* PnP ports, skip detection if SuperIO already found them */
if (!count) {
err = pnp_register_driver(&parport_pc_pnp_driver);
if (!err)
pnp_registered_parport = 1;
}
/* ISA ports and whatever (see asm/parport.h). */
parport_pc_find_nonpci_ports(autoirq, autodma);
err = pci_register_driver(&parport_pc_pci_driver);
if (!err)
pci_registered_parport = 1;
}
/*
 * Piles of crap below pretend to be a parser for module and kernel
 * parameters. Say "thank you" to whoever came up with that syntax and
 * keep in mind that the code below is a cleaned-up version.
*/
static int __initdata io[PARPORT_PC_MAX_PORTS+1] = {
[0 ... PARPORT_PC_MAX_PORTS] = 0
};
static int __initdata io_hi[PARPORT_PC_MAX_PORTS+1] = {
[0 ... PARPORT_PC_MAX_PORTS] = PARPORT_IOHI_AUTO
};
static int __initdata dmaval[PARPORT_PC_MAX_PORTS] = {
[0 ... PARPORT_PC_MAX_PORTS-1] = PARPORT_DMA_NONE
};
static int __initdata irqval[PARPORT_PC_MAX_PORTS] = {
[0 ... PARPORT_PC_MAX_PORTS-1] = PARPORT_IRQ_PROBEONLY
};
static int __init parport_parse_param(const char *s, int *val,
int automatic, int none, int nofifo)
{
if (!s)
return 0;
if (!strncmp(s, "auto", 4))
*val = automatic;
else if (!strncmp(s, "none", 4))
*val = none;
else if (nofifo && !strncmp(s, "nofifo", 6))
*val = nofifo;
else {
char *ep;
unsigned long r = simple_strtoul(s, &ep, 0);
if (ep != s)
*val = r;
else {
pr_err("parport: bad specifier `%s'\n", s);
return -1;
}
}
return 0;
}
static int __init parport_parse_irq(const char *irqstr, int *val)
{
return parport_parse_param(irqstr, val, PARPORT_IRQ_AUTO,
PARPORT_IRQ_NONE, 0);
}
static int __init parport_parse_dma(const char *dmastr, int *val)
{
return parport_parse_param(dmastr, val, PARPORT_DMA_AUTO,
PARPORT_DMA_NONE, PARPORT_DMA_NOFIFO);
}
#ifdef CONFIG_PCI
static int __init parport_init_mode_setup(char *str)
{
printk(KERN_DEBUG "parport_pc.c: Specified parameter parport_init_mode=%s\n",
str);
if (!strcmp(str, "spp"))
parport_init_mode = 1;
if (!strcmp(str, "ps2"))
parport_init_mode = 2;
if (!strcmp(str, "epp"))
parport_init_mode = 3;
if (!strcmp(str, "ecp"))
parport_init_mode = 4;
if (!strcmp(str, "ecpepp"))
parport_init_mode = 5;
return 1;
}
#endif
#ifdef MODULE
static char *irq[PARPORT_PC_MAX_PORTS];
static char *dma[PARPORT_PC_MAX_PORTS];
MODULE_PARM_DESC(io, "Base I/O address (SPP regs)");
module_param_hw_array(io, int, ioport, NULL, 0);
MODULE_PARM_DESC(io_hi, "Base I/O address (ECR)");
module_param_hw_array(io_hi, int, ioport, NULL, 0);
MODULE_PARM_DESC(irq, "IRQ line");
module_param_hw_array(irq, charp, irq, NULL, 0);
MODULE_PARM_DESC(dma, "DMA channel");
module_param_hw_array(dma, charp, dma, NULL, 0);
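/*
 * Typical modular usage (illustrative values only):
 *   modprobe parport_pc io=0x378 irq=7
 * Several ports may be given as comma-separated lists, e.g.
 *   io=0x378,0x278 irq=7,auto dma=none,none
 */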
#if defined(CONFIG_PARPORT_PC_SUPERIO) || \
(defined(CONFIG_PARPORT_1284) && defined(CONFIG_PARPORT_PC_FIFO))
MODULE_PARM_DESC(verbose_probing, "Log chit-chat during initialisation");
module_param(verbose_probing, int, 0644);
#endif
#ifdef CONFIG_PCI
static char *init_mode;
MODULE_PARM_DESC(init_mode,
"Initialise mode for VIA VT8231 port (spp, ps2, epp, ecp or ecpepp)");
module_param(init_mode, charp, 0);
#endif
static int __init parse_parport_params(void)
{
unsigned int i;
int val;
#ifdef CONFIG_PCI
if (init_mode)
parport_init_mode_setup(init_mode);
#endif
for (i = 0; i < PARPORT_PC_MAX_PORTS && io[i]; i++) {
if (parport_parse_irq(irq[i], &val))
return 1;
irqval[i] = val;
if (parport_parse_dma(dma[i], &val))
return 1;
dmaval[i] = val;
}
if (!io[0]) {
/* The user can make us use any IRQs or DMAs we find. */
if (irq[0] && !parport_parse_irq(irq[0], &val))
switch (val) {
case PARPORT_IRQ_NONE:
case PARPORT_IRQ_AUTO:
irqval[0] = val;
break;
default:
pr_warn("parport_pc: irq specified without base address. Use 'io=' to specify one\n");
}
if (dma[0] && !parport_parse_dma(dma[0], &val))
switch (val) {
case PARPORT_DMA_NONE:
case PARPORT_DMA_AUTO:
dmaval[0] = val;
break;
default:
pr_warn("parport_pc: dma specified without base address. Use 'io=' to specify one\n");
}
}
return 0;
}
#else
static int parport_setup_ptr __initdata;
/*
* Acceptable parameters:
*
* parport=0
* parport=auto
* parport=0xBASE[,IRQ[,DMA]]
*
* IRQ/DMA may be numeric or 'auto' or 'none'
*/
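/*
 * For example (illustrative values): "parport=0x378,7,3" probes a port at
 * 0x378 with IRQ 7 and DMA channel 3, while "parport=0x278,auto" probes
 * 0x278 and auto-detects the IRQ.
 */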
static int __init parport_setup(char *str)
{
char *endptr;
char *sep;
int val;
if (!str || !*str || (*str == '0' && !*(str+1))) {
/* Disable parport if "parport=0" in cmdline */
io[0] = PARPORT_DISABLE;
return 1;
}
if (!strncmp(str, "auto", 4)) {
irqval[0] = PARPORT_IRQ_AUTO;
dmaval[0] = PARPORT_DMA_AUTO;
return 1;
}
val = simple_strtoul(str, &endptr, 0);
if (endptr == str) {
pr_warn("parport=%s not understood\n", str);
return 1;
}
if (parport_setup_ptr == PARPORT_PC_MAX_PORTS) {
pr_err("parport=%s ignored, too many ports\n", str);
return 1;
}
io[parport_setup_ptr] = val;
irqval[parport_setup_ptr] = PARPORT_IRQ_NONE;
dmaval[parport_setup_ptr] = PARPORT_DMA_NONE;
sep = strchr(str, ',');
if (sep++) {
if (parport_parse_irq(sep, &val))
return 1;
irqval[parport_setup_ptr] = val;
sep = strchr(sep, ',');
if (sep++) {
if (parport_parse_dma(sep, &val))
return 1;
dmaval[parport_setup_ptr] = val;
}
}
parport_setup_ptr++;
return 1;
}
static int __init parse_parport_params(void)
{
return io[0] == PARPORT_DISABLE;
}
__setup("parport=", parport_setup);
/*
* Acceptable parameters:
*
* parport_init_mode=[spp|ps2|epp|ecp|ecpepp]
*/
#ifdef CONFIG_PCI
__setup("parport_init_mode=", parport_init_mode_setup);
#endif
#endif
/* "Parser" ends here */
static int __init parport_pc_init(void)
{
int err;
if (parse_parport_params())
return -EINVAL;
err = platform_driver_register(&parport_pc_platform_driver);
if (err)
return err;
if (io[0]) {
int i;
/* Only probe the ports we were given. */
user_specified = 1;
for (i = 0; i < PARPORT_PC_MAX_PORTS; i++) {
if (!io[i])
break;
if (io_hi[i] == PARPORT_IOHI_AUTO)
io_hi[i] = 0x400 + io[i];
parport_pc_probe_port(io[i], io_hi[i],
irqval[i], dmaval[i], NULL, 0);
}
} else
parport_pc_find_ports(irqval[0], dmaval[0]);
return 0;
}
static void __exit parport_pc_exit(void)
{
if (pci_registered_parport)
pci_unregister_driver(&parport_pc_pci_driver);
if (pnp_registered_parport)
pnp_unregister_driver(&parport_pc_pnp_driver);
platform_driver_unregister(&parport_pc_platform_driver);
while (!list_empty(&ports_list)) {
struct parport_pc_private *priv;
struct parport *port;
struct device *dev;
priv = list_entry(ports_list.next,
struct parport_pc_private, list);
port = priv->port;
dev = port->dev;
parport_pc_unregister_port(port);
if (dev && dev->bus == &platform_bus_type)
platform_device_unregister(to_platform_device(dev));
}
}
MODULE_AUTHOR("Phil Blundell, Tim Waugh, others");
MODULE_DESCRIPTION("PC-style parallel port driver");
MODULE_LICENSE("GPL");
module_init(parport_pc_init)
module_exit(parport_pc_exit)
| linux-master | drivers/parport/parport_pc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Low-level parallel-support for PC-style hardware integrated in the
* LASI-Controller (on GSC-Bus) for HP-PARISC Workstations
*
* (C) 1999-2001 by Helge Deller <[email protected]>
*
* based on parport_pc.c by
* Grant Guenther <[email protected]>
* Phil Blundell <[email protected]>
* Tim Waugh <[email protected]>
* Jose Renau <[email protected]>
* David Campbell
* Andrea Arcangeli
*/
#undef DEBUG /* undef for production */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/sysctl.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/superio.h>
#include <linux/parport.h>
#include <asm/pdc.h>
#include <asm/parisc-device.h>
#include <asm/hardware.h>
#include "parport_gsc.h"
MODULE_AUTHOR("Helge Deller <[email protected]>");
MODULE_DESCRIPTION("HP-PARISC PC-style parallel port driver");
MODULE_LICENSE("GPL");
/*
* Clear TIMEOUT BIT in EPP MODE
*
* This is also used in SPP detection.
*/
static int clear_epp_timeout(struct parport *pb)
{
unsigned char r;
if (!(parport_gsc_read_status(pb) & 0x01))
return 1;
/* To clear timeout some chips require double read */
parport_gsc_read_status(pb);
r = parport_gsc_read_status(pb);
parport_writeb (r | 0x01, STATUS (pb)); /* Some reset by writing 1 */
parport_writeb (r & 0xfe, STATUS (pb)); /* Others by writing 0 */
r = parport_gsc_read_status(pb);
return !(r & 0x01);
}
/*
* Access functions.
*
* Most of these aren't static because they may be used by the
* parport_xxx_yyy macros. extern __inline__ versions of several
* of these are in parport_gsc.h.
*/
void parport_gsc_init_state(struct pardevice *dev, struct parport_state *s)
{
s->u.pc.ctr = 0xc | (dev->irq_func ? 0x10 : 0x0);
}
void parport_gsc_save_state(struct parport *p, struct parport_state *s)
{
s->u.pc.ctr = parport_readb (CONTROL (p));
}
void parport_gsc_restore_state(struct parport *p, struct parport_state *s)
{
parport_writeb (s->u.pc.ctr, CONTROL (p));
}
struct parport_operations parport_gsc_ops =
{
.write_data = parport_gsc_write_data,
.read_data = parport_gsc_read_data,
.write_control = parport_gsc_write_control,
.read_control = parport_gsc_read_control,
.frob_control = parport_gsc_frob_control,
.read_status = parport_gsc_read_status,
.enable_irq = parport_gsc_enable_irq,
.disable_irq = parport_gsc_disable_irq,
.data_forward = parport_gsc_data_forward,
.data_reverse = parport_gsc_data_reverse,
.init_state = parport_gsc_init_state,
.save_state = parport_gsc_save_state,
.restore_state = parport_gsc_restore_state,
.epp_write_data = parport_ieee1284_epp_write_data,
.epp_read_data = parport_ieee1284_epp_read_data,
.epp_write_addr = parport_ieee1284_epp_write_addr,
.epp_read_addr = parport_ieee1284_epp_read_addr,
.ecp_write_data = parport_ieee1284_ecp_write_data,
.ecp_read_data = parport_ieee1284_ecp_read_data,
.ecp_write_addr = parport_ieee1284_ecp_write_addr,
.compat_write_data = parport_ieee1284_write_compat,
.nibble_read_data = parport_ieee1284_read_nibble,
.byte_read_data = parport_ieee1284_read_byte,
.owner = THIS_MODULE,
};
/* --- Mode detection ------------------------------------- */
/*
* Checks for port existence, all ports support SPP MODE
*/
static int parport_SPP_supported(struct parport *pb)
{
unsigned char r, w;
/*
	 * first clear any possibly pending EPP timeout
* I ([email protected]) have an SMSC chipset
* that does not even respond to SPP cycles if an EPP
* timeout is pending
*/
clear_epp_timeout(pb);
/* Do a simple read-write test to make sure the port exists. */
w = 0xc;
parport_writeb (w, CONTROL (pb));
/* Is there a control register that we can read from? Some
* ports don't allow reads, so read_control just returns a
* software copy. Some ports _do_ allow reads, so bypass the
* software copy here. In addition, some bits aren't
* writable. */
r = parport_readb (CONTROL (pb));
if ((r & 0xf) == w) {
w = 0xe;
parport_writeb (w, CONTROL (pb));
r = parport_readb (CONTROL (pb));
parport_writeb (0xc, CONTROL (pb));
if ((r & 0xf) == w)
return PARPORT_MODE_PCSPP;
}
/* Try the data register. The data lines aren't tri-stated at
* this stage, so we expect back what we wrote. */
w = 0xaa;
parport_gsc_write_data (pb, w);
r = parport_gsc_read_data (pb);
if (r == w) {
w = 0x55;
parport_gsc_write_data (pb, w);
r = parport_gsc_read_data (pb);
if (r == w)
return PARPORT_MODE_PCSPP;
}
return 0;
}
/* Detect PS/2 support.
*
* Bit 5 (0x20) sets the PS/2 data direction; setting this high
* allows us to read data from the data lines. In theory we would get back
* 0xff but any peripheral attached to the port may drag some or all of the
* lines down to zero. So if we get back anything that isn't the contents
* of the data register we deem PS/2 support to be present.
*
* Some SPP ports have "half PS/2" ability - you can't turn off the line
* drivers, but an external peripheral with sufficiently beefy drivers of
* its own can overpower them and assert its own levels onto the bus, from
* where they can then be read back as normal. Ports with this property
* and the right type of device attached are likely to fail the SPP test,
* (as they will appear to have stuck bits) and so the fact that they might
* be misdetected here is rather academic.
*/
static int parport_PS2_supported(struct parport *pb)
{
int ok = 0;
clear_epp_timeout(pb);
/* try to tri-state the buffer */
parport_gsc_data_reverse (pb);
parport_gsc_write_data(pb, 0x55);
if (parport_gsc_read_data(pb) != 0x55) ok++;
parport_gsc_write_data(pb, 0xaa);
if (parport_gsc_read_data(pb) != 0xaa) ok++;
/* cancel input mode */
parport_gsc_data_forward (pb);
if (ok) {
pb->modes |= PARPORT_MODE_TRISTATE;
} else {
struct parport_gsc_private *priv = pb->private_data;
priv->ctr_writable &= ~0x20;
}
return ok;
}
/* --- Initialisation code -------------------------------- */
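/*
 * Note: mode probing below is first done against a temporary struct parport
 * on the stack ('tmp'), so nothing is registered with the parport core
 * unless SPP support is confirmed; only then is the real port allocated
 * with parport_register_port() and the probed modes copied over.
 */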
static struct parport *parport_gsc_probe_port(unsigned long base,
unsigned long base_hi, int irq,
struct parisc_device *padev)
{
struct parport_gsc_private *priv;
struct parport_operations *ops;
struct parport tmp;
struct parport *p = &tmp;
priv = kzalloc (sizeof (struct parport_gsc_private), GFP_KERNEL);
if (!priv) {
printk(KERN_DEBUG "parport (0x%lx): no memory!\n", base);
return NULL;
}
ops = kmemdup(&parport_gsc_ops, sizeof(struct parport_operations),
GFP_KERNEL);
if (!ops) {
printk(KERN_DEBUG "parport (0x%lx): no memory for ops!\n",
base);
kfree (priv);
return NULL;
}
priv->ctr = 0xc;
priv->ctr_writable = 0xff;
p->base = base;
p->base_hi = base_hi;
p->irq = irq;
p->modes = PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT;
p->ops = ops;
p->private_data = priv;
p->physport = p;
if (!parport_SPP_supported (p)) {
/* No port. */
kfree (priv);
kfree(ops);
return NULL;
}
parport_PS2_supported (p);
if (!(p = parport_register_port(base, PARPORT_IRQ_NONE,
PARPORT_DMA_NONE, ops))) {
kfree (priv);
kfree (ops);
return NULL;
}
p->dev = &padev->dev;
p->base_hi = base_hi;
p->modes = tmp.modes;
p->size = (p->modes & PARPORT_MODE_EPP)?8:3;
p->private_data = priv;
pr_info("%s: PC-style at 0x%lx", p->name, p->base);
p->irq = irq;
if (p->irq == PARPORT_IRQ_AUTO) {
p->irq = PARPORT_IRQ_NONE;
}
if (p->irq != PARPORT_IRQ_NONE)
pr_cont(", irq %d", p->irq);
pr_cont(" [");
#define printmode(x) \
do { \
if (p->modes & PARPORT_MODE_##x) \
pr_cont("%s%s", f++ ? "," : "", #x); \
} while (0)
{
int f = 0;
printmode(PCSPP);
printmode(TRISTATE);
printmode(COMPAT);
printmode(EPP);
// printmode(ECP);
// printmode(DMA);
}
#undef printmode
pr_cont("]\n");
if (p->irq != PARPORT_IRQ_NONE) {
if (request_irq (p->irq, parport_irq_handler,
0, p->name, p)) {
pr_warn("%s: irq %d in use, resorting to polled operation\n",
p->name, p->irq);
p->irq = PARPORT_IRQ_NONE;
}
}
/* Done probing. Now put the port into a sensible start-up state. */
parport_gsc_write_data(p, 0);
parport_gsc_data_forward (p);
/* Now that we've told the sharing engine about the port, and
found out its characteristics, let the high-level drivers
know about it. */
parport_announce_port (p);
return p;
}
#define PARPORT_GSC_OFFSET 0x800
static int parport_count;
static int __init parport_init_chip(struct parisc_device *dev)
{
struct parport *p;
unsigned long port;
if (!dev->irq) {
pr_warn("IRQ not found for parallel device at 0x%llx\n",
(unsigned long long)dev->hpa.start);
return -ENODEV;
}
port = dev->hpa.start + PARPORT_GSC_OFFSET;
/* some older machines with ASP-chip don't support
* the enhanced parport modes.
*/
if (boot_cpu_data.cpu_type > pcxt && !pdc_add_valid(port+4)) {
		/* Initialize bidirectional-mode (0x10) & data-transfer-mode #1 (0x20) */
pr_info("%s: initialize bidirectional-mode\n", __func__);
parport_writeb ( (0x10 + 0x20), port + 4);
} else {
pr_info("%s: enhanced parport-modes not supported\n", __func__);
}
p = parport_gsc_probe_port(port, 0, dev->irq, dev);
if (p)
parport_count++;
dev_set_drvdata(&dev->dev, p);
return 0;
}
static void __exit parport_remove_chip(struct parisc_device *dev)
{
struct parport *p = dev_get_drvdata(&dev->dev);
if (p) {
struct parport_operations *ops = p->ops;
parport_remove_port(p);
if (p->irq != PARPORT_IRQ_NONE)
free_irq(p->irq, p);
kfree (p->private_data);
parport_put_port(p);
kfree (ops); /* hope no-one cached it */
}
}
static const struct parisc_device_id parport_tbl[] __initconst = {
{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x74 },
{ 0, }
};
MODULE_DEVICE_TABLE(parisc, parport_tbl);
static struct parisc_driver parport_driver __refdata = {
.name = "Parallel",
.id_table = parport_tbl,
.probe = parport_init_chip,
.remove = __exit_p(parport_remove_chip),
};
int parport_gsc_init(void)
{
return register_parisc_driver(&parport_driver);
}
static void parport_gsc_exit(void)
{
unregister_parisc_driver(&parport_driver);
}
module_init(parport_gsc_init);
module_exit(parport_gsc_exit);
| linux-master | drivers/parport/parport_gsc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Parallel port device probing code
*
* Authors: Carsten Gross, [email protected]
* Philip Blundell <[email protected]>
*/
#include <linux/module.h>
#include <linux/parport.h>
#include <linux/string.h>
#include <linux/string_helpers.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
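/*
 * IEEE 1284 "CLS" (class) tokens and their human-readable descriptions.
 * The table index is stored directly as the device class by parse_data()
 * below, so the order must match the PARPORT_CLASS_* values.
 */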
static const struct {
const char *token;
const char *descr;
} classes[] = {
{ "", "Legacy device" },
{ "PRINTER", "Printer" },
{ "MODEM", "Modem" },
{ "NET", "Network device" },
{ "HDC", "Hard disk" },
{ "PCMCIA", "PCMCIA" },
{ "MEDIA", "Multimedia device" },
{ "FDC", "Floppy disk" },
{ "PORTS", "Ports" },
{ "SCANNER", "Scanner" },
{ "DIGICAM", "Digital camera" },
{ "", "Unknown device" },
{ "", "Unspecified" },
{ "SCSIADAPTER", "SCSI adapter" },
{ NULL, NULL }
};
static void pretty_print(struct parport *port, int device)
{
struct parport_device_info *info = &port->probe_info[device + 1];
pr_info("%s", port->name);
if (device >= 0)
pr_cont(" (addr %d)", device);
pr_cont(": %s", classes[info->class].descr);
if (info->class)
pr_cont(", %s %s", info->mfr, info->model);
pr_cont("\n");
}
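/*
 * parse_data() takes an IEEE 1284 Device ID payload of the form (purely
 * illustrative): "MFG:Acme;MDL:LaserWriter 9000;CLS:PRINTER;CMD:PCL,PJL;"
 * and splits it on ';' and ':' into the port's probe_info fields.
 */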
static void parse_data(struct parport *port, int device, char *str)
{
char *txt = kmalloc(strlen(str)+1, GFP_KERNEL);
char *p = txt, *q;
int guessed_class = PARPORT_CLASS_UNSPEC;
struct parport_device_info *info = &port->probe_info[device + 1];
if (!txt) {
pr_warn("%s probe: memory squeeze\n", port->name);
return;
}
strcpy(txt, str);
while (p) {
char *sep;
q = strchr(p, ';');
if (q) *q = 0;
sep = strchr(p, ':');
if (sep) {
char *u;
*(sep++) = 0;
/* Get rid of trailing blanks */
u = sep + strlen (sep) - 1;
while (u >= p && *u == ' ')
*u-- = '\0';
string_upper(p, p);
if (!strcmp(p, "MFG") || !strcmp(p, "MANUFACTURER")) {
kfree(info->mfr);
info->mfr = kstrdup(sep, GFP_KERNEL);
} else if (!strcmp(p, "MDL") || !strcmp(p, "MODEL")) {
kfree(info->model);
info->model = kstrdup(sep, GFP_KERNEL);
} else if (!strcmp(p, "CLS") || !strcmp(p, "CLASS")) {
int i;
kfree(info->class_name);
info->class_name = kstrdup(sep, GFP_KERNEL);
string_upper(sep, sep);
for (i = 0; classes[i].token; i++) {
if (!strcmp(classes[i].token, sep)) {
info->class = i;
goto rock_on;
}
}
pr_warn("%s probe: warning, class '%s' not understood\n",
port->name, sep);
info->class = PARPORT_CLASS_OTHER;
} else if (!strcmp(p, "CMD") ||
!strcmp(p, "COMMAND SET")) {
kfree(info->cmdset);
info->cmdset = kstrdup(sep, GFP_KERNEL);
/* if it speaks printer language, it's
probably a printer */
if (strstr(sep, "PJL") || strstr(sep, "PCL"))
guessed_class = PARPORT_CLASS_PRINTER;
} else if (!strcmp(p, "DES") || !strcmp(p, "DESCRIPTION")) {
kfree(info->description);
info->description = kstrdup(sep, GFP_KERNEL);
}
}
rock_on:
if (q)
p = q + 1;
else
p = NULL;
}
/* If the device didn't tell us its class, maybe we have managed to
guess one from the things it did say. */
if (info->class == PARPORT_CLASS_UNSPEC)
info->class = guessed_class;
pretty_print (port, device);
kfree(txt);
}
/* Read up to count-1 bytes of device id. Terminate buffer with
* '\0'. Buffer begins with two Device ID length bytes as given by
* device. */
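/* For example (illustrative): a device whose ID is 0x2A bytes long,
 * counting the length field itself, first returns the bytes 0x00 0x2A and
 * then the "MFG:...;MDL:...;" payload. */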
static ssize_t parport_read_device_id (struct parport *port, char *buffer,
size_t count)
{
unsigned char length[2];
unsigned lelen, belen;
size_t idlens[4];
unsigned numidlens;
unsigned current_idlen;
ssize_t retval;
size_t len;
/* First two bytes are MSB,LSB of inclusive length. */
retval = parport_read (port, length, 2);
if (retval < 0)
return retval;
if (retval != 2)
return -EIO;
if (count < 2)
return 0;
memcpy(buffer, length, 2);
len = 2;
/* Some devices wrongly send LE length, and some send it two
* bytes short. Construct a sorted array of lengths to try. */
belen = (length[0] << 8) + length[1];
lelen = (length[1] << 8) + length[0];
idlens[0] = min(belen, lelen);
idlens[1] = idlens[0]+2;
if (belen != lelen) {
int off = 2;
/* Don't try lengths of 0x100 and 0x200 as 1 and 2 */
if (idlens[0] <= 2)
off = 0;
idlens[off] = max(belen, lelen);
idlens[off+1] = idlens[off]+2;
numidlens = off+2;
}
else {
		/* Some devices don't truly implement Device ID and
		 * just return a constant nibble forever. This also
		 * catches those cases. */
if (idlens[0] == 0 || idlens[0] > 0xFFF) {
printk(KERN_DEBUG "%s: reported broken Device ID length of %#zX bytes\n",
port->name, idlens[0]);
return -EIO;
}
numidlens = 2;
}
/* Try to respect the given ID length despite all the bugs in
* the ID length. Read according to shortest possible ID
* first. */
for (current_idlen = 0; current_idlen < numidlens; ++current_idlen) {
size_t idlen = idlens[current_idlen];
if (idlen+1 >= count)
break;
retval = parport_read (port, buffer+len, idlen-len);
if (retval < 0)
return retval;
len += retval;
if (port->physport->ieee1284.phase != IEEE1284_PH_HBUSY_DAVAIL) {
if (belen != len) {
printk(KERN_DEBUG "%s: Device ID was %zd bytes while device told it would be %d bytes\n",
port->name, len, belen);
}
goto done;
}
		/* This might end reading the Device ID too
		 * soon. Hopefully the needed fields were already in
		 * the first 256 bytes or so that we have read so
		 * far. */
if (buffer[len-1] == ';') {
printk(KERN_DEBUG "%s: Device ID reading stopped before device told data not available. Current idlen %u of %u, len bytes %02X %02X\n",
port->name, current_idlen, numidlens,
length[0], length[1]);
goto done;
}
}
if (current_idlen < numidlens) {
/* Buffer not large enough, read to end of buffer. */
size_t idlen, len2;
if (len+1 < count) {
retval = parport_read (port, buffer+len, count-len-1);
if (retval < 0)
return retval;
len += retval;
}
		/* Read the whole ID, since some devices would not
		 * otherwise give back the Device ID from the beginning
		 * the next time they are asked. */
idlen = idlens[current_idlen];
len2 = len;
while(len2 < idlen && retval > 0) {
char tmp[4];
retval = parport_read (port, tmp,
min(sizeof tmp, idlen-len2));
if (retval < 0)
return retval;
len2 += retval;
}
}
/* In addition, there are broken devices out there that don't
even finish off with a semi-colon. We do not need to care
about those at this time. */
done:
buffer[len] = '\0';
return len;
}
/* Get Std 1284 Device ID. */
ssize_t parport_device_id (int devnum, char *buffer, size_t count)
{
ssize_t retval = -ENXIO;
struct pardevice *dev = parport_open(devnum, daisy_dev_name);
if (!dev)
return -ENXIO;
parport_claim_or_block (dev);
	/* Negotiate to compatibility mode, and then to device ID
	 * mode. (This is so that we start from the beginning of the
	 * device ID if we are already in device ID mode.) */
parport_negotiate (dev->port, IEEE1284_MODE_COMPAT);
retval = parport_negotiate (dev->port,
IEEE1284_MODE_NIBBLE | IEEE1284_DEVICEID);
if (!retval) {
retval = parport_read_device_id (dev->port, buffer, count);
parport_negotiate (dev->port, IEEE1284_MODE_COMPAT);
if (retval > 2)
parse_data (dev->port, dev->daisy, buffer+2);
}
parport_release (dev);
parport_close (dev);
return retval;
}
| linux-master | drivers/parport/probe.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* extcon-fsa9480.c - Fairchild Semiconductor FSA9480 extcon driver
*
* Copyright (c) 2019 Tomasz Figa <[email protected]>
*
* Loosely based on old fsa9480 misc-device driver.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/kobject.h>
#include <linux/extcon-provider.h>
#include <linux/irqdomain.h>
#include <linux/regmap.h>
/* FSA9480 I2C registers */
#define FSA9480_REG_DEVID 0x01
#define FSA9480_REG_CTRL 0x02
#define FSA9480_REG_INT1 0x03
#define FSA9480_REG_INT2 0x04
#define FSA9480_REG_INT1_MASK 0x05
#define FSA9480_REG_INT2_MASK 0x06
#define FSA9480_REG_ADC 0x07
#define FSA9480_REG_TIMING1 0x08
#define FSA9480_REG_TIMING2 0x09
#define FSA9480_REG_DEV_T1 0x0a
#define FSA9480_REG_DEV_T2 0x0b
#define FSA9480_REG_BTN1 0x0c
#define FSA9480_REG_BTN2 0x0d
#define FSA9480_REG_CK 0x0e
#define FSA9480_REG_CK_INT1 0x0f
#define FSA9480_REG_CK_INT2 0x10
#define FSA9480_REG_CK_INTMASK1 0x11
#define FSA9480_REG_CK_INTMASK2 0x12
#define FSA9480_REG_MANSW1 0x13
#define FSA9480_REG_MANSW2 0x14
#define FSA9480_REG_END 0x15
/* Control */
#define CON_SWITCH_OPEN (1 << 4)
#define CON_RAW_DATA (1 << 3)
#define CON_MANUAL_SW (1 << 2)
#define CON_WAIT (1 << 1)
#define CON_INT_MASK (1 << 0)
#define CON_MASK (CON_SWITCH_OPEN | CON_RAW_DATA | \
CON_MANUAL_SW | CON_WAIT)
/* Device Type 1 */
#define DEV_USB_OTG 7
#define DEV_DEDICATED_CHG 6
#define DEV_USB_CHG 5
#define DEV_CAR_KIT 4
#define DEV_UART 3
#define DEV_USB 2
#define DEV_AUDIO_2 1
#define DEV_AUDIO_1 0
#define DEV_T1_USB_MASK (DEV_USB_OTG | DEV_USB)
#define DEV_T1_UART_MASK (DEV_UART)
#define DEV_T1_CHARGER_MASK (DEV_DEDICATED_CHG | DEV_USB_CHG)
/* Device Type 2 */
#define DEV_AV 14
#define DEV_TTY 13
#define DEV_PPD 12
#define DEV_JIG_UART_OFF 11
#define DEV_JIG_UART_ON 10
#define DEV_JIG_USB_OFF 9
#define DEV_JIG_USB_ON 8
#define DEV_T2_USB_MASK (DEV_JIG_USB_OFF | DEV_JIG_USB_ON)
#define DEV_T2_UART_MASK (DEV_JIG_UART_OFF | DEV_JIG_UART_ON)
#define DEV_T2_JIG_MASK (DEV_JIG_USB_OFF | DEV_JIG_USB_ON | \
DEV_JIG_UART_OFF | DEV_JIG_UART_ON)
/*
* Manual Switch
* D- [7:5] / D+ [4:2]
* 000: Open all / 001: USB / 010: AUDIO / 011: UART / 100: V_AUDIO
*/
#define SW_VAUDIO ((4 << 5) | (4 << 2))
#define SW_UART ((3 << 5) | (3 << 2))
#define SW_AUDIO ((2 << 5) | (2 << 2))
#define SW_DHOST ((1 << 5) | (1 << 2))
#define SW_AUTO ((0 << 5) | (0 << 2))
/* Interrupt 1 */
#define INT1_MASK (0xff << 0)
#define INT_DETACH (1 << 1)
#define INT_ATTACH (1 << 0)
/* Interrupt 2 mask */
#define INT2_MASK (0x1f << 0)
/* Timing Set 1 */
#define TIMING1_ADC_500MS (0x6 << 0)
struct fsa9480_usbsw {
struct device *dev;
struct regmap *regmap;
struct extcon_dev *edev;
u16 cable;
};
static const unsigned int fsa9480_extcon_cable[] = {
EXTCON_USB_HOST,
EXTCON_USB,
EXTCON_CHG_USB_DCP,
EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_ACA,
EXTCON_JACK_LINE_OUT,
EXTCON_JACK_VIDEO_OUT,
EXTCON_JIG,
EXTCON_NONE,
};
static const u64 cable_types[] = {
[DEV_USB_OTG] = BIT_ULL(EXTCON_USB_HOST),
[DEV_DEDICATED_CHG] = BIT_ULL(EXTCON_USB) | BIT_ULL(EXTCON_CHG_USB_DCP),
[DEV_USB_CHG] = BIT_ULL(EXTCON_USB) | BIT_ULL(EXTCON_CHG_USB_SDP),
[DEV_CAR_KIT] = BIT_ULL(EXTCON_USB) | BIT_ULL(EXTCON_CHG_USB_SDP)
| BIT_ULL(EXTCON_JACK_LINE_OUT),
[DEV_UART] = BIT_ULL(EXTCON_JIG),
[DEV_USB] = BIT_ULL(EXTCON_USB) | BIT_ULL(EXTCON_CHG_USB_SDP),
[DEV_AUDIO_2] = BIT_ULL(EXTCON_JACK_LINE_OUT),
[DEV_AUDIO_1] = BIT_ULL(EXTCON_JACK_LINE_OUT),
[DEV_AV] = BIT_ULL(EXTCON_JACK_LINE_OUT)
| BIT_ULL(EXTCON_JACK_VIDEO_OUT),
[DEV_TTY] = BIT_ULL(EXTCON_JIG),
[DEV_PPD] = BIT_ULL(EXTCON_JACK_LINE_OUT) | BIT_ULL(EXTCON_CHG_USB_ACA),
[DEV_JIG_UART_OFF] = BIT_ULL(EXTCON_JIG),
[DEV_JIG_UART_ON] = BIT_ULL(EXTCON_JIG),
[DEV_JIG_USB_OFF] = BIT_ULL(EXTCON_USB) | BIT_ULL(EXTCON_JIG),
[DEV_JIG_USB_ON] = BIT_ULL(EXTCON_USB) | BIT_ULL(EXTCON_JIG),
};
/* Define regmap configuration of FSA9480 for I2C communication */
static bool fsa9480_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case FSA9480_REG_INT1_MASK:
return true;
default:
break;
}
return false;
}
static const struct regmap_config fsa9480_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.volatile_reg = fsa9480_volatile_reg,
.max_register = FSA9480_REG_END,
};
static int fsa9480_write_reg(struct fsa9480_usbsw *usbsw, int reg, int value)
{
int ret;
ret = regmap_write(usbsw->regmap, reg, value);
if (ret < 0)
dev_err(usbsw->dev, "%s: err %d\n", __func__, ret);
return ret;
}
static int fsa9480_read_reg(struct fsa9480_usbsw *usbsw, int reg)
{
int ret, val;
ret = regmap_read(usbsw->regmap, reg, &val);
if (ret < 0) {
dev_err(usbsw->dev, "%s: err %d\n", __func__, ret);
return ret;
}
return val;
}
static int fsa9480_read_irq(struct fsa9480_usbsw *usbsw, int *value)
{
u8 regs[2];
int ret;
ret = regmap_bulk_read(usbsw->regmap, FSA9480_REG_INT1, regs, 2);
if (ret < 0)
dev_err(usbsw->dev, "%s: err %d\n", __func__, ret);
*value = regs[1] << 8 | regs[0];
return ret;
}
static void fsa9480_handle_change(struct fsa9480_usbsw *usbsw,
u16 mask, bool attached)
{
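	/*
	 * For each device bit set in @mask, look up its extcon cable types
	 * and update each of them to the new @attached state.
	 */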
while (mask) {
int dev = fls64(mask) - 1;
u64 cables = cable_types[dev];
while (cables) {
int cable = fls64(cables) - 1;
extcon_set_state_sync(usbsw->edev, cable, attached);
cables &= ~BIT_ULL(cable);
}
mask &= ~BIT_ULL(dev);
}
}
static void fsa9480_detect_dev(struct fsa9480_usbsw *usbsw)
{
int val1, val2;
u16 val;
val1 = fsa9480_read_reg(usbsw, FSA9480_REG_DEV_T1);
val2 = fsa9480_read_reg(usbsw, FSA9480_REG_DEV_T2);
if (val1 < 0 || val2 < 0) {
dev_err(usbsw->dev, "%s: failed to read registers", __func__);
return;
}
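	/* Combine DEV_T1 (low byte) and DEV_T2 (high byte) into one device mask */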
val = val2 << 8 | val1;
dev_info(usbsw->dev, "dev1: 0x%x, dev2: 0x%x\n", val1, val2);
/* handle detached cables first */
fsa9480_handle_change(usbsw, usbsw->cable & ~val, false);
/* then handle attached ones */
fsa9480_handle_change(usbsw, val & ~usbsw->cable, true);
usbsw->cable = val;
}
static irqreturn_t fsa9480_irq_handler(int irq, void *data)
{
struct fsa9480_usbsw *usbsw = data;
int intr = 0;
/* clear interrupt */
fsa9480_read_irq(usbsw, &intr);
if (!intr)
return IRQ_NONE;
/* device detection */
fsa9480_detect_dev(usbsw);
return IRQ_HANDLED;
}
static int fsa9480_probe(struct i2c_client *client)
{
struct fsa9480_usbsw *info;
int ret;
if (!client->irq) {
dev_err(&client->dev, "no interrupt provided\n");
return -EINVAL;
}
info = devm_kzalloc(&client->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->dev = &client->dev;
i2c_set_clientdata(client, info);
/* External connector */
info->edev = devm_extcon_dev_allocate(info->dev,
fsa9480_extcon_cable);
if (IS_ERR(info->edev)) {
dev_err(info->dev, "failed to allocate memory for extcon\n");
ret = -ENOMEM;
return ret;
}
ret = devm_extcon_dev_register(info->dev, info->edev);
if (ret) {
dev_err(info->dev, "failed to register extcon device\n");
return ret;
}
info->regmap = devm_regmap_init_i2c(client, &fsa9480_regmap_config);
if (IS_ERR(info->regmap)) {
ret = PTR_ERR(info->regmap);
dev_err(info->dev, "failed to allocate register map: %d\n",
ret);
return ret;
}
/* ADC Detect Time: 500ms */
fsa9480_write_reg(info, FSA9480_REG_TIMING1, TIMING1_ADC_500MS);
/* configure automatic switching */
fsa9480_write_reg(info, FSA9480_REG_CTRL, CON_MASK);
/* unmask interrupt (attach/detach only) */
fsa9480_write_reg(info, FSA9480_REG_INT1_MASK,
INT1_MASK & ~(INT_ATTACH | INT_DETACH));
fsa9480_write_reg(info, FSA9480_REG_INT2_MASK, INT2_MASK);
ret = devm_request_threaded_irq(info->dev, client->irq, NULL,
fsa9480_irq_handler,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"fsa9480", info);
if (ret) {
dev_err(info->dev, "failed to request IRQ\n");
return ret;
}
device_init_wakeup(info->dev, true);
fsa9480_detect_dev(info);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int fsa9480_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
if (device_may_wakeup(&client->dev) && client->irq)
enable_irq_wake(client->irq);
return 0;
}
static int fsa9480_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
if (device_may_wakeup(&client->dev) && client->irq)
disable_irq_wake(client->irq);
return 0;
}
#endif
static const struct dev_pm_ops fsa9480_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(fsa9480_suspend, fsa9480_resume)
};
static const struct i2c_device_id fsa9480_id[] = {
{ "fsa9480", 0 },
{}
};
MODULE_DEVICE_TABLE(i2c, fsa9480_id);
static const struct of_device_id fsa9480_of_match[] = {
{ .compatible = "fcs,fsa9480", },
{ .compatible = "fcs,fsa880", },
{ .compatible = "ti,tsu6111", },
{ },
};
MODULE_DEVICE_TABLE(of, fsa9480_of_match);
static struct i2c_driver fsa9480_i2c_driver = {
.driver = {
.name = "fsa9480",
.pm = &fsa9480_pm_ops,
.of_match_table = fsa9480_of_match,
},
.probe = fsa9480_probe,
.id_table = fsa9480_id,
};
static int __init fsa9480_module_init(void)
{
return i2c_add_driver(&fsa9480_i2c_driver);
}
subsys_initcall(fsa9480_module_init);
static void __exit fsa9480_module_exit(void)
{
i2c_del_driver(&fsa9480_i2c_driver);
}
module_exit(fsa9480_module_exit);
MODULE_DESCRIPTION("Fairchild Semiconductor FSA9480 extcon driver");
MODULE_AUTHOR("Tomasz Figa <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/extcon/extcon-fsa9480.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* extcon-axp288.c - X-Power AXP288 PMIC extcon cable detection driver
*
* Copyright (c) 2017-2018 Hans de Goede <[email protected]>
* Copyright (C) 2015 Intel Corporation
* Author: Ramakrishna Pallala <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/notifier.h>
#include <linux/extcon-provider.h>
#include <linux/regmap.h>
#include <linux/mfd/axp20x.h>
#include <linux/usb/role.h>
#include <linux/workqueue.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>
/* Power source status register */
#define PS_STAT_VBUS_TRIGGER BIT(0)
#define PS_STAT_BAT_CHRG_DIR BIT(2)
#define PS_STAT_VBUS_ABOVE_VHOLD BIT(3)
#define PS_STAT_VBUS_VALID BIT(4)
#define PS_STAT_VBUS_PRESENT BIT(5)
/* BC module global register */
#define BC_GLOBAL_RUN BIT(0)
#define BC_GLOBAL_DET_STAT BIT(2)
#define BC_GLOBAL_DBP_TOUT BIT(3)
#define BC_GLOBAL_VLGC_COM_SEL BIT(4)
#define BC_GLOBAL_DCD_TOUT_MASK (BIT(6)|BIT(5))
#define BC_GLOBAL_DCD_TOUT_300MS 0
#define BC_GLOBAL_DCD_TOUT_100MS 1
#define BC_GLOBAL_DCD_TOUT_500MS 2
#define BC_GLOBAL_DCD_TOUT_900MS 3
#define BC_GLOBAL_DCD_DET_SEL BIT(7)
/* BC module vbus control and status register */
#define VBUS_CNTL_DPDM_PD_EN BIT(4)
#define VBUS_CNTL_DPDM_FD_EN BIT(5)
#define VBUS_CNTL_FIRST_PO_STAT BIT(6)
/* BC USB status register */
#define USB_STAT_BUS_STAT_MASK (BIT(3)|BIT(2)|BIT(1)|BIT(0))
#define USB_STAT_BUS_STAT_SHIFT 0
#define USB_STAT_BUS_STAT_ATHD 0
#define USB_STAT_BUS_STAT_CONN 1
#define USB_STAT_BUS_STAT_SUSP 2
#define USB_STAT_BUS_STAT_CONF 3
#define USB_STAT_USB_SS_MODE BIT(4)
#define USB_STAT_DEAD_BAT_DET BIT(6)
#define USB_STAT_DBP_UNCFG BIT(7)
/* BC detect status register */
#define DET_STAT_MASK (BIT(7)|BIT(6)|BIT(5))
#define DET_STAT_SHIFT 5
#define DET_STAT_SDP 1
#define DET_STAT_CDP 2
#define DET_STAT_DCP 3
enum axp288_extcon_reg {
AXP288_PS_STAT_REG = 0x00,
AXP288_PS_BOOT_REASON_REG = 0x02,
AXP288_BC_GLOBAL_REG = 0x2c,
AXP288_BC_VBUS_CNTL_REG = 0x2d,
AXP288_BC_USB_STAT_REG = 0x2e,
AXP288_BC_DET_STAT_REG = 0x2f,
};
enum axp288_extcon_irq {
VBUS_FALLING_IRQ = 0,
VBUS_RISING_IRQ,
MV_CHNG_IRQ,
BC_USB_CHNG_IRQ,
EXTCON_IRQ_END,
};
static const unsigned int axp288_extcon_cables[] = {
EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_CDP,
EXTCON_CHG_USB_DCP,
EXTCON_USB,
EXTCON_NONE,
};
struct axp288_extcon_info {
struct device *dev;
struct regmap *regmap;
struct regmap_irq_chip_data *regmap_irqc;
struct usb_role_switch *role_sw;
struct work_struct role_work;
int irq[EXTCON_IRQ_END];
struct extcon_dev *edev;
struct extcon_dev *id_extcon;
struct notifier_block id_nb;
unsigned int previous_cable;
bool vbus_attach;
};
static const struct x86_cpu_id cherry_trail_cpu_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
{}
};
/* Power up/down reason string array */
static const char * const axp288_pwr_up_down_info[] = {
"Last wake caused by user pressing the power button",
"Last wake caused by a charger insertion",
"Last wake caused by a battery insertion",
"Last wake caused by SOC initiated global reset",
"Last wake caused by cold reset",
"Last shutdown caused by PMIC UVLO threshold",
"Last shutdown caused by SOC initiated cold off",
"Last shutdown caused by user pressing the power button",
};
/*
* Decode and log the given "reset source indicator" (rsi)
* register and then clear it.
*/
static void axp288_extcon_log_rsi(struct axp288_extcon_info *info)
{
unsigned int val, i, clear_mask = 0;
unsigned long bits;
int ret;
ret = regmap_read(info->regmap, AXP288_PS_BOOT_REASON_REG, &val);
if (ret < 0) {
dev_err(info->dev, "failed to read reset source indicator\n");
return;
}
bits = val & GENMASK(ARRAY_SIZE(axp288_pwr_up_down_info) - 1, 0);
for_each_set_bit(i, &bits, ARRAY_SIZE(axp288_pwr_up_down_info))
dev_dbg(info->dev, "%s\n", axp288_pwr_up_down_info[i]);
clear_mask = bits;
/* Clear the register value for next reboot (write 1 to clear bit) */
regmap_write(info->regmap, AXP288_PS_BOOT_REASON_REG, clear_mask);
}
/*
* The below code to control the USB role-switch on devices with an AXP288
* may seem out of place, but there are 2 reasons why this is the best place
* to control the USB role-switch on such devices:
* 1) On many devices the USB role is controlled by AML code, but the AML code
* only switches between the host and none roles, because of Windows not
* really using device mode. To make device mode work we need to toggle
* between the none/device roles based on Vbus presence, and this driver
* gets interrupts on Vbus insertion / removal.
* 2) In order for our BC1.2 charger detection to work properly the role
* mux must be properly set to device mode before we do the detection.
*/
/* Returns the id-pin value, note pulled low / false == host-mode */
static bool axp288_get_id_pin(struct axp288_extcon_info *info)
{
enum usb_role role;
if (info->id_extcon)
return extcon_get_state(info->id_extcon, EXTCON_USB_HOST) <= 0;
/* We cannot access the id-pin, see what mode the AML code has set */
role = usb_role_switch_get_role(info->role_sw);
return role != USB_ROLE_HOST;
}
static void axp288_usb_role_work(struct work_struct *work)
{
struct axp288_extcon_info *info =
container_of(work, struct axp288_extcon_info, role_work);
enum usb_role role;
bool id_pin;
int ret;
id_pin = axp288_get_id_pin(info);
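	/*
	 * An id-pin pulled low (false) selects host mode; otherwise the role
	 * follows Vbus: device while Vbus is present, none when it is gone.
	 */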
if (!id_pin)
role = USB_ROLE_HOST;
else if (info->vbus_attach)
role = USB_ROLE_DEVICE;
else
role = USB_ROLE_NONE;
ret = usb_role_switch_set_role(info->role_sw, role);
if (ret)
dev_err(info->dev, "failed to set role: %d\n", ret);
}
static bool axp288_get_vbus_attach(struct axp288_extcon_info *info)
{
int ret, pwr_stat;
ret = regmap_read(info->regmap, AXP288_PS_STAT_REG, &pwr_stat);
if (ret < 0) {
dev_err(info->dev, "failed to read vbus status\n");
return false;
}
return !!(pwr_stat & PS_STAT_VBUS_VALID);
}
static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
{
int ret, stat, cfg;
u8 chrg_type;
unsigned int cable = info->previous_cable;
bool vbus_attach = false;
ret = iosf_mbi_block_punit_i2c_access();
if (ret < 0)
return ret;
vbus_attach = axp288_get_vbus_attach(info);
if (!vbus_attach)
goto no_vbus;
/* Check charger detection completion status */
ret = regmap_read(info->regmap, AXP288_BC_GLOBAL_REG, &cfg);
if (ret < 0)
goto dev_det_ret;
if (cfg & BC_GLOBAL_DET_STAT) {
dev_dbg(info->dev, "can't complete the charger detection\n");
goto dev_det_ret;
}
ret = regmap_read(info->regmap, AXP288_BC_DET_STAT_REG, &stat);
if (ret < 0)
goto dev_det_ret;
chrg_type = (stat & DET_STAT_MASK) >> DET_STAT_SHIFT;
switch (chrg_type) {
case DET_STAT_SDP:
dev_dbg(info->dev, "sdp cable is connected\n");
cable = EXTCON_CHG_USB_SDP;
break;
case DET_STAT_CDP:
dev_dbg(info->dev, "cdp cable is connected\n");
cable = EXTCON_CHG_USB_CDP;
break;
case DET_STAT_DCP:
dev_dbg(info->dev, "dcp cable is connected\n");
cable = EXTCON_CHG_USB_DCP;
break;
default:
dev_warn(info->dev, "unknown (reserved) bc detect result\n");
cable = EXTCON_CHG_USB_SDP;
}
no_vbus:
iosf_mbi_unblock_punit_i2c_access();
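	/*
	 * Always report the previously detected cable as removed first; if
	 * Vbus is still present, the newly detected cable is reported below.
	 */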
extcon_set_state_sync(info->edev, info->previous_cable, false);
if (info->previous_cable == EXTCON_CHG_USB_SDP)
extcon_set_state_sync(info->edev, EXTCON_USB, false);
if (vbus_attach) {
extcon_set_state_sync(info->edev, cable, vbus_attach);
if (cable == EXTCON_CHG_USB_SDP)
extcon_set_state_sync(info->edev, EXTCON_USB,
vbus_attach);
info->previous_cable = cable;
}
if (info->role_sw && info->vbus_attach != vbus_attach) {
info->vbus_attach = vbus_attach;
/* Setting the role can take a while */
queue_work(system_long_wq, &info->role_work);
}
return 0;
dev_det_ret:
iosf_mbi_unblock_punit_i2c_access();
if (ret < 0)
dev_err(info->dev, "failed to detect BC Mod\n");
return ret;
}
static int axp288_extcon_id_evt(struct notifier_block *nb,
unsigned long event, void *param)
{
struct axp288_extcon_info *info =
container_of(nb, struct axp288_extcon_info, id_nb);
/* We may not sleep and setting the role can take a while */
queue_work(system_long_wq, &info->role_work);
return NOTIFY_OK;
}
static irqreturn_t axp288_extcon_isr(int irq, void *data)
{
struct axp288_extcon_info *info = data;
int ret;
ret = axp288_handle_chrg_det_event(info);
if (ret < 0)
dev_err(info->dev, "failed to handle the interrupt\n");
return IRQ_HANDLED;
}
static int axp288_extcon_enable(struct axp288_extcon_info *info)
{
int ret = 0;
ret = iosf_mbi_block_punit_i2c_access();
if (ret < 0)
return ret;
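	/* Clear BC_GLOBAL_RUN first so that charger detection restarts cleanly */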
regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG,
BC_GLOBAL_RUN, 0);
/* Enable the charger detection logic */
regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG,
BC_GLOBAL_RUN, BC_GLOBAL_RUN);
iosf_mbi_unblock_punit_i2c_access();
return ret;
}
static void axp288_put_role_sw(void *data)
{
struct axp288_extcon_info *info = data;
cancel_work_sync(&info->role_work);
usb_role_switch_put(info->role_sw);
}
static int axp288_extcon_find_role_sw(struct axp288_extcon_info *info)
{
const struct software_node *swnode;
struct fwnode_handle *fwnode;
if (!x86_match_cpu(cherry_trail_cpu_ids))
return 0;
swnode = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
if (!swnode)
return -EPROBE_DEFER;
fwnode = software_node_fwnode(swnode);
info->role_sw = usb_role_switch_find_by_fwnode(fwnode);
fwnode_handle_put(fwnode);
return info->role_sw ? 0 : -EPROBE_DEFER;
}
static int axp288_extcon_probe(struct platform_device *pdev)
{
struct axp288_extcon_info *info;
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
struct acpi_device *adev;
int ret, i, pirq;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->dev = &pdev->dev;
info->regmap = axp20x->regmap;
info->regmap_irqc = axp20x->regmap_irqc;
info->previous_cable = EXTCON_NONE;
INIT_WORK(&info->role_work, axp288_usb_role_work);
info->id_nb.notifier_call = axp288_extcon_id_evt;
platform_set_drvdata(pdev, info);
ret = axp288_extcon_find_role_sw(info);
if (ret)
return ret;
if (info->role_sw) {
ret = devm_add_action_or_reset(dev, axp288_put_role_sw, info);
if (ret)
return ret;
adev = acpi_dev_get_first_match_dev("INT3496", NULL, -1);
if (adev) {
info->id_extcon = extcon_get_extcon_dev(acpi_dev_name(adev));
acpi_dev_put(adev);
if (IS_ERR(info->id_extcon))
return PTR_ERR(info->id_extcon);
dev_info(dev, "controlling USB role\n");
} else {
dev_info(dev, "controlling USB role based on Vbus presence\n");
}
}
ret = iosf_mbi_block_punit_i2c_access();
if (ret < 0)
return ret;
info->vbus_attach = axp288_get_vbus_attach(info);
axp288_extcon_log_rsi(info);
iosf_mbi_unblock_punit_i2c_access();
/* Initialize extcon device */
info->edev = devm_extcon_dev_allocate(&pdev->dev,
axp288_extcon_cables);
if (IS_ERR(info->edev)) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
return PTR_ERR(info->edev);
}
/* Register extcon device */
ret = devm_extcon_dev_register(&pdev->dev, info->edev);
if (ret) {
dev_err(&pdev->dev, "failed to register extcon device\n");
return ret;
}
for (i = 0; i < EXTCON_IRQ_END; i++) {
pirq = platform_get_irq(pdev, i);
if (pirq < 0)
return pirq;
info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
if (info->irq[i] < 0) {
dev_err(&pdev->dev,
"failed to get virtual interrupt=%d\n", pirq);
ret = info->irq[i];
return ret;
}
ret = devm_request_threaded_irq(&pdev->dev, info->irq[i],
NULL, axp288_extcon_isr,
IRQF_ONESHOT | IRQF_NO_SUSPEND,
pdev->name, info);
if (ret) {
dev_err(&pdev->dev, "failed to request interrupt=%d\n",
info->irq[i]);
return ret;
}
}
if (info->id_extcon) {
ret = devm_extcon_register_notifier_all(dev, info->id_extcon,
&info->id_nb);
if (ret)
return ret;
}
/* Make sure the role-sw is set correctly before doing BC detection */
if (info->role_sw) {
queue_work(system_long_wq, &info->role_work);
flush_work(&info->role_work);
}
/* Start charger cable type detection */
ret = axp288_extcon_enable(info);
if (ret < 0)
return ret;
device_init_wakeup(dev, true);
platform_set_drvdata(pdev, info);
return 0;
}
static int __maybe_unused axp288_extcon_suspend(struct device *dev)
{
struct axp288_extcon_info *info = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
enable_irq_wake(info->irq[VBUS_RISING_IRQ]);
return 0;
}
static int __maybe_unused axp288_extcon_resume(struct device *dev)
{
struct axp288_extcon_info *info = dev_get_drvdata(dev);
/*
	 * Wakeup when a charger is connected to do charger-type
	 * detection and generate an extcon event which makes the
* axp288 charger driver set the input current limit.
*/
if (device_may_wakeup(dev))
disable_irq_wake(info->irq[VBUS_RISING_IRQ]);
return 0;
}
static SIMPLE_DEV_PM_OPS(axp288_extcon_pm_ops, axp288_extcon_suspend,
axp288_extcon_resume);
static const struct platform_device_id axp288_extcon_table[] = {
{ .name = "axp288_extcon" },
{},
};
MODULE_DEVICE_TABLE(platform, axp288_extcon_table);
static struct platform_driver axp288_extcon_driver = {
.probe = axp288_extcon_probe,
.id_table = axp288_extcon_table,
.driver = {
.name = "axp288_extcon",
.pm = &axp288_extcon_pm_ops,
},
};
module_platform_driver(axp288_extcon_driver);
MODULE_AUTHOR("Ramakrishna Pallala <[email protected]>");
MODULE_AUTHOR("Hans de Goede <[email protected]>");
MODULE_DESCRIPTION("X-Powers AXP288 extcon driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/extcon/extcon-axp288.c |
// SPDX-License-Identifier: GPL-2.0
// ChromeOS Embedded Controller extcon
//
// Copyright (C) 2017 Google, Inc.
// Author: Benson Leung <[email protected]>
#include <linux/extcon-provider.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sched.h>
struct cros_ec_extcon_info {
struct device *dev;
struct extcon_dev *edev;
int port_id;
struct cros_ec_device *ec;
struct notifier_block notifier;
unsigned int dr; /* data role */
bool pr; /* power role (true if VBUS enabled) */
bool dp; /* DisplayPort enabled */
bool mux; /* SuperSpeed (usb3) enabled */
unsigned int power_type;
};
static const unsigned int usb_type_c_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
EXTCON_DISP_DP,
EXTCON_NONE,
};
enum usb_data_roles {
DR_NONE,
DR_HOST,
DR_DEVICE,
};
/**
* cros_ec_pd_command() - Send a command to the EC.
* @info: pointer to struct cros_ec_extcon_info
* @command: EC command
* @version: EC command version
* @outdata: EC command output data
* @outsize: Size of outdata
* @indata: EC command input data
* @insize: Size of indata
*
* Return: 0 on success, <0 on failure.
*/
static int cros_ec_pd_command(struct cros_ec_extcon_info *info,
unsigned int command,
unsigned int version,
void *outdata,
unsigned int outsize,
void *indata,
unsigned int insize)
{
struct cros_ec_command *msg;
int ret;
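	/* Allocate a single message large enough for both the request (outsize)
	 * and the response (insize) payloads.
	 */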
msg = kzalloc(struct_size(msg, data, max(outsize, insize)), GFP_KERNEL);
if (!msg)
return -ENOMEM;
msg->version = version;
msg->command = command;
msg->outsize = outsize;
msg->insize = insize;
if (outsize)
memcpy(msg->data, outdata, outsize);
ret = cros_ec_cmd_xfer_status(info->ec, msg);
if (ret >= 0 && insize)
memcpy(indata, msg->data, insize);
kfree(msg);
return ret;
}
/**
* cros_ec_usb_get_power_type() - Get power type info about PD device attached
* to given port.
* @info: pointer to struct cros_ec_extcon_info
*
* Return: power type on success, <0 on failure.
*/
static int cros_ec_usb_get_power_type(struct cros_ec_extcon_info *info)
{
struct ec_params_usb_pd_power_info req;
struct ec_response_usb_pd_power_info resp;
int ret;
req.port = info->port_id;
ret = cros_ec_pd_command(info, EC_CMD_USB_PD_POWER_INFO, 0,
&req, sizeof(req), &resp, sizeof(resp));
if (ret < 0)
return ret;
return resp.type;
}
/**
* cros_ec_usb_get_pd_mux_state() - Get PD mux state for given port.
* @info: pointer to struct cros_ec_extcon_info
*
* Return: PD mux state on success, <0 on failure.
*/
static int cros_ec_usb_get_pd_mux_state(struct cros_ec_extcon_info *info)
{
struct ec_params_usb_pd_mux_info req;
struct ec_response_usb_pd_mux_info resp;
int ret;
req.port = info->port_id;
ret = cros_ec_pd_command(info, EC_CMD_USB_PD_MUX_INFO, 0,
&req, sizeof(req),
&resp, sizeof(resp));
if (ret < 0)
return ret;
return resp.flags;
}
/**
* cros_ec_usb_get_role() - Get role info about possible PD device attached to a
* given port.
* @info: pointer to struct cros_ec_extcon_info
* @polarity: pointer to cable polarity (return value)
*
* Return: role info on success, -ENOTCONN if no cable is connected, <0 on
* failure.
*/
static int cros_ec_usb_get_role(struct cros_ec_extcon_info *info,
bool *polarity)
{
struct ec_params_usb_pd_control pd_control;
struct ec_response_usb_pd_control_v1 resp;
int ret;
pd_control.port = info->port_id;
pd_control.role = USB_PD_CTRL_ROLE_NO_CHANGE;
pd_control.mux = USB_PD_CTRL_MUX_NO_CHANGE;
pd_control.swap = USB_PD_CTRL_SWAP_NONE;
ret = cros_ec_pd_command(info, EC_CMD_USB_PD_CONTROL, 1,
&pd_control, sizeof(pd_control),
&resp, sizeof(resp));
if (ret < 0)
return ret;
if (!(resp.enabled & PD_CTRL_RESP_ENABLED_CONNECTED))
return -ENOTCONN;
*polarity = resp.polarity;
return resp.role;
}
/**
* cros_ec_pd_get_num_ports() - Get number of EC charge ports.
* @info: pointer to struct cros_ec_extcon_info
*
* Return: number of ports on success, <0 on failure.
*/
static int cros_ec_pd_get_num_ports(struct cros_ec_extcon_info *info)
{
struct ec_response_usb_pd_ports resp;
int ret;
ret = cros_ec_pd_command(info, EC_CMD_USB_PD_PORTS,
0, NULL, 0, &resp, sizeof(resp));
if (ret < 0)
return ret;
return resp.num_ports;
}
static const char *cros_ec_usb_role_string(unsigned int role)
{
return role == DR_NONE ? "DISCONNECTED" :
(role == DR_HOST ? "DFP" : "UFP");
}
static const char *cros_ec_usb_power_type_string(unsigned int type)
{
switch (type) {
case USB_CHG_TYPE_NONE:
return "USB_CHG_TYPE_NONE";
case USB_CHG_TYPE_PD:
return "USB_CHG_TYPE_PD";
case USB_CHG_TYPE_PROPRIETARY:
return "USB_CHG_TYPE_PROPRIETARY";
case USB_CHG_TYPE_C:
return "USB_CHG_TYPE_C";
case USB_CHG_TYPE_BC12_DCP:
return "USB_CHG_TYPE_BC12_DCP";
case USB_CHG_TYPE_BC12_CDP:
return "USB_CHG_TYPE_BC12_CDP";
case USB_CHG_TYPE_BC12_SDP:
return "USB_CHG_TYPE_BC12_SDP";
case USB_CHG_TYPE_OTHER:
return "USB_CHG_TYPE_OTHER";
case USB_CHG_TYPE_VBUS:
return "USB_CHG_TYPE_VBUS";
case USB_CHG_TYPE_UNKNOWN:
return "USB_CHG_TYPE_UNKNOWN";
default:
return "USB_CHG_TYPE_UNKNOWN";
}
}
static bool cros_ec_usb_power_type_is_wall_wart(unsigned int type,
unsigned int role)
{
switch (type) {
/* FIXME : Guppy, Donnettes, and other chargers will be miscategorized
* because they identify with USB_CHG_TYPE_C, but we can't return true
* here from that code because that breaks Suzy-Q and other kinds of
* USB Type-C cables and peripherals.
*/
case USB_CHG_TYPE_PROPRIETARY:
case USB_CHG_TYPE_BC12_DCP:
return true;
case USB_CHG_TYPE_PD:
case USB_CHG_TYPE_C:
case USB_CHG_TYPE_BC12_CDP:
case USB_CHG_TYPE_BC12_SDP:
case USB_CHG_TYPE_OTHER:
case USB_CHG_TYPE_VBUS:
case USB_CHG_TYPE_UNKNOWN:
case USB_CHG_TYPE_NONE:
default:
return false;
}
}
static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
bool force)
{
struct device *dev = info->dev;
int role, power_type;
unsigned int dr = DR_NONE;
bool pr = false;
bool polarity = false;
bool dp = false;
bool mux = false;
bool hpd = false;
power_type = cros_ec_usb_get_power_type(info);
if (power_type < 0) {
dev_err(dev, "failed getting power type err = %d\n",
power_type);
return power_type;
}
role = cros_ec_usb_get_role(info, &polarity);
if (role < 0) {
if (role != -ENOTCONN) {
dev_err(dev, "failed getting role err = %d\n", role);
return role;
}
dev_dbg(dev, "disconnected\n");
} else {
int pd_mux_state;
dr = (role & PD_CTRL_RESP_ROLE_DATA) ? DR_HOST : DR_DEVICE;
pr = (role & PD_CTRL_RESP_ROLE_POWER);
pd_mux_state = cros_ec_usb_get_pd_mux_state(info);
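		/* If the mux state cannot be read, assume plain USB is enabled */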
if (pd_mux_state < 0)
pd_mux_state = USB_PD_MUX_USB_ENABLED;
dp = pd_mux_state & USB_PD_MUX_DP_ENABLED;
mux = pd_mux_state & USB_PD_MUX_USB_ENABLED;
hpd = pd_mux_state & USB_PD_MUX_HPD_IRQ;
dev_dbg(dev,
"connected role 0x%x pwr type %d dr %d pr %d pol %d mux %d dp %d hpd %d\n",
role, power_type, dr, pr, polarity, mux, dp, hpd);
}
/*
* When there is no USB host (e.g. USB PD charger),
* we are not really a UFP for the AP.
*/
if (dr == DR_DEVICE &&
cros_ec_usb_power_type_is_wall_wart(power_type, role))
dr = DR_NONE;
if (force || info->dr != dr || info->pr != pr || info->dp != dp ||
info->mux != mux || info->power_type != power_type) {
bool host_connected = false, device_connected = false;
dev_dbg(dev, "Type/Role switch! type = %s role = %s\n",
cros_ec_usb_power_type_string(power_type),
cros_ec_usb_role_string(dr));
info->dr = dr;
info->pr = pr;
info->dp = dp;
info->mux = mux;
info->power_type = power_type;
if (dr == DR_DEVICE)
device_connected = true;
else if (dr == DR_HOST)
host_connected = true;
extcon_set_state(info->edev, EXTCON_USB, device_connected);
extcon_set_state(info->edev, EXTCON_USB_HOST, host_connected);
extcon_set_state(info->edev, EXTCON_DISP_DP, dp);
extcon_set_property(info->edev, EXTCON_USB,
EXTCON_PROP_USB_VBUS,
(union extcon_property_value)(int)pr);
extcon_set_property(info->edev, EXTCON_USB_HOST,
EXTCON_PROP_USB_VBUS,
(union extcon_property_value)(int)pr);
extcon_set_property(info->edev, EXTCON_USB,
EXTCON_PROP_USB_TYPEC_POLARITY,
(union extcon_property_value)(int)polarity);
extcon_set_property(info->edev, EXTCON_USB_HOST,
EXTCON_PROP_USB_TYPEC_POLARITY,
(union extcon_property_value)(int)polarity);
extcon_set_property(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_USB_TYPEC_POLARITY,
(union extcon_property_value)(int)polarity);
extcon_set_property(info->edev, EXTCON_USB,
EXTCON_PROP_USB_SS,
(union extcon_property_value)(int)mux);
extcon_set_property(info->edev, EXTCON_USB_HOST,
EXTCON_PROP_USB_SS,
(union extcon_property_value)(int)mux);
extcon_set_property(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_USB_SS,
(union extcon_property_value)(int)mux);
extcon_set_property(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_DISP_HPD,
(union extcon_property_value)(int)hpd);
extcon_sync(info->edev, EXTCON_USB);
extcon_sync(info->edev, EXTCON_USB_HOST);
extcon_sync(info->edev, EXTCON_DISP_DP);
} else if (hpd) {
extcon_set_property(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_DISP_HPD,
(union extcon_property_value)(int)hpd);
extcon_sync(info->edev, EXTCON_DISP_DP);
}
return 0;
}
static int extcon_cros_ec_event(struct notifier_block *nb,
unsigned long queued_during_suspend,
void *_notify)
{
struct cros_ec_extcon_info *info;
struct cros_ec_device *ec;
u32 host_event;
info = container_of(nb, struct cros_ec_extcon_info, notifier);
ec = info->ec;
host_event = cros_ec_get_host_event(ec);
if (host_event & (EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU) |
EC_HOST_EVENT_MASK(EC_HOST_EVENT_USB_MUX))) {
extcon_cros_ec_detect_cable(info, false);
return NOTIFY_OK;
}
return NOTIFY_DONE;
}
static int extcon_cros_ec_probe(struct platform_device *pdev)
{
struct cros_ec_extcon_info *info;
struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
int numports, ret;
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->dev = dev;
info->ec = ec;
if (np) {
u32 port;
ret = of_property_read_u32(np, "google,usb-port-id", &port);
if (ret < 0) {
dev_err(dev, "Missing google,usb-port-id property\n");
return ret;
}
info->port_id = port;
} else {
info->port_id = pdev->id;
}
numports = cros_ec_pd_get_num_ports(info);
if (numports < 0) {
dev_err(dev, "failed getting number of ports! ret = %d\n",
numports);
return numports;
}
if (info->port_id >= numports) {
dev_err(dev, "This system only supports %d ports\n", numports);
return -ENODEV;
}
info->edev = devm_extcon_dev_allocate(dev, usb_type_c_cable);
if (IS_ERR(info->edev)) {
dev_err(dev, "failed to allocate extcon device\n");
return -ENOMEM;
}
ret = devm_extcon_dev_register(dev, info->edev);
if (ret < 0) {
dev_err(dev, "failed to register extcon device\n");
return ret;
}
extcon_set_property_capability(info->edev, EXTCON_USB,
EXTCON_PROP_USB_VBUS);
extcon_set_property_capability(info->edev, EXTCON_USB_HOST,
EXTCON_PROP_USB_VBUS);
extcon_set_property_capability(info->edev, EXTCON_USB,
EXTCON_PROP_USB_TYPEC_POLARITY);
extcon_set_property_capability(info->edev, EXTCON_USB_HOST,
EXTCON_PROP_USB_TYPEC_POLARITY);
extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_USB_TYPEC_POLARITY);
extcon_set_property_capability(info->edev, EXTCON_USB,
EXTCON_PROP_USB_SS);
extcon_set_property_capability(info->edev, EXTCON_USB_HOST,
EXTCON_PROP_USB_SS);
extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_USB_SS);
extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_DISP_HPD);
info->dr = DR_NONE;
info->pr = false;
platform_set_drvdata(pdev, info);
/* Get PD events from the EC */
info->notifier.notifier_call = extcon_cros_ec_event;
ret = blocking_notifier_chain_register(&info->ec->event_notifier,
&info->notifier);
if (ret < 0) {
dev_err(dev, "failed to register notifier\n");
return ret;
}
/* Perform initial detection */
ret = extcon_cros_ec_detect_cable(info, true);
if (ret < 0) {
dev_err(dev, "failed to detect initial cable state\n");
goto unregister_notifier;
}
return 0;
unregister_notifier:
blocking_notifier_chain_unregister(&info->ec->event_notifier,
&info->notifier);
return ret;
}
static int extcon_cros_ec_remove(struct platform_device *pdev)
{
struct cros_ec_extcon_info *info = platform_get_drvdata(pdev);
blocking_notifier_chain_unregister(&info->ec->event_notifier,
&info->notifier);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int extcon_cros_ec_suspend(struct device *dev)
{
return 0;
}
static int extcon_cros_ec_resume(struct device *dev)
{
int ret;
struct cros_ec_extcon_info *info = dev_get_drvdata(dev);
ret = extcon_cros_ec_detect_cable(info, true);
if (ret < 0)
dev_err(dev, "failed to detect cable state on resume\n");
return 0;
}
static const struct dev_pm_ops extcon_cros_ec_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(extcon_cros_ec_suspend, extcon_cros_ec_resume)
};
#define DEV_PM_OPS (&extcon_cros_ec_dev_pm_ops)
#else
#define DEV_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_OF
static const struct of_device_id extcon_cros_ec_of_match[] = {
{ .compatible = "google,extcon-usbc-cros-ec" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, extcon_cros_ec_of_match);
#endif /* CONFIG_OF */
static struct platform_driver extcon_cros_ec_driver = {
.driver = {
.name = "extcon-usbc-cros-ec",
.of_match_table = of_match_ptr(extcon_cros_ec_of_match),
.pm = DEV_PM_OPS,
},
.remove = extcon_cros_ec_remove,
.probe = extcon_cros_ec_probe,
};
module_platform_driver(extcon_cros_ec_driver);
MODULE_DESCRIPTION("ChromeOS Embedded Controller extcon driver");
MODULE_AUTHOR("Benson Leung <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/extcon/extcon-usbc-cros-ec.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Maxim Integrated MAX3355 USB OTG chip extcon driver
*
* Copyright (C) 2014-2015 Cogent Embedded, Inc.
* Author: Sergei Shtylyov <[email protected]>
*/
#include <linux/extcon-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
struct max3355_data {
struct extcon_dev *edev;
struct gpio_desc *id_gpiod;
struct gpio_desc *shdn_gpiod;
};
static const unsigned int max3355_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
EXTCON_NONE,
};
static irqreturn_t max3355_id_irq(int irq, void *dev_id)
{
struct max3355_data *data = dev_id;
int id = gpiod_get_value_cansleep(data->id_gpiod);
if (id) {
/*
* ID = 1 means USB HOST cable detached.
* As we don't have event for USB peripheral cable attached,
* we simulate USB peripheral attach here.
*/
extcon_set_state_sync(data->edev, EXTCON_USB_HOST, false);
extcon_set_state_sync(data->edev, EXTCON_USB, true);
} else {
/*
* ID = 0 means USB HOST cable attached.
* As we don't have event for USB peripheral cable detached,
* we simulate USB peripheral detach here.
*/
extcon_set_state_sync(data->edev, EXTCON_USB, false);
extcon_set_state_sync(data->edev, EXTCON_USB_HOST, true);
}
return IRQ_HANDLED;
}
static int max3355_probe(struct platform_device *pdev)
{
struct max3355_data *data;
struct gpio_desc *gpiod;
int irq, err;
data = devm_kzalloc(&pdev->dev, sizeof(struct max3355_data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
gpiod = devm_gpiod_get(&pdev->dev, "id", GPIOD_IN);
if (IS_ERR(gpiod)) {
dev_err(&pdev->dev, "failed to get ID_OUT GPIO\n");
return PTR_ERR(gpiod);
}
data->id_gpiod = gpiod;
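	/*
	 * Drive SHDN# high to take the MAX3355 out of shutdown mode;
	 * it is driven low again when the driver is removed.
	 */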
gpiod = devm_gpiod_get(&pdev->dev, "maxim,shdn", GPIOD_OUT_HIGH);
if (IS_ERR(gpiod)) {
dev_err(&pdev->dev, "failed to get SHDN# GPIO\n");
return PTR_ERR(gpiod);
}
data->shdn_gpiod = gpiod;
data->edev = devm_extcon_dev_allocate(&pdev->dev, max3355_cable);
if (IS_ERR(data->edev)) {
dev_err(&pdev->dev, "failed to allocate extcon device\n");
return PTR_ERR(data->edev);
}
err = devm_extcon_dev_register(&pdev->dev, data->edev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register extcon device\n");
return err;
}
irq = gpiod_to_irq(data->id_gpiod);
if (irq < 0) {
dev_err(&pdev->dev, "failed to translate ID_OUT GPIO to IRQ\n");
return irq;
}
err = devm_request_threaded_irq(&pdev->dev, irq, NULL, max3355_id_irq,
IRQF_ONESHOT | IRQF_NO_SUSPEND |
IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING,
pdev->name, data);
if (err < 0) {
dev_err(&pdev->dev, "failed to request ID_OUT IRQ\n");
return err;
}
platform_set_drvdata(pdev, data);
/* Perform initial detection */
max3355_id_irq(irq, data);
return 0;
}
static int max3355_remove(struct platform_device *pdev)
{
struct max3355_data *data = platform_get_drvdata(pdev);
gpiod_set_value_cansleep(data->shdn_gpiod, 0);
return 0;
}
static const struct of_device_id max3355_match_table[] = {
{ .compatible = "maxim,max3355", },
{ }
};
MODULE_DEVICE_TABLE(of, max3355_match_table);
static struct platform_driver max3355_driver = {
.probe = max3355_probe,
.remove = max3355_remove,
.driver = {
.name = "extcon-max3355",
.of_match_table = max3355_match_table,
},
};
module_platform_driver(max3355_driver);
MODULE_AUTHOR("Sergei Shtylyov <[email protected]>");
MODULE_DESCRIPTION("Maxim MAX3355 extcon driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/extcon/extcon-max3355.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* extcon-rt8973a.c - Richtek RT8973A extcon driver to support USB switches
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd
* Author: Chanwoo Choi <[email protected]>
*/
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/extcon-provider.h>
#include "extcon-rt8973a.h"
#define DELAY_MS_DEFAULT 20000 /* unit: millisecond */
struct muic_irq {
unsigned int irq;
const char *name;
unsigned int virq;
};
struct reg_data {
u8 reg;
u8 mask;
u8 val;
bool invert;
};
struct rt8973a_muic_info {
struct device *dev;
struct extcon_dev *edev;
struct i2c_client *i2c;
struct regmap *regmap;
struct regmap_irq_chip_data *irq_data;
struct muic_irq *muic_irqs;
unsigned int num_muic_irqs;
int irq;
bool irq_attach;
bool irq_detach;
bool irq_ovp;
bool irq_otp;
struct work_struct irq_work;
struct reg_data *reg_data;
unsigned int num_reg_data;
bool auto_config;
struct mutex mutex;
/*
* Use delayed workqueue to detect cable state and then
* notify cable state to notifiee/platform through uevent.
* After completing the booting of platform, the extcon provider
* driver should notify cable state to upper layer.
*/
struct delayed_work wq_detcable;
};
/* Default value of RT8973A register to bring up MUIC device. */
static struct reg_data rt8973a_reg_data[] = {
{
.reg = RT8973A_REG_CONTROL1,
.mask = RT8973A_REG_CONTROL1_ADC_EN_MASK
| RT8973A_REG_CONTROL1_USB_CHD_EN_MASK
| RT8973A_REG_CONTROL1_CHGTYP_MASK
| RT8973A_REG_CONTROL1_SWITCH_OPEN_MASK
| RT8973A_REG_CONTROL1_AUTO_CONFIG_MASK
| RT8973A_REG_CONTROL1_INTM_MASK,
.val = RT8973A_REG_CONTROL1_ADC_EN_MASK
| RT8973A_REG_CONTROL1_USB_CHD_EN_MASK
| RT8973A_REG_CONTROL1_CHGTYP_MASK,
.invert = false,
},
{ /* sentinel */ }
};
/* List of detectable cables */
static const unsigned int rt8973a_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_DCP,
EXTCON_JIG,
EXTCON_NONE,
};
/* Define OVP (Over Voltage Protection), OTP (Over Temperature Protection) */
enum rt8973a_event_type {
RT8973A_EVENT_ATTACH = 1,
RT8973A_EVENT_DETACH,
RT8973A_EVENT_OVP,
RT8973A_EVENT_OTP,
};
/* Define supported accessory type */
enum rt8973a_muic_acc_type {
RT8973A_MUIC_ADC_OTG = 0x0,
RT8973A_MUIC_ADC_AUDIO_SEND_END_BUTTON,
RT8973A_MUIC_ADC_AUDIO_REMOTE_S1_BUTTON,
RT8973A_MUIC_ADC_AUDIO_REMOTE_S2_BUTTON,
RT8973A_MUIC_ADC_AUDIO_REMOTE_S3_BUTTON,
RT8973A_MUIC_ADC_AUDIO_REMOTE_S4_BUTTON,
RT8973A_MUIC_ADC_AUDIO_REMOTE_S5_BUTTON,
RT8973A_MUIC_ADC_AUDIO_REMOTE_S6_BUTTON,
RT8973A_MUIC_ADC_AUDIO_REMOTE_S7_BUTTON,
RT8973A_MUIC_ADC_AUDIO_REMOTE_S8_BUTTON,
RT8973A_MUIC_ADC_AUDIO_REMOTE_S9_BUTTON,
RT8973A_MUIC_ADC_AUDIO_REMOTE_S10_BUTTON,
RT8973A_MUIC_ADC_AUDIO_REMOTE_S11_BUTTON,
RT8973A_MUIC_ADC_AUDIO_REMOTE_S12_BUTTON,
RT8973A_MUIC_ADC_RESERVED_ACC_1,
RT8973A_MUIC_ADC_RESERVED_ACC_2,
RT8973A_MUIC_ADC_RESERVED_ACC_3,
RT8973A_MUIC_ADC_RESERVED_ACC_4,
RT8973A_MUIC_ADC_RESERVED_ACC_5,
RT8973A_MUIC_ADC_AUDIO_TYPE2,
RT8973A_MUIC_ADC_PHONE_POWERED_DEV,
RT8973A_MUIC_ADC_UNKNOWN_ACC_1,
RT8973A_MUIC_ADC_UNKNOWN_ACC_2,
RT8973A_MUIC_ADC_TA,
RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_OFF_USB,
RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_ON_USB,
RT8973A_MUIC_ADC_UNKNOWN_ACC_3,
RT8973A_MUIC_ADC_UNKNOWN_ACC_4,
RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_OFF_UART,
RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_ON_UART,
RT8973A_MUIC_ADC_UNKNOWN_ACC_5,
RT8973A_MUIC_ADC_OPEN = 0x1f,
/*
	 * The accessories below have the same ADC value (0x1f), so the
	 * Device Type 1 register is used to distinguish the specific accessory.
*/
/* |---------|--ADC| */
/* | [7:5]|[4:0]| */
RT8973A_MUIC_ADC_USB = 0x3f, /* | 001|11111| */
};
/* List of supported interrupt for RT8973A */
static struct muic_irq rt8973a_muic_irqs[] = {
{ RT8973A_INT1_ATTACH, "muic-attach" },
{ RT8973A_INT1_DETACH, "muic-detach" },
{ RT8973A_INT1_CHGDET, "muic-chgdet" },
{ RT8973A_INT1_DCD_T, "muic-dcd-t" },
{ RT8973A_INT1_OVP, "muic-ovp" },
{ RT8973A_INT1_CONNECT, "muic-connect" },
{ RT8973A_INT1_ADC_CHG, "muic-adc-chg" },
{ RT8973A_INT1_OTP, "muic-otp" },
{ RT8973A_INT2_UVLO, "muic-uvlo" },
{ RT8973A_INT2_POR, "muic-por" },
{ RT8973A_INT2_OTP_FET, "muic-otp-fet" },
{ RT8973A_INT2_OVP_FET, "muic-ovp-fet" },
{ RT8973A_INT2_OCP_LATCH, "muic-ocp-latch" },
{ RT8973A_INT2_OCP, "muic-ocp" },
{ RT8973A_INT2_OVP_OCP, "muic-ovp-ocp" },
};
/* Define interrupt list of RT8973A to register regmap_irq */
static const struct regmap_irq rt8973a_irqs[] = {
/* INT1 interrupts */
{ .reg_offset = 0, .mask = RT8973A_INT1_ATTACH_MASK, },
{ .reg_offset = 0, .mask = RT8973A_INT1_DETACH_MASK, },
{ .reg_offset = 0, .mask = RT8973A_INT1_CHGDET_MASK, },
{ .reg_offset = 0, .mask = RT8973A_INT1_DCD_T_MASK, },
{ .reg_offset = 0, .mask = RT8973A_INT1_OVP_MASK, },
{ .reg_offset = 0, .mask = RT8973A_INT1_CONNECT_MASK, },
{ .reg_offset = 0, .mask = RT8973A_INT1_ADC_CHG_MASK, },
{ .reg_offset = 0, .mask = RT8973A_INT1_OTP_MASK, },
/* INT2 interrupts */
{ .reg_offset = 1, .mask = RT8973A_INT2_UVLOT_MASK,},
{ .reg_offset = 1, .mask = RT8973A_INT2_POR_MASK, },
{ .reg_offset = 1, .mask = RT8973A_INT2_OTP_FET_MASK, },
{ .reg_offset = 1, .mask = RT8973A_INT2_OVP_FET_MASK, },
{ .reg_offset = 1, .mask = RT8973A_INT2_OCP_LATCH_MASK, },
{ .reg_offset = 1, .mask = RT8973A_INT2_OCP_MASK, },
{ .reg_offset = 1, .mask = RT8973A_INT2_OVP_OCP_MASK, },
};
static const struct regmap_irq_chip rt8973a_muic_irq_chip = {
.name = "rt8973a",
.status_base = RT8973A_REG_INT1,
.mask_base = RT8973A_REG_INTM1,
.num_regs = 2,
.irqs = rt8973a_irqs,
.num_irqs = ARRAY_SIZE(rt8973a_irqs),
};
/* Define regmap configuration of RT8973A for I2C communication */
static bool rt8973a_muic_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case RT8973A_REG_INTM1:
case RT8973A_REG_INTM2:
return true;
default:
break;
}
return false;
}
static const struct regmap_config rt8973a_muic_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.volatile_reg = rt8973a_muic_volatile_reg,
.max_register = RT8973A_REG_END,
};
/* Change DM_CON/DP_CON/VBUSIN switch according to cable type */
static int rt8973a_muic_set_path(struct rt8973a_muic_info *info,
unsigned int con_sw, bool attached)
{
int ret;
/*
* Don't need to set h/w path according to cable type
* if Auto-configuration mode of CONTROL1 register is true.
*/
if (info->auto_config)
return 0;
if (!attached)
con_sw = DM_DP_SWITCH_UART;
switch (con_sw) {
case DM_DP_SWITCH_OPEN:
case DM_DP_SWITCH_USB:
case DM_DP_SWITCH_UART:
ret = regmap_update_bits(info->regmap, RT8973A_REG_MANUAL_SW1,
RT8973A_REG_MANUAL_SW1_DP_MASK |
RT8973A_REG_MANUAL_SW1_DM_MASK,
con_sw);
if (ret < 0) {
dev_err(info->dev,
"cannot update DM_CON/DP_CON switch\n");
return ret;
}
break;
default:
dev_err(info->dev, "Unknown DM_CON/DP_CON switch type (%d)\n",
con_sw);
return -EINVAL;
}
return 0;
}
static int rt8973a_muic_get_cable_type(struct rt8973a_muic_info *info)
{
unsigned int adc, dev1;
int ret, cable_type;
/* Read ADC value according to external cable or button */
ret = regmap_read(info->regmap, RT8973A_REG_ADC, &adc);
if (ret) {
dev_err(info->dev, "failed to read ADC register\n");
return ret;
}
cable_type = adc & RT8973A_REG_ADC_MASK;
	/* Read Device 1 register to identify the correct cable type */
ret = regmap_read(info->regmap, RT8973A_REG_DEV1, &dev1);
if (ret) {
dev_err(info->dev, "failed to read DEV1 register\n");
return ret;
}
switch (adc) {
case RT8973A_MUIC_ADC_OPEN:
if (dev1 & RT8973A_REG_DEV1_USB_MASK)
cable_type = RT8973A_MUIC_ADC_USB;
else if (dev1 & RT8973A_REG_DEV1_DCPORT_MASK)
cable_type = RT8973A_MUIC_ADC_TA;
else
cable_type = RT8973A_MUIC_ADC_OPEN;
break;
default:
break;
}
return cable_type;
}
static int rt8973a_muic_cable_handler(struct rt8973a_muic_info *info,
enum rt8973a_event_type event)
{
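	/*
	 * Remember the last attached cable type so that it can be reported
	 * as detached on DETACH, OVP and OTP events.
	 */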
static unsigned int prev_cable_type;
unsigned int con_sw = DM_DP_SWITCH_UART;
int ret, cable_type;
unsigned int id;
bool attached = false;
switch (event) {
case RT8973A_EVENT_ATTACH:
cable_type = rt8973a_muic_get_cable_type(info);
attached = true;
break;
case RT8973A_EVENT_DETACH:
cable_type = prev_cable_type;
attached = false;
break;
case RT8973A_EVENT_OVP:
case RT8973A_EVENT_OTP:
dev_warn(info->dev,
"happen Over %s issue. Need to disconnect all cables\n",
event == RT8973A_EVENT_OVP ? "Voltage" : "Temperature");
cable_type = prev_cable_type;
attached = false;
break;
default:
dev_err(info->dev,
"Cannot handle this event (event:%d)\n", event);
return -EINVAL;
}
prev_cable_type = cable_type;
switch (cable_type) {
case RT8973A_MUIC_ADC_OTG:
id = EXTCON_USB_HOST;
con_sw = DM_DP_SWITCH_USB;
break;
case RT8973A_MUIC_ADC_TA:
id = EXTCON_CHG_USB_DCP;
con_sw = DM_DP_SWITCH_OPEN;
break;
case RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_OFF_USB:
case RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_ON_USB:
id = EXTCON_JIG;
con_sw = DM_DP_SWITCH_USB;
break;
case RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_OFF_UART:
case RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_ON_UART:
id = EXTCON_JIG;
con_sw = DM_DP_SWITCH_UART;
break;
case RT8973A_MUIC_ADC_USB:
id = EXTCON_USB;
con_sw = DM_DP_SWITCH_USB;
break;
case RT8973A_MUIC_ADC_OPEN:
return 0;
case RT8973A_MUIC_ADC_UNKNOWN_ACC_1:
case RT8973A_MUIC_ADC_UNKNOWN_ACC_2:
case RT8973A_MUIC_ADC_UNKNOWN_ACC_3:
case RT8973A_MUIC_ADC_UNKNOWN_ACC_4:
case RT8973A_MUIC_ADC_UNKNOWN_ACC_5:
dev_warn(info->dev,
"Unknown accessory type (adc:0x%x)\n", cable_type);
return 0;
case RT8973A_MUIC_ADC_AUDIO_SEND_END_BUTTON:
case RT8973A_MUIC_ADC_AUDIO_REMOTE_S1_BUTTON:
case RT8973A_MUIC_ADC_AUDIO_REMOTE_S2_BUTTON:
case RT8973A_MUIC_ADC_AUDIO_REMOTE_S3_BUTTON:
case RT8973A_MUIC_ADC_AUDIO_REMOTE_S4_BUTTON:
case RT8973A_MUIC_ADC_AUDIO_REMOTE_S5_BUTTON:
case RT8973A_MUIC_ADC_AUDIO_REMOTE_S6_BUTTON:
case RT8973A_MUIC_ADC_AUDIO_REMOTE_S7_BUTTON:
case RT8973A_MUIC_ADC_AUDIO_REMOTE_S8_BUTTON:
case RT8973A_MUIC_ADC_AUDIO_REMOTE_S9_BUTTON:
case RT8973A_MUIC_ADC_AUDIO_REMOTE_S10_BUTTON:
case RT8973A_MUIC_ADC_AUDIO_REMOTE_S11_BUTTON:
case RT8973A_MUIC_ADC_AUDIO_REMOTE_S12_BUTTON:
case RT8973A_MUIC_ADC_AUDIO_TYPE2:
dev_warn(info->dev,
"Audio device/button type (adc:0x%x)\n", cable_type);
return 0;
case RT8973A_MUIC_ADC_RESERVED_ACC_1:
case RT8973A_MUIC_ADC_RESERVED_ACC_2:
case RT8973A_MUIC_ADC_RESERVED_ACC_3:
case RT8973A_MUIC_ADC_RESERVED_ACC_4:
case RT8973A_MUIC_ADC_RESERVED_ACC_5:
case RT8973A_MUIC_ADC_PHONE_POWERED_DEV:
return 0;
default:
dev_err(info->dev,
"Cannot handle this cable_type (adc:0x%x)\n",
cable_type);
return -EINVAL;
}
	/* Change the internal hardware path (DM_CON/DP_CON) */
ret = rt8973a_muic_set_path(info, con_sw, attached);
if (ret < 0)
return ret;
/* Change the state of external accessory */
extcon_set_state_sync(info->edev, id, attached);
if (id == EXTCON_USB)
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SDP,
attached);
return 0;
}
static void rt8973a_muic_irq_work(struct work_struct *work)
{
struct rt8973a_muic_info *info = container_of(work,
struct rt8973a_muic_info, irq_work);
int ret = 0;
if (!info->edev)
return;
mutex_lock(&info->mutex);
/* Detect attached or detached cables */
if (info->irq_attach) {
ret = rt8973a_muic_cable_handler(info, RT8973A_EVENT_ATTACH);
info->irq_attach = false;
}
if (info->irq_detach) {
ret = rt8973a_muic_cable_handler(info, RT8973A_EVENT_DETACH);
info->irq_detach = false;
}
if (info->irq_ovp) {
ret = rt8973a_muic_cable_handler(info, RT8973A_EVENT_OVP);
info->irq_ovp = false;
}
if (info->irq_otp) {
ret = rt8973a_muic_cable_handler(info, RT8973A_EVENT_OTP);
info->irq_otp = false;
}
if (ret < 0)
dev_err(info->dev, "failed to handle MUIC interrupt\n");
mutex_unlock(&info->mutex);
}
static irqreturn_t rt8973a_muic_irq_handler(int irq, void *data)
{
struct rt8973a_muic_info *info = data;
int i, irq_type = -1;
for (i = 0; i < info->num_muic_irqs; i++)
if (irq == info->muic_irqs[i].virq)
irq_type = info->muic_irqs[i].irq;
switch (irq_type) {
case RT8973A_INT1_ATTACH:
info->irq_attach = true;
break;
case RT8973A_INT1_DETACH:
info->irq_detach = true;
break;
case RT8973A_INT1_OVP:
info->irq_ovp = true;
break;
case RT8973A_INT1_OTP:
info->irq_otp = true;
break;
case RT8973A_INT1_CHGDET:
case RT8973A_INT1_DCD_T:
case RT8973A_INT1_CONNECT:
case RT8973A_INT1_ADC_CHG:
case RT8973A_INT2_UVLO:
case RT8973A_INT2_POR:
case RT8973A_INT2_OTP_FET:
case RT8973A_INT2_OVP_FET:
case RT8973A_INT2_OCP_LATCH:
case RT8973A_INT2_OCP:
case RT8973A_INT2_OVP_OCP:
default:
dev_dbg(info->dev,
"Cannot handle this interrupt (%d)\n", irq_type);
break;
}
schedule_work(&info->irq_work);
return IRQ_HANDLED;
}
static void rt8973a_muic_detect_cable_wq(struct work_struct *work)
{
struct rt8973a_muic_info *info = container_of(to_delayed_work(work),
struct rt8973a_muic_info, wq_detcable);
int ret;
	/* Check the cable state and notify whether a cable is attached */
ret = rt8973a_muic_cable_handler(info, RT8973A_EVENT_ATTACH);
if (ret < 0)
dev_warn(info->dev, "failed to detect cable state\n");
}
static void rt8973a_init_dev_type(struct rt8973a_muic_info *info)
{
unsigned int data, vendor_id, version_id;
int i, ret;
	/* To verify I2C communication, print the version_id and vendor_id of the RT8973A */
ret = regmap_read(info->regmap, RT8973A_REG_DEVICE_ID, &data);
if (ret) {
dev_err(info->dev,
"failed to read DEVICE_ID register: %d\n", ret);
return;
}
vendor_id = ((data & RT8973A_REG_DEVICE_ID_VENDOR_MASK) >>
RT8973A_REG_DEVICE_ID_VENDOR_SHIFT);
version_id = ((data & RT8973A_REG_DEVICE_ID_VERSION_MASK) >>
RT8973A_REG_DEVICE_ID_VERSION_SHIFT);
dev_info(info->dev, "Device type: version: 0x%x, vendor: 0x%x\n",
version_id, vendor_id);
	/* Initialize the registers of the RT8973A device for bring-up */
for (i = 0; i < info->num_reg_data; i++) {
u8 reg = info->reg_data[i].reg;
u8 mask = info->reg_data[i].mask;
u8 val = 0;
if (info->reg_data[i].invert)
val = ~info->reg_data[i].val;
else
val = info->reg_data[i].val;
regmap_update_bits(info->regmap, reg, mask, val);
}
	/* Check whether the RT8973A is in auto switching mode or not */
ret = regmap_read(info->regmap, RT8973A_REG_CONTROL1, &data);
if (ret) {
dev_err(info->dev,
"failed to read CONTROL1 register: %d\n", ret);
return;
}
data &= RT8973A_REG_CONTROL1_AUTO_CONFIG_MASK;
if (data) {
info->auto_config = true;
dev_info(info->dev,
"Enable Auto-configuration for internal path\n");
}
}
static int rt8973a_muic_i2c_probe(struct i2c_client *i2c)
{
struct device_node *np = i2c->dev.of_node;
struct rt8973a_muic_info *info;
int i, ret, irq_flags;
if (!np)
return -EINVAL;
info = devm_kzalloc(&i2c->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
i2c_set_clientdata(i2c, info);
info->dev = &i2c->dev;
info->i2c = i2c;
info->irq = i2c->irq;
info->muic_irqs = rt8973a_muic_irqs;
info->num_muic_irqs = ARRAY_SIZE(rt8973a_muic_irqs);
info->reg_data = rt8973a_reg_data;
info->num_reg_data = ARRAY_SIZE(rt8973a_reg_data);
mutex_init(&info->mutex);
INIT_WORK(&info->irq_work, rt8973a_muic_irq_work);
info->regmap = devm_regmap_init_i2c(i2c, &rt8973a_muic_regmap_config);
if (IS_ERR(info->regmap)) {
ret = PTR_ERR(info->regmap);
dev_err(info->dev, "failed to allocate register map: %d\n",
ret);
return ret;
}
/* Support irq domain for RT8973A MUIC device */
irq_flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT | IRQF_SHARED;
ret = regmap_add_irq_chip(info->regmap, info->irq, irq_flags, 0,
&rt8973a_muic_irq_chip, &info->irq_data);
if (ret != 0) {
dev_err(info->dev, "failed to add irq_chip (irq:%d, err:%d)\n",
info->irq, ret);
return ret;
}
for (i = 0; i < info->num_muic_irqs; i++) {
struct muic_irq *muic_irq = &info->muic_irqs[i];
int virq = 0;
virq = regmap_irq_get_virq(info->irq_data, muic_irq->irq);
if (virq <= 0)
return -EINVAL;
muic_irq->virq = virq;
ret = devm_request_threaded_irq(info->dev, virq, NULL,
rt8973a_muic_irq_handler,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
muic_irq->name, info);
if (ret) {
dev_err(info->dev,
"failed: irq request (IRQ: %d, error :%d)\n",
muic_irq->irq, ret);
return ret;
}
}
/* Allocate extcon device */
info->edev = devm_extcon_dev_allocate(info->dev, rt8973a_extcon_cable);
if (IS_ERR(info->edev)) {
dev_err(info->dev, "failed to allocate memory for extcon\n");
return -ENOMEM;
}
/* Register extcon device */
ret = devm_extcon_dev_register(info->dev, info->edev);
if (ret) {
dev_err(info->dev, "failed to register extcon device\n");
return ret;
}
/*
* Detect accessory after completing the initialization of platform
*
* - Use delayed workqueue to detect cable state and then
* notify cable state to notifiee/platform through uevent.
* After completing the booting of platform, the extcon provider
* driver should notify cable state to upper layer.
*/
INIT_DELAYED_WORK(&info->wq_detcable, rt8973a_muic_detect_cable_wq);
queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
msecs_to_jiffies(DELAY_MS_DEFAULT));
/* Initialize RT8973A device and print vendor id and version id */
rt8973a_init_dev_type(info);
return 0;
}
static void rt8973a_muic_i2c_remove(struct i2c_client *i2c)
{
struct rt8973a_muic_info *info = i2c_get_clientdata(i2c);
regmap_del_irq_chip(info->irq, info->irq_data);
}
static const struct of_device_id rt8973a_dt_match[] = {
{ .compatible = "richtek,rt8973a-muic" },
{ },
};
MODULE_DEVICE_TABLE(of, rt8973a_dt_match);
#ifdef CONFIG_PM_SLEEP
static int rt8973a_muic_suspend(struct device *dev)
{
struct i2c_client *i2c = to_i2c_client(dev);
struct rt8973a_muic_info *info = i2c_get_clientdata(i2c);
enable_irq_wake(info->irq);
return 0;
}
static int rt8973a_muic_resume(struct device *dev)
{
struct i2c_client *i2c = to_i2c_client(dev);
struct rt8973a_muic_info *info = i2c_get_clientdata(i2c);
disable_irq_wake(info->irq);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(rt8973a_muic_pm_ops,
rt8973a_muic_suspend, rt8973a_muic_resume);
static const struct i2c_device_id rt8973a_i2c_id[] = {
{ "rt8973a", TYPE_RT8973A },
{ }
};
MODULE_DEVICE_TABLE(i2c, rt8973a_i2c_id);
static struct i2c_driver rt8973a_muic_i2c_driver = {
.driver = {
.name = "rt8973a",
.pm = &rt8973a_muic_pm_ops,
.of_match_table = rt8973a_dt_match,
},
.probe = rt8973a_muic_i2c_probe,
.remove = rt8973a_muic_i2c_remove,
.id_table = rt8973a_i2c_id,
};
static int __init rt8973a_muic_i2c_init(void)
{
return i2c_add_driver(&rt8973a_muic_i2c_driver);
}
subsys_initcall(rt8973a_muic_i2c_init);
MODULE_DESCRIPTION("Richtek RT8973A Extcon driver");
MODULE_AUTHOR("Chanwoo Choi <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/extcon/extcon-rt8973a.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* extcon_gpio.c - Single-state GPIO extcon driver based on extcon class
*
* Copyright (C) 2008 Google, Inc.
* Author: Mike Lockwood <[email protected]>
*
* Modified by MyungJoo Ham <[email protected]> to support extcon
* (originally switch class is supported)
*/
#include <linux/devm-helpers.h>
#include <linux/extcon-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
/**
* struct gpio_extcon_data - A simple GPIO-controlled extcon device state container.
* @edev: Extcon device.
* @work: Work fired by the interrupt.
* @debounce_jiffies: Number of jiffies to wait for the GPIO to stabilize, from the debounce
* value.
* @gpiod: GPIO descriptor for this external connector.
* @extcon_id: The unique id of specific external connector.
* @debounce: Debounce time for GPIO IRQ in ms.
* @check_on_resume: Boolean describing whether to check the state of gpio
* while resuming from sleep.
*/
struct gpio_extcon_data {
struct extcon_dev *edev;
struct delayed_work work;
unsigned long debounce_jiffies;
struct gpio_desc *gpiod;
unsigned int extcon_id;
unsigned long debounce;
bool check_on_resume;
};
static void gpio_extcon_work(struct work_struct *work)
{
int state;
struct gpio_extcon_data *data =
container_of(to_delayed_work(work), struct gpio_extcon_data,
work);
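	/* Read the debounced GPIO level and report it as the connector state */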
state = gpiod_get_value_cansleep(data->gpiod);
extcon_set_state_sync(data->edev, data->extcon_id, state);
}
static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
{
struct gpio_extcon_data *data = dev_id;
queue_delayed_work(system_power_efficient_wq, &data->work,
data->debounce_jiffies);
return IRQ_HANDLED;
}
static int gpio_extcon_probe(struct platform_device *pdev)
{
struct gpio_extcon_data *data;
struct device *dev = &pdev->dev;
unsigned long irq_flags;
int irq;
int ret;
data = devm_kzalloc(dev, sizeof(struct gpio_extcon_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
/*
* FIXME: extcon_id represents the unique identifier of external
* connectors such as EXTCON_USB, EXTCON_DISP_HDMI and so on. extcon_id
	 * is necessary to register the extcon device. However, there is not
	 * yet a way to obtain the extcon id from device-tree or elsewhere;
	 * this still has to be solved later.
*/
if (data->extcon_id > EXTCON_NONE)
return -EINVAL;
data->gpiod = devm_gpiod_get(dev, "extcon", GPIOD_IN);
if (IS_ERR(data->gpiod))
return PTR_ERR(data->gpiod);
irq = gpiod_to_irq(data->gpiod);
if (irq <= 0)
return irq;
/*
* It is unlikely that this is an acknowledged interrupt that goes
	 * away after handling; what we are looking for are falling edges
* if the signal is active low, and rising edges if the signal is
* active high.
*/
if (gpiod_is_active_low(data->gpiod))
irq_flags = IRQF_TRIGGER_FALLING;
else
irq_flags = IRQF_TRIGGER_RISING;
	/* Allocate the extcon device and register it */
data->edev = devm_extcon_dev_allocate(dev, &data->extcon_id);
if (IS_ERR(data->edev)) {
dev_err(dev, "failed to allocate extcon device\n");
return -ENOMEM;
}
ret = devm_extcon_dev_register(dev, data->edev);
if (ret < 0)
return ret;
ret = devm_delayed_work_autocancel(dev, &data->work, gpio_extcon_work);
if (ret)
return ret;
/*
* Request the interrupt of gpio to detect whether external connector
* is attached or detached.
*/
ret = devm_request_any_context_irq(dev, irq,
gpio_irq_handler, irq_flags,
pdev->name, data);
if (ret < 0)
return ret;
platform_set_drvdata(pdev, data);
/* Perform initial detection */
gpio_extcon_work(&data->work.work);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int gpio_extcon_resume(struct device *dev)
{
struct gpio_extcon_data *data;
data = dev_get_drvdata(dev);
if (data->check_on_resume)
queue_delayed_work(system_power_efficient_wq,
&data->work, data->debounce_jiffies);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(gpio_extcon_pm_ops, NULL, gpio_extcon_resume);
static struct platform_driver gpio_extcon_driver = {
.probe = gpio_extcon_probe,
.driver = {
.name = "extcon-gpio",
.pm = &gpio_extcon_pm_ops,
},
};
module_platform_driver(gpio_extcon_driver);
MODULE_AUTHOR("Mike Lockwood <[email protected]>");
MODULE_DESCRIPTION("GPIO extcon driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/extcon/extcon-gpio.c |
// SPDX-License-Identifier: GPL-2.0+
//
// extcon-max14577.c - MAX14577/77836 extcon driver to support MUIC
//
// Copyright (C) 2013,2014 Samsung Electronics
// Chanwoo Choi <[email protected]>
// Krzysztof Kozlowski <[email protected]>
#include <linux/devm-helpers.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mfd/max14577.h>
#include <linux/mfd/max14577-private.h>
#include <linux/extcon-provider.h>
#define DELAY_MS_DEFAULT 17000 /* unit: millisecond */
enum max14577_muic_adc_debounce_time {
ADC_DEBOUNCE_TIME_5MS = 0,
ADC_DEBOUNCE_TIME_10MS,
ADC_DEBOUNCE_TIME_25MS,
ADC_DEBOUNCE_TIME_38_62MS,
};
enum max14577_muic_status {
MAX14577_MUIC_STATUS1 = 0,
MAX14577_MUIC_STATUS2 = 1,
MAX14577_MUIC_STATUS_END,
};
/**
 * struct max14577_muic_irq - MUIC interrupt description
 * @irq: the index in the irq list of the MUIC device
 * @name: the name of the irq
 * @virq: the virtual irq used by the irq domain
 */
struct max14577_muic_irq {
unsigned int irq;
const char *name;
unsigned int virq;
};
static struct max14577_muic_irq max14577_muic_irqs[] = {
{ MAX14577_IRQ_INT1_ADC, "muic-ADC" },
{ MAX14577_IRQ_INT1_ADCLOW, "muic-ADCLOW" },
{ MAX14577_IRQ_INT1_ADCERR, "muic-ADCError" },
{ MAX14577_IRQ_INT2_CHGTYP, "muic-CHGTYP" },
{ MAX14577_IRQ_INT2_CHGDETRUN, "muic-CHGDETRUN" },
{ MAX14577_IRQ_INT2_DCDTMR, "muic-DCDTMR" },
{ MAX14577_IRQ_INT2_DBCHG, "muic-DBCHG" },
{ MAX14577_IRQ_INT2_VBVOLT, "muic-VBVOLT" },
};
static struct max14577_muic_irq max77836_muic_irqs[] = {
{ MAX14577_IRQ_INT1_ADC, "muic-ADC" },
{ MAX14577_IRQ_INT1_ADCLOW, "muic-ADCLOW" },
{ MAX14577_IRQ_INT1_ADCERR, "muic-ADCError" },
{ MAX77836_IRQ_INT1_ADC1K, "muic-ADC1K" },
{ MAX14577_IRQ_INT2_CHGTYP, "muic-CHGTYP" },
{ MAX14577_IRQ_INT2_CHGDETRUN, "muic-CHGDETRUN" },
{ MAX14577_IRQ_INT2_DCDTMR, "muic-DCDTMR" },
{ MAX14577_IRQ_INT2_DBCHG, "muic-DBCHG" },
{ MAX14577_IRQ_INT2_VBVOLT, "muic-VBVOLT" },
{ MAX77836_IRQ_INT2_VIDRM, "muic-VIDRM" },
};
struct max14577_muic_info {
struct device *dev;
struct max14577 *max14577;
struct extcon_dev *edev;
int prev_cable_type;
int prev_chg_type;
u8 status[MAX14577_MUIC_STATUS_END];
struct max14577_muic_irq *muic_irqs;
unsigned int muic_irqs_num;
bool irq_adc;
bool irq_chg;
struct work_struct irq_work;
struct mutex mutex;
/*
* Use delayed workqueue to detect cable state and then
* notify cable state to notifiee/platform through uevent.
* After completing the booting of platform, the extcon provider
* driver should notify cable state to upper layer.
*/
struct delayed_work wq_detcable;
/*
* Default usb/uart path whether UART/USB or AUX_UART/AUX_USB
* h/w path of COMP2/COMN1 on CONTROL1 register.
*/
int path_usb;
int path_uart;
};
enum max14577_muic_cable_group {
MAX14577_CABLE_GROUP_ADC = 0,
MAX14577_CABLE_GROUP_CHG,
};
/* Define supported accessory type */
enum max14577_muic_acc_type {
MAX14577_MUIC_ADC_GROUND = 0x0,
MAX14577_MUIC_ADC_SEND_END_BUTTON,
MAX14577_MUIC_ADC_REMOTE_S1_BUTTON,
MAX14577_MUIC_ADC_REMOTE_S2_BUTTON,
MAX14577_MUIC_ADC_REMOTE_S3_BUTTON,
MAX14577_MUIC_ADC_REMOTE_S4_BUTTON,
MAX14577_MUIC_ADC_REMOTE_S5_BUTTON,
MAX14577_MUIC_ADC_REMOTE_S6_BUTTON,
MAX14577_MUIC_ADC_REMOTE_S7_BUTTON,
MAX14577_MUIC_ADC_REMOTE_S8_BUTTON,
MAX14577_MUIC_ADC_REMOTE_S9_BUTTON,
MAX14577_MUIC_ADC_REMOTE_S10_BUTTON,
MAX14577_MUIC_ADC_REMOTE_S11_BUTTON,
MAX14577_MUIC_ADC_REMOTE_S12_BUTTON,
MAX14577_MUIC_ADC_RESERVED_ACC_1,
MAX14577_MUIC_ADC_RESERVED_ACC_2,
MAX14577_MUIC_ADC_RESERVED_ACC_3,
MAX14577_MUIC_ADC_RESERVED_ACC_4,
MAX14577_MUIC_ADC_RESERVED_ACC_5,
MAX14577_MUIC_ADC_AUDIO_DEVICE_TYPE2,
MAX14577_MUIC_ADC_PHONE_POWERED_DEV,
MAX14577_MUIC_ADC_TTY_CONVERTER,
MAX14577_MUIC_ADC_UART_CABLE,
MAX14577_MUIC_ADC_CEA936A_TYPE1_CHG,
MAX14577_MUIC_ADC_FACTORY_MODE_USB_OFF,
MAX14577_MUIC_ADC_FACTORY_MODE_USB_ON,
MAX14577_MUIC_ADC_AV_CABLE_NOLOAD,
MAX14577_MUIC_ADC_CEA936A_TYPE2_CHG,
MAX14577_MUIC_ADC_FACTORY_MODE_UART_OFF,
MAX14577_MUIC_ADC_FACTORY_MODE_UART_ON,
MAX14577_MUIC_ADC_AUDIO_DEVICE_TYPE1, /* with Remote and Simple Ctrl */
MAX14577_MUIC_ADC_OPEN,
};
static const unsigned int max14577_extcon_cable[] = {
EXTCON_USB,
EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_DCP,
EXTCON_CHG_USB_FAST,
EXTCON_CHG_USB_SLOW,
EXTCON_CHG_USB_CDP,
EXTCON_JIG,
EXTCON_NONE,
};
/*
* max14577_muic_set_debounce_time - Set the debounce time of ADC
* @info: the instance including private data of max14577 MUIC
* @time: the debounce time of ADC
*/
static int max14577_muic_set_debounce_time(struct max14577_muic_info *info,
enum max14577_muic_adc_debounce_time time)
{
	int ret;
switch (time) {
case ADC_DEBOUNCE_TIME_5MS:
case ADC_DEBOUNCE_TIME_10MS:
case ADC_DEBOUNCE_TIME_25MS:
case ADC_DEBOUNCE_TIME_38_62MS:
ret = max14577_update_reg(info->max14577->regmap,
MAX14577_MUIC_REG_CONTROL3,
CTRL3_ADCDBSET_MASK,
time << CTRL3_ADCDBSET_SHIFT);
if (ret) {
dev_err(info->dev, "failed to set ADC debounce time\n");
return ret;
}
break;
default:
dev_err(info->dev, "invalid ADC debounce time\n");
return -EINVAL;
}
return 0;
};
/*
 * max14577_muic_set_path - Set hardware line according to attached cable
 * @info: the instance including private data of max14577 MUIC
 * @val: the path according to attached cable
 * @attached: the state of cable (true:attached, false:detached)
 *
 * The max14577 MUIC device shares its external H/W lines among a variety of
 * cables, so this function sets the internal path of the H/W line according
 * to the type of attached cable.
 */
static int max14577_muic_set_path(struct max14577_muic_info *info,
u8 val, bool attached)
{
u8 ctrl1, ctrl2 = 0;
int ret;
/* Set open state to path before changing hw path */
ret = max14577_update_reg(info->max14577->regmap,
MAX14577_MUIC_REG_CONTROL1,
CLEAR_IDBEN_MICEN_MASK, CTRL1_SW_OPEN);
if (ret < 0) {
dev_err(info->dev, "failed to update MUIC register\n");
return ret;
}
if (attached)
ctrl1 = val;
else
ctrl1 = CTRL1_SW_OPEN;
ret = max14577_update_reg(info->max14577->regmap,
MAX14577_MUIC_REG_CONTROL1,
CLEAR_IDBEN_MICEN_MASK, ctrl1);
if (ret < 0) {
dev_err(info->dev, "failed to update MUIC register\n");
return ret;
}
if (attached)
ctrl2 |= CTRL2_CPEN_MASK; /* LowPwr=0, CPEn=1 */
else
ctrl2 |= CTRL2_LOWPWR_MASK; /* LowPwr=1, CPEn=0 */
ret = max14577_update_reg(info->max14577->regmap,
MAX14577_REG_CONTROL2,
CTRL2_LOWPWR_MASK | CTRL2_CPEN_MASK, ctrl2);
if (ret < 0) {
dev_err(info->dev, "failed to update MUIC register\n");
return ret;
}
dev_dbg(info->dev,
"CONTROL1 : 0x%02x, CONTROL2 : 0x%02x, state : %s\n",
ctrl1, ctrl2, attached ? "attached" : "detached");
return 0;
}
/*
 * max14577_muic_get_cable_type - Return cable type and check cable state
 * @info: the instance including private data of max14577 MUIC
 * @group: the cable group of the attached cable
 * @attached: store cable state and return
 *
 * This function checks whether the cable is attached or detached, and then
 * determines the precise cable type according to the cable group:
 * - MAX14577_CABLE_GROUP_ADC
 * - MAX14577_CABLE_GROUP_CHG
 */
static int max14577_muic_get_cable_type(struct max14577_muic_info *info,
enum max14577_muic_cable_group group, bool *attached)
{
int cable_type = 0;
int adc;
int chg_type;
switch (group) {
case MAX14577_CABLE_GROUP_ADC:
/*
* Read ADC value to check cable type and decide cable state
* according to cable type
*/
adc = info->status[MAX14577_MUIC_STATUS1] & STATUS1_ADC_MASK;
adc >>= STATUS1_ADC_SHIFT;
/*
* Check current cable state/cable type and store cable type
* (info->prev_cable_type) for handling cable when cable is
* detached.
*/
if (adc == MAX14577_MUIC_ADC_OPEN) {
*attached = false;
cable_type = info->prev_cable_type;
info->prev_cable_type = MAX14577_MUIC_ADC_OPEN;
} else {
*attached = true;
cable_type = info->prev_cable_type = adc;
}
break;
case MAX14577_CABLE_GROUP_CHG:
/*
* Read charger type to check cable type and decide cable state
* according to type of charger cable.
*/
chg_type = info->status[MAX14577_MUIC_STATUS2] &
STATUS2_CHGTYP_MASK;
chg_type >>= STATUS2_CHGTYP_SHIFT;
if (chg_type == MAX14577_CHARGER_TYPE_NONE) {
*attached = false;
cable_type = info->prev_chg_type;
info->prev_chg_type = MAX14577_CHARGER_TYPE_NONE;
} else {
*attached = true;
/*
* Check current cable state/cable type and store cable
* type(info->prev_chg_type) for handling cable when
* charger cable is detached.
*/
cable_type = info->prev_chg_type = chg_type;
}
break;
default:
dev_err(info->dev, "Unknown cable group (%d)\n", group);
cable_type = -EINVAL;
break;
}
return cable_type;
}
static int max14577_muic_jig_handler(struct max14577_muic_info *info,
int cable_type, bool attached)
{
int ret = 0;
u8 path = CTRL1_SW_OPEN;
dev_dbg(info->dev,
"external connector is %s (adc:0x%02x)\n",
attached ? "attached" : "detached", cable_type);
switch (cable_type) {
case MAX14577_MUIC_ADC_FACTORY_MODE_USB_OFF: /* ADC_JIG_USB_OFF */
case MAX14577_MUIC_ADC_FACTORY_MODE_USB_ON: /* ADC_JIG_USB_ON */
/* PATH:AP_USB */
path = CTRL1_SW_USB;
break;
case MAX14577_MUIC_ADC_FACTORY_MODE_UART_OFF: /* ADC_JIG_UART_OFF */
/* PATH:AP_UART */
path = CTRL1_SW_UART;
break;
default:
dev_err(info->dev, "failed to detect %s jig cable\n",
attached ? "attached" : "detached");
return -EINVAL;
}
ret = max14577_muic_set_path(info, path, attached);
if (ret < 0)
return ret;
extcon_set_state_sync(info->edev, EXTCON_JIG, attached);
return 0;
}
static int max14577_muic_adc_handler(struct max14577_muic_info *info)
{
int cable_type;
bool attached;
int ret = 0;
/* Check accessory state which is either detached or attached */
cable_type = max14577_muic_get_cable_type(info,
MAX14577_CABLE_GROUP_ADC, &attached);
dev_dbg(info->dev,
"external connector is %s (adc:0x%02x, prev_adc:0x%x)\n",
attached ? "attached" : "detached", cable_type,
info->prev_cable_type);
switch (cable_type) {
case MAX14577_MUIC_ADC_FACTORY_MODE_USB_OFF:
case MAX14577_MUIC_ADC_FACTORY_MODE_USB_ON:
case MAX14577_MUIC_ADC_FACTORY_MODE_UART_OFF:
/* JIG */
ret = max14577_muic_jig_handler(info, cable_type, attached);
if (ret < 0)
return ret;
break;
case MAX14577_MUIC_ADC_GROUND:
case MAX14577_MUIC_ADC_SEND_END_BUTTON:
case MAX14577_MUIC_ADC_REMOTE_S1_BUTTON:
case MAX14577_MUIC_ADC_REMOTE_S2_BUTTON:
case MAX14577_MUIC_ADC_REMOTE_S3_BUTTON:
case MAX14577_MUIC_ADC_REMOTE_S4_BUTTON:
case MAX14577_MUIC_ADC_REMOTE_S5_BUTTON:
case MAX14577_MUIC_ADC_REMOTE_S6_BUTTON:
case MAX14577_MUIC_ADC_REMOTE_S7_BUTTON:
case MAX14577_MUIC_ADC_REMOTE_S8_BUTTON:
case MAX14577_MUIC_ADC_REMOTE_S9_BUTTON:
case MAX14577_MUIC_ADC_REMOTE_S10_BUTTON:
case MAX14577_MUIC_ADC_REMOTE_S11_BUTTON:
case MAX14577_MUIC_ADC_REMOTE_S12_BUTTON:
case MAX14577_MUIC_ADC_RESERVED_ACC_1:
case MAX14577_MUIC_ADC_RESERVED_ACC_2:
case MAX14577_MUIC_ADC_RESERVED_ACC_3:
case MAX14577_MUIC_ADC_RESERVED_ACC_4:
case MAX14577_MUIC_ADC_RESERVED_ACC_5:
case MAX14577_MUIC_ADC_AUDIO_DEVICE_TYPE2:
case MAX14577_MUIC_ADC_PHONE_POWERED_DEV:
case MAX14577_MUIC_ADC_TTY_CONVERTER:
case MAX14577_MUIC_ADC_UART_CABLE:
case MAX14577_MUIC_ADC_CEA936A_TYPE1_CHG:
case MAX14577_MUIC_ADC_AV_CABLE_NOLOAD:
case MAX14577_MUIC_ADC_CEA936A_TYPE2_CHG:
case MAX14577_MUIC_ADC_FACTORY_MODE_UART_ON:
case MAX14577_MUIC_ADC_AUDIO_DEVICE_TYPE1:
		/*
		 * These accessories aren't handled in the general case. If an
		 * additional accessory needs to be detected, proper handling
		 * should be implemented for when it is attached/detached.
		 */
dev_info(info->dev,
"accessory is %s but it isn't used (adc:0x%x)\n",
attached ? "attached" : "detached", cable_type);
return -EAGAIN;
default:
dev_err(info->dev,
"failed to detect %s accessory (adc:0x%x)\n",
attached ? "attached" : "detached", cable_type);
return -EINVAL;
}
return 0;
}
static int max14577_muic_chg_handler(struct max14577_muic_info *info)
{
int chg_type;
bool attached;
int ret = 0;
chg_type = max14577_muic_get_cable_type(info,
MAX14577_CABLE_GROUP_CHG, &attached);
dev_dbg(info->dev,
"external connector is %s(chg_type:0x%x, prev_chg_type:0x%x)\n",
attached ? "attached" : "detached",
chg_type, info->prev_chg_type);
switch (chg_type) {
case MAX14577_CHARGER_TYPE_USB:
/* PATH:AP_USB */
ret = max14577_muic_set_path(info, info->path_usb, attached);
if (ret < 0)
return ret;
extcon_set_state_sync(info->edev, EXTCON_USB, attached);
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SDP,
attached);
break;
case MAX14577_CHARGER_TYPE_DEDICATED_CHG:
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP,
attached);
break;
case MAX14577_CHARGER_TYPE_DOWNSTREAM_PORT:
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_CDP,
attached);
break;
case MAX14577_CHARGER_TYPE_SPECIAL_500MA:
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SLOW,
attached);
break;
case MAX14577_CHARGER_TYPE_SPECIAL_1A:
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_FAST,
attached);
break;
case MAX14577_CHARGER_TYPE_NONE:
case MAX14577_CHARGER_TYPE_DEAD_BATTERY:
break;
default:
dev_err(info->dev,
"failed to detect %s accessory (chg_type:0x%x)\n",
attached ? "attached" : "detached", chg_type);
return -EINVAL;
}
return 0;
}
static void max14577_muic_irq_work(struct work_struct *work)
{
struct max14577_muic_info *info = container_of(work,
struct max14577_muic_info, irq_work);
int ret = 0;
if (!info->edev)
return;
mutex_lock(&info->mutex);
ret = max14577_bulk_read(info->max14577->regmap,
MAX14577_MUIC_REG_STATUS1, info->status, 2);
if (ret) {
dev_err(info->dev, "failed to read MUIC register\n");
mutex_unlock(&info->mutex);
return;
}
if (info->irq_adc) {
ret = max14577_muic_adc_handler(info);
info->irq_adc = false;
}
if (info->irq_chg) {
ret = max14577_muic_chg_handler(info);
info->irq_chg = false;
}
if (ret < 0)
dev_err(info->dev, "failed to handle MUIC interrupt\n");
mutex_unlock(&info->mutex);
}
/*
* Sets irq_adc or irq_chg in max14577_muic_info and returns 1.
* Returns 0 if irq_type does not match registered IRQ for this device type.
*/
static int max14577_parse_irq(struct max14577_muic_info *info, int irq_type)
{
switch (irq_type) {
case MAX14577_IRQ_INT1_ADC:
case MAX14577_IRQ_INT1_ADCLOW:
case MAX14577_IRQ_INT1_ADCERR:
/*
* Handle all of accessory except for
* type of charger accessory.
*/
info->irq_adc = true;
return 1;
case MAX14577_IRQ_INT2_CHGTYP:
case MAX14577_IRQ_INT2_CHGDETRUN:
case MAX14577_IRQ_INT2_DCDTMR:
case MAX14577_IRQ_INT2_DBCHG:
case MAX14577_IRQ_INT2_VBVOLT:
/* Handle charger accessory */
info->irq_chg = true;
return 1;
default:
return 0;
}
}
/*
* Sets irq_adc or irq_chg in max14577_muic_info and returns 1.
* Returns 0 if irq_type does not match registered IRQ for this device type.
*/
static int max77836_parse_irq(struct max14577_muic_info *info, int irq_type)
{
/* First check common max14577 interrupts */
if (max14577_parse_irq(info, irq_type))
return 1;
switch (irq_type) {
case MAX77836_IRQ_INT1_ADC1K:
info->irq_adc = true;
return 1;
case MAX77836_IRQ_INT2_VIDRM:
/* Handle charger accessory */
info->irq_chg = true;
return 1;
default:
return 0;
}
}
static irqreturn_t max14577_muic_irq_handler(int irq, void *data)
{
struct max14577_muic_info *info = data;
int i, irq_type = -1;
bool irq_parsed;
	/*
	 * We may be called multiple times for different nested IRQs,
	 * including changes of INT1_ADC and INT2_CHGTYP at once.
	 * However, we only need to know whether it was an ADC interrupt,
	 * a charger interrupt or both, so decode the IRQ and turn on the
	 * proper flags.
	 */
for (i = 0; i < info->muic_irqs_num; i++)
if (irq == info->muic_irqs[i].virq)
irq_type = info->muic_irqs[i].irq;
switch (info->max14577->dev_type) {
case MAXIM_DEVICE_TYPE_MAX77836:
irq_parsed = max77836_parse_irq(info, irq_type);
break;
case MAXIM_DEVICE_TYPE_MAX14577:
default:
irq_parsed = max14577_parse_irq(info, irq_type);
break;
}
if (!irq_parsed) {
dev_err(info->dev, "muic interrupt: irq %d occurred, skipped\n",
irq_type);
return IRQ_HANDLED;
}
schedule_work(&info->irq_work);
return IRQ_HANDLED;
}
static int max14577_muic_detect_accessory(struct max14577_muic_info *info)
{
int ret = 0;
int adc;
int chg_type;
bool attached;
mutex_lock(&info->mutex);
/* Read STATUSx register to detect accessory */
ret = max14577_bulk_read(info->max14577->regmap,
MAX14577_MUIC_REG_STATUS1, info->status, 2);
if (ret) {
dev_err(info->dev, "failed to read MUIC register\n");
mutex_unlock(&info->mutex);
return ret;
}
adc = max14577_muic_get_cable_type(info, MAX14577_CABLE_GROUP_ADC,
&attached);
if (attached && adc != MAX14577_MUIC_ADC_OPEN) {
ret = max14577_muic_adc_handler(info);
if (ret < 0) {
dev_err(info->dev, "Cannot detect accessory\n");
mutex_unlock(&info->mutex);
return ret;
}
}
chg_type = max14577_muic_get_cable_type(info, MAX14577_CABLE_GROUP_CHG,
&attached);
if (attached && chg_type != MAX14577_CHARGER_TYPE_NONE) {
ret = max14577_muic_chg_handler(info);
if (ret < 0) {
dev_err(info->dev, "Cannot detect charger accessory\n");
mutex_unlock(&info->mutex);
return ret;
}
}
mutex_unlock(&info->mutex);
return 0;
}
static void max14577_muic_detect_cable_wq(struct work_struct *work)
{
struct max14577_muic_info *info = container_of(to_delayed_work(work),
struct max14577_muic_info, wq_detcable);
max14577_muic_detect_accessory(info);
}
static int max14577_muic_probe(struct platform_device *pdev)
{
struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent);
struct max14577_muic_info *info;
int delay_jiffies;
int cable_type;
bool attached;
int ret;
int i;
u8 id;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->dev = &pdev->dev;
info->max14577 = max14577;
platform_set_drvdata(pdev, info);
mutex_init(&info->mutex);
ret = devm_work_autocancel(&pdev->dev, &info->irq_work,
max14577_muic_irq_work);
if (ret)
return ret;
switch (max14577->dev_type) {
case MAXIM_DEVICE_TYPE_MAX77836:
info->muic_irqs = max77836_muic_irqs;
info->muic_irqs_num = ARRAY_SIZE(max77836_muic_irqs);
break;
case MAXIM_DEVICE_TYPE_MAX14577:
default:
info->muic_irqs = max14577_muic_irqs;
info->muic_irqs_num = ARRAY_SIZE(max14577_muic_irqs);
}
/* Support irq domain for max14577 MUIC device */
for (i = 0; i < info->muic_irqs_num; i++) {
struct max14577_muic_irq *muic_irq = &info->muic_irqs[i];
int virq = 0;
virq = regmap_irq_get_virq(max14577->irq_data, muic_irq->irq);
if (virq <= 0)
return -EINVAL;
muic_irq->virq = virq;
ret = devm_request_threaded_irq(&pdev->dev, virq, NULL,
max14577_muic_irq_handler,
IRQF_NO_SUSPEND,
muic_irq->name, info);
if (ret) {
dev_err(&pdev->dev,
"failed: irq request (IRQ: %d, error :%d)\n",
muic_irq->irq, ret);
return ret;
}
}
/* Initialize extcon device */
info->edev = devm_extcon_dev_allocate(&pdev->dev,
max14577_extcon_cable);
if (IS_ERR(info->edev)) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
return PTR_ERR(info->edev);
}
ret = devm_extcon_dev_register(&pdev->dev, info->edev);
if (ret) {
dev_err(&pdev->dev, "failed to register extcon device\n");
return ret;
}
/* Default h/w line path */
info->path_usb = CTRL1_SW_USB;
info->path_uart = CTRL1_SW_UART;
delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
/* Set initial path for UART when JIG is connected to get serial logs */
ret = max14577_bulk_read(info->max14577->regmap,
MAX14577_MUIC_REG_STATUS1, info->status, 2);
if (ret) {
dev_err(info->dev, "Cannot read STATUS registers\n");
return ret;
}
cable_type = max14577_muic_get_cable_type(info, MAX14577_CABLE_GROUP_ADC,
&attached);
if (attached && cable_type == MAX14577_MUIC_ADC_FACTORY_MODE_UART_OFF)
max14577_muic_set_path(info, info->path_uart, true);
	/* Check revision number of MUIC device */
ret = max14577_read_reg(info->max14577->regmap,
MAX14577_REG_DEVICEID, &id);
if (ret < 0) {
dev_err(&pdev->dev, "failed to read revision number\n");
return ret;
}
dev_info(info->dev, "device ID : 0x%x\n", id);
/* Set ADC debounce time */
max14577_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS);
/*
* Detect accessory after completing the initialization of platform
*
* - Use delayed workqueue to detect cable state and then
* notify cable state to notifiee/platform through uevent.
* After completing the booting of platform, the extcon provider
* driver should notify cable state to upper layer.
*/
INIT_DELAYED_WORK(&info->wq_detcable, max14577_muic_detect_cable_wq);
queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
delay_jiffies);
return ret;
}
static const struct platform_device_id max14577_muic_id[] = {
{ "max14577-muic", MAXIM_DEVICE_TYPE_MAX14577, },
{ "max77836-muic", MAXIM_DEVICE_TYPE_MAX77836, },
{ }
};
MODULE_DEVICE_TABLE(platform, max14577_muic_id);
static const struct of_device_id of_max14577_muic_dt_match[] = {
{ .compatible = "maxim,max14577-muic",
.data = (void *)MAXIM_DEVICE_TYPE_MAX14577, },
{ .compatible = "maxim,max77836-muic",
.data = (void *)MAXIM_DEVICE_TYPE_MAX77836, },
{ },
};
MODULE_DEVICE_TABLE(of, of_max14577_muic_dt_match);
static struct platform_driver max14577_muic_driver = {
.driver = {
.name = "max14577-muic",
.of_match_table = of_max14577_muic_dt_match,
},
.probe = max14577_muic_probe,
.id_table = max14577_muic_id,
};
module_platform_driver(max14577_muic_driver);
MODULE_DESCRIPTION("Maxim 14577/77836 Extcon driver");
MODULE_AUTHOR("Chanwoo Choi <[email protected]>, Krzysztof Kozlowski <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:extcon-max14577");
| linux-master | drivers/extcon/extcon-max14577.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/extcon/extcon.c - External Connector (extcon) framework.
*
* Copyright (C) 2015 Samsung Electronics
* Author: Chanwoo Choi <[email protected]>
*
* Copyright (C) 2012 Samsung Electronics
* Author: Donggeun Kim <[email protected]>
* Author: MyungJoo Ham <[email protected]>
*
* based on android/drivers/switch/switch_class.c
* Copyright (C) 2008 Google, Inc.
* Author: Mike Lockwood <[email protected]>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include "extcon.h"
#define SUPPORTED_CABLE_MAX 32
static const struct __extcon_info {
unsigned int type;
unsigned int id;
const char *name;
} extcon_info[] = {
[EXTCON_NONE] = {
.type = EXTCON_TYPE_MISC,
.id = EXTCON_NONE,
.name = "NONE",
},
/* USB external connector */
[EXTCON_USB] = {
.type = EXTCON_TYPE_USB,
.id = EXTCON_USB,
.name = "USB",
},
[EXTCON_USB_HOST] = {
.type = EXTCON_TYPE_USB,
.id = EXTCON_USB_HOST,
.name = "USB-HOST",
},
/* Charging external connector */
[EXTCON_CHG_USB_SDP] = {
.type = EXTCON_TYPE_CHG | EXTCON_TYPE_USB,
.id = EXTCON_CHG_USB_SDP,
.name = "SDP",
},
[EXTCON_CHG_USB_DCP] = {
.type = EXTCON_TYPE_CHG | EXTCON_TYPE_USB,
.id = EXTCON_CHG_USB_DCP,
.name = "DCP",
},
[EXTCON_CHG_USB_CDP] = {
.type = EXTCON_TYPE_CHG | EXTCON_TYPE_USB,
.id = EXTCON_CHG_USB_CDP,
.name = "CDP",
},
[EXTCON_CHG_USB_ACA] = {
.type = EXTCON_TYPE_CHG | EXTCON_TYPE_USB,
.id = EXTCON_CHG_USB_ACA,
.name = "ACA",
},
[EXTCON_CHG_USB_FAST] = {
.type = EXTCON_TYPE_CHG | EXTCON_TYPE_USB,
.id = EXTCON_CHG_USB_FAST,
.name = "FAST-CHARGER",
},
[EXTCON_CHG_USB_SLOW] = {
.type = EXTCON_TYPE_CHG | EXTCON_TYPE_USB,
.id = EXTCON_CHG_USB_SLOW,
.name = "SLOW-CHARGER",
},
[EXTCON_CHG_WPT] = {
.type = EXTCON_TYPE_CHG,
.id = EXTCON_CHG_WPT,
.name = "WPT",
},
[EXTCON_CHG_USB_PD] = {
.type = EXTCON_TYPE_CHG | EXTCON_TYPE_USB,
.id = EXTCON_CHG_USB_PD,
.name = "PD",
},
/* Jack external connector */
[EXTCON_JACK_MICROPHONE] = {
.type = EXTCON_TYPE_JACK,
.id = EXTCON_JACK_MICROPHONE,
.name = "MICROPHONE",
},
[EXTCON_JACK_HEADPHONE] = {
.type = EXTCON_TYPE_JACK,
.id = EXTCON_JACK_HEADPHONE,
.name = "HEADPHONE",
},
[EXTCON_JACK_LINE_IN] = {
.type = EXTCON_TYPE_JACK,
.id = EXTCON_JACK_LINE_IN,
.name = "LINE-IN",
},
[EXTCON_JACK_LINE_OUT] = {
.type = EXTCON_TYPE_JACK,
.id = EXTCON_JACK_LINE_OUT,
.name = "LINE-OUT",
},
[EXTCON_JACK_VIDEO_IN] = {
.type = EXTCON_TYPE_JACK,
.id = EXTCON_JACK_VIDEO_IN,
.name = "VIDEO-IN",
},
[EXTCON_JACK_VIDEO_OUT] = {
.type = EXTCON_TYPE_JACK,
.id = EXTCON_JACK_VIDEO_OUT,
.name = "VIDEO-OUT",
},
[EXTCON_JACK_SPDIF_IN] = {
.type = EXTCON_TYPE_JACK,
.id = EXTCON_JACK_SPDIF_IN,
.name = "SPDIF-IN",
},
[EXTCON_JACK_SPDIF_OUT] = {
.type = EXTCON_TYPE_JACK,
.id = EXTCON_JACK_SPDIF_OUT,
.name = "SPDIF-OUT",
},
/* Display external connector */
[EXTCON_DISP_HDMI] = {
.type = EXTCON_TYPE_DISP,
.id = EXTCON_DISP_HDMI,
.name = "HDMI",
},
[EXTCON_DISP_MHL] = {
.type = EXTCON_TYPE_DISP,
.id = EXTCON_DISP_MHL,
.name = "MHL",
},
[EXTCON_DISP_DVI] = {
.type = EXTCON_TYPE_DISP,
.id = EXTCON_DISP_DVI,
.name = "DVI",
},
[EXTCON_DISP_VGA] = {
.type = EXTCON_TYPE_DISP,
.id = EXTCON_DISP_VGA,
.name = "VGA",
},
[EXTCON_DISP_DP] = {
.type = EXTCON_TYPE_DISP | EXTCON_TYPE_USB,
.id = EXTCON_DISP_DP,
.name = "DP",
},
[EXTCON_DISP_HMD] = {
.type = EXTCON_TYPE_DISP | EXTCON_TYPE_USB,
.id = EXTCON_DISP_HMD,
.name = "HMD",
},
[EXTCON_DISP_CVBS] = {
.type = EXTCON_TYPE_DISP,
.id = EXTCON_DISP_CVBS,
.name = "CVBS",
},
[EXTCON_DISP_EDP] = {
.type = EXTCON_TYPE_DISP,
.id = EXTCON_DISP_EDP,
.name = "EDP",
},
/* Miscellaneous external connector */
[EXTCON_DOCK] = {
.type = EXTCON_TYPE_MISC,
.id = EXTCON_DOCK,
.name = "DOCK",
},
[EXTCON_JIG] = {
.type = EXTCON_TYPE_MISC,
.id = EXTCON_JIG,
.name = "JIG",
},
[EXTCON_MECHANICAL] = {
.type = EXTCON_TYPE_MISC,
.id = EXTCON_MECHANICAL,
.name = "MECHANICAL",
},
{ /* sentinel */ }
};
/**
* struct extcon_cable - An internal data for an external connector.
* @edev: the extcon device
* @cable_index: the index of this cable in the edev
* @attr_g: the attribute group for the cable
* @attr_name: "name" sysfs entry
* @attr_state: "state" sysfs entry
* @attrs: the array pointing to attr_name and attr_state for attr_g
* @usb_propval: the array of USB connector properties
* @chg_propval: the array of charger connector properties
* @jack_propval: the array of jack connector properties
* @disp_propval: the array of display connector properties
* @usb_bits: the bit array of the USB connector property capabilities
* @chg_bits: the bit array of the charger connector property capabilities
* @jack_bits: the bit array of the jack connector property capabilities
* @disp_bits: the bit array of the display connector property capabilities
*/
struct extcon_cable {
struct extcon_dev *edev;
int cable_index;
struct attribute_group attr_g;
struct device_attribute attr_name;
struct device_attribute attr_state;
struct attribute *attrs[3]; /* to be fed to attr_g.attrs */
union extcon_property_value usb_propval[EXTCON_PROP_USB_CNT];
union extcon_property_value chg_propval[EXTCON_PROP_CHG_CNT];
union extcon_property_value jack_propval[EXTCON_PROP_JACK_CNT];
union extcon_property_value disp_propval[EXTCON_PROP_DISP_CNT];
DECLARE_BITMAP(usb_bits, EXTCON_PROP_USB_CNT);
DECLARE_BITMAP(chg_bits, EXTCON_PROP_CHG_CNT);
DECLARE_BITMAP(jack_bits, EXTCON_PROP_JACK_CNT);
DECLARE_BITMAP(disp_bits, EXTCON_PROP_DISP_CNT);
};
static struct class *extcon_class;
static DEFINE_IDA(extcon_dev_ids);
static LIST_HEAD(extcon_dev_list);
static DEFINE_MUTEX(extcon_dev_list_lock);
static int check_mutually_exclusive(struct extcon_dev *edev, u32 new_state)
{
int i;
if (!edev->mutually_exclusive)
return 0;
for (i = 0; edev->mutually_exclusive[i]; i++) {
int weight;
u32 correspondants = new_state & edev->mutually_exclusive[i];
/* calculate the total number of bits set */
weight = hweight32(correspondants);
if (weight > 1)
return i + 1;
}
return 0;
}
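/*
 * Illustrative sketch, not used by the framework itself: a provider that
 * must never report USB and USB-HOST at the same time could pass an array
 * of cable-index bitmasks, terminated by 0, via edev->mutually_exclusive
 * before registering. The indices below assume USB is supported_cable[0]
 * and USB-HOST is supported_cable[1]; the array name is hypothetical.
 */
#if 0
static const u32 example_mutually_exclusive[] = {
	BIT(0) | BIT(1),	/* USB and USB-HOST are mutually exclusive */
	0,			/* sentinel */
};
#endif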
static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id)
{
int i;
/* Find the index of extcon cable in edev->supported_cable */
for (i = 0; i < edev->max_supported; i++) {
if (edev->supported_cable[i] == id)
return i;
}
return -EINVAL;
}
static int get_extcon_type(unsigned int prop)
{
switch (prop) {
case EXTCON_PROP_USB_MIN ... EXTCON_PROP_USB_MAX:
return EXTCON_TYPE_USB;
case EXTCON_PROP_CHG_MIN ... EXTCON_PROP_CHG_MAX:
return EXTCON_TYPE_CHG;
case EXTCON_PROP_JACK_MIN ... EXTCON_PROP_JACK_MAX:
return EXTCON_TYPE_JACK;
case EXTCON_PROP_DISP_MIN ... EXTCON_PROP_DISP_MAX:
return EXTCON_TYPE_DISP;
default:
return -EINVAL;
}
}
static bool is_extcon_attached(struct extcon_dev *edev, unsigned int index)
{
return !!(edev->state & BIT(index));
}
static bool is_extcon_changed(struct extcon_dev *edev, int index,
bool new_state)
{
int state = !!(edev->state & BIT(index));
return (state != new_state);
}
static bool is_extcon_property_supported(unsigned int id, unsigned int prop)
{
int type;
/* Check whether the property is supported or not. */
type = get_extcon_type(prop);
if (type < 0)
return false;
/* Check whether a specific extcon id supports the property or not. */
return !!(extcon_info[id].type & type);
}
static int is_extcon_property_capability(struct extcon_dev *edev,
		unsigned int id, int index, unsigned int prop)
{
struct extcon_cable *cable;
int type, ret;
/* Check whether the property is supported or not. */
type = get_extcon_type(prop);
if (type < 0)
return type;
cable = &edev->cables[index];
switch (type) {
case EXTCON_TYPE_USB:
ret = test_bit(prop - EXTCON_PROP_USB_MIN, cable->usb_bits);
break;
case EXTCON_TYPE_CHG:
ret = test_bit(prop - EXTCON_PROP_CHG_MIN, cable->chg_bits);
break;
case EXTCON_TYPE_JACK:
ret = test_bit(prop - EXTCON_PROP_JACK_MIN, cable->jack_bits);
break;
case EXTCON_TYPE_DISP:
ret = test_bit(prop - EXTCON_PROP_DISP_MIN, cable->disp_bits);
break;
default:
ret = -EINVAL;
}
return ret;
}
static void init_property(struct extcon_dev *edev, unsigned int id, int index)
{
unsigned int type = extcon_info[id].type;
struct extcon_cable *cable = &edev->cables[index];
if (EXTCON_TYPE_USB & type)
memset(cable->usb_propval, 0, sizeof(cable->usb_propval));
if (EXTCON_TYPE_CHG & type)
memset(cable->chg_propval, 0, sizeof(cable->chg_propval));
if (EXTCON_TYPE_JACK & type)
memset(cable->jack_propval, 0, sizeof(cable->jack_propval));
if (EXTCON_TYPE_DISP & type)
memset(cable->disp_propval, 0, sizeof(cable->disp_propval));
}
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int i, count = 0;
struct extcon_dev *edev = dev_get_drvdata(dev);
if (edev->max_supported == 0)
return sysfs_emit(buf, "%u\n", edev->state);
for (i = 0; i < edev->max_supported; i++) {
count += sysfs_emit_at(buf, count, "%s=%d\n",
extcon_info[edev->supported_cable[i]].name,
!!(edev->state & BIT(i)));
}
return count;
}
static DEVICE_ATTR_RO(state);
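/*
 * For reference, the "state" attribute above emits one "NAME=value" line per
 * supported cable. A device supporting USB and USB-HOST with only USB
 * attached would read back roughly as (illustrative values):
 *
 *	USB=1
 *	USB-HOST=0
 */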
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct extcon_dev *edev = dev_get_drvdata(dev);
return sysfs_emit(buf, "%s\n", edev->name);
}
static DEVICE_ATTR_RO(name);
static ssize_t cable_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct extcon_cable *cable = container_of(attr, struct extcon_cable,
attr_name);
int i = cable->cable_index;
return sysfs_emit(buf, "%s\n",
extcon_info[cable->edev->supported_cable[i]].name);
}
static ssize_t cable_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct extcon_cable *cable = container_of(attr, struct extcon_cable,
attr_state);
int i = cable->cable_index;
return sysfs_emit(buf, "%d\n",
extcon_get_state(cable->edev, cable->edev->supported_cable[i]));
}
/**
* extcon_sync() - Synchronize the state for an external connector.
* @edev: the extcon device
* @id: the unique id indicating an external connector
*
 * Note that this function sends a notification in order to synchronize
* the state and property of an external connector.
*
* Returns 0 if success or error number if fail.
*/
int extcon_sync(struct extcon_dev *edev, unsigned int id)
{
char name_buf[120];
char state_buf[120];
char *prop_buf;
char *envp[3];
int env_offset = 0;
int length;
int index;
int state;
unsigned long flags;
if (!edev)
return -EINVAL;
index = find_cable_index_by_id(edev, id);
if (index < 0)
return index;
spin_lock_irqsave(&edev->lock, flags);
state = !!(edev->state & BIT(index));
spin_unlock_irqrestore(&edev->lock, flags);
/*
* Call functions in a raw notifier chain for the specific one
* external connector.
*/
raw_notifier_call_chain(&edev->nh[index], state, edev);
/*
* Call functions in a raw notifier chain for the all supported
* external connectors.
*/
raw_notifier_call_chain(&edev->nh_all, state, edev);
spin_lock_irqsave(&edev->lock, flags);
/* This could be in interrupt handler */
prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
if (!prop_buf) {
/* Unlock early before uevent */
spin_unlock_irqrestore(&edev->lock, flags);
dev_err(&edev->dev, "out of memory in extcon_set_state\n");
kobject_uevent(&edev->dev.kobj, KOBJ_CHANGE);
return -ENOMEM;
}
length = name_show(&edev->dev, NULL, prop_buf);
if (length > 0) {
if (prop_buf[length - 1] == '\n')
prop_buf[length - 1] = 0;
snprintf(name_buf, sizeof(name_buf), "NAME=%s", prop_buf);
envp[env_offset++] = name_buf;
}
length = state_show(&edev->dev, NULL, prop_buf);
if (length > 0) {
if (prop_buf[length - 1] == '\n')
prop_buf[length - 1] = 0;
snprintf(state_buf, sizeof(state_buf), "STATE=%s", prop_buf);
envp[env_offset++] = state_buf;
}
envp[env_offset] = NULL;
/* Unlock early before uevent */
spin_unlock_irqrestore(&edev->lock, flags);
kobject_uevent_env(&edev->dev.kobj, KOBJ_CHANGE, envp);
free_page((unsigned long)prop_buf);
return 0;
}
EXPORT_SYMBOL_GPL(extcon_sync);
/**
* extcon_get_state() - Get the state of an external connector.
* @edev: the extcon device
* @id: the unique id indicating an external connector
*
 * Returns the current state (1 if attached, 0 if detached) of the external
 * connector or a negative error number if it fails.
*/
int extcon_get_state(struct extcon_dev *edev, const unsigned int id)
{
int index, state;
unsigned long flags;
if (!edev)
return -EINVAL;
index = find_cable_index_by_id(edev, id);
if (index < 0)
return index;
spin_lock_irqsave(&edev->lock, flags);
state = is_extcon_attached(edev, index);
spin_unlock_irqrestore(&edev->lock, flags);
return state;
}
EXPORT_SYMBOL_GPL(extcon_get_state);
/**
* extcon_set_state() - Set the state of an external connector.
* @edev: the extcon device
* @id: the unique id indicating an external connector
* @state: the new state of an external connector.
* the default semantics is true: attached / false: detached.
*
 * Note that this function sets the state of an external connector without
 * sending a notification. To synchronize the state of an external connector,
 * use extcon_set_state_sync() or call extcon_sync() afterwards.
*
* Returns 0 if success or error number if fail.
*/
int extcon_set_state(struct extcon_dev *edev, unsigned int id, bool state)
{
unsigned long flags;
int index, ret = 0;
if (!edev)
return -EINVAL;
index = find_cable_index_by_id(edev, id);
if (index < 0)
return index;
spin_lock_irqsave(&edev->lock, flags);
/* Check whether the external connector's state is changed. */
if (!is_extcon_changed(edev, index, state))
goto out;
if (check_mutually_exclusive(edev,
(edev->state & ~BIT(index)) | (state & BIT(index)))) {
ret = -EPERM;
goto out;
}
/*
* Initialize the value of extcon property before setting
* the detached state for an external connector.
*/
if (!state)
init_property(edev, id, index);
/* Update the state for an external connector. */
if (state)
edev->state |= BIT(index);
else
edev->state &= ~(BIT(index));
out:
spin_unlock_irqrestore(&edev->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(extcon_set_state);
/**
* extcon_set_state_sync() - Set the state of an external connector with sync.
* @edev: the extcon device
* @id: the unique id indicating an external connector
* @state: the new state of external connector.
* the default semantics is true: attached / false: detached.
*
 * Note that this function sets the state of the external connector
 * and synchronizes the state by sending a notification.
*
* Returns 0 if success or error number if fail.
*/
int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id, bool state)
{
int ret;
ret = extcon_set_state(edev, id, state);
if (ret < 0)
return ret;
return extcon_sync(edev, id);
}
EXPORT_SYMBOL_GPL(extcon_set_state_sync);
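/*
 * Illustrative provider-side sketch (the function and its use are
 * hypothetical, not part of this file): a detection path typically reports
 * a cable change through extcon_set_state_sync(), which updates the state
 * and then notifies consumers via extcon_sync().
 */
static void __maybe_unused extcon_example_report_usb(struct extcon_dev *edev,
						     bool attached)
{
	extcon_set_state_sync(edev, EXTCON_USB, attached);
	extcon_set_state_sync(edev, EXTCON_CHG_USB_SDP, attached);
}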
/**
* extcon_get_property() - Get the property value of an external connector.
* @edev: the extcon device
* @id: the unique id indicating an external connector
* @prop: the property id indicating an extcon property
* @prop_val: the pointer which store the value of extcon property
*
 * Note that when getting the property value of an external connector,
 * the external connector should be attached. If it is detached, the function
 * returns 0 without a property value. Also, each property should be
 * included in the list of supported properties according to the extcon type.
*
* Returns 0 if success or error number if fail.
*/
int extcon_get_property(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
union extcon_property_value *prop_val)
{
struct extcon_cable *cable;
unsigned long flags;
int index, ret = 0;
*prop_val = (union extcon_property_value){0};
if (!edev)
return -EINVAL;
/* Check whether the property is supported or not */
if (!is_extcon_property_supported(id, prop))
return -EINVAL;
/* Find the cable index of external connector by using id */
index = find_cable_index_by_id(edev, id);
if (index < 0)
return index;
spin_lock_irqsave(&edev->lock, flags);
/* Check whether the property is available or not. */
if (!is_extcon_property_capability(edev, id, index, prop)) {
spin_unlock_irqrestore(&edev->lock, flags);
return -EPERM;
}
/*
* Check whether the external connector is attached.
* If external connector is detached, the user can not
* get the property value.
*/
if (!is_extcon_attached(edev, index)) {
spin_unlock_irqrestore(&edev->lock, flags);
return 0;
}
cable = &edev->cables[index];
/* Get the property value according to extcon type */
switch (prop) {
case EXTCON_PROP_USB_MIN ... EXTCON_PROP_USB_MAX:
*prop_val = cable->usb_propval[prop - EXTCON_PROP_USB_MIN];
break;
case EXTCON_PROP_CHG_MIN ... EXTCON_PROP_CHG_MAX:
*prop_val = cable->chg_propval[prop - EXTCON_PROP_CHG_MIN];
break;
case EXTCON_PROP_JACK_MIN ... EXTCON_PROP_JACK_MAX:
*prop_val = cable->jack_propval[prop - EXTCON_PROP_JACK_MIN];
break;
case EXTCON_PROP_DISP_MIN ... EXTCON_PROP_DISP_MAX:
*prop_val = cable->disp_propval[prop - EXTCON_PROP_DISP_MIN];
break;
default:
ret = -EINVAL;
break;
}
spin_unlock_irqrestore(&edev->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(extcon_get_property);
/**
* extcon_set_property() - Set the property value of an external connector.
* @edev: the extcon device
* @id: the unique id indicating an external connector
* @prop: the property id indicating an extcon property
* @prop_val: the pointer including the new value of extcon property
*
* Note that each property should be included in the list of supported
* properties according to the extcon type.
*
* Returns 0 if success or error number if fail.
*/
int extcon_set_property(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
union extcon_property_value prop_val)
{
struct extcon_cable *cable;
unsigned long flags;
int index, ret = 0;
if (!edev)
return -EINVAL;
/* Check whether the property is supported or not */
if (!is_extcon_property_supported(id, prop))
return -EINVAL;
/* Find the cable index of external connector by using id */
index = find_cable_index_by_id(edev, id);
if (index < 0)
return index;
spin_lock_irqsave(&edev->lock, flags);
/* Check whether the property is available or not. */
if (!is_extcon_property_capability(edev, id, index, prop)) {
spin_unlock_irqrestore(&edev->lock, flags);
return -EPERM;
}
cable = &edev->cables[index];
/* Set the property value according to extcon type */
switch (prop) {
case EXTCON_PROP_USB_MIN ... EXTCON_PROP_USB_MAX:
cable->usb_propval[prop - EXTCON_PROP_USB_MIN] = prop_val;
break;
case EXTCON_PROP_CHG_MIN ... EXTCON_PROP_CHG_MAX:
cable->chg_propval[prop - EXTCON_PROP_CHG_MIN] = prop_val;
break;
case EXTCON_PROP_JACK_MIN ... EXTCON_PROP_JACK_MAX:
cable->jack_propval[prop - EXTCON_PROP_JACK_MIN] = prop_val;
break;
case EXTCON_PROP_DISP_MIN ... EXTCON_PROP_DISP_MAX:
cable->disp_propval[prop - EXTCON_PROP_DISP_MIN] = prop_val;
break;
default:
ret = -EINVAL;
break;
}
spin_unlock_irqrestore(&edev->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(extcon_set_property);
/**
* extcon_set_property_sync() - Set property of an external connector with sync.
* @edev: the extcon device
* @id: the unique id indicating an external connector
* @prop: the property id indicating an extcon property
* @prop_val: the pointer including the new value of extcon property
*
 * Note that when setting the property value of an external connector,
 * the external connector should be attached. Each property should
 * be included in the list of supported properties according to the extcon type.
*
* Returns 0 if success or error number if fail.
*/
int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
union extcon_property_value prop_val)
{
int ret;
ret = extcon_set_property(edev, id, prop, prop_val);
if (ret < 0)
return ret;
return extcon_sync(edev, id);
}
EXPORT_SYMBOL_GPL(extcon_set_property_sync);
/**
* extcon_get_property_capability() - Get the capability of the property
* for an external connector.
* @edev: the extcon device
* @id: the unique id indicating an external connector
* @prop: the property id indicating an extcon property
*
* Returns 1 if the property is available or 0 if not available.
*/
int extcon_get_property_capability(struct extcon_dev *edev, unsigned int id,
unsigned int prop)
{
int index;
if (!edev)
return -EINVAL;
/* Check whether the property is supported or not */
if (!is_extcon_property_supported(id, prop))
return -EINVAL;
/* Find the cable index of external connector by using id */
index = find_cable_index_by_id(edev, id);
if (index < 0)
return index;
return is_extcon_property_capability(edev, id, index, prop);
}
EXPORT_SYMBOL_GPL(extcon_get_property_capability);
/**
* extcon_set_property_capability() - Set the capability of the property
* for an external connector.
* @edev: the extcon device
* @id: the unique id indicating an external connector
* @prop: the property id indicating an extcon property
*
 * Note that this function sets the capability of the property
 * for an external connector by marking the bit in the capability
 * bitmap which means the property is available.
*
* Returns 0 if success or error number if fail.
*/
int extcon_set_property_capability(struct extcon_dev *edev, unsigned int id,
unsigned int prop)
{
struct extcon_cable *cable;
int index, type, ret = 0;
if (!edev)
return -EINVAL;
/* Check whether the property is supported or not. */
if (!is_extcon_property_supported(id, prop))
return -EINVAL;
/* Find the cable index of external connector by using id. */
index = find_cable_index_by_id(edev, id);
if (index < 0)
return index;
type = get_extcon_type(prop);
if (type < 0)
return type;
cable = &edev->cables[index];
switch (type) {
case EXTCON_TYPE_USB:
__set_bit(prop - EXTCON_PROP_USB_MIN, cable->usb_bits);
break;
case EXTCON_TYPE_CHG:
__set_bit(prop - EXTCON_PROP_CHG_MIN, cable->chg_bits);
break;
case EXTCON_TYPE_JACK:
__set_bit(prop - EXTCON_PROP_JACK_MIN, cable->jack_bits);
break;
case EXTCON_TYPE_DISP:
__set_bit(prop - EXTCON_PROP_DISP_MIN, cable->disp_bits);
break;
default:
ret = -EINVAL;
}
return ret;
}
EXPORT_SYMBOL_GPL(extcon_set_property_capability);
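/*
 * Illustrative sketch of the property flow (provider-side, hypothetical
 * snippet): the capability must be declared once, typically at probe time,
 * before the corresponding property value can be set and synchronized.
 */
#if 0
	/* probe time */
	extcon_set_property_capability(edev, EXTCON_USB, EXTCON_PROP_USB_VBUS);

	/* detection path */
	extcon_set_property(edev, EXTCON_USB, EXTCON_PROP_USB_VBUS,
			    (union extcon_property_value)(int)1);
	extcon_sync(edev, EXTCON_USB);
#endif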
/**
* extcon_get_extcon_dev() - Get the extcon device instance from the name.
* @extcon_name: the extcon name provided with extcon_dev_register()
*
* Return the pointer of extcon device if success or ERR_PTR(err) if fail.
* NOTE: This function returns -EPROBE_DEFER so it may only be called from
* probe() functions.
*/
struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
{
struct extcon_dev *sd;
if (!extcon_name)
return ERR_PTR(-EINVAL);
mutex_lock(&extcon_dev_list_lock);
list_for_each_entry(sd, &extcon_dev_list, entry) {
if (!strcmp(sd->name, extcon_name))
goto out;
}
sd = ERR_PTR(-EPROBE_DEFER);
out:
mutex_unlock(&extcon_dev_list_lock);
return sd;
}
EXPORT_SYMBOL_GPL(extcon_get_extcon_dev);
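/*
 * Illustrative consumer-side sketch ("some-extcon-name" is a placeholder):
 * because this helper may return -EPROBE_DEFER, it is only meant to be
 * called from a probe() path.
 */
#if 0
	struct extcon_dev *edev;

	edev = extcon_get_extcon_dev("some-extcon-name");
	if (IS_ERR(edev))
		return PTR_ERR(edev);	/* possibly -EPROBE_DEFER */
#endif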
/**
* extcon_register_notifier() - Register a notifier block to get notified by
* any state changes from the extcon.
* @edev: the extcon device
* @id: the unique id indicating an external connector
* @nb: a notifier block to be registered
*
 * Note that the second parameter given to the callback of nb (val) is
 * the current state of an external connector and the third parameter
 * is the pointer of the extcon device.
*
* Returns 0 if success or error number if fail.
*/
int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb)
{
unsigned long flags;
int ret, idx;
if (!edev || !nb)
return -EINVAL;
idx = find_cable_index_by_id(edev, id);
if (idx < 0)
return idx;
spin_lock_irqsave(&edev->lock, flags);
ret = raw_notifier_chain_register(&edev->nh[idx], nb);
spin_unlock_irqrestore(&edev->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(extcon_register_notifier);
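/*
 * Illustrative consumer-side sketch (names are hypothetical): the notifier
 * callback receives the new cable state in the second parameter and the
 * extcon device in the third, as described in the kernel-doc above.
 */
static int __maybe_unused extcon_example_usb_notifier(struct notifier_block *nb,
						      unsigned long state,
						      void *data)
{
	struct extcon_dev *edev = data;

	dev_dbg(&edev->dev, "EXTCON_USB is now %s\n",
		state ? "attached" : "detached");
	return NOTIFY_OK;
}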
/**
* extcon_unregister_notifier() - Unregister a notifier block from the extcon.
* @edev: the extcon device
* @id: the unique id indicating an external connector
* @nb: a notifier block to be registered
*
* Returns 0 if success or error number if fail.
*/
int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb)
{
unsigned long flags;
int ret, idx;
if (!edev || !nb)
return -EINVAL;
idx = find_cable_index_by_id(edev, id);
if (idx < 0)
return idx;
spin_lock_irqsave(&edev->lock, flags);
ret = raw_notifier_chain_unregister(&edev->nh[idx], nb);
spin_unlock_irqrestore(&edev->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(extcon_unregister_notifier);
/**
* extcon_register_notifier_all() - Register a notifier block for all connectors.
* @edev: the extcon device
* @nb: a notifier block to be registered
*
* Note that this function registers a notifier block in order to receive
* the state change of all supported external connectors from extcon device.
 * And the second parameter given to the callback of nb (val) is
 * the current state and the third parameter is the pointer of the extcon device.
*
* Returns 0 if success or error number if fail.
*/
int extcon_register_notifier_all(struct extcon_dev *edev,
struct notifier_block *nb)
{
unsigned long flags;
int ret;
if (!edev || !nb)
return -EINVAL;
spin_lock_irqsave(&edev->lock, flags);
ret = raw_notifier_chain_register(&edev->nh_all, nb);
spin_unlock_irqrestore(&edev->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(extcon_register_notifier_all);
/**
* extcon_unregister_notifier_all() - Unregister a notifier block from extcon.
* @edev: the extcon device
* @nb: a notifier block to be registered
*
* Returns 0 if success or error number if fail.
*/
int extcon_unregister_notifier_all(struct extcon_dev *edev,
struct notifier_block *nb)
{
unsigned long flags;
int ret;
if (!edev || !nb)
return -EINVAL;
spin_lock_irqsave(&edev->lock, flags);
ret = raw_notifier_chain_unregister(&edev->nh_all, nb);
spin_unlock_irqrestore(&edev->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(extcon_unregister_notifier_all);
static struct attribute *extcon_attrs[] = {
&dev_attr_state.attr,
&dev_attr_name.attr,
NULL,
};
ATTRIBUTE_GROUPS(extcon);
static int create_extcon_class(void)
{
if (extcon_class)
return 0;
extcon_class = class_create("extcon");
if (IS_ERR(extcon_class))
return PTR_ERR(extcon_class);
extcon_class->dev_groups = extcon_groups;
return 0;
}
static void extcon_dev_release(struct device *dev)
{
}
static const char *muex_name = "mutually_exclusive";
static void dummy_sysfs_dev_release(struct device *dev)
{
}
/*
* extcon_dev_allocate() - Allocate the memory of extcon device.
* @supported_cable: the array of the supported external connectors
* ending with EXTCON_NONE.
*
 * Note that this function allocates the memory for the extcon device
 * and initializes the default settings for the extcon device.
 *
 * Returns the pointer to the allocated extcon_dev if it succeeds
 * or ERR_PTR(err) if it fails.
*/
struct extcon_dev *extcon_dev_allocate(const unsigned int *supported_cable)
{
struct extcon_dev *edev;
if (!supported_cable)
return ERR_PTR(-EINVAL);
edev = kzalloc(sizeof(*edev), GFP_KERNEL);
if (!edev)
return ERR_PTR(-ENOMEM);
edev->max_supported = 0;
edev->supported_cable = supported_cable;
return edev;
}
/*
* extcon_dev_free() - Free the memory of extcon device.
* @edev: the extcon device
*/
void extcon_dev_free(struct extcon_dev *edev)
{
kfree(edev);
}
EXPORT_SYMBOL_GPL(extcon_dev_free);
/**
* extcon_alloc_cables() - alloc the cables for extcon device
* @edev: extcon device which has cables
*
* Returns 0 if success or error number if fail.
*/
static int extcon_alloc_cables(struct extcon_dev *edev)
{
int index;
char *str;
struct extcon_cable *cable;
if (!edev)
return -EINVAL;
if (!edev->max_supported)
return 0;
edev->cables = kcalloc(edev->max_supported, sizeof(*edev->cables),
GFP_KERNEL);
if (!edev->cables)
return -ENOMEM;
for (index = 0; index < edev->max_supported; index++) {
cable = &edev->cables[index];
str = kasprintf(GFP_KERNEL, "cable.%d", index);
if (!str) {
for (index--; index >= 0; index--) {
cable = &edev->cables[index];
kfree(cable->attr_g.name);
}
kfree(edev->cables);
return -ENOMEM;
}
cable->edev = edev;
cable->cable_index = index;
cable->attrs[0] = &cable->attr_name.attr;
cable->attrs[1] = &cable->attr_state.attr;
cable->attrs[2] = NULL;
cable->attr_g.name = str;
cable->attr_g.attrs = cable->attrs;
sysfs_attr_init(&cable->attr_name.attr);
cable->attr_name.attr.name = "name";
cable->attr_name.attr.mode = 0444;
cable->attr_name.show = cable_name_show;
sysfs_attr_init(&cable->attr_state.attr);
cable->attr_state.attr.name = "state";
cable->attr_state.attr.mode = 0444;
cable->attr_state.show = cable_state_show;
}
return 0;
}
/**
* extcon_alloc_muex() - alloc the mutual exclusive for extcon device
* @edev: extcon device
*
* Returns 0 if success or error number if fail.
*/
static int extcon_alloc_muex(struct extcon_dev *edev)
{
char *name;
int index;
if (!edev)
return -EINVAL;
if (!(edev->max_supported && edev->mutually_exclusive))
return 0;
/* Count the size of mutually_exclusive array */
for (index = 0; edev->mutually_exclusive[index]; index++)
;
edev->attrs_muex = kcalloc(index + 1, sizeof(*edev->attrs_muex),
GFP_KERNEL);
if (!edev->attrs_muex)
return -ENOMEM;
edev->d_attrs_muex = kcalloc(index, sizeof(*edev->d_attrs_muex),
GFP_KERNEL);
if (!edev->d_attrs_muex) {
kfree(edev->attrs_muex);
return -ENOMEM;
}
for (index = 0; edev->mutually_exclusive[index]; index++) {
name = kasprintf(GFP_KERNEL, "0x%x",
edev->mutually_exclusive[index]);
if (!name) {
for (index--; index >= 0; index--)
kfree(edev->d_attrs_muex[index].attr.name);
kfree(edev->d_attrs_muex);
kfree(edev->attrs_muex);
return -ENOMEM;
}
sysfs_attr_init(&edev->d_attrs_muex[index].attr);
edev->d_attrs_muex[index].attr.name = name;
edev->d_attrs_muex[index].attr.mode = 0000;
edev->attrs_muex[index] = &edev->d_attrs_muex[index].attr;
}
edev->attr_g_muex.name = muex_name;
edev->attr_g_muex.attrs = edev->attrs_muex;
return 0;
}
/**
* extcon_alloc_groups() - alloc the groups for extcon device
* @edev: extcon device
*
* Returns 0 if success or error number if fail.
*/
static int extcon_alloc_groups(struct extcon_dev *edev)
{
int index;
if (!edev)
return -EINVAL;
if (!edev->max_supported)
return 0;
edev->extcon_dev_type.groups = kcalloc(edev->max_supported + 2,
sizeof(*edev->extcon_dev_type.groups),
GFP_KERNEL);
if (!edev->extcon_dev_type.groups)
return -ENOMEM;
edev->extcon_dev_type.name = dev_name(&edev->dev);
edev->extcon_dev_type.release = dummy_sysfs_dev_release;
for (index = 0; index < edev->max_supported; index++)
edev->extcon_dev_type.groups[index] = &edev->cables[index].attr_g;
if (edev->mutually_exclusive)
edev->extcon_dev_type.groups[index] = &edev->attr_g_muex;
edev->dev.type = &edev->extcon_dev_type;
return 0;
}
/**
 * extcon_dev_register() - Register a new extcon device
 * @edev: the extcon device to be registered
 *
 * Among the members of the edev struct, please set the "user initializing
 * data"; do not set the values of "internal data", which are initialized by
 * this function.
 *
 * Note that before calling this function, the caller has to allocate the
 * memory of an extcon device by using extcon_dev_allocate(). And the extcon
 * dev should include the supported_cable information.
*
* Returns 0 if success or error number if fail.
*/
int extcon_dev_register(struct extcon_dev *edev)
{
int ret, index;
ret = create_extcon_class();
if (ret < 0)
return ret;
if (!edev || !edev->supported_cable)
return -EINVAL;
for (index = 0; edev->supported_cable[index] != EXTCON_NONE; index++);
edev->max_supported = index;
if (index > SUPPORTED_CABLE_MAX) {
dev_err(&edev->dev,
"exceed the maximum number of supported cables\n");
return -EINVAL;
}
edev->dev.class = extcon_class;
edev->dev.release = extcon_dev_release;
edev->name = dev_name(edev->dev.parent);
if (IS_ERR_OR_NULL(edev->name)) {
dev_err(&edev->dev,
"extcon device name is null\n");
return -EINVAL;
}
ret = ida_alloc(&extcon_dev_ids, GFP_KERNEL);
if (ret < 0)
return ret;
edev->id = ret;
dev_set_name(&edev->dev, "extcon%d", edev->id);
ret = extcon_alloc_cables(edev);
if (ret < 0)
goto err_alloc_cables;
ret = extcon_alloc_muex(edev);
if (ret < 0)
goto err_alloc_muex;
ret = extcon_alloc_groups(edev);
if (ret < 0)
goto err_alloc_groups;
spin_lock_init(&edev->lock);
if (edev->max_supported) {
edev->nh = kcalloc(edev->max_supported, sizeof(*edev->nh),
GFP_KERNEL);
if (!edev->nh) {
ret = -ENOMEM;
goto err_alloc_nh;
}
}
for (index = 0; index < edev->max_supported; index++)
RAW_INIT_NOTIFIER_HEAD(&edev->nh[index]);
RAW_INIT_NOTIFIER_HEAD(&edev->nh_all);
dev_set_drvdata(&edev->dev, edev);
edev->state = 0;
ret = device_register(&edev->dev);
if (ret) {
put_device(&edev->dev);
goto err_dev;
}
mutex_lock(&extcon_dev_list_lock);
list_add(&edev->entry, &extcon_dev_list);
mutex_unlock(&extcon_dev_list_lock);
return 0;
err_dev:
if (edev->max_supported)
kfree(edev->nh);
err_alloc_nh:
if (edev->max_supported)
kfree(edev->extcon_dev_type.groups);
err_alloc_groups:
if (edev->max_supported && edev->mutually_exclusive) {
for (index = 0; edev->mutually_exclusive[index]; index++)
kfree(edev->d_attrs_muex[index].attr.name);
kfree(edev->d_attrs_muex);
kfree(edev->attrs_muex);
}
err_alloc_muex:
for (index = 0; index < edev->max_supported; index++)
kfree(edev->cables[index].attr_g.name);
if (edev->max_supported)
kfree(edev->cables);
err_alloc_cables:
ida_free(&extcon_dev_ids, edev->id);
return ret;
}
EXPORT_SYMBOL_GPL(extcon_dev_register);
/**
* extcon_dev_unregister() - Unregister the extcon device.
* @edev: the extcon device to be unregistered.
*
* Note that this does not call kfree(edev) because edev was not allocated
* by this class.
*/
void extcon_dev_unregister(struct extcon_dev *edev)
{
int index;
if (!edev)
return;
mutex_lock(&extcon_dev_list_lock);
list_del(&edev->entry);
mutex_unlock(&extcon_dev_list_lock);
if (!get_device(&edev->dev)) {
dev_err(&edev->dev, "Failed to unregister extcon_dev\n");
return;
}
ida_free(&extcon_dev_ids, edev->id);
device_unregister(&edev->dev);
if (edev->mutually_exclusive && edev->max_supported) {
for (index = 0; edev->mutually_exclusive[index];
index++)
kfree(edev->d_attrs_muex[index].attr.name);
kfree(edev->d_attrs_muex);
kfree(edev->attrs_muex);
}
for (index = 0; index < edev->max_supported; index++)
kfree(edev->cables[index].attr_g.name);
if (edev->max_supported) {
kfree(edev->extcon_dev_type.groups);
kfree(edev->cables);
kfree(edev->nh);
}
put_device(&edev->dev);
}
EXPORT_SYMBOL_GPL(extcon_dev_unregister);
#ifdef CONFIG_OF
/*
* extcon_find_edev_by_node - Find the extcon device from devicetree.
* @node : OF node identifying edev
*
* Return the pointer of extcon device if success or ERR_PTR(err) if fail.
*/
struct extcon_dev *extcon_find_edev_by_node(struct device_node *node)
{
struct extcon_dev *edev;
mutex_lock(&extcon_dev_list_lock);
list_for_each_entry(edev, &extcon_dev_list, entry)
if (edev->dev.parent && device_match_of_node(edev->dev.parent, node))
goto out;
edev = ERR_PTR(-EPROBE_DEFER);
out:
mutex_unlock(&extcon_dev_list_lock);
return edev;
}
/*
* extcon_get_edev_by_phandle - Get the extcon device from devicetree.
* @dev : the instance to the given device
* @index : the index into list of extcon_dev
*
* Return the pointer of extcon device if success or ERR_PTR(err) if fail.
*/
struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
{
struct device_node *node, *np = dev_of_node(dev);
struct extcon_dev *edev;
if (!np) {
dev_dbg(dev, "device does not have a device node entry\n");
return ERR_PTR(-EINVAL);
}
node = of_parse_phandle(np, "extcon", index);
if (!node) {
dev_dbg(dev, "failed to get phandle in %pOF node\n", np);
return ERR_PTR(-ENODEV);
}
edev = extcon_find_edev_by_node(node);
of_node_put(node);
return edev;
}
#else
struct extcon_dev *extcon_find_edev_by_node(struct device_node *node)
{
return ERR_PTR(-ENOSYS);
}
struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
{
return ERR_PTR(-ENOSYS);
}
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(extcon_find_edev_by_node);
EXPORT_SYMBOL_GPL(extcon_get_edev_by_phandle);
/**
* extcon_get_edev_name() - Get the name of the extcon device.
* @edev: the extcon device
*/
const char *extcon_get_edev_name(struct extcon_dev *edev)
{
return !edev ? NULL : edev->name;
}
EXPORT_SYMBOL_GPL(extcon_get_edev_name);
static int __init extcon_class_init(void)
{
return create_extcon_class();
}
module_init(extcon_class_init);
static void __exit extcon_class_exit(void)
{
class_destroy(extcon_class);
}
module_exit(extcon_class_exit);
MODULE_AUTHOR("Chanwoo Choi <[email protected]>");
MODULE_AUTHOR("MyungJoo Ham <[email protected]>");
MODULE_DESCRIPTION("External Connector (extcon) framework");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/extcon/extcon.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* extcon-sm5502.c - Silicon Mitus SM5502 extcon driver to support USB switches
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd
* Author: Chanwoo Choi <[email protected]>
*/
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/extcon-provider.h>
#include "extcon-sm5502.h"
#define DELAY_MS_DEFAULT 17000 /* unit: millisecond */
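/*
 * The first cable detection is deferred by this long so that it only runs
 * after the platform has finished booting; see the wq_detcable comment in
 * struct sm5502_muic_info below.
 */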
struct muic_irq {
unsigned int irq;
const char *name;
unsigned int virq;
};
struct reg_data {
u8 reg;
unsigned int val;
bool invert;
};
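/*
 * Note the slightly counter-intuitive handling in sm5502_init_dev_type():
 * when invert is false the bitwise complement (~val) is written to the
 * register, and when invert is true val is written as-is.
 */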
struct sm5502_muic_info {
struct device *dev;
struct extcon_dev *edev;
struct i2c_client *i2c;
struct regmap *regmap;
const struct sm5502_type *type;
struct regmap_irq_chip_data *irq_data;
int irq;
bool irq_attach;
bool irq_detach;
struct work_struct irq_work;
struct mutex mutex;
/*
* Use a delayed workqueue to detect the cable state and then
* notify it to the notifiee/platform through a uevent.
* Once the platform has finished booting, the extcon provider
* driver should notify the cable state to the upper layer.
*/
struct delayed_work wq_detcable;
};
struct sm5502_type {
struct muic_irq *muic_irqs;
unsigned int num_muic_irqs;
const struct regmap_irq_chip *irq_chip;
struct reg_data *reg_data;
unsigned int num_reg_data;
unsigned int otg_dev_type1;
int (*parse_irq)(struct sm5502_muic_info *info, int irq_type);
};
/* Default value of SM5502 register to bring up MUIC device. */
static struct reg_data sm5502_reg_data[] = {
{
.reg = SM5502_REG_RESET,
.val = SM5502_REG_RESET_MASK,
.invert = true,
}, {
.reg = SM5502_REG_CONTROL,
.val = SM5502_REG_CONTROL_MASK_INT_MASK,
.invert = false,
}, {
.reg = SM5502_REG_INTMASK1,
.val = SM5502_REG_INTM1_KP_MASK
| SM5502_REG_INTM1_LKP_MASK
| SM5502_REG_INTM1_LKR_MASK,
.invert = true,
}, {
.reg = SM5502_REG_INTMASK2,
.val = SM5502_REG_INTM2_VBUS_DET_MASK
| SM5502_REG_INTM2_REV_ACCE_MASK
| SM5502_REG_INTM2_ADC_CHG_MASK
| SM5502_REG_INTM2_STUCK_KEY_MASK
| SM5502_REG_INTM2_STUCK_KEY_RCV_MASK
| SM5502_REG_INTM2_MHL_MASK,
.invert = true,
},
};
/* Default value of SM5504 register to bring up MUIC device. */
static struct reg_data sm5504_reg_data[] = {
{
.reg = SM5502_REG_RESET,
.val = SM5502_REG_RESET_MASK,
.invert = true,
}, {
.reg = SM5502_REG_INTMASK1,
.val = SM5504_REG_INTM1_ATTACH_MASK
| SM5504_REG_INTM1_DETACH_MASK,
.invert = false,
}, {
.reg = SM5502_REG_INTMASK2,
.val = SM5504_REG_INTM2_RID_CHG_MASK
| SM5504_REG_INTM2_UVLO_MASK
| SM5504_REG_INTM2_POR_MASK,
.invert = true,
}, {
.reg = SM5502_REG_CONTROL,
.val = SM5502_REG_CONTROL_MANUAL_SW_MASK
| SM5504_REG_CONTROL_CHGTYP_MASK
| SM5504_REG_CONTROL_USBCHDEN_MASK
| SM5504_REG_CONTROL_ADC_EN_MASK,
.invert = true,
},
};
/* List of detectable cables */
static const unsigned int sm5502_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_DCP,
EXTCON_NONE,
};
/* Define supported accessory type */
enum sm5502_muic_acc_type {
SM5502_MUIC_ADC_GROUND = 0x0,
SM5502_MUIC_ADC_SEND_END_BUTTON,
SM5502_MUIC_ADC_REMOTE_S1_BUTTON,
SM5502_MUIC_ADC_REMOTE_S2_BUTTON,
SM5502_MUIC_ADC_REMOTE_S3_BUTTON,
SM5502_MUIC_ADC_REMOTE_S4_BUTTON,
SM5502_MUIC_ADC_REMOTE_S5_BUTTON,
SM5502_MUIC_ADC_REMOTE_S6_BUTTON,
SM5502_MUIC_ADC_REMOTE_S7_BUTTON,
SM5502_MUIC_ADC_REMOTE_S8_BUTTON,
SM5502_MUIC_ADC_REMOTE_S9_BUTTON,
SM5502_MUIC_ADC_REMOTE_S10_BUTTON,
SM5502_MUIC_ADC_REMOTE_S11_BUTTON,
SM5502_MUIC_ADC_REMOTE_S12_BUTTON,
SM5502_MUIC_ADC_RESERVED_ACC_1,
SM5502_MUIC_ADC_RESERVED_ACC_2,
SM5502_MUIC_ADC_RESERVED_ACC_3,
SM5502_MUIC_ADC_RESERVED_ACC_4,
SM5502_MUIC_ADC_RESERVED_ACC_5,
SM5502_MUIC_ADC_AUDIO_TYPE2,
SM5502_MUIC_ADC_PHONE_POWERED_DEV,
SM5502_MUIC_ADC_TTY_CONVERTER,
SM5502_MUIC_ADC_UART_CABLE,
SM5502_MUIC_ADC_TYPE1_CHARGER,
SM5502_MUIC_ADC_FACTORY_MODE_BOOT_OFF_USB,
SM5502_MUIC_ADC_FACTORY_MODE_BOOT_ON_USB,
SM5502_MUIC_ADC_AUDIO_VIDEO_CABLE,
SM5502_MUIC_ADC_TYPE2_CHARGER,
SM5502_MUIC_ADC_FACTORY_MODE_BOOT_OFF_UART,
SM5502_MUIC_ADC_FACTORY_MODE_BOOT_ON_UART,
SM5502_MUIC_ADC_AUDIO_TYPE1,
SM5502_MUIC_ADC_OPEN = 0x1f,
/*
* The accessories below share the same ADC value (0x1f or 0x1e),
* so the Device Type1 register is used to tell them apart.
*/
/* |---------|--ADC| */
/* | [7:5]|[4:0]| */
SM5502_MUIC_ADC_AUDIO_TYPE1_FULL_REMOTE = 0x3e, /* | 001|11110| */
SM5502_MUIC_ADC_AUDIO_TYPE1_SEND_END = 0x5e, /* | 010|11110| */
/* |Dev Type1|--ADC| */
SM5502_MUIC_ADC_GROUND_USB_OTG = 0x80, /* | 100|00000| */
SM5502_MUIC_ADC_OPEN_USB = 0x5f, /* | 010|11111| */
SM5502_MUIC_ADC_OPEN_TA = 0xdf, /* | 110|11111| */
SM5502_MUIC_ADC_OPEN_USB_OTG = 0xff, /* | 111|11111| */
};
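/*
 * The composite values above (e.g. SM5502_MUIC_ADC_OPEN_TA) are not read
 * from a single register: sm5502_muic_get_cable_type() synthesizes them from
 * the raw ADC reading plus the DEV_TYPE1 register whenever the ADC value
 * alone (GROUND or OPEN) is ambiguous.
 */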
/* List of supported interrupt for SM5502 */
static struct muic_irq sm5502_muic_irqs[] = {
{ SM5502_IRQ_INT1_ATTACH, "muic-attach" },
{ SM5502_IRQ_INT1_DETACH, "muic-detach" },
{ SM5502_IRQ_INT1_KP, "muic-kp" },
{ SM5502_IRQ_INT1_LKP, "muic-lkp" },
{ SM5502_IRQ_INT1_LKR, "muic-lkr" },
{ SM5502_IRQ_INT1_OVP_EVENT, "muic-ovp-event" },
{ SM5502_IRQ_INT1_OCP_EVENT, "muic-ocp-event" },
{ SM5502_IRQ_INT1_OVP_OCP_DIS, "muic-ovp-ocp-dis" },
{ SM5502_IRQ_INT2_VBUS_DET, "muic-vbus-det" },
{ SM5502_IRQ_INT2_REV_ACCE, "muic-rev-acce" },
{ SM5502_IRQ_INT2_ADC_CHG, "muic-adc-chg" },
{ SM5502_IRQ_INT2_STUCK_KEY, "muic-stuck-key" },
{ SM5502_IRQ_INT2_STUCK_KEY_RCV, "muic-stuck-key-rcv" },
{ SM5502_IRQ_INT2_MHL, "muic-mhl" },
};
/* Define interrupt list of SM5502 to register regmap_irq */
static const struct regmap_irq sm5502_irqs[] = {
/* INT1 interrupts */
{ .reg_offset = 0, .mask = SM5502_IRQ_INT1_ATTACH_MASK, },
{ .reg_offset = 0, .mask = SM5502_IRQ_INT1_DETACH_MASK, },
{ .reg_offset = 0, .mask = SM5502_IRQ_INT1_KP_MASK, },
{ .reg_offset = 0, .mask = SM5502_IRQ_INT1_LKP_MASK, },
{ .reg_offset = 0, .mask = SM5502_IRQ_INT1_LKR_MASK, },
{ .reg_offset = 0, .mask = SM5502_IRQ_INT1_OVP_EVENT_MASK, },
{ .reg_offset = 0, .mask = SM5502_IRQ_INT1_OCP_EVENT_MASK, },
{ .reg_offset = 0, .mask = SM5502_IRQ_INT1_OVP_OCP_DIS_MASK, },
/* INT2 interrupts */
{ .reg_offset = 1, .mask = SM5502_IRQ_INT2_VBUS_DET_MASK,},
{ .reg_offset = 1, .mask = SM5502_IRQ_INT2_REV_ACCE_MASK, },
{ .reg_offset = 1, .mask = SM5502_IRQ_INT2_ADC_CHG_MASK, },
{ .reg_offset = 1, .mask = SM5502_IRQ_INT2_STUCK_KEY_MASK, },
{ .reg_offset = 1, .mask = SM5502_IRQ_INT2_STUCK_KEY_RCV_MASK, },
{ .reg_offset = 1, .mask = SM5502_IRQ_INT2_MHL_MASK, },
};
static const struct regmap_irq_chip sm5502_muic_irq_chip = {
.name = "sm5502",
.status_base = SM5502_REG_INT1,
.mask_base = SM5502_REG_INTMASK1,
.num_regs = 2,
.irqs = sm5502_irqs,
.num_irqs = ARRAY_SIZE(sm5502_irqs),
};
/* List of supported interrupt for SM5504 */
static struct muic_irq sm5504_muic_irqs[] = {
{ SM5504_IRQ_INT1_ATTACH, "muic-attach" },
{ SM5504_IRQ_INT1_DETACH, "muic-detach" },
{ SM5504_IRQ_INT1_CHG_DET, "muic-chg-det" },
{ SM5504_IRQ_INT1_DCD_OUT, "muic-dcd-out" },
{ SM5504_IRQ_INT1_OVP_EVENT, "muic-ovp-event" },
{ SM5504_IRQ_INT1_CONNECT, "muic-connect" },
{ SM5504_IRQ_INT1_ADC_CHG, "muic-adc-chg" },
{ SM5504_IRQ_INT2_RID_CHG, "muic-rid-chg" },
{ SM5504_IRQ_INT2_UVLO, "muic-uvlo" },
{ SM5504_IRQ_INT2_POR, "muic-por" },
{ SM5504_IRQ_INT2_OVP_FET, "muic-ovp-fet" },
{ SM5504_IRQ_INT2_OCP_LATCH, "muic-ocp-latch" },
{ SM5504_IRQ_INT2_OCP_EVENT, "muic-ocp-event" },
{ SM5504_IRQ_INT2_OVP_OCP_EVENT, "muic-ovp-ocp-event" },
};
/* Define interrupt list of SM5504 to register regmap_irq */
static const struct regmap_irq sm5504_irqs[] = {
/* INT1 interrupts */
{ .reg_offset = 0, .mask = SM5504_IRQ_INT1_ATTACH_MASK, },
{ .reg_offset = 0, .mask = SM5504_IRQ_INT1_DETACH_MASK, },
{ .reg_offset = 0, .mask = SM5504_IRQ_INT1_CHG_DET_MASK, },
{ .reg_offset = 0, .mask = SM5504_IRQ_INT1_DCD_OUT_MASK, },
{ .reg_offset = 0, .mask = SM5504_IRQ_INT1_OVP_MASK, },
{ .reg_offset = 0, .mask = SM5504_IRQ_INT1_CONNECT_MASK, },
{ .reg_offset = 0, .mask = SM5504_IRQ_INT1_ADC_CHG_MASK, },
/* INT2 interrupts */
{ .reg_offset = 1, .mask = SM5504_IRQ_INT2_RID_CHG_MASK,},
{ .reg_offset = 1, .mask = SM5504_IRQ_INT2_UVLO_MASK, },
{ .reg_offset = 1, .mask = SM5504_IRQ_INT2_POR_MASK, },
{ .reg_offset = 1, .mask = SM5504_IRQ_INT2_OVP_FET_MASK, },
{ .reg_offset = 1, .mask = SM5504_IRQ_INT2_OCP_LATCH_MASK, },
{ .reg_offset = 1, .mask = SM5504_IRQ_INT2_OCP_EVENT_MASK, },
{ .reg_offset = 1, .mask = SM5504_IRQ_INT2_OVP_OCP_EVENT_MASK, },
};
static const struct regmap_irq_chip sm5504_muic_irq_chip = {
.name = "sm5504",
.status_base = SM5502_REG_INT1,
.mask_base = SM5502_REG_INTMASK1,
.num_regs = 2,
.irqs = sm5504_irqs,
.num_irqs = ARRAY_SIZE(sm5504_irqs),
};
/* Define regmap configuration of SM5502 for I2C communication */
static bool sm5502_muic_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case SM5502_REG_INTMASK1:
case SM5502_REG_INTMASK2:
return true;
default:
break;
}
return false;
}
static const struct regmap_config sm5502_muic_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.volatile_reg = sm5502_muic_volatile_reg,
.max_register = SM5502_REG_END,
};
/* Change DM_CON/DP_CON/VBUSIN switch according to cable type */
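/*
 * Called from sm5502_muic_cable_handler(); when attached is false both the
 * DM/DP and VBUSIN switches are forced open regardless of the requested
 * con_sw/vbus_sw values.
 */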
static int sm5502_muic_set_path(struct sm5502_muic_info *info,
unsigned int con_sw, unsigned int vbus_sw,
bool attached)
{
int ret;
if (!attached) {
con_sw = DM_DP_SWITCH_OPEN;
vbus_sw = VBUSIN_SWITCH_OPEN;
}
switch (con_sw) {
case DM_DP_SWITCH_OPEN:
case DM_DP_SWITCH_USB:
case DM_DP_SWITCH_AUDIO:
case DM_DP_SWITCH_UART:
ret = regmap_update_bits(info->regmap, SM5502_REG_MANUAL_SW1,
SM5502_REG_MANUAL_SW1_DP_MASK |
SM5502_REG_MANUAL_SW1_DM_MASK,
con_sw);
if (ret < 0) {
dev_err(info->dev,
"cannot update DM_CON/DP_CON switch\n");
return ret;
}
break;
default:
dev_err(info->dev, "Unknown DM_CON/DP_CON switch type (%d)\n",
con_sw);
return -EINVAL;
}
switch (vbus_sw) {
case VBUSIN_SWITCH_OPEN:
case VBUSIN_SWITCH_VBUSOUT:
case VBUSIN_SWITCH_MIC:
case VBUSIN_SWITCH_VBUSOUT_WITH_USB:
ret = regmap_update_bits(info->regmap, SM5502_REG_MANUAL_SW1,
SM5502_REG_MANUAL_SW1_VBUSIN_MASK,
vbus_sw);
if (ret < 0) {
dev_err(info->dev,
"cannot update VBUSIN switch\n");
return ret;
}
break;
default:
dev_err(info->dev, "Unknown VBUS switch type (%d)\n", vbus_sw);
return -EINVAL;
}
return 0;
}
/* Return cable type of attached or detached accessories */
static unsigned int sm5502_muic_get_cable_type(struct sm5502_muic_info *info)
{
unsigned int cable_type, adc, dev_type1;
int ret;
/* Read ADC value according to external cable or button */
ret = regmap_read(info->regmap, SM5502_REG_ADC, &adc);
if (ret) {
dev_err(info->dev, "failed to read ADC register\n");
return ret;
}
/*
* If ADC is SM5502_MUIC_ADC_GROUND(0x0), no external cable is
* connected to the MUIC device.
*/
cable_type = adc & SM5502_REG_ADC_MASK;
switch (cable_type) {
case SM5502_MUIC_ADC_GROUND:
ret = regmap_read(info->regmap, SM5502_REG_DEV_TYPE1,
&dev_type1);
if (ret) {
dev_err(info->dev, "failed to read DEV_TYPE1 reg\n");
return ret;
}
if (dev_type1 == info->type->otg_dev_type1) {
cable_type = SM5502_MUIC_ADC_GROUND_USB_OTG;
} else {
dev_dbg(info->dev,
"cannot identify the cable type: adc(0x%x), dev_type1(0x%x)\n",
adc, dev_type1);
return -EINVAL;
}
break;
case SM5502_MUIC_ADC_SEND_END_BUTTON:
case SM5502_MUIC_ADC_REMOTE_S1_BUTTON:
case SM5502_MUIC_ADC_REMOTE_S2_BUTTON:
case SM5502_MUIC_ADC_REMOTE_S3_BUTTON:
case SM5502_MUIC_ADC_REMOTE_S4_BUTTON:
case SM5502_MUIC_ADC_REMOTE_S5_BUTTON:
case SM5502_MUIC_ADC_REMOTE_S6_BUTTON:
case SM5502_MUIC_ADC_REMOTE_S7_BUTTON:
case SM5502_MUIC_ADC_REMOTE_S8_BUTTON:
case SM5502_MUIC_ADC_REMOTE_S9_BUTTON:
case SM5502_MUIC_ADC_REMOTE_S10_BUTTON:
case SM5502_MUIC_ADC_REMOTE_S11_BUTTON:
case SM5502_MUIC_ADC_REMOTE_S12_BUTTON:
case SM5502_MUIC_ADC_RESERVED_ACC_1:
case SM5502_MUIC_ADC_RESERVED_ACC_2:
case SM5502_MUIC_ADC_RESERVED_ACC_3:
case SM5502_MUIC_ADC_RESERVED_ACC_4:
case SM5502_MUIC_ADC_RESERVED_ACC_5:
case SM5502_MUIC_ADC_AUDIO_TYPE2:
case SM5502_MUIC_ADC_PHONE_POWERED_DEV:
case SM5502_MUIC_ADC_TTY_CONVERTER:
case SM5502_MUIC_ADC_UART_CABLE:
case SM5502_MUIC_ADC_TYPE1_CHARGER:
case SM5502_MUIC_ADC_FACTORY_MODE_BOOT_OFF_USB:
case SM5502_MUIC_ADC_FACTORY_MODE_BOOT_ON_USB:
case SM5502_MUIC_ADC_AUDIO_VIDEO_CABLE:
case SM5502_MUIC_ADC_TYPE2_CHARGER:
case SM5502_MUIC_ADC_FACTORY_MODE_BOOT_OFF_UART:
case SM5502_MUIC_ADC_FACTORY_MODE_BOOT_ON_UART:
break;
case SM5502_MUIC_ADC_AUDIO_TYPE1:
/*
* Check whether cable type is
* SM5502_MUIC_ADC_AUDIO_TYPE1_FULL_REMOTE
* or SM5502_MUIC_ADC_AUDIO_TYPE1_SEND_END
* by using Button event.
*/
break;
case SM5502_MUIC_ADC_OPEN:
ret = regmap_read(info->regmap, SM5502_REG_DEV_TYPE1,
&dev_type1);
if (ret) {
dev_err(info->dev, "failed to read DEV_TYPE1 reg\n");
return ret;
}
if (dev_type1 == info->type->otg_dev_type1) {
cable_type = SM5502_MUIC_ADC_OPEN_USB_OTG;
break;
}
switch (dev_type1) {
case SM5502_REG_DEV_TYPE1_USB_SDP_MASK:
cable_type = SM5502_MUIC_ADC_OPEN_USB;
break;
case SM5502_REG_DEV_TYPE1_DEDICATED_CHG_MASK:
cable_type = SM5502_MUIC_ADC_OPEN_TA;
break;
default:
dev_dbg(info->dev,
"cannot identify the cable type: adc(0x%x)\n",
adc);
return -EINVAL;
}
break;
default:
dev_err(info->dev,
"failed to identify the cable type: adc(0x%x)\n", adc);
return -EINVAL;
}
return cable_type;
}
static int sm5502_muic_cable_handler(struct sm5502_muic_info *info,
bool attached)
{
static unsigned int prev_cable_type = SM5502_MUIC_ADC_GROUND;
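/*
 * Note: prev_cable_type is function-static, so the previously reported
 * cable type is shared by every device bound to this driver; only the most
 * recently detected cable can be reported as detached again.
 */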
unsigned int cable_type = SM5502_MUIC_ADC_GROUND;
unsigned int con_sw = DM_DP_SWITCH_OPEN;
unsigned int vbus_sw = VBUSIN_SWITCH_OPEN;
unsigned int id;
int ret;
/* Get the type of attached or detached cable */
if (attached)
cable_type = sm5502_muic_get_cable_type(info);
else
cable_type = prev_cable_type;
prev_cable_type = cable_type;
switch (cable_type) {
case SM5502_MUIC_ADC_OPEN_USB:
id = EXTCON_USB;
con_sw = DM_DP_SWITCH_USB;
vbus_sw = VBUSIN_SWITCH_VBUSOUT_WITH_USB;
break;
case SM5502_MUIC_ADC_OPEN_TA:
id = EXTCON_CHG_USB_DCP;
con_sw = DM_DP_SWITCH_OPEN;
vbus_sw = VBUSIN_SWITCH_VBUSOUT;
break;
case SM5502_MUIC_ADC_GROUND_USB_OTG:
case SM5502_MUIC_ADC_OPEN_USB_OTG:
id = EXTCON_USB_HOST;
con_sw = DM_DP_SWITCH_USB;
vbus_sw = VBUSIN_SWITCH_OPEN;
break;
default:
dev_dbg(info->dev,
"cannot handle this cable_type (0x%x)\n", cable_type);
return 0;
}
/* Change internal hardware path(DM_CON/DP_CON, VBUSIN) */
ret = sm5502_muic_set_path(info, con_sw, vbus_sw, attached);
if (ret < 0)
return ret;
/* Change the state of external accessory */
extcon_set_state_sync(info->edev, id, attached);
if (id == EXTCON_USB)
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SDP,
attached);
return 0;
}
static void sm5502_muic_irq_work(struct work_struct *work)
{
struct sm5502_muic_info *info = container_of(work,
struct sm5502_muic_info, irq_work);
int ret = 0;
if (!info->edev)
return;
mutex_lock(&info->mutex);
/* Detect attached or detached cables */
if (info->irq_attach) {
ret = sm5502_muic_cable_handler(info, true);
info->irq_attach = false;
}
if (info->irq_detach) {
ret = sm5502_muic_cable_handler(info, false);
info->irq_detach = false;
}
if (ret < 0)
dev_err(info->dev, "failed to handle MUIC interrupt\n");
mutex_unlock(&info->mutex);
}
/*
* Sets irq_attach or irq_detach in sm5502_muic_info according to the
* interrupt type; unrecognized interrupt types are ignored. Always returns 0.
*/
static int sm5502_parse_irq(struct sm5502_muic_info *info, int irq_type)
{
switch (irq_type) {
case SM5502_IRQ_INT1_ATTACH:
info->irq_attach = true;
break;
case SM5502_IRQ_INT1_DETACH:
info->irq_detach = true;
break;
case SM5502_IRQ_INT1_KP:
case SM5502_IRQ_INT1_LKP:
case SM5502_IRQ_INT1_LKR:
case SM5502_IRQ_INT1_OVP_EVENT:
case SM5502_IRQ_INT1_OCP_EVENT:
case SM5502_IRQ_INT1_OVP_OCP_DIS:
case SM5502_IRQ_INT2_VBUS_DET:
case SM5502_IRQ_INT2_REV_ACCE:
case SM5502_IRQ_INT2_ADC_CHG:
case SM5502_IRQ_INT2_STUCK_KEY:
case SM5502_IRQ_INT2_STUCK_KEY_RCV:
case SM5502_IRQ_INT2_MHL:
default:
break;
}
return 0;
}
static int sm5504_parse_irq(struct sm5502_muic_info *info, int irq_type)
{
switch (irq_type) {
case SM5504_IRQ_INT1_ATTACH:
info->irq_attach = true;
break;
case SM5504_IRQ_INT1_DETACH:
info->irq_detach = true;
break;
case SM5504_IRQ_INT1_CHG_DET:
case SM5504_IRQ_INT1_DCD_OUT:
case SM5504_IRQ_INT1_OVP_EVENT:
case SM5504_IRQ_INT1_CONNECT:
case SM5504_IRQ_INT1_ADC_CHG:
case SM5504_IRQ_INT2_RID_CHG:
case SM5504_IRQ_INT2_UVLO:
case SM5504_IRQ_INT2_POR:
case SM5504_IRQ_INT2_OVP_FET:
case SM5504_IRQ_INT2_OCP_LATCH:
case SM5504_IRQ_INT2_OCP_EVENT:
case SM5504_IRQ_INT2_OVP_OCP_EVENT:
default:
break;
}
return 0;
}
static irqreturn_t sm5502_muic_irq_handler(int irq, void *data)
{
struct sm5502_muic_info *info = data;
int i, irq_type = -1, ret;
for (i = 0; i < info->type->num_muic_irqs; i++)
if (irq == info->type->muic_irqs[i].virq)
irq_type = info->type->muic_irqs[i].irq;
ret = info->type->parse_irq(info, irq_type);
if (ret < 0) {
dev_warn(info->dev, "cannot handle is interrupt:%d\n",
irq_type);
return IRQ_HANDLED;
}
schedule_work(&info->irq_work);
return IRQ_HANDLED;
}
static void sm5502_muic_detect_cable_wq(struct work_struct *work)
{
struct sm5502_muic_info *info = container_of(to_delayed_work(work),
struct sm5502_muic_info, wq_detcable);
int ret;
/* Detect the attached cable, if any, and notify its state */
ret = sm5502_muic_cable_handler(info, true);
if (ret < 0)
dev_warn(info->dev, "failed to detect cable state\n");
}
static void sm5502_init_dev_type(struct sm5502_muic_info *info)
{
unsigned int reg_data, vendor_id, version_id;
int i, ret;
/* To verify I2C communication, print the version_id and vendor_id of the SM5502 */
ret = regmap_read(info->regmap, SM5502_REG_DEVICE_ID, ®_data);
if (ret) {
dev_err(info->dev,
"failed to read DEVICE_ID register: %d\n", ret);
return;
}
vendor_id = ((reg_data & SM5502_REG_DEVICE_ID_VENDOR_MASK) >>
SM5502_REG_DEVICE_ID_VENDOR_SHIFT);
version_id = ((reg_data & SM5502_REG_DEVICE_ID_VERSION_MASK) >>
SM5502_REG_DEVICE_ID_VERSION_SHIFT);
dev_info(info->dev, "Device type: version: 0x%x, vendor: 0x%x\n",
version_id, vendor_id);
/* Initialize the registers of the SM5502 device for bring-up */
for (i = 0; i < info->type->num_reg_data; i++) {
unsigned int val = 0;
if (!info->type->reg_data[i].invert)
val |= ~info->type->reg_data[i].val;
else
val = info->type->reg_data[i].val;
regmap_write(info->regmap, info->type->reg_data[i].reg, val);
}
}
static int sm5022_muic_i2c_probe(struct i2c_client *i2c)
{
struct device_node *np = i2c->dev.of_node;
struct sm5502_muic_info *info;
int i, ret, irq_flags;
if (!np)
return -EINVAL;
info = devm_kzalloc(&i2c->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
i2c_set_clientdata(i2c, info);
info->dev = &i2c->dev;
info->i2c = i2c;
info->irq = i2c->irq;
info->type = device_get_match_data(info->dev);
if (!info->type)
return -EINVAL;
if (!info->type->parse_irq) {
dev_err(info->dev, "parse_irq missing in struct sm5502_type\n");
return -EINVAL;
}
mutex_init(&info->mutex);
INIT_WORK(&info->irq_work, sm5502_muic_irq_work);
info->regmap = devm_regmap_init_i2c(i2c, &sm5502_muic_regmap_config);
if (IS_ERR(info->regmap)) {
ret = PTR_ERR(info->regmap);
dev_err(info->dev, "failed to allocate register map: %d\n",
ret);
return ret;
}
/* Support irq domain for SM5502 MUIC device */
irq_flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT | IRQF_SHARED;
ret = devm_regmap_add_irq_chip(info->dev, info->regmap, info->irq,
irq_flags, 0, info->type->irq_chip,
&info->irq_data);
if (ret != 0) {
dev_err(info->dev, "failed to request IRQ %d: %d\n",
info->irq, ret);
return ret;
}
for (i = 0; i < info->type->num_muic_irqs; i++) {
struct muic_irq *muic_irq = &info->type->muic_irqs[i];
int virq = 0;
virq = regmap_irq_get_virq(info->irq_data, muic_irq->irq);
if (virq <= 0)
return -EINVAL;
muic_irq->virq = virq;
ret = devm_request_threaded_irq(info->dev, virq, NULL,
sm5502_muic_irq_handler,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
muic_irq->name, info);
if (ret) {
dev_err(info->dev,
"failed: irq request (IRQ: %d, error :%d)\n",
muic_irq->irq, ret);
return ret;
}
}
/* Allocate extcon device */
info->edev = devm_extcon_dev_allocate(info->dev, sm5502_extcon_cable);
if (IS_ERR(info->edev)) {
dev_err(info->dev, "failed to allocate memory for extcon\n");
return -ENOMEM;
}
/* Register extcon device */
ret = devm_extcon_dev_register(info->dev, info->edev);
if (ret) {
dev_err(info->dev, "failed to register extcon device\n");
return ret;
}
/*
* Detect the accessory after the platform has finished initializing.
*
* - Use a delayed workqueue to detect the cable state and then
* notify it to the notifiee/platform through a uevent.
* Once the platform has finished booting, the extcon provider
* driver should notify the cable state to the upper layer.
*/
INIT_DELAYED_WORK(&info->wq_detcable, sm5502_muic_detect_cable_wq);
queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
msecs_to_jiffies(DELAY_MS_DEFAULT));
/* Initialize SM5502 device and print vendor id and version id */
sm5502_init_dev_type(info);
return 0;
}
static const struct sm5502_type sm5502_data = {
.muic_irqs = sm5502_muic_irqs,
.num_muic_irqs = ARRAY_SIZE(sm5502_muic_irqs),
.irq_chip = &sm5502_muic_irq_chip,
.reg_data = sm5502_reg_data,
.num_reg_data = ARRAY_SIZE(sm5502_reg_data),
.otg_dev_type1 = SM5502_REG_DEV_TYPE1_USB_OTG_MASK,
.parse_irq = sm5502_parse_irq,
};
static const struct sm5502_type sm5504_data = {
.muic_irqs = sm5504_muic_irqs,
.num_muic_irqs = ARRAY_SIZE(sm5504_muic_irqs),
.irq_chip = &sm5504_muic_irq_chip,
.reg_data = sm5504_reg_data,
.num_reg_data = ARRAY_SIZE(sm5504_reg_data),
.otg_dev_type1 = SM5504_REG_DEV_TYPE1_USB_OTG_MASK,
.parse_irq = sm5504_parse_irq,
};
static const struct of_device_id sm5502_dt_match[] = {
{ .compatible = "siliconmitus,sm5502-muic", .data = &sm5502_data },
{ .compatible = "siliconmitus,sm5504-muic", .data = &sm5504_data },
{ .compatible = "siliconmitus,sm5703-muic", .data = &sm5502_data },
{ },
};
MODULE_DEVICE_TABLE(of, sm5502_dt_match);
#ifdef CONFIG_PM_SLEEP
static int sm5502_muic_suspend(struct device *dev)
{
struct i2c_client *i2c = to_i2c_client(dev);
struct sm5502_muic_info *info = i2c_get_clientdata(i2c);
enable_irq_wake(info->irq);
return 0;
}
static int sm5502_muic_resume(struct device *dev)
{
struct i2c_client *i2c = to_i2c_client(dev);
struct sm5502_muic_info *info = i2c_get_clientdata(i2c);
disable_irq_wake(info->irq);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(sm5502_muic_pm_ops,
sm5502_muic_suspend, sm5502_muic_resume);
static const struct i2c_device_id sm5502_i2c_id[] = {
{ "sm5502", (kernel_ulong_t)&sm5502_data },
{ "sm5504", (kernel_ulong_t)&sm5504_data },
{ "sm5703-muic", (kernel_ulong_t)&sm5502_data },
{ }
};
MODULE_DEVICE_TABLE(i2c, sm5502_i2c_id);
static struct i2c_driver sm5502_muic_i2c_driver = {
.driver = {
.name = "sm5502",
.pm = &sm5502_muic_pm_ops,
.of_match_table = sm5502_dt_match,
},
.probe = sm5022_muic_i2c_probe,
.id_table = sm5502_i2c_id,
};
static int __init sm5502_muic_i2c_init(void)
{
return i2c_add_driver(&sm5502_muic_i2c_driver);
}
subsys_initcall(sm5502_muic_i2c_init);
MODULE_DESCRIPTION("Silicon Mitus SM5502 Extcon driver");
MODULE_AUTHOR("Chanwoo Choi <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/extcon/extcon-sm5502.c |
// SPDX-License-Identifier: GPL-2.0-only
/**
* extcon-qcom-spmi-misc.c - Qualcomm USB extcon driver to support USB ID
* and VBUS detection based on extcon-usb-gpio.c.
*
* Copyright (C) 2016 Linaro, Ltd.
* Stephen Boyd <[email protected]>
*/
#include <linux/devm-helpers.h>
#include <linux/extcon-provider.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#define USB_ID_DEBOUNCE_MS 5 /* ms */
struct qcom_usb_extcon_info {
struct extcon_dev *edev;
int id_irq;
int vbus_irq;
struct delayed_work wq_detcable;
unsigned long debounce_jiffies;
};
static const unsigned int qcom_usb_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
EXTCON_NONE,
};
static void qcom_usb_extcon_detect_cable(struct work_struct *work)
{
bool state = false;
int ret;
union extcon_property_value val;
struct qcom_usb_extcon_info *info = container_of(to_delayed_work(work),
struct qcom_usb_extcon_info,
wq_detcable);
if (info->id_irq > 0) {
/* check ID and update cable state */
ret = irq_get_irqchip_state(info->id_irq,
IRQCHIP_STATE_LINE_LEVEL, &state);
if (ret)
return;
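/*
 * The ID line is active-low: a low level means a host/OTG cable is
 * present, which is why EXTCON_USB_HOST is reported as !state below.
 */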
if (!state) {
val.intval = true;
extcon_set_property(info->edev, EXTCON_USB_HOST,
EXTCON_PROP_USB_SS, val);
}
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, !state);
}
if (info->vbus_irq > 0) {
/* check VBUS and update cable state */
ret = irq_get_irqchip_state(info->vbus_irq,
IRQCHIP_STATE_LINE_LEVEL, &state);
if (ret)
return;
if (state) {
val.intval = true;
extcon_set_property(info->edev, EXTCON_USB,
EXTCON_PROP_USB_SS, val);
}
extcon_set_state_sync(info->edev, EXTCON_USB, state);
}
}
static irqreturn_t qcom_usb_irq_handler(int irq, void *dev_id)
{
struct qcom_usb_extcon_info *info = dev_id;
queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
info->debounce_jiffies);
return IRQ_HANDLED;
}
static int qcom_usb_extcon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct qcom_usb_extcon_info *info;
int ret;
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->edev = devm_extcon_dev_allocate(dev, qcom_usb_extcon_cable);
if (IS_ERR(info->edev)) {
dev_err(dev, "failed to allocate extcon device\n");
return -ENOMEM;
}
ret = devm_extcon_dev_register(dev, info->edev);
if (ret < 0) {
dev_err(dev, "failed to register extcon device\n");
return ret;
}
ret = extcon_set_property_capability(info->edev,
EXTCON_USB, EXTCON_PROP_USB_SS);
ret |= extcon_set_property_capability(info->edev,
EXTCON_USB_HOST, EXTCON_PROP_USB_SS);
if (ret) {
dev_err(dev, "failed to register extcon props rc=%d\n",
ret);
return ret;
}
info->debounce_jiffies = msecs_to_jiffies(USB_ID_DEBOUNCE_MS);
ret = devm_delayed_work_autocancel(dev, &info->wq_detcable,
qcom_usb_extcon_detect_cable);
if (ret)
return ret;
info->id_irq = platform_get_irq_byname_optional(pdev, "usb_id");
if (info->id_irq > 0) {
ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
qcom_usb_irq_handler,
IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
pdev->name, info);
if (ret < 0) {
dev_err(dev, "failed to request handler for ID IRQ\n");
return ret;
}
}
info->vbus_irq = platform_get_irq_byname_optional(pdev, "usb_vbus");
if (info->vbus_irq > 0) {
ret = devm_request_threaded_irq(dev, info->vbus_irq, NULL,
qcom_usb_irq_handler,
IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
pdev->name, info);
if (ret < 0) {
dev_err(dev, "failed to request handler for VBUS IRQ\n");
return ret;
}
}
if (info->id_irq < 0 && info->vbus_irq < 0) {
dev_err(dev, "ID and VBUS IRQ not found\n");
return -EINVAL;
}
platform_set_drvdata(pdev, info);
device_init_wakeup(dev, 1);
/* Perform initial detection */
qcom_usb_extcon_detect_cable(&info->wq_detcable.work);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int qcom_usb_extcon_suspend(struct device *dev)
{
struct qcom_usb_extcon_info *info = dev_get_drvdata(dev);
int ret = 0;
if (device_may_wakeup(dev)) {
if (info->id_irq > 0)
ret = enable_irq_wake(info->id_irq);
if (info->vbus_irq > 0)
ret = enable_irq_wake(info->vbus_irq);
}
return ret;
}
static int qcom_usb_extcon_resume(struct device *dev)
{
struct qcom_usb_extcon_info *info = dev_get_drvdata(dev);
int ret = 0;
if (device_may_wakeup(dev)) {
if (info->id_irq > 0)
ret = disable_irq_wake(info->id_irq);
if (info->vbus_irq > 0)
ret = disable_irq_wake(info->vbus_irq);
}
return ret;
}
#endif
static SIMPLE_DEV_PM_OPS(qcom_usb_extcon_pm_ops,
qcom_usb_extcon_suspend, qcom_usb_extcon_resume);
static const struct of_device_id qcom_usb_extcon_dt_match[] = {
{ .compatible = "qcom,pm8941-misc", },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_usb_extcon_dt_match);
static struct platform_driver qcom_usb_extcon_driver = {
.probe = qcom_usb_extcon_probe,
.driver = {
.name = "extcon-pm8941-misc",
.pm = &qcom_usb_extcon_pm_ops,
.of_match_table = qcom_usb_extcon_dt_match,
},
};
module_platform_driver(qcom_usb_extcon_driver);
MODULE_DESCRIPTION("QCOM USB ID extcon driver");
MODULE_AUTHOR("Stephen Boyd <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/extcon/extcon-qcom-spmi-misc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/extcon/extcon-usb-gpio.c - USB GPIO extcon driver
*
* Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com
* Author: Roger Quadros <[email protected]>
*/
#include <linux/extcon-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/pinctrl/consumer.h>
#include <linux/mod_devicetable.h>
#define USB_GPIO_DEBOUNCE_MS 20 /* ms */
struct usb_extcon_info {
struct device *dev;
struct extcon_dev *edev;
struct gpio_desc *id_gpiod;
struct gpio_desc *vbus_gpiod;
int id_irq;
int vbus_irq;
unsigned long debounce_jiffies;
struct delayed_work wq_detcable;
};
static const unsigned int usb_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
EXTCON_NONE,
};
/*
* "USB" = VBUS and "USB-HOST" = !ID, so we have:
* Both "USB" and "USB-HOST" can't be set as active at the
* same time so if "USB-HOST" is active (i.e. ID is 0) we keep "USB" inactive
* even if VBUS is on.
*
* State | ID | VBUS
* ----------------------------------------
* [1] USB | H | H
* [2] none | H | L
* [3] USB-HOST | L | H
* [4] USB-HOST | L | L
*
* In case we have only one of these signals:
* - VBUS only - we want to distinguish between [1] and [2], so ID is always 1.
* - ID only - we want to distinguish between [1] and [4], so VBUS = ID.
*/
static void usb_extcon_detect_cable(struct work_struct *work)
{
int id, vbus;
struct usb_extcon_info *info = container_of(to_delayed_work(work),
struct usb_extcon_info,
wq_detcable);
/* check ID and VBUS and update cable state */
id = info->id_gpiod ?
gpiod_get_value_cansleep(info->id_gpiod) : 1;
vbus = info->vbus_gpiod ?
gpiod_get_value_cansleep(info->vbus_gpiod) : id;
/* at first we clean states which are no longer active */
if (id)
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, false);
if (!vbus)
extcon_set_state_sync(info->edev, EXTCON_USB, false);
if (!id) {
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, true);
} else {
if (vbus)
extcon_set_state_sync(info->edev, EXTCON_USB, true);
}
}
static irqreturn_t usb_irq_handler(int irq, void *dev_id)
{
struct usb_extcon_info *info = dev_id;
queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
info->debounce_jiffies);
return IRQ_HANDLED;
}
static int usb_extcon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct usb_extcon_info *info;
int ret;
if (!np)
return -EINVAL;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->dev = dev;
info->id_gpiod = devm_gpiod_get_optional(&pdev->dev, "id", GPIOD_IN);
info->vbus_gpiod = devm_gpiod_get_optional(&pdev->dev, "vbus",
GPIOD_IN);
if (!info->id_gpiod && !info->vbus_gpiod) {
dev_err(dev, "failed to get gpios\n");
return -ENODEV;
}
if (IS_ERR(info->id_gpiod))
return PTR_ERR(info->id_gpiod);
if (IS_ERR(info->vbus_gpiod))
return PTR_ERR(info->vbus_gpiod);
info->edev = devm_extcon_dev_allocate(dev, usb_extcon_cable);
if (IS_ERR(info->edev)) {
dev_err(dev, "failed to allocate extcon device\n");
return -ENOMEM;
}
ret = devm_extcon_dev_register(dev, info->edev);
if (ret < 0) {
dev_err(dev, "failed to register extcon device\n");
return ret;
}
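/*
 * Prefer hardware debouncing in the GPIO controller; if
 * gpiod_set_debounce() is not supported, fall back to delaying the
 * detection work by the same interval in software.
 */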
if (info->id_gpiod)
ret = gpiod_set_debounce(info->id_gpiod,
USB_GPIO_DEBOUNCE_MS * 1000);
if (!ret && info->vbus_gpiod)
ret = gpiod_set_debounce(info->vbus_gpiod,
USB_GPIO_DEBOUNCE_MS * 1000);
if (ret < 0)
info->debounce_jiffies = msecs_to_jiffies(USB_GPIO_DEBOUNCE_MS);
INIT_DELAYED_WORK(&info->wq_detcable, usb_extcon_detect_cable);
if (info->id_gpiod) {
info->id_irq = gpiod_to_irq(info->id_gpiod);
if (info->id_irq < 0) {
dev_err(dev, "failed to get ID IRQ\n");
return info->id_irq;
}
ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
usb_irq_handler,
IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
pdev->name, info);
if (ret < 0) {
dev_err(dev, "failed to request handler for ID IRQ\n");
return ret;
}
}
if (info->vbus_gpiod) {
info->vbus_irq = gpiod_to_irq(info->vbus_gpiod);
if (info->vbus_irq < 0) {
dev_err(dev, "failed to get VBUS IRQ\n");
return info->vbus_irq;
}
ret = devm_request_threaded_irq(dev, info->vbus_irq, NULL,
usb_irq_handler,
IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
pdev->name, info);
if (ret < 0) {
dev_err(dev, "failed to request handler for VBUS IRQ\n");
return ret;
}
}
platform_set_drvdata(pdev, info);
device_set_wakeup_capable(&pdev->dev, true);
/* Perform initial detection */
usb_extcon_detect_cable(&info->wq_detcable.work);
return 0;
}
static int usb_extcon_remove(struct platform_device *pdev)
{
struct usb_extcon_info *info = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&info->wq_detcable);
device_init_wakeup(&pdev->dev, false);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int usb_extcon_suspend(struct device *dev)
{
struct usb_extcon_info *info = dev_get_drvdata(dev);
int ret = 0;
if (device_may_wakeup(dev)) {
if (info->id_gpiod) {
ret = enable_irq_wake(info->id_irq);
if (ret)
return ret;
}
if (info->vbus_gpiod) {
ret = enable_irq_wake(info->vbus_irq);
if (ret) {
if (info->id_gpiod)
disable_irq_wake(info->id_irq);
return ret;
}
}
}
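/*
 * Pins are only parked in their sleep state when the device is not a
 * wakeup source; otherwise the ID/VBUS interrupts must remain armed
 * across suspend.
 */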
if (!device_may_wakeup(dev))
pinctrl_pm_select_sleep_state(dev);
return ret;
}
static int usb_extcon_resume(struct device *dev)
{
struct usb_extcon_info *info = dev_get_drvdata(dev);
int ret = 0;
if (!device_may_wakeup(dev))
pinctrl_pm_select_default_state(dev);
if (device_may_wakeup(dev)) {
if (info->id_gpiod) {
ret = disable_irq_wake(info->id_irq);
if (ret)
return ret;
}
if (info->vbus_gpiod) {
ret = disable_irq_wake(info->vbus_irq);
if (ret) {
if (info->id_gpiod)
enable_irq_wake(info->id_irq);
return ret;
}
}
}
queue_delayed_work(system_power_efficient_wq,
&info->wq_detcable, 0);
return ret;
}
#endif
static SIMPLE_DEV_PM_OPS(usb_extcon_pm_ops,
usb_extcon_suspend, usb_extcon_resume);
static const struct of_device_id usb_extcon_dt_match[] = {
{ .compatible = "linux,extcon-usb-gpio", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, usb_extcon_dt_match);
static const struct platform_device_id usb_extcon_platform_ids[] = {
{ .name = "extcon-usb-gpio", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, usb_extcon_platform_ids);
static struct platform_driver usb_extcon_driver = {
.probe = usb_extcon_probe,
.remove = usb_extcon_remove,
.driver = {
.name = "extcon-usb-gpio",
.pm = &usb_extcon_pm_ops,
.of_match_table = usb_extcon_dt_match,
},
.id_table = usb_extcon_platform_ids,
};
module_platform_driver(usb_extcon_driver);
MODULE_AUTHOR("Roger Quadros <[email protected]>");
MODULE_DESCRIPTION("USB GPIO extcon driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/extcon/extcon-usb-gpio.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Palmas USB transceiver driver
*
* Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com
* Author: Graeme Gregory <[email protected]>
* Author: Kishon Vijay Abraham I <[email protected]>
* Based on twl6030_usb.c
* Author: Hema HK <[email protected]>
*/
#include <linux/devm-helpers.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/mfd/palmas.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/gpio/consumer.h>
#include <linux/workqueue.h>
#define USB_GPIO_DEBOUNCE_MS 20 /* ms */
static const unsigned int palmas_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
EXTCON_NONE,
};
static void palmas_usb_wakeup(struct palmas *palmas, int enable)
{
if (enable)
palmas_write(palmas, PALMAS_USB_OTG_BASE, PALMAS_USB_WAKEUP,
PALMAS_USB_WAKEUP_ID_WK_UP_COMP);
else
palmas_write(palmas, PALMAS_USB_OTG_BASE, PALMAS_USB_WAKEUP, 0);
}
static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
{
struct palmas_usb *palmas_usb = _palmas_usb;
struct extcon_dev *edev = palmas_usb->edev;
unsigned int vbus_line_state;
palmas_read(palmas_usb->palmas, PALMAS_INTERRUPT_BASE,
PALMAS_INT3_LINE_STATE, &vbus_line_state);
if (vbus_line_state & PALMAS_INT3_LINE_STATE_VBUS) {
if (palmas_usb->linkstat != PALMAS_USB_STATE_VBUS) {
palmas_usb->linkstat = PALMAS_USB_STATE_VBUS;
extcon_set_state_sync(edev, EXTCON_USB, true);
dev_dbg(palmas_usb->dev, "USB cable is attached\n");
} else {
dev_dbg(palmas_usb->dev,
"Spurious connect event detected\n");
}
} else if (!(vbus_line_state & PALMAS_INT3_LINE_STATE_VBUS)) {
if (palmas_usb->linkstat == PALMAS_USB_STATE_VBUS) {
palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
extcon_set_state_sync(edev, EXTCON_USB, false);
dev_dbg(palmas_usb->dev, "USB cable is detached\n");
} else {
dev_dbg(palmas_usb->dev,
"Spurious disconnect event detected\n");
}
}
return IRQ_HANDLED;
}
static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
{
unsigned int set, id_src;
struct palmas_usb *palmas_usb = _palmas_usb;
struct extcon_dev *edev = palmas_usb->edev;
palmas_read(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
PALMAS_USB_ID_INT_LATCH_SET, &set);
palmas_read(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
PALMAS_USB_ID_INT_SRC, &id_src);
if ((set & PALMAS_USB_ID_INT_SRC_ID_GND) &&
(id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) {
palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
PALMAS_USB_ID_INT_LATCH_CLR,
PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND);
palmas_usb->linkstat = PALMAS_USB_STATE_ID;
extcon_set_state_sync(edev, EXTCON_USB_HOST, true);
dev_dbg(palmas_usb->dev, "USB-HOST cable is attached\n");
} else if ((set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) &&
(id_src & PALMAS_USB_ID_INT_SRC_ID_FLOAT)) {
palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
PALMAS_USB_ID_INT_LATCH_CLR,
PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT);
palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
extcon_set_state_sync(edev, EXTCON_USB_HOST, false);
dev_dbg(palmas_usb->dev, "USB-HOST cable is detached\n");
} else if ((palmas_usb->linkstat == PALMAS_USB_STATE_ID) &&
(!(set & PALMAS_USB_ID_INT_SRC_ID_GND))) {
palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
extcon_set_state_sync(edev, EXTCON_USB_HOST, false);
dev_dbg(palmas_usb->dev, "USB-HOST cable is detached\n");
} else if ((palmas_usb->linkstat == PALMAS_USB_STATE_DISCONNECT) &&
(id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) {
palmas_usb->linkstat = PALMAS_USB_STATE_ID;
extcon_set_state_sync(edev, EXTCON_USB_HOST, true);
dev_dbg(palmas_usb->dev, "USB-HOST cable is attached\n");
}
return IRQ_HANDLED;
}
static void palmas_gpio_id_detect(struct work_struct *work)
{
int id;
struct palmas_usb *palmas_usb = container_of(to_delayed_work(work),
struct palmas_usb,
wq_detectid);
struct extcon_dev *edev = palmas_usb->edev;
if (!palmas_usb->id_gpiod)
return;
id = gpiod_get_value_cansleep(palmas_usb->id_gpiod);
if (id) {
extcon_set_state_sync(edev, EXTCON_USB_HOST, false);
dev_dbg(palmas_usb->dev, "USB-HOST cable is detached\n");
} else {
extcon_set_state_sync(edev, EXTCON_USB_HOST, true);
dev_dbg(palmas_usb->dev, "USB-HOST cable is attached\n");
}
}
static irqreturn_t palmas_gpio_id_irq_handler(int irq, void *_palmas_usb)
{
struct palmas_usb *palmas_usb = _palmas_usb;
queue_delayed_work(system_power_efficient_wq, &palmas_usb->wq_detectid,
palmas_usb->sw_debounce_jiffies);
return IRQ_HANDLED;
}
static void palmas_enable_irq(struct palmas_usb *palmas_usb)
{
palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
PALMAS_USB_VBUS_CTRL_SET,
PALMAS_USB_VBUS_CTRL_SET_VBUS_ACT_COMP);
if (palmas_usb->enable_id_detection) {
palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
PALMAS_USB_ID_CTRL_SET,
PALMAS_USB_ID_CTRL_SET_ID_ACT_COMP);
palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
PALMAS_USB_ID_INT_EN_HI_SET,
PALMAS_USB_ID_INT_EN_HI_SET_ID_GND |
PALMAS_USB_ID_INT_EN_HI_SET_ID_FLOAT);
}
if (palmas_usb->enable_vbus_detection)
palmas_vbus_irq_handler(palmas_usb->vbus_irq, palmas_usb);
/* cold plug for host mode needs this delay */
if (palmas_usb->enable_id_detection) {
msleep(30);
palmas_id_irq_handler(palmas_usb->id_irq, palmas_usb);
}
}
static int palmas_usb_probe(struct platform_device *pdev)
{
struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
struct palmas_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *node = pdev->dev.of_node;
struct palmas_usb *palmas_usb;
int status;
if (!palmas) {
dev_err(&pdev->dev, "failed to get valid parent\n");
return -EINVAL;
}
palmas_usb = devm_kzalloc(&pdev->dev, sizeof(*palmas_usb), GFP_KERNEL);
if (!palmas_usb)
return -ENOMEM;
if (node && !pdata) {
palmas_usb->wakeup = of_property_read_bool(node, "ti,wakeup");
palmas_usb->enable_id_detection = of_property_read_bool(node,
"ti,enable-id-detection");
palmas_usb->enable_vbus_detection = of_property_read_bool(node,
"ti,enable-vbus-detection");
} else {
palmas_usb->wakeup = true;
palmas_usb->enable_id_detection = true;
palmas_usb->enable_vbus_detection = true;
if (pdata)
palmas_usb->wakeup = pdata->wakeup;
}
palmas_usb->id_gpiod = devm_gpiod_get_optional(&pdev->dev, "id",
GPIOD_IN);
if (IS_ERR(palmas_usb->id_gpiod))
return dev_err_probe(&pdev->dev, PTR_ERR(palmas_usb->id_gpiod),
"failed to get id gpio\n");
palmas_usb->vbus_gpiod = devm_gpiod_get_optional(&pdev->dev, "vbus",
GPIOD_IN);
if (IS_ERR(palmas_usb->vbus_gpiod))
return dev_err_probe(&pdev->dev, PTR_ERR(palmas_usb->vbus_gpiod),
"failed to get id gpio\n");
if (palmas_usb->enable_id_detection && palmas_usb->id_gpiod) {
palmas_usb->enable_id_detection = false;
palmas_usb->enable_gpio_id_detection = true;
}
if (palmas_usb->enable_vbus_detection && palmas_usb->vbus_gpiod) {
palmas_usb->enable_vbus_detection = false;
palmas_usb->enable_gpio_vbus_detection = true;
}
if (palmas_usb->enable_gpio_id_detection) {
u32 debounce;
if (of_property_read_u32(node, "debounce-delay-ms", &debounce))
debounce = USB_GPIO_DEBOUNCE_MS;
status = gpiod_set_debounce(palmas_usb->id_gpiod,
debounce * 1000);
if (status < 0)
palmas_usb->sw_debounce_jiffies = msecs_to_jiffies(debounce);
}
status = devm_delayed_work_autocancel(&pdev->dev,
&palmas_usb->wq_detectid,
palmas_gpio_id_detect);
if (status)
return status;
palmas->usb = palmas_usb;
palmas_usb->palmas = palmas;
palmas_usb->dev = &pdev->dev;
palmas_usb_wakeup(palmas, palmas_usb->wakeup);
platform_set_drvdata(pdev, palmas_usb);
palmas_usb->edev = devm_extcon_dev_allocate(&pdev->dev,
palmas_extcon_cable);
if (IS_ERR(palmas_usb->edev)) {
dev_err(&pdev->dev, "failed to allocate extcon device\n");
return -ENOMEM;
}
status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev);
if (status) {
dev_err(&pdev->dev, "failed to register extcon device\n");
return status;
}
if (palmas_usb->enable_id_detection) {
palmas_usb->id_otg_irq = regmap_irq_get_virq(palmas->irq_data,
PALMAS_ID_OTG_IRQ);
palmas_usb->id_irq = regmap_irq_get_virq(palmas->irq_data,
PALMAS_ID_IRQ);
status = devm_request_threaded_irq(palmas_usb->dev,
palmas_usb->id_irq,
NULL, palmas_id_irq_handler,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
IRQF_ONESHOT,
"palmas_usb_id", palmas_usb);
if (status < 0) {
dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
palmas_usb->id_irq, status);
return status;
}
} else if (palmas_usb->enable_gpio_id_detection) {
palmas_usb->gpio_id_irq = gpiod_to_irq(palmas_usb->id_gpiod);
if (palmas_usb->gpio_id_irq < 0) {
dev_err(&pdev->dev, "failed to get id irq\n");
return palmas_usb->gpio_id_irq;
}
status = devm_request_threaded_irq(&pdev->dev,
palmas_usb->gpio_id_irq,
NULL,
palmas_gpio_id_irq_handler,
IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING |
IRQF_ONESHOT,
"palmas_usb_id",
palmas_usb);
if (status < 0) {
dev_err(&pdev->dev,
"failed to request handler for id irq\n");
return status;
}
}
if (palmas_usb->enable_vbus_detection) {
palmas_usb->vbus_otg_irq = regmap_irq_get_virq(palmas->irq_data,
PALMAS_VBUS_OTG_IRQ);
palmas_usb->vbus_irq = regmap_irq_get_virq(palmas->irq_data,
PALMAS_VBUS_IRQ);
status = devm_request_threaded_irq(palmas_usb->dev,
palmas_usb->vbus_irq, NULL,
palmas_vbus_irq_handler,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
IRQF_ONESHOT,
"palmas_usb_vbus", palmas_usb);
if (status < 0) {
dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
palmas_usb->vbus_irq, status);
return status;
}
} else if (palmas_usb->enable_gpio_vbus_detection) {
/* remux GPIO_1 as VBUSDET */
status = palmas_update_bits(palmas,
PALMAS_PU_PD_OD_BASE,
PALMAS_PRIMARY_SECONDARY_PAD1,
PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_MASK,
(1 << PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_SHIFT));
if (status < 0) {
dev_err(&pdev->dev, "can't remux GPIO1\n");
return status;
}
palmas_usb->vbus_otg_irq = regmap_irq_get_virq(palmas->irq_data,
PALMAS_VBUS_OTG_IRQ);
palmas_usb->gpio_vbus_irq = gpiod_to_irq(palmas_usb->vbus_gpiod);
if (palmas_usb->gpio_vbus_irq < 0) {
dev_err(&pdev->dev, "failed to get vbus irq\n");
return palmas_usb->gpio_vbus_irq;
}
status = devm_request_threaded_irq(&pdev->dev,
palmas_usb->gpio_vbus_irq,
NULL,
palmas_vbus_irq_handler,
IRQF_TRIGGER_FALLING |
IRQF_TRIGGER_RISING |
IRQF_ONESHOT,
"palmas_usb_vbus",
palmas_usb);
if (status < 0) {
dev_err(&pdev->dev,
"failed to request handler for vbus irq\n");
return status;
}
}
palmas_enable_irq(palmas_usb);
/* perform initial detection */
if (palmas_usb->enable_gpio_vbus_detection)
palmas_vbus_irq_handler(palmas_usb->gpio_vbus_irq, palmas_usb);
palmas_gpio_id_detect(&palmas_usb->wq_detectid.work);
device_set_wakeup_capable(&pdev->dev, true);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int palmas_usb_suspend(struct device *dev)
{
struct palmas_usb *palmas_usb = dev_get_drvdata(dev);
if (device_may_wakeup(dev)) {
if (palmas_usb->enable_vbus_detection)
enable_irq_wake(palmas_usb->vbus_irq);
if (palmas_usb->enable_gpio_vbus_detection)
enable_irq_wake(palmas_usb->gpio_vbus_irq);
if (palmas_usb->enable_id_detection)
enable_irq_wake(palmas_usb->id_irq);
if (palmas_usb->enable_gpio_id_detection)
enable_irq_wake(palmas_usb->gpio_id_irq);
}
return 0;
}
static int palmas_usb_resume(struct device *dev)
{
struct palmas_usb *palmas_usb = dev_get_drvdata(dev);
if (device_may_wakeup(dev)) {
if (palmas_usb->enable_vbus_detection)
disable_irq_wake(palmas_usb->vbus_irq);
if (palmas_usb->enable_gpio_vbus_detection)
disable_irq_wake(palmas_usb->gpio_vbus_irq);
if (palmas_usb->enable_id_detection)
disable_irq_wake(palmas_usb->id_irq);
if (palmas_usb->enable_gpio_id_detection)
disable_irq_wake(palmas_usb->gpio_id_irq);
}
/* check if GPIO states changed while suspend/resume */
if (palmas_usb->enable_gpio_vbus_detection)
palmas_vbus_irq_handler(palmas_usb->gpio_vbus_irq, palmas_usb);
palmas_gpio_id_detect(&palmas_usb->wq_detectid.work);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(palmas_pm_ops, palmas_usb_suspend, palmas_usb_resume);
static const struct of_device_id of_palmas_match_tbl[] = {
{ .compatible = "ti,palmas-usb", },
{ .compatible = "ti,palmas-usb-vid", },
{ .compatible = "ti,twl6035-usb", },
{ .compatible = "ti,twl6035-usb-vid", },
{ /* end */ }
};
static struct platform_driver palmas_usb_driver = {
.probe = palmas_usb_probe,
.driver = {
.name = "palmas-usb",
.of_match_table = of_palmas_match_tbl,
.pm = &palmas_pm_ops,
},
};
module_platform_driver(palmas_usb_driver);
MODULE_ALIAS("platform:palmas-usb");
MODULE_AUTHOR("Graeme Gregory <[email protected]>");
MODULE_DESCRIPTION("Palmas USB transceiver driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(of, of_palmas_match_tbl);
| linux-master | drivers/extcon/extcon-palmas.c |
// SPDX-License-Identifier: GPL-2.0+
//
// extcon-max77843.c - Maxim MAX77843 extcon driver to support
// MUIC(Micro USB Interface Controller)
//
// Copyright (C) 2015 Samsung Electronics
// Author: Jaewon Kim <[email protected]>
#include <linux/extcon-provider.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77843-private.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#define DELAY_MS_DEFAULT 15000 /* unit: millisecond */
enum max77843_muic_status {
MAX77843_MUIC_STATUS1 = 0,
MAX77843_MUIC_STATUS2,
MAX77843_MUIC_STATUS3,
MAX77843_MUIC_STATUS_NUM,
};
struct max77843_muic_info {
struct device *dev;
struct max77693_dev *max77843;
struct extcon_dev *edev;
struct mutex mutex;
struct work_struct irq_work;
struct delayed_work wq_detcable;
u8 status[MAX77843_MUIC_STATUS_NUM];
int prev_cable_type;
int prev_chg_type;
int prev_gnd_type;
bool irq_adc;
bool irq_chg;
};
enum max77843_muic_cable_group {
MAX77843_CABLE_GROUP_ADC = 0,
MAX77843_CABLE_GROUP_ADC_GND,
MAX77843_CABLE_GROUP_CHG,
};
enum max77843_muic_adc_debounce_time {
MAX77843_DEBOUNCE_TIME_5MS = 0,
MAX77843_DEBOUNCE_TIME_10MS,
MAX77843_DEBOUNCE_TIME_25MS,
MAX77843_DEBOUNCE_TIME_38_62MS,
};
/* Define accessory cable type */
enum max77843_muic_accessory_type {
MAX77843_MUIC_ADC_GROUND = 0,
MAX77843_MUIC_ADC_SEND_END_BUTTON,
MAX77843_MUIC_ADC_REMOTE_S1_BUTTON,
MAX77843_MUIC_ADC_REMOTE_S2_BUTTON,
MAX77843_MUIC_ADC_REMOTE_S3_BUTTON,
MAX77843_MUIC_ADC_REMOTE_S4_BUTTON,
MAX77843_MUIC_ADC_REMOTE_S5_BUTTON,
MAX77843_MUIC_ADC_REMOTE_S6_BUTTON,
MAX77843_MUIC_ADC_REMOTE_S7_BUTTON,
MAX77843_MUIC_ADC_REMOTE_S8_BUTTON,
MAX77843_MUIC_ADC_REMOTE_S9_BUTTON,
MAX77843_MUIC_ADC_REMOTE_S10_BUTTON,
MAX77843_MUIC_ADC_REMOTE_S11_BUTTON,
MAX77843_MUIC_ADC_REMOTE_S12_BUTTON,
MAX77843_MUIC_ADC_RESERVED_ACC_1,
MAX77843_MUIC_ADC_RESERVED_ACC_2,
MAX77843_MUIC_ADC_RESERVED_ACC_3, /* SmartDock */
MAX77843_MUIC_ADC_RESERVED_ACC_4,
MAX77843_MUIC_ADC_RESERVED_ACC_5,
MAX77843_MUIC_ADC_AUDIO_DEVICE_TYPE2,
MAX77843_MUIC_ADC_PHONE_POWERED_DEV,
MAX77843_MUIC_ADC_TTY_CONVERTER,
MAX77843_MUIC_ADC_UART_CABLE,
MAX77843_MUIC_ADC_CEA936A_TYPE1_CHG,
MAX77843_MUIC_ADC_FACTORY_MODE_USB_OFF,
MAX77843_MUIC_ADC_FACTORY_MODE_USB_ON,
MAX77843_MUIC_ADC_AV_CABLE_NOLOAD,
MAX77843_MUIC_ADC_CEA936A_TYPE2_CHG,
MAX77843_MUIC_ADC_FACTORY_MODE_UART_OFF,
MAX77843_MUIC_ADC_FACTORY_MODE_UART_ON,
MAX77843_MUIC_ADC_AUDIO_DEVICE_TYPE1,
MAX77843_MUIC_ADC_OPEN,
/*
* The below accessories should check
* not only ADC value but also ADC1K and VBVolt value.
*/
/* Offset|ADC1K|VBVolt| */
MAX77843_MUIC_GND_USB_HOST = 0x100, /* 0x1| 0| 0| */
MAX77843_MUIC_GND_USB_HOST_VB = 0x101, /* 0x1| 0| 1| */
MAX77843_MUIC_GND_MHL = 0x102, /* 0x1| 1| 0| */
MAX77843_MUIC_GND_MHL_VB = 0x103, /* 0x1| 1| 1| */
};
/* Define charger cable type */
enum max77843_muic_charger_type {
MAX77843_MUIC_CHG_NONE = 0,
MAX77843_MUIC_CHG_USB,
MAX77843_MUIC_CHG_DOWNSTREAM,
MAX77843_MUIC_CHG_DEDICATED,
MAX77843_MUIC_CHG_SPECIAL_500MA,
MAX77843_MUIC_CHG_SPECIAL_1A,
MAX77843_MUIC_CHG_SPECIAL_BIAS,
MAX77843_MUIC_CHG_RESERVED,
MAX77843_MUIC_CHG_GND,
MAX77843_MUIC_CHG_DOCK,
};
static const unsigned int max77843_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_DCP,
EXTCON_CHG_USB_CDP,
EXTCON_CHG_USB_FAST,
EXTCON_CHG_USB_SLOW,
EXTCON_DISP_MHL,
EXTCON_DOCK,
EXTCON_JIG,
EXTCON_NONE,
};
struct max77843_muic_irq {
unsigned int irq;
const char *name;
unsigned int virq;
};
static struct max77843_muic_irq max77843_muic_irqs[] = {
{ MAX77843_MUIC_IRQ_INT1_ADC, "MUIC-ADC" },
{ MAX77843_MUIC_IRQ_INT1_ADCERROR, "MUIC-ADC_ERROR" },
{ MAX77843_MUIC_IRQ_INT1_ADC1K, "MUIC-ADC1K" },
{ MAX77843_MUIC_IRQ_INT2_CHGTYP, "MUIC-CHGTYP" },
{ MAX77843_MUIC_IRQ_INT2_CHGDETRUN, "MUIC-CHGDETRUN" },
{ MAX77843_MUIC_IRQ_INT2_DCDTMR, "MUIC-DCDTMR" },
{ MAX77843_MUIC_IRQ_INT2_DXOVP, "MUIC-DXOVP" },
{ MAX77843_MUIC_IRQ_INT2_VBVOLT, "MUIC-VBVOLT" },
{ MAX77843_MUIC_IRQ_INT3_VBADC, "MUIC-VBADC" },
{ MAX77843_MUIC_IRQ_INT3_VDNMON, "MUIC-VDNMON" },
{ MAX77843_MUIC_IRQ_INT3_DNRES, "MUIC-DNRES" },
{ MAX77843_MUIC_IRQ_INT3_MPNACK, "MUIC-MPNACK"},
{ MAX77843_MUIC_IRQ_INT3_MRXBUFOW, "MUIC-MRXBUFOW"},
{ MAX77843_MUIC_IRQ_INT3_MRXTRF, "MUIC-MRXTRF"},
{ MAX77843_MUIC_IRQ_INT3_MRXPERR, "MUIC-MRXPERR"},
{ MAX77843_MUIC_IRQ_INT3_MRXRDY, "MUIC-MRXRDY"},
};
static const struct regmap_config max77843_muic_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = MAX77843_MUIC_REG_END,
};
static const struct regmap_irq max77843_muic_irq[] = {
/* INT1 interrupt */
{ .reg_offset = 0, .mask = MAX77843_MUIC_ADC, },
{ .reg_offset = 0, .mask = MAX77843_MUIC_ADCERROR, },
{ .reg_offset = 0, .mask = MAX77843_MUIC_ADC1K, },
/* INT2 interrupt */
{ .reg_offset = 1, .mask = MAX77843_MUIC_CHGTYP, },
{ .reg_offset = 1, .mask = MAX77843_MUIC_CHGDETRUN, },
{ .reg_offset = 1, .mask = MAX77843_MUIC_DCDTMR, },
{ .reg_offset = 1, .mask = MAX77843_MUIC_DXOVP, },
{ .reg_offset = 1, .mask = MAX77843_MUIC_VBVOLT, },
/* INT3 interrupt */
{ .reg_offset = 2, .mask = MAX77843_MUIC_VBADC, },
{ .reg_offset = 2, .mask = MAX77843_MUIC_VDNMON, },
{ .reg_offset = 2, .mask = MAX77843_MUIC_DNRES, },
{ .reg_offset = 2, .mask = MAX77843_MUIC_MPNACK, },
{ .reg_offset = 2, .mask = MAX77843_MUIC_MRXBUFOW, },
{ .reg_offset = 2, .mask = MAX77843_MUIC_MRXTRF, },
{ .reg_offset = 2, .mask = MAX77843_MUIC_MRXPERR, },
{ .reg_offset = 2, .mask = MAX77843_MUIC_MRXRDY, },
};
static const struct regmap_irq_chip max77843_muic_irq_chip = {
.name = "max77843-muic",
.status_base = MAX77843_MUIC_REG_INT1,
.unmask_base = MAX77843_MUIC_REG_INTMASK1,
.num_regs = 3,
.irqs = max77843_muic_irq,
.num_irqs = ARRAY_SIZE(max77843_muic_irq),
};
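/*
 * num_regs = 3 covers the three consecutive INT1..INT3 status registers
 * (and their INTMASK1..INTMASK3 counterparts), which back the sixteen
 * MUIC interrupts listed in max77843_muic_irq[] above.
 */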
static int max77843_muic_set_path(struct max77843_muic_info *info,
u8 val, bool attached, bool nobccomp)
{
struct max77693_dev *max77843 = info->max77843;
int ret = 0;
unsigned int ctrl1, ctrl2;
if (attached)
ctrl1 = val;
else
ctrl1 = MAX77843_MUIC_CONTROL1_SW_OPEN;
if (nobccomp) {
/* Disable BC1.2 protocol and force manual switch control */
ctrl1 |= MAX77843_MUIC_CONTROL1_NOBCCOMP_MASK;
}
ret = regmap_update_bits(max77843->regmap_muic,
MAX77843_MUIC_REG_CONTROL1,
MAX77843_MUIC_CONTROL1_COM_SW |
MAX77843_MUIC_CONTROL1_NOBCCOMP_MASK,
ctrl1);
if (ret < 0) {
dev_err(info->dev, "Cannot switch MUIC port\n");
return ret;
}
if (attached)
ctrl2 = MAX77843_MUIC_CONTROL2_CPEN_MASK;
else
ctrl2 = MAX77843_MUIC_CONTROL2_LOWPWR_MASK;
ret = regmap_update_bits(max77843->regmap_muic,
MAX77843_MUIC_REG_CONTROL2,
MAX77843_MUIC_CONTROL2_LOWPWR_MASK |
MAX77843_MUIC_CONTROL2_CPEN_MASK, ctrl2);
if (ret < 0) {
dev_err(info->dev, "Cannot update lowpower mode\n");
return ret;
}
dev_dbg(info->dev,
"CONTROL1 : 0x%02x, CONTROL2 : 0x%02x, state : %s\n",
ctrl1, ctrl2, attached ? "attached" : "detached");
return 0;
}
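/*
 * Enable the charger's OTG/boost mode while a USB host (OTG) cable is
 * attached and switch back to buck/charge mode on detach; called from
 * max77843_muic_adc_gnd_handler().
 */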
static void max77843_charger_set_otg_vbus(struct max77843_muic_info *info,
bool on)
{
struct max77693_dev *max77843 = info->max77843;
unsigned int cnfg00;
if (on)
cnfg00 = MAX77843_CHG_OTG_MASK | MAX77843_CHG_BOOST_MASK;
else
cnfg00 = MAX77843_CHG_ENABLE | MAX77843_CHG_BUCK_MASK;
regmap_update_bits(max77843->regmap_chg, MAX77843_CHG_REG_CHG_CNFG_00,
MAX77843_CHG_MODE_MASK, cnfg00);
}
static int max77843_muic_get_cable_type(struct max77843_muic_info *info,
enum max77843_muic_cable_group group, bool *attached)
{
int adc, chg_type, cable_type, gnd_type;
adc = info->status[MAX77843_MUIC_STATUS1] &
MAX77843_MUIC_STATUS1_ADC_MASK;
adc >>= MAX77843_MUIC_STATUS1_ADC_SHIFT;
switch (group) {
case MAX77843_CABLE_GROUP_ADC:
if (adc == MAX77843_MUIC_ADC_OPEN) {
*attached = false;
cable_type = info->prev_cable_type;
info->prev_cable_type = MAX77843_MUIC_ADC_OPEN;
} else {
*attached = true;
cable_type = info->prev_cable_type = adc;
}
break;
case MAX77843_CABLE_GROUP_CHG:
chg_type = info->status[MAX77843_MUIC_STATUS2] &
MAX77843_MUIC_STATUS2_CHGTYP_MASK;
/* Check GROUND accessory with charger cable */
if (adc == MAX77843_MUIC_ADC_GROUND) {
if (chg_type == MAX77843_MUIC_CHG_NONE) {
/*
* This is the state where the charger cable has been
* disconnected but the GROUND accessory is still
* connected.
*/
*attached = false;
cable_type = info->prev_chg_type;
info->prev_chg_type = MAX77843_MUIC_CHG_NONE;
} else {
/*
				 * This state means that the charger cable is
				 * connected to the GROUND accessory.
*/
*attached = true;
cable_type = MAX77843_MUIC_CHG_GND;
info->prev_chg_type = MAX77843_MUIC_CHG_GND;
}
break;
}
if (adc == MAX77843_MUIC_ADC_RESERVED_ACC_3) { /* SmartDock */
if (chg_type == MAX77843_MUIC_CHG_NONE) {
*attached = false;
cable_type = info->prev_chg_type;
info->prev_chg_type = MAX77843_MUIC_CHG_NONE;
} else {
*attached = true;
cable_type = MAX77843_MUIC_CHG_DOCK;
info->prev_chg_type = MAX77843_MUIC_CHG_DOCK;
}
break;
}
if (chg_type == MAX77843_MUIC_CHG_NONE) {
*attached = false;
cable_type = info->prev_chg_type;
info->prev_chg_type = MAX77843_MUIC_CHG_NONE;
} else {
*attached = true;
cable_type = info->prev_chg_type = chg_type;
}
break;
case MAX77843_CABLE_GROUP_ADC_GND:
if (adc == MAX77843_MUIC_ADC_OPEN) {
*attached = false;
cable_type = info->prev_gnd_type;
info->prev_gnd_type = MAX77843_MUIC_ADC_OPEN;
} else {
*attached = true;
/*
			 * Offset | ADC1K | VBVolt |
			 *  0x1   |   0   |   0    | USB-HOST
			 *  0x1   |   0   |   1    | USB-HOST with VB
			 *  0x1   |   1   |   0    | MHL
			 *  0x1   |   1   |   1    | MHL with VB
*/
/* Get ADC1K register bit */
gnd_type = (info->status[MAX77843_MUIC_STATUS1] &
MAX77843_MUIC_STATUS1_ADC1K_MASK);
/* Get VBVolt register bit */
gnd_type |= (info->status[MAX77843_MUIC_STATUS2] &
MAX77843_MUIC_STATUS2_VBVOLT_MASK);
gnd_type >>= MAX77843_MUIC_STATUS2_VBVOLT_SHIFT;
/* Offset of GND cable */
gnd_type |= MAX77843_MUIC_GND_USB_HOST;
cable_type = info->prev_gnd_type = gnd_type;
}
break;
default:
dev_err(info->dev, "Unknown cable group (%d)\n", group);
cable_type = -EINVAL;
break;
}
return cable_type;
}
static int max77843_muic_adc_gnd_handler(struct max77843_muic_info *info)
{
int ret, gnd_cable_type;
bool attached;
gnd_cable_type = max77843_muic_get_cable_type(info,
MAX77843_CABLE_GROUP_ADC_GND, &attached);
dev_dbg(info->dev, "external connector is %s (gnd:0x%02x)\n",
attached ? "attached" : "detached", gnd_cable_type);
switch (gnd_cable_type) {
case MAX77843_MUIC_GND_USB_HOST:
case MAX77843_MUIC_GND_USB_HOST_VB:
ret = max77843_muic_set_path(info,
MAX77843_MUIC_CONTROL1_SW_USB,
attached, false);
if (ret < 0)
return ret;
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, attached);
max77843_charger_set_otg_vbus(info, attached);
break;
case MAX77843_MUIC_GND_MHL_VB:
case MAX77843_MUIC_GND_MHL:
ret = max77843_muic_set_path(info,
MAX77843_MUIC_CONTROL1_SW_OPEN,
attached, false);
if (ret < 0)
return ret;
extcon_set_state_sync(info->edev, EXTCON_DISP_MHL, attached);
break;
default:
dev_err(info->dev, "failed to detect %s accessory(gnd:0x%x)\n",
attached ? "attached" : "detached", gnd_cable_type);
return -EINVAL;
}
return 0;
}
static int max77843_muic_jig_handler(struct max77843_muic_info *info,
int cable_type, bool attached)
{
int ret;
u8 path = MAX77843_MUIC_CONTROL1_SW_OPEN;
dev_dbg(info->dev, "external connector is %s (adc:0x%02x)\n",
attached ? "attached" : "detached", cable_type);
switch (cable_type) {
case MAX77843_MUIC_ADC_FACTORY_MODE_USB_OFF:
case MAX77843_MUIC_ADC_FACTORY_MODE_USB_ON:
path = MAX77843_MUIC_CONTROL1_SW_USB;
break;
case MAX77843_MUIC_ADC_FACTORY_MODE_UART_OFF:
path = MAX77843_MUIC_CONTROL1_SW_UART;
break;
default:
return -EINVAL;
}
ret = max77843_muic_set_path(info, path, attached, false);
if (ret < 0)
return ret;
extcon_set_state_sync(info->edev, EXTCON_JIG, attached);
return 0;
}
static int max77843_muic_dock_handler(struct max77843_muic_info *info,
bool attached)
{
int ret;
dev_dbg(info->dev, "external connector is %s (adc: 0x10)\n",
attached ? "attached" : "detached");
ret = max77843_muic_set_path(info, MAX77843_MUIC_CONTROL1_SW_USB,
attached, attached);
if (ret < 0)
return ret;
extcon_set_state_sync(info->edev, EXTCON_DISP_MHL, attached);
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, attached);
extcon_set_state_sync(info->edev, EXTCON_DOCK, attached);
return 0;
}
static int max77843_muic_adc_handler(struct max77843_muic_info *info)
{
int ret, cable_type;
bool attached;
cable_type = max77843_muic_get_cable_type(info,
MAX77843_CABLE_GROUP_ADC, &attached);
dev_dbg(info->dev,
"external connector is %s (adc:0x%02x, prev_adc:0x%x)\n",
attached ? "attached" : "detached", cable_type,
info->prev_cable_type);
switch (cable_type) {
case MAX77843_MUIC_ADC_RESERVED_ACC_3: /* SmartDock */
ret = max77843_muic_dock_handler(info, attached);
if (ret < 0)
return ret;
break;
case MAX77843_MUIC_ADC_GROUND:
ret = max77843_muic_adc_gnd_handler(info);
if (ret < 0)
return ret;
break;
case MAX77843_MUIC_ADC_FACTORY_MODE_USB_OFF:
case MAX77843_MUIC_ADC_FACTORY_MODE_USB_ON:
case MAX77843_MUIC_ADC_FACTORY_MODE_UART_OFF:
ret = max77843_muic_jig_handler(info, cable_type, attached);
if (ret < 0)
return ret;
break;
case MAX77843_MUIC_ADC_SEND_END_BUTTON:
case MAX77843_MUIC_ADC_REMOTE_S1_BUTTON:
case MAX77843_MUIC_ADC_REMOTE_S2_BUTTON:
case MAX77843_MUIC_ADC_REMOTE_S3_BUTTON:
case MAX77843_MUIC_ADC_REMOTE_S4_BUTTON:
case MAX77843_MUIC_ADC_REMOTE_S5_BUTTON:
case MAX77843_MUIC_ADC_REMOTE_S6_BUTTON:
case MAX77843_MUIC_ADC_REMOTE_S7_BUTTON:
case MAX77843_MUIC_ADC_REMOTE_S8_BUTTON:
case MAX77843_MUIC_ADC_REMOTE_S9_BUTTON:
case MAX77843_MUIC_ADC_REMOTE_S10_BUTTON:
case MAX77843_MUIC_ADC_REMOTE_S11_BUTTON:
case MAX77843_MUIC_ADC_REMOTE_S12_BUTTON:
case MAX77843_MUIC_ADC_RESERVED_ACC_1:
case MAX77843_MUIC_ADC_RESERVED_ACC_2:
case MAX77843_MUIC_ADC_RESERVED_ACC_4:
case MAX77843_MUIC_ADC_RESERVED_ACC_5:
case MAX77843_MUIC_ADC_AUDIO_DEVICE_TYPE2:
case MAX77843_MUIC_ADC_PHONE_POWERED_DEV:
case MAX77843_MUIC_ADC_TTY_CONVERTER:
case MAX77843_MUIC_ADC_UART_CABLE:
case MAX77843_MUIC_ADC_CEA936A_TYPE1_CHG:
case MAX77843_MUIC_ADC_AV_CABLE_NOLOAD:
case MAX77843_MUIC_ADC_CEA936A_TYPE2_CHG:
case MAX77843_MUIC_ADC_FACTORY_MODE_UART_ON:
case MAX77843_MUIC_ADC_AUDIO_DEVICE_TYPE1:
case MAX77843_MUIC_ADC_OPEN:
dev_err(info->dev,
"accessory is %s but it isn't used (adc:0x%x)\n",
attached ? "attached" : "detached", cable_type);
return -EAGAIN;
default:
dev_err(info->dev,
"failed to detect %s accessory (adc:0x%x)\n",
attached ? "attached" : "detached", cable_type);
return -EINVAL;
}
return 0;
}
static int max77843_muic_chg_handler(struct max77843_muic_info *info)
{
int ret, chg_type, gnd_type;
bool attached;
chg_type = max77843_muic_get_cable_type(info,
MAX77843_CABLE_GROUP_CHG, &attached);
dev_dbg(info->dev,
"external connector is %s(chg_type:0x%x, prev_chg_type:0x%x)\n",
attached ? "attached" : "detached",
chg_type, info->prev_chg_type);
switch (chg_type) {
case MAX77843_MUIC_CHG_USB:
ret = max77843_muic_set_path(info,
MAX77843_MUIC_CONTROL1_SW_USB,
attached, false);
if (ret < 0)
return ret;
extcon_set_state_sync(info->edev, EXTCON_USB, attached);
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SDP,
attached);
break;
case MAX77843_MUIC_CHG_DOWNSTREAM:
ret = max77843_muic_set_path(info,
MAX77843_MUIC_CONTROL1_SW_OPEN,
attached, false);
if (ret < 0)
return ret;
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_CDP,
attached);
break;
case MAX77843_MUIC_CHG_DEDICATED:
ret = max77843_muic_set_path(info,
MAX77843_MUIC_CONTROL1_SW_OPEN,
attached, false);
if (ret < 0)
return ret;
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP,
attached);
break;
case MAX77843_MUIC_CHG_SPECIAL_500MA:
ret = max77843_muic_set_path(info,
MAX77843_MUIC_CONTROL1_SW_OPEN,
attached, false);
if (ret < 0)
return ret;
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SLOW,
attached);
break;
case MAX77843_MUIC_CHG_SPECIAL_1A:
ret = max77843_muic_set_path(info,
MAX77843_MUIC_CONTROL1_SW_OPEN,
attached, false);
if (ret < 0)
return ret;
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_FAST,
attached);
break;
case MAX77843_MUIC_CHG_GND:
gnd_type = max77843_muic_get_cable_type(info,
MAX77843_CABLE_GROUP_ADC_GND, &attached);
		/* Charger cable on the MHL accessory is attached or detached */
if (gnd_type == MAX77843_MUIC_GND_MHL_VB)
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP,
true);
else if (gnd_type == MAX77843_MUIC_GND_MHL)
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP,
false);
break;
case MAX77843_MUIC_CHG_DOCK:
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP, attached);
break;
case MAX77843_MUIC_CHG_NONE:
break;
default:
dev_err(info->dev,
"failed to detect %s accessory (chg_type:0x%x)\n",
attached ? "attached" : "detached", chg_type);
max77843_muic_set_path(info, MAX77843_MUIC_CONTROL1_SW_OPEN,
attached, false);
return -EINVAL;
}
return 0;
}
static void max77843_muic_irq_work(struct work_struct *work)
{
struct max77843_muic_info *info = container_of(work,
struct max77843_muic_info, irq_work);
struct max77693_dev *max77843 = info->max77843;
int ret = 0;
mutex_lock(&info->mutex);
ret = regmap_bulk_read(max77843->regmap_muic,
MAX77843_MUIC_REG_STATUS1, info->status,
MAX77843_MUIC_STATUS_NUM);
if (ret) {
dev_err(info->dev, "Cannot read STATUS registers\n");
mutex_unlock(&info->mutex);
return;
}
if (info->irq_adc) {
ret = max77843_muic_adc_handler(info);
if (ret)
dev_err(info->dev, "Unknown cable type\n");
info->irq_adc = false;
}
if (info->irq_chg) {
ret = max77843_muic_chg_handler(info);
if (ret)
dev_err(info->dev, "Unknown charger type\n");
info->irq_chg = false;
}
mutex_unlock(&info->mutex);
}
static irqreturn_t max77843_muic_irq_handler(int irq, void *data)
{
struct max77843_muic_info *info = data;
int i, irq_type = -1;
for (i = 0; i < ARRAY_SIZE(max77843_muic_irqs); i++)
if (irq == max77843_muic_irqs[i].virq)
irq_type = max77843_muic_irqs[i].irq;
switch (irq_type) {
case MAX77843_MUIC_IRQ_INT1_ADC:
case MAX77843_MUIC_IRQ_INT1_ADCERROR:
case MAX77843_MUIC_IRQ_INT1_ADC1K:
info->irq_adc = true;
break;
case MAX77843_MUIC_IRQ_INT2_CHGTYP:
case MAX77843_MUIC_IRQ_INT2_CHGDETRUN:
case MAX77843_MUIC_IRQ_INT2_DCDTMR:
case MAX77843_MUIC_IRQ_INT2_DXOVP:
case MAX77843_MUIC_IRQ_INT2_VBVOLT:
info->irq_chg = true;
break;
case MAX77843_MUIC_IRQ_INT3_VBADC:
case MAX77843_MUIC_IRQ_INT3_VDNMON:
case MAX77843_MUIC_IRQ_INT3_DNRES:
case MAX77843_MUIC_IRQ_INT3_MPNACK:
case MAX77843_MUIC_IRQ_INT3_MRXBUFOW:
case MAX77843_MUIC_IRQ_INT3_MRXTRF:
case MAX77843_MUIC_IRQ_INT3_MRXPERR:
case MAX77843_MUIC_IRQ_INT3_MRXRDY:
break;
default:
dev_err(info->dev, "Cannot recognize IRQ(%d)\n", irq_type);
break;
}
schedule_work(&info->irq_work);
return IRQ_HANDLED;
}
static void max77843_muic_detect_cable_wq(struct work_struct *work)
{
struct max77843_muic_info *info = container_of(to_delayed_work(work),
struct max77843_muic_info, wq_detcable);
struct max77693_dev *max77843 = info->max77843;
int chg_type, adc, ret;
bool attached;
mutex_lock(&info->mutex);
ret = regmap_bulk_read(max77843->regmap_muic,
MAX77843_MUIC_REG_STATUS1, info->status,
MAX77843_MUIC_STATUS_NUM);
if (ret) {
dev_err(info->dev, "Cannot read STATUS registers\n");
goto err_cable_wq;
}
adc = max77843_muic_get_cable_type(info,
MAX77843_CABLE_GROUP_ADC, &attached);
if (attached && adc != MAX77843_MUIC_ADC_OPEN) {
ret = max77843_muic_adc_handler(info);
if (ret < 0) {
dev_err(info->dev, "Cannot detect accessory\n");
goto err_cable_wq;
}
}
chg_type = max77843_muic_get_cable_type(info,
MAX77843_CABLE_GROUP_CHG, &attached);
if (attached && chg_type != MAX77843_MUIC_CHG_NONE) {
ret = max77843_muic_chg_handler(info);
if (ret < 0) {
dev_err(info->dev, "Cannot detect charger accessory\n");
goto err_cable_wq;
}
}
err_cable_wq:
mutex_unlock(&info->mutex);
}
static int max77843_muic_set_debounce_time(struct max77843_muic_info *info,
enum max77843_muic_adc_debounce_time time)
{
struct max77693_dev *max77843 = info->max77843;
int ret;
switch (time) {
case MAX77843_DEBOUNCE_TIME_5MS:
case MAX77843_DEBOUNCE_TIME_10MS:
case MAX77843_DEBOUNCE_TIME_25MS:
case MAX77843_DEBOUNCE_TIME_38_62MS:
ret = regmap_update_bits(max77843->regmap_muic,
MAX77843_MUIC_REG_CONTROL4,
MAX77843_MUIC_CONTROL4_ADCDBSET_MASK,
time << MAX77843_MUIC_CONTROL4_ADCDBSET_SHIFT);
if (ret < 0) {
dev_err(info->dev, "Cannot write MUIC regmap\n");
return ret;
}
break;
default:
dev_err(info->dev, "Invalid ADC debounce time\n");
return -EINVAL;
}
return 0;
}
static int max77843_init_muic_regmap(struct max77693_dev *max77843)
{
int ret;
max77843->i2c_muic = i2c_new_dummy_device(max77843->i2c->adapter,
I2C_ADDR_MUIC);
if (IS_ERR(max77843->i2c_muic)) {
dev_err(&max77843->i2c->dev,
"Cannot allocate I2C device for MUIC\n");
return PTR_ERR(max77843->i2c_muic);
}
i2c_set_clientdata(max77843->i2c_muic, max77843);
max77843->regmap_muic = devm_regmap_init_i2c(max77843->i2c_muic,
&max77843_muic_regmap_config);
if (IS_ERR(max77843->regmap_muic)) {
ret = PTR_ERR(max77843->regmap_muic);
goto err_muic_i2c;
}
ret = regmap_add_irq_chip(max77843->regmap_muic, max77843->irq,
IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_SHARED,
0, &max77843_muic_irq_chip, &max77843->irq_data_muic);
if (ret < 0) {
dev_err(&max77843->i2c->dev, "Cannot add MUIC IRQ chip\n");
goto err_muic_i2c;
}
return 0;
err_muic_i2c:
i2c_unregister_device(max77843->i2c_muic);
return ret;
}
static int max77843_muic_probe(struct platform_device *pdev)
{
struct max77693_dev *max77843 = dev_get_drvdata(pdev->dev.parent);
struct max77843_muic_info *info;
unsigned int id;
int cable_type;
bool attached;
int i, ret;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->dev = &pdev->dev;
info->max77843 = max77843;
platform_set_drvdata(pdev, info);
mutex_init(&info->mutex);
/* Initialize i2c and regmap */
ret = max77843_init_muic_regmap(max77843);
if (ret) {
dev_err(&pdev->dev, "Failed to init MUIC regmap\n");
return ret;
}
/* Turn off auto detection configuration */
ret = regmap_update_bits(max77843->regmap_muic,
MAX77843_MUIC_REG_CONTROL4,
MAX77843_MUIC_CONTROL4_USBAUTO_MASK |
MAX77843_MUIC_CONTROL4_FCTAUTO_MASK,
CONTROL4_AUTO_DISABLE);
/* Initialize extcon device */
info->edev = devm_extcon_dev_allocate(&pdev->dev,
max77843_extcon_cable);
if (IS_ERR(info->edev)) {
dev_err(&pdev->dev, "Failed to allocate memory for extcon\n");
ret = PTR_ERR(info->edev);
goto err_muic_irq;
}
ret = devm_extcon_dev_register(&pdev->dev, info->edev);
if (ret) {
dev_err(&pdev->dev, "Failed to register extcon device\n");
goto err_muic_irq;
}
/* Set ADC debounce time */
max77843_muic_set_debounce_time(info, MAX77843_DEBOUNCE_TIME_25MS);
/* Set initial path for UART when JIG is connected to get serial logs */
ret = regmap_bulk_read(max77843->regmap_muic,
MAX77843_MUIC_REG_STATUS1, info->status,
MAX77843_MUIC_STATUS_NUM);
if (ret) {
dev_err(info->dev, "Cannot read STATUS registers\n");
goto err_muic_irq;
}
cable_type = max77843_muic_get_cable_type(info, MAX77843_CABLE_GROUP_ADC,
&attached);
if (attached && cable_type == MAX77843_MUIC_ADC_FACTORY_MODE_UART_OFF)
max77843_muic_set_path(info, MAX77843_MUIC_CONTROL1_SW_UART,
true, false);
/* Check revision number of MUIC device */
ret = regmap_read(max77843->regmap_muic, MAX77843_MUIC_REG_ID, &id);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to read revision number\n");
goto err_muic_irq;
}
dev_info(info->dev, "MUIC device ID : 0x%x\n", id);
/* Support virtual irq domain for max77843 MUIC device */
INIT_WORK(&info->irq_work, max77843_muic_irq_work);
	/* Clear IRQ bits before requesting IRQs */
ret = regmap_bulk_read(max77843->regmap_muic,
MAX77843_MUIC_REG_INT1, info->status,
MAX77843_MUIC_STATUS_NUM);
if (ret) {
dev_err(&pdev->dev, "Failed to Clear IRQ bits\n");
goto err_muic_irq;
}
for (i = 0; i < ARRAY_SIZE(max77843_muic_irqs); i++) {
struct max77843_muic_irq *muic_irq = &max77843_muic_irqs[i];
int virq = 0;
virq = regmap_irq_get_virq(max77843->irq_data_muic,
muic_irq->irq);
if (virq <= 0) {
ret = -EINVAL;
goto err_muic_irq;
}
muic_irq->virq = virq;
ret = devm_request_threaded_irq(&pdev->dev, virq, NULL,
max77843_muic_irq_handler, IRQF_NO_SUSPEND,
muic_irq->name, info);
if (ret) {
dev_err(&pdev->dev,
"Failed to request irq (IRQ: %d, error: %d)\n",
muic_irq->irq, ret);
goto err_muic_irq;
}
}
	/* Detect accessory after the platform initialization is completed */
INIT_DELAYED_WORK(&info->wq_detcable, max77843_muic_detect_cable_wq);
queue_delayed_work(system_power_efficient_wq,
&info->wq_detcable, msecs_to_jiffies(DELAY_MS_DEFAULT));
return 0;
err_muic_irq:
regmap_del_irq_chip(max77843->irq, max77843->irq_data_muic);
i2c_unregister_device(max77843->i2c_muic);
return ret;
}
static int max77843_muic_remove(struct platform_device *pdev)
{
struct max77843_muic_info *info = platform_get_drvdata(pdev);
struct max77693_dev *max77843 = info->max77843;
cancel_work_sync(&info->irq_work);
regmap_del_irq_chip(max77843->irq, max77843->irq_data_muic);
i2c_unregister_device(max77843->i2c_muic);
return 0;
}
static const struct platform_device_id max77843_muic_id[] = {
{ "max77843-muic", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, max77843_muic_id);
static struct platform_driver max77843_muic_driver = {
.driver = {
.name = "max77843-muic",
},
.probe = max77843_muic_probe,
.remove = max77843_muic_remove,
.id_table = max77843_muic_id,
};
static int __init max77843_muic_init(void)
{
return platform_driver_register(&max77843_muic_driver);
}
subsys_initcall(max77843_muic_init);
MODULE_DESCRIPTION("Maxim MAX77843 Extcon driver");
MODULE_AUTHOR("Jaewon Kim <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/extcon/extcon-max77843.c |
// SPDX-License-Identifier: GPL-2.0+
//
// extcon-max8997.c - MAX8997 extcon driver to support MAX8997 MUIC
//
// Copyright (C) 2012 Samsung Electronics
// Donggeun Kim <[email protected]>
#include <linux/devm-helpers.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/kobject.h>
#include <linux/mfd/max8997.h>
#include <linux/mfd/max8997-private.h>
#include <linux/extcon-provider.h>
#include <linux/irqdomain.h>
#define DEV_NAME "max8997-muic"
#define DELAY_MS_DEFAULT 20000 /* unit: millisecond */
enum max8997_muic_adc_debounce_time {
ADC_DEBOUNCE_TIME_0_5MS = 0, /* 0.5ms */
ADC_DEBOUNCE_TIME_10MS, /* 10ms */
ADC_DEBOUNCE_TIME_25MS, /* 25ms */
ADC_DEBOUNCE_TIME_38_62MS, /* 38.62ms */
};
struct max8997_muic_irq {
unsigned int irq;
const char *name;
unsigned int virq;
};
static struct max8997_muic_irq muic_irqs[] = {
{ MAX8997_MUICIRQ_ADCError, "muic-ADCERROR" },
{ MAX8997_MUICIRQ_ADCLow, "muic-ADCLOW" },
{ MAX8997_MUICIRQ_ADC, "muic-ADC" },
{ MAX8997_MUICIRQ_VBVolt, "muic-VBVOLT" },
{ MAX8997_MUICIRQ_DBChg, "muic-DBCHG" },
{ MAX8997_MUICIRQ_DCDTmr, "muic-DCDTMR" },
{ MAX8997_MUICIRQ_ChgDetRun, "muic-CHGDETRUN" },
{ MAX8997_MUICIRQ_ChgTyp, "muic-CHGTYP" },
{ MAX8997_MUICIRQ_OVP, "muic-OVP" },
{ MAX8997_PMICIRQ_CHGINS, "pmic-CHGINS" },
{ MAX8997_PMICIRQ_CHGRM, "pmic-CHGRM" },
};
/* Define supported cable type */
enum max8997_muic_acc_type {
MAX8997_MUIC_ADC_GROUND = 0x0,
	MAX8997_MUIC_ADC_MHL, /* MHL */
MAX8997_MUIC_ADC_REMOTE_S1_BUTTON,
MAX8997_MUIC_ADC_REMOTE_S2_BUTTON,
MAX8997_MUIC_ADC_REMOTE_S3_BUTTON,
MAX8997_MUIC_ADC_REMOTE_S4_BUTTON,
MAX8997_MUIC_ADC_REMOTE_S5_BUTTON,
MAX8997_MUIC_ADC_REMOTE_S6_BUTTON,
MAX8997_MUIC_ADC_REMOTE_S7_BUTTON,
MAX8997_MUIC_ADC_REMOTE_S8_BUTTON,
MAX8997_MUIC_ADC_REMOTE_S9_BUTTON,
MAX8997_MUIC_ADC_REMOTE_S10_BUTTON,
MAX8997_MUIC_ADC_REMOTE_S11_BUTTON,
MAX8997_MUIC_ADC_REMOTE_S12_BUTTON,
MAX8997_MUIC_ADC_RESERVED_ACC_1,
MAX8997_MUIC_ADC_RESERVED_ACC_2,
MAX8997_MUIC_ADC_RESERVED_ACC_3,
MAX8997_MUIC_ADC_RESERVED_ACC_4,
MAX8997_MUIC_ADC_RESERVED_ACC_5,
MAX8997_MUIC_ADC_CEA936_AUDIO,
MAX8997_MUIC_ADC_PHONE_POWERED_DEV,
MAX8997_MUIC_ADC_TTY_CONVERTER,
MAX8997_MUIC_ADC_UART_CABLE,
MAX8997_MUIC_ADC_CEA936A_TYPE1_CHG,
MAX8997_MUIC_ADC_FACTORY_MODE_USB_OFF, /* JIG-USB-OFF */
MAX8997_MUIC_ADC_FACTORY_MODE_USB_ON, /* JIG-USB-ON */
MAX8997_MUIC_ADC_AV_CABLE_NOLOAD, /* DESKDOCK */
MAX8997_MUIC_ADC_CEA936A_TYPE2_CHG,
MAX8997_MUIC_ADC_FACTORY_MODE_UART_OFF, /* JIG-UART */
MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON, /* CARDOCK */
MAX8997_MUIC_ADC_AUDIO_MODE_REMOTE,
MAX8997_MUIC_ADC_OPEN, /* OPEN */
};
enum max8997_muic_cable_group {
MAX8997_CABLE_GROUP_ADC = 0,
MAX8997_CABLE_GROUP_ADC_GND,
MAX8997_CABLE_GROUP_CHG,
MAX8997_CABLE_GROUP_VBVOLT,
};
enum max8997_muic_usb_type {
MAX8997_USB_HOST,
MAX8997_USB_DEVICE,
};
enum max8997_muic_charger_type {
MAX8997_CHARGER_TYPE_NONE = 0,
MAX8997_CHARGER_TYPE_USB,
MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT,
MAX8997_CHARGER_TYPE_DEDICATED_CHG,
MAX8997_CHARGER_TYPE_500MA,
MAX8997_CHARGER_TYPE_1A,
MAX8997_CHARGER_TYPE_DEAD_BATTERY = 7,
};
struct max8997_muic_info {
struct device *dev;
struct i2c_client *muic;
struct extcon_dev *edev;
int prev_cable_type;
int prev_chg_type;
u8 status[2];
int irq;
struct work_struct irq_work;
struct mutex mutex;
struct max8997_muic_platform_data *muic_pdata;
enum max8997_muic_charger_type pre_charger_type;
/*
	 * Use a delayed workqueue to detect the cable state and then
	 * notify the cable state to the notifiee/platform through a uevent.
	 * After the platform has finished booting, the extcon provider
	 * driver should notify the cable state to the upper layer.
*/
struct delayed_work wq_detcable;
/*
	 * Default USB/UART path, either UART/USB or AUX_UART/AUX_USB, for the
	 * H/W path of COMP2/COMN1 on the CONTROL1 register.
*/
int path_usb;
int path_uart;
};
static const unsigned int max8997_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_DCP,
EXTCON_CHG_USB_FAST,
EXTCON_CHG_USB_SLOW,
EXTCON_CHG_USB_CDP,
EXTCON_DISP_MHL,
EXTCON_DOCK,
EXTCON_JIG,
EXTCON_NONE,
};
/*
* max8997_muic_set_debounce_time - Set the debounce time of ADC
* @info: the instance including private data of max8997 MUIC
* @time: the debounce time of ADC
*/
static int max8997_muic_set_debounce_time(struct max8997_muic_info *info,
enum max8997_muic_adc_debounce_time time)
{
int ret;
switch (time) {
case ADC_DEBOUNCE_TIME_0_5MS:
case ADC_DEBOUNCE_TIME_10MS:
case ADC_DEBOUNCE_TIME_25MS:
case ADC_DEBOUNCE_TIME_38_62MS:
ret = max8997_update_reg(info->muic,
MAX8997_MUIC_REG_CONTROL3,
time << CONTROL3_ADCDBSET_SHIFT,
CONTROL3_ADCDBSET_MASK);
if (ret) {
dev_err(info->dev, "failed to set ADC debounce time\n");
return ret;
}
break;
default:
dev_err(info->dev, "invalid ADC debounce time\n");
return -EINVAL;
}
return 0;
};
/*
* max8997_muic_set_path - Set hardware line according to attached cable
* @info: the instance including private data of max8997 MUIC
* @value: the path according to attached cable
* @attached: the state of cable (true:attached, false:detached)
*
 * The max8997 MUIC device shares one set of outside H/W lines among a variety
 * of cables, so this function sets the internal path of the H/W lines
 * according to the type of attached cable.
*/
static int max8997_muic_set_path(struct max8997_muic_info *info,
u8 val, bool attached)
{
int ret;
u8 ctrl1, ctrl2 = 0;
if (attached)
ctrl1 = val;
else
ctrl1 = CONTROL1_SW_OPEN;
ret = max8997_update_reg(info->muic,
MAX8997_MUIC_REG_CONTROL1, ctrl1, COMP_SW_MASK);
if (ret < 0) {
dev_err(info->dev, "failed to update MUIC register\n");
return ret;
}
if (attached)
ctrl2 |= CONTROL2_CPEN_MASK; /* LowPwr=0, CPEn=1 */
else
ctrl2 |= CONTROL2_LOWPWR_MASK; /* LowPwr=1, CPEn=0 */
ret = max8997_update_reg(info->muic,
MAX8997_MUIC_REG_CONTROL2, ctrl2,
CONTROL2_LOWPWR_MASK | CONTROL2_CPEN_MASK);
if (ret < 0) {
dev_err(info->dev, "failed to update MUIC register\n");
return ret;
}
dev_info(info->dev,
"CONTROL1 : 0x%02x, CONTROL2 : 0x%02x, state : %s\n",
ctrl1, ctrl2, attached ? "attached" : "detached");
return 0;
}
/*
* max8997_muic_get_cable_type - Return cable type and check cable state
* @info: the instance including private data of max8997 MUIC
* @group: the path according to attached cable
* @attached: store cable state and return
*
 * This function checks whether the cable is attached or detached,
 * and then determines the precise cable type according to the cable group.
* - MAX8997_CABLE_GROUP_ADC
* - MAX8997_CABLE_GROUP_CHG
*/
static int max8997_muic_get_cable_type(struct max8997_muic_info *info,
enum max8997_muic_cable_group group, bool *attached)
{
int cable_type = 0;
int adc;
int chg_type;
switch (group) {
case MAX8997_CABLE_GROUP_ADC:
/*
* Read ADC value to check cable type and decide cable state
* according to cable type
*/
adc = info->status[0] & STATUS1_ADC_MASK;
adc >>= STATUS1_ADC_SHIFT;
/*
* Check current cable state/cable type and store cable type
* (info->prev_cable_type) for handling cable when cable is
* detached.
*/
if (adc == MAX8997_MUIC_ADC_OPEN) {
*attached = false;
cable_type = info->prev_cable_type;
info->prev_cable_type = MAX8997_MUIC_ADC_OPEN;
} else {
*attached = true;
cable_type = info->prev_cable_type = adc;
}
break;
case MAX8997_CABLE_GROUP_CHG:
/*
* Read charger type to check cable type and decide cable state
* according to type of charger cable.
*/
chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
chg_type >>= STATUS2_CHGTYP_SHIFT;
if (chg_type == MAX8997_CHARGER_TYPE_NONE) {
*attached = false;
cable_type = info->prev_chg_type;
info->prev_chg_type = MAX8997_CHARGER_TYPE_NONE;
} else {
*attached = true;
/*
* Check current cable state/cable type and store cable
* type(info->prev_chg_type) for handling cable when
* charger cable is detached.
*/
cable_type = info->prev_chg_type = chg_type;
}
break;
default:
dev_err(info->dev, "Unknown cable group (%d)\n", group);
cable_type = -EINVAL;
break;
}
return cable_type;
}
static int max8997_muic_handle_usb(struct max8997_muic_info *info,
enum max8997_muic_usb_type usb_type, bool attached)
{
int ret = 0;
ret = max8997_muic_set_path(info, info->path_usb, attached);
if (ret < 0) {
dev_err(info->dev, "failed to update muic register\n");
return ret;
}
switch (usb_type) {
case MAX8997_USB_HOST:
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, attached);
break;
case MAX8997_USB_DEVICE:
extcon_set_state_sync(info->edev, EXTCON_USB, attached);
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SDP,
attached);
break;
default:
dev_err(info->dev, "failed to detect %s usb cable\n",
attached ? "attached" : "detached");
return -EINVAL;
}
return 0;
}
static int max8997_muic_handle_dock(struct max8997_muic_info *info,
int cable_type, bool attached)
{
int ret = 0;
ret = max8997_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
if (ret) {
dev_err(info->dev, "failed to update muic register\n");
return ret;
}
switch (cable_type) {
case MAX8997_MUIC_ADC_AV_CABLE_NOLOAD:
case MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON:
extcon_set_state_sync(info->edev, EXTCON_DOCK, attached);
break;
default:
dev_err(info->dev, "failed to detect %s dock device\n",
attached ? "attached" : "detached");
return -EINVAL;
}
return 0;
}
static int max8997_muic_handle_jig_uart(struct max8997_muic_info *info,
bool attached)
{
int ret = 0;
/* switch to UART */
ret = max8997_muic_set_path(info, info->path_uart, attached);
if (ret) {
dev_err(info->dev, "failed to update muic register\n");
return ret;
}
extcon_set_state_sync(info->edev, EXTCON_JIG, attached);
return 0;
}
static int max8997_muic_adc_handler(struct max8997_muic_info *info)
{
int cable_type;
bool attached;
int ret = 0;
/* Check cable state which is either detached or attached */
cable_type = max8997_muic_get_cable_type(info,
MAX8997_CABLE_GROUP_ADC, &attached);
switch (cable_type) {
case MAX8997_MUIC_ADC_GROUND:
ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, attached);
if (ret < 0)
return ret;
break;
case MAX8997_MUIC_ADC_MHL:
extcon_set_state_sync(info->edev, EXTCON_DISP_MHL, attached);
break;
case MAX8997_MUIC_ADC_FACTORY_MODE_USB_OFF:
case MAX8997_MUIC_ADC_FACTORY_MODE_USB_ON:
ret = max8997_muic_handle_usb(info,
MAX8997_USB_DEVICE, attached);
if (ret < 0)
return ret;
break;
case MAX8997_MUIC_ADC_AV_CABLE_NOLOAD:
case MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON:
ret = max8997_muic_handle_dock(info, cable_type, attached);
if (ret < 0)
return ret;
break;
case MAX8997_MUIC_ADC_FACTORY_MODE_UART_OFF:
ret = max8997_muic_handle_jig_uart(info, attached);
break;
case MAX8997_MUIC_ADC_REMOTE_S1_BUTTON:
case MAX8997_MUIC_ADC_REMOTE_S2_BUTTON:
case MAX8997_MUIC_ADC_REMOTE_S3_BUTTON:
case MAX8997_MUIC_ADC_REMOTE_S4_BUTTON:
case MAX8997_MUIC_ADC_REMOTE_S5_BUTTON:
case MAX8997_MUIC_ADC_REMOTE_S6_BUTTON:
case MAX8997_MUIC_ADC_REMOTE_S7_BUTTON:
case MAX8997_MUIC_ADC_REMOTE_S8_BUTTON:
case MAX8997_MUIC_ADC_REMOTE_S9_BUTTON:
case MAX8997_MUIC_ADC_REMOTE_S10_BUTTON:
case MAX8997_MUIC_ADC_REMOTE_S11_BUTTON:
case MAX8997_MUIC_ADC_REMOTE_S12_BUTTON:
case MAX8997_MUIC_ADC_RESERVED_ACC_1:
case MAX8997_MUIC_ADC_RESERVED_ACC_2:
case MAX8997_MUIC_ADC_RESERVED_ACC_3:
case MAX8997_MUIC_ADC_RESERVED_ACC_4:
case MAX8997_MUIC_ADC_RESERVED_ACC_5:
case MAX8997_MUIC_ADC_CEA936_AUDIO:
case MAX8997_MUIC_ADC_PHONE_POWERED_DEV:
case MAX8997_MUIC_ADC_TTY_CONVERTER:
case MAX8997_MUIC_ADC_UART_CABLE:
case MAX8997_MUIC_ADC_CEA936A_TYPE1_CHG:
case MAX8997_MUIC_ADC_CEA936A_TYPE2_CHG:
case MAX8997_MUIC_ADC_AUDIO_MODE_REMOTE:
/*
		 * These cables are not used in the general case. If detecting
		 * an additional cable is specially needed, implement the
		 * proper operation for when such a cable is attached/detached.
*/
dev_info(info->dev,
"cable is %s but it isn't used (type:0x%x)\n",
attached ? "attached" : "detached", cable_type);
return -EAGAIN;
default:
dev_err(info->dev,
"failed to detect %s unknown cable (type:0x%x)\n",
attached ? "attached" : "detached", cable_type);
return -EINVAL;
}
return 0;
}
static int max8997_muic_chg_handler(struct max8997_muic_info *info)
{
int chg_type;
bool attached;
int adc;
chg_type = max8997_muic_get_cable_type(info,
MAX8997_CABLE_GROUP_CHG, &attached);
switch (chg_type) {
case MAX8997_CHARGER_TYPE_NONE:
break;
case MAX8997_CHARGER_TYPE_USB:
adc = info->status[0] & STATUS1_ADC_MASK;
adc >>= STATUS1_ADC_SHIFT;
if ((adc & STATUS1_ADC_MASK) == MAX8997_MUIC_ADC_OPEN) {
max8997_muic_handle_usb(info,
MAX8997_USB_DEVICE, attached);
}
break;
case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT:
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_CDP,
attached);
break;
case MAX8997_CHARGER_TYPE_DEDICATED_CHG:
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP,
attached);
break;
case MAX8997_CHARGER_TYPE_500MA:
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SLOW,
attached);
break;
case MAX8997_CHARGER_TYPE_1A:
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_FAST,
attached);
break;
default:
dev_err(info->dev,
"failed to detect %s unknown chg cable (type:0x%x)\n",
attached ? "attached" : "detached", chg_type);
return -EINVAL;
}
return 0;
}
static void max8997_muic_irq_work(struct work_struct *work)
{
struct max8997_muic_info *info = container_of(work,
struct max8997_muic_info, irq_work);
int irq_type = 0;
int i, ret;
if (!info->edev)
return;
mutex_lock(&info->mutex);
for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
if (info->irq == muic_irqs[i].virq)
irq_type = muic_irqs[i].irq;
ret = max8997_bulk_read(info->muic, MAX8997_MUIC_REG_STATUS1,
2, info->status);
if (ret) {
dev_err(info->dev, "failed to read muic register\n");
mutex_unlock(&info->mutex);
return;
}
switch (irq_type) {
case MAX8997_MUICIRQ_ADCError:
case MAX8997_MUICIRQ_ADCLow:
case MAX8997_MUICIRQ_ADC:
/* Handle all of cable except for charger cable */
ret = max8997_muic_adc_handler(info);
break;
case MAX8997_MUICIRQ_VBVolt:
case MAX8997_MUICIRQ_DBChg:
case MAX8997_MUICIRQ_DCDTmr:
case MAX8997_MUICIRQ_ChgDetRun:
case MAX8997_MUICIRQ_ChgTyp:
case MAX8997_PMICIRQ_CHGINS:
case MAX8997_PMICIRQ_CHGRM:
/* Handle charger cable */
ret = max8997_muic_chg_handler(info);
break;
case MAX8997_MUICIRQ_OVP:
break;
default:
dev_info(info->dev, "misc interrupt: irq %d occurred\n",
irq_type);
mutex_unlock(&info->mutex);
return;
}
if (ret < 0)
dev_err(info->dev, "failed to handle MUIC interrupt\n");
mutex_unlock(&info->mutex);
}
static irqreturn_t max8997_muic_irq_handler(int irq, void *data)
{
struct max8997_muic_info *info = data;
dev_dbg(info->dev, "irq:%d\n", irq);
info->irq = irq;
schedule_work(&info->irq_work);
return IRQ_HANDLED;
}
static int max8997_muic_detect_dev(struct max8997_muic_info *info)
{
int ret = 0;
int adc;
int chg_type;
bool attached;
mutex_lock(&info->mutex);
/* Read STATUSx register to detect accessory */
ret = max8997_bulk_read(info->muic,
MAX8997_MUIC_REG_STATUS1, 2, info->status);
if (ret) {
dev_err(info->dev, "failed to read MUIC register\n");
mutex_unlock(&info->mutex);
return ret;
}
adc = max8997_muic_get_cable_type(info, MAX8997_CABLE_GROUP_ADC,
&attached);
if (attached && adc != MAX8997_MUIC_ADC_OPEN) {
ret = max8997_muic_adc_handler(info);
if (ret < 0) {
dev_err(info->dev, "Cannot detect ADC cable\n");
mutex_unlock(&info->mutex);
return ret;
}
}
chg_type = max8997_muic_get_cable_type(info, MAX8997_CABLE_GROUP_CHG,
&attached);
if (attached && chg_type != MAX8997_CHARGER_TYPE_NONE) {
ret = max8997_muic_chg_handler(info);
if (ret < 0) {
dev_err(info->dev, "Cannot detect charger cable\n");
mutex_unlock(&info->mutex);
return ret;
}
}
mutex_unlock(&info->mutex);
return 0;
}
static void max8997_muic_detect_cable_wq(struct work_struct *work)
{
struct max8997_muic_info *info = container_of(to_delayed_work(work),
struct max8997_muic_info, wq_detcable);
int ret;
ret = max8997_muic_detect_dev(info);
if (ret < 0)
dev_err(info->dev, "failed to detect cable type\n");
}
static int max8997_muic_probe(struct platform_device *pdev)
{
struct max8997_dev *max8997 = dev_get_drvdata(pdev->dev.parent);
struct max8997_platform_data *pdata = dev_get_platdata(max8997->dev);
struct max8997_muic_info *info;
int delay_jiffies;
int cable_type;
bool attached;
int ret, i;
info = devm_kzalloc(&pdev->dev, sizeof(struct max8997_muic_info),
GFP_KERNEL);
if (!info)
return -ENOMEM;
info->dev = &pdev->dev;
info->muic = max8997->muic;
platform_set_drvdata(pdev, info);
mutex_init(&info->mutex);
INIT_WORK(&info->irq_work, max8997_muic_irq_work);
ret = devm_work_autocancel(&pdev->dev, &info->irq_work,
max8997_muic_irq_work);
if (ret)
return ret;
for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
struct max8997_muic_irq *muic_irq = &muic_irqs[i];
unsigned int virq = 0;
virq = irq_create_mapping(max8997->irq_domain, muic_irq->irq);
if (!virq)
return -EINVAL;
muic_irq->virq = virq;
ret = devm_request_threaded_irq(&pdev->dev, virq, NULL,
max8997_muic_irq_handler,
IRQF_NO_SUSPEND,
muic_irq->name, info);
if (ret) {
dev_err(&pdev->dev,
"failed: irq request (IRQ: %d, error :%d)\n",
muic_irq->irq, ret);
return ret;
}
}
/* External connector */
info->edev = devm_extcon_dev_allocate(&pdev->dev, max8997_extcon_cable);
if (IS_ERR(info->edev)) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
return PTR_ERR(info->edev);
}
ret = devm_extcon_dev_register(&pdev->dev, info->edev);
if (ret) {
dev_err(&pdev->dev, "failed to register extcon device\n");
return ret;
}
if (pdata && pdata->muic_pdata) {
struct max8997_muic_platform_data *muic_pdata
= pdata->muic_pdata;
/* Initialize registers according to platform data */
for (i = 0; i < muic_pdata->num_init_data; i++) {
max8997_write_reg(info->muic,
muic_pdata->init_data[i].addr,
muic_pdata->init_data[i].data);
}
/*
		 * Default USB/UART path, either UART/USB or AUX_UART/AUX_USB,
		 * for the H/W path of COMP2/COMN1 on the CONTROL1 register.
*/
if (muic_pdata->path_uart)
info->path_uart = muic_pdata->path_uart;
else
info->path_uart = CONTROL1_SW_UART;
if (muic_pdata->path_usb)
info->path_usb = muic_pdata->path_usb;
else
info->path_usb = CONTROL1_SW_USB;
/*
		 * Default delay time used before detecting the cable state
		 * after booting.
*/
if (muic_pdata->detcable_delay_ms)
delay_jiffies =
msecs_to_jiffies(muic_pdata->detcable_delay_ms);
else
delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
} else {
info->path_uart = CONTROL1_SW_UART;
info->path_usb = CONTROL1_SW_USB;
delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
}
/* Set initial path for UART when JIG is connected to get serial logs */
ret = max8997_bulk_read(info->muic, MAX8997_MUIC_REG_STATUS1,
2, info->status);
if (ret) {
dev_err(info->dev, "failed to read MUIC register\n");
return ret;
}
cable_type = max8997_muic_get_cable_type(info,
MAX8997_CABLE_GROUP_ADC, &attached);
if (attached && cable_type == MAX8997_MUIC_ADC_FACTORY_MODE_UART_OFF)
max8997_muic_set_path(info, info->path_uart, true);
/* Set ADC debounce time */
max8997_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS);
/*
	 * Detect the accessory after the platform initialization is completed.
	 *
	 * - Use a delayed workqueue to detect the cable state and then
	 * notify the cable state to the notifiee/platform through a uevent.
	 * After the platform has finished booting, the extcon provider
	 * driver should notify the cable state to the upper layer.
*/
INIT_DELAYED_WORK(&info->wq_detcable, max8997_muic_detect_cable_wq);
queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
delay_jiffies);
return 0;
}
static struct platform_driver max8997_muic_driver = {
.driver = {
.name = DEV_NAME,
},
.probe = max8997_muic_probe,
};
module_platform_driver(max8997_muic_driver);
MODULE_DESCRIPTION("Maxim MAX8997 Extcon driver");
MODULE_AUTHOR("Donggeun Kim <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:max8997-muic");
| linux-master | drivers/extcon/extcon-max8997.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/extcon/devres.c - EXTCON device's resource management
*
* Copyright (C) 2016 Samsung Electronics
* Author: Chanwoo Choi <[email protected]>
*/
#include "extcon.h"
static int devm_extcon_dev_match(struct device *dev, void *res, void *data)
{
struct extcon_dev **r = res;
if (WARN_ON(!r || !*r))
return 0;
return *r == data;
}
static void devm_extcon_dev_release(struct device *dev, void *res)
{
extcon_dev_free(*(struct extcon_dev **)res);
}
static void devm_extcon_dev_unreg(struct device *dev, void *res)
{
extcon_dev_unregister(*(struct extcon_dev **)res);
}
struct extcon_dev_notifier_devres {
struct extcon_dev *edev;
unsigned int id;
struct notifier_block *nb;
};
static void devm_extcon_dev_notifier_unreg(struct device *dev, void *res)
{
struct extcon_dev_notifier_devres *this = res;
extcon_unregister_notifier(this->edev, this->id, this->nb);
}
static void devm_extcon_dev_notifier_all_unreg(struct device *dev, void *res)
{
struct extcon_dev_notifier_devres *this = res;
extcon_unregister_notifier_all(this->edev, this->nb);
}
/**
* devm_extcon_dev_allocate - Allocate managed extcon device
* @dev: the device owning the extcon device being created
* @supported_cable: the array of the supported external connectors
* ending with EXTCON_NONE.
*
 * This function automatically manages the memory of the extcon device using
 * device resource management and simplifies freeing that memory.
 *
 * Returns a pointer to the allocated extcon_dev on success,
 * or ERR_PTR(err) on failure.
*/
struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
const unsigned int *supported_cable)
{
struct extcon_dev **ptr, *edev;
ptr = devres_alloc(devm_extcon_dev_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
edev = extcon_dev_allocate(supported_cable);
if (IS_ERR(edev)) {
devres_free(ptr);
return edev;
}
edev->dev.parent = dev;
*ptr = edev;
devres_add(dev, ptr);
return edev;
}
EXPORT_SYMBOL_GPL(devm_extcon_dev_allocate);
/**
 * devm_extcon_dev_free() - Resource-managed extcon_dev_free()
* @dev: the device owning the extcon device being created
* @edev: the extcon device to be freed
*
* Free the memory that is allocated with devm_extcon_dev_allocate()
* function.
*/
void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev)
{
WARN_ON(devres_release(dev, devm_extcon_dev_release,
devm_extcon_dev_match, edev));
}
EXPORT_SYMBOL_GPL(devm_extcon_dev_free);
/**
* devm_extcon_dev_register() - Resource-managed extcon_dev_register()
* @dev: the device owning the extcon device being created
* @edev: the extcon device to be registered
*
 * If the extcon device is registered with this function, it is automatically
 * unregistered on driver detach. Internally this function calls
 * extcon_dev_register(); to get more information, refer to that function.
*
* If extcon device is registered with this function and the device needs to be
* unregistered separately, devm_extcon_dev_unregister() should be used.
*
 * Returns 0 on success or a negative error number on failure.
*/
int devm_extcon_dev_register(struct device *dev, struct extcon_dev *edev)
{
struct extcon_dev **ptr;
int ret;
ptr = devres_alloc(devm_extcon_dev_unreg, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return -ENOMEM;
ret = extcon_dev_register(edev);
if (ret) {
devres_free(ptr);
return ret;
}
*ptr = edev;
devres_add(dev, ptr);
return 0;
}
EXPORT_SYMBOL_GPL(devm_extcon_dev_register);
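/*
 * A minimal usage sketch (illustration only, not part of this file): a
 * hypothetical client driver pairs devm_extcon_dev_allocate() with
 * devm_extcon_dev_register() in its probe path, so both the memory and the
 * registration are released automatically on driver detach. The cable table
 * and function names below are assumptions made up for the example.
 *
 *	static const unsigned int foo_extcon_cable[] = {
 *		EXTCON_USB,
 *		EXTCON_NONE,
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct extcon_dev *edev;
 *		int ret;
 *
 *		edev = devm_extcon_dev_allocate(&pdev->dev, foo_extcon_cable);
 *		if (IS_ERR(edev))
 *			return PTR_ERR(edev);
 *
 *		ret = devm_extcon_dev_register(&pdev->dev, edev);
 *		if (ret)
 *			return ret;
 *
 *		return 0;
 *	}
 */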
/**
* devm_extcon_dev_unregister() - Resource-managed extcon_dev_unregister()
* @dev: the device owning the extcon device being created
 * @edev: the extcon device to be unregistered
*
* Unregister extcon device that is registered with devm_extcon_dev_register()
* function.
*/
void devm_extcon_dev_unregister(struct device *dev, struct extcon_dev *edev)
{
WARN_ON(devres_release(dev, devm_extcon_dev_unreg,
devm_extcon_dev_match, edev));
}
EXPORT_SYMBOL_GPL(devm_extcon_dev_unregister);
/**
* devm_extcon_register_notifier() - Resource-managed extcon_register_notifier()
* @dev: the device owning the extcon device being created
* @edev: the extcon device
* @id: the unique id among the extcon enumeration
* @nb: a notifier block to be registered
*
 * This function automatically manages the notifier of the extcon device using
 * device resource management and simplifies unregistering the notifier of the
 * extcon device.
 *
 * Note that the second parameter given to the callback of nb (val) is
 * "old_state", not the current state. The current state can be retrieved
 * by looking at the third parameter (edev pointer)'s state value.
 *
 * Returns 0 on success or a negative error number on failure.
*/
int devm_extcon_register_notifier(struct device *dev, struct extcon_dev *edev,
unsigned int id, struct notifier_block *nb)
{
struct extcon_dev_notifier_devres *ptr;
int ret;
ptr = devres_alloc(devm_extcon_dev_notifier_unreg, sizeof(*ptr),
GFP_KERNEL);
if (!ptr)
return -ENOMEM;
ret = extcon_register_notifier(edev, id, nb);
if (ret) {
devres_free(ptr);
return ret;
}
ptr->edev = edev;
ptr->id = id;
ptr->nb = nb;
devres_add(dev, ptr);
return 0;
}
EXPORT_SYMBOL(devm_extcon_register_notifier);
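/*
 * A minimal consumer-side sketch (illustration only; the names and the use
 * of extcon_get_edev_by_phandle() are assumptions): register a managed
 * notifier for one cable id from probe. Because the registration is
 * device-managed, no matching unregister call is needed in the remove path.
 * As noted above, the "event" argument carries the old state; the current
 * state should be read from the extcon device itself.
 *
 *	static int foo_usb_notifier(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct foo_priv *foo = container_of(nb, struct foo_priv, nb);
 *
 *		foo->usb_attached = extcon_get_state(foo->edev, EXTCON_USB);
 *		return NOTIFY_OK;
 *	}
 *
 *	// in foo_probe():
 *	//	foo->edev = extcon_get_edev_by_phandle(dev, 0);
 *	//	if (IS_ERR(foo->edev))
 *	//		return PTR_ERR(foo->edev);
 *	//	foo->nb.notifier_call = foo_usb_notifier;
 *	//	ret = devm_extcon_register_notifier(dev, foo->edev,
 *	//					    EXTCON_USB, &foo->nb);
 */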
/**
* devm_extcon_unregister_notifier()
* - Resource-managed extcon_unregister_notifier()
* @dev: the device owning the extcon device being created
* @edev: the extcon device
* @id: the unique id among the extcon enumeration
 * @nb: a notifier block to be unregistered
*/
void devm_extcon_unregister_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb)
{
WARN_ON(devres_release(dev, devm_extcon_dev_notifier_unreg,
devm_extcon_dev_match, edev));
}
EXPORT_SYMBOL(devm_extcon_unregister_notifier);
/**
* devm_extcon_register_notifier_all()
* - Resource-managed extcon_register_notifier_all()
* @dev: the device owning the extcon device being created
* @edev: the extcon device
* @nb: a notifier block to be registered
*
 * This function automatically manages the notifier of the extcon device using
 * device resource management and simplifies unregistering the notifier of the
 * extcon device. To get more information, refer to extcon_register_notifier_all().
 *
 * Returns 0 on success or a negative error number on failure.
*/
int devm_extcon_register_notifier_all(struct device *dev, struct extcon_dev *edev,
struct notifier_block *nb)
{
struct extcon_dev_notifier_devres *ptr;
int ret;
ptr = devres_alloc(devm_extcon_dev_notifier_all_unreg, sizeof(*ptr),
GFP_KERNEL);
if (!ptr)
return -ENOMEM;
ret = extcon_register_notifier_all(edev, nb);
if (ret) {
devres_free(ptr);
return ret;
}
ptr->edev = edev;
ptr->nb = nb;
devres_add(dev, ptr);
return 0;
}
EXPORT_SYMBOL(devm_extcon_register_notifier_all);
/**
* devm_extcon_unregister_notifier_all()
* - Resource-managed extcon_unregister_notifier_all()
* @dev: the device owning the extcon device being created
* @edev: the extcon device
 * @nb: a notifier block to be unregistered
*/
void devm_extcon_unregister_notifier_all(struct device *dev,
struct extcon_dev *edev,
struct notifier_block *nb)
{
WARN_ON(devres_release(dev, devm_extcon_dev_notifier_all_unreg,
devm_extcon_dev_match, edev));
}
EXPORT_SYMBOL(devm_extcon_unregister_notifier_all);
| linux-master | drivers/extcon/devres.c |
// SPDX-License-Identifier: GPL-2.0+
//
// extcon-ptn5150.c - PTN5150 CC logic extcon driver to support USB detection
//
// Based on extcon-sm5502.c driver
// Copyright (c) 2018-2019 by Vijai Kumar K
// Author: Vijai Kumar K <[email protected]>
// Copyright (c) 2020 Krzysztof Kozlowski <[email protected]>
#include <linux/bitfield.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/extcon-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/usb/role.h>
/* PTN5150 registers */
#define PTN5150_REG_DEVICE_ID 0x01
#define PTN5150_REG_CONTROL 0x02
#define PTN5150_REG_INT_STATUS 0x03
#define PTN5150_REG_CC_STATUS 0x04
#define PTN5150_REG_CON_DET 0x09
#define PTN5150_REG_VCONN_STATUS 0x0a
#define PTN5150_REG_RESET 0x0b
#define PTN5150_REG_INT_MASK 0x18
#define PTN5150_REG_INT_REG_STATUS 0x19
#define PTN5150_REG_END PTN5150_REG_INT_REG_STATUS
#define PTN5150_DFP_ATTACHED 0x1
#define PTN5150_UFP_ATTACHED 0x2
/* Define PTN5150 MASK/SHIFT constant */
#define PTN5150_REG_DEVICE_ID_VERSION GENMASK(7, 3)
#define PTN5150_REG_DEVICE_ID_VENDOR GENMASK(2, 0)
#define PTN5150_REG_CC_PORT_ATTACHMENT GENMASK(4, 2)
#define PTN5150_REG_CC_VBUS_DETECTION BIT(7)
#define PTN5150_REG_INT_CABLE_ATTACH_MASK BIT(0)
#define PTN5150_REG_INT_CABLE_DETACH_MASK BIT(1)
struct ptn5150_info {
struct device *dev;
struct extcon_dev *edev;
struct i2c_client *i2c;
struct regmap *regmap;
struct gpio_desc *int_gpiod;
struct gpio_desc *vbus_gpiod;
int irq;
struct work_struct irq_work;
struct mutex mutex;
struct usb_role_switch *role_sw;
};
/* List of detectable cables */
static const unsigned int ptn5150_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
EXTCON_NONE,
};
static const struct regmap_config ptn5150_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = PTN5150_REG_END,
};
static void ptn5150_check_state(struct ptn5150_info *info)
{
unsigned int port_status, reg_data, vbus;
enum usb_role usb_role = USB_ROLE_NONE;
int ret;
	ret = regmap_read(info->regmap, PTN5150_REG_CC_STATUS, &reg_data);
if (ret) {
dev_err(info->dev, "failed to read CC STATUS %d\n", ret);
return;
}
port_status = FIELD_GET(PTN5150_REG_CC_PORT_ATTACHMENT, reg_data);
switch (port_status) {
case PTN5150_DFP_ATTACHED:
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, false);
gpiod_set_value_cansleep(info->vbus_gpiod, 0);
extcon_set_state_sync(info->edev, EXTCON_USB, true);
usb_role = USB_ROLE_DEVICE;
break;
case PTN5150_UFP_ATTACHED:
extcon_set_state_sync(info->edev, EXTCON_USB, false);
vbus = FIELD_GET(PTN5150_REG_CC_VBUS_DETECTION, reg_data);
if (vbus)
gpiod_set_value_cansleep(info->vbus_gpiod, 0);
else
gpiod_set_value_cansleep(info->vbus_gpiod, 1);
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, true);
usb_role = USB_ROLE_HOST;
break;
default:
break;
}
if (usb_role) {
ret = usb_role_switch_set_role(info->role_sw, usb_role);
if (ret)
dev_err(info->dev, "failed to set %s role: %d\n",
usb_role_string(usb_role), ret);
}
}
static void ptn5150_irq_work(struct work_struct *work)
{
struct ptn5150_info *info = container_of(work,
struct ptn5150_info, irq_work);
int ret = 0;
unsigned int int_status;
if (!info->edev)
return;
mutex_lock(&info->mutex);
/* Clear interrupt. Read would clear the register */
ret = regmap_read(info->regmap, PTN5150_REG_INT_STATUS, &int_status);
if (ret) {
dev_err(info->dev, "failed to read INT STATUS %d\n", ret);
mutex_unlock(&info->mutex);
return;
}
if (int_status) {
unsigned int cable_attach;
cable_attach = int_status & PTN5150_REG_INT_CABLE_ATTACH_MASK;
if (cable_attach) {
ptn5150_check_state(info);
} else {
extcon_set_state_sync(info->edev,
EXTCON_USB_HOST, false);
extcon_set_state_sync(info->edev,
EXTCON_USB, false);
gpiod_set_value_cansleep(info->vbus_gpiod, 0);
ret = usb_role_switch_set_role(info->role_sw,
USB_ROLE_NONE);
if (ret)
dev_err(info->dev,
"failed to set none role: %d\n",
ret);
}
}
/* Clear interrupt. Read would clear the register */
ret = regmap_read(info->regmap, PTN5150_REG_INT_REG_STATUS,
&int_status);
if (ret) {
dev_err(info->dev,
"failed to read INT REG STATUS %d\n", ret);
mutex_unlock(&info->mutex);
return;
}
mutex_unlock(&info->mutex);
}
static irqreturn_t ptn5150_irq_handler(int irq, void *data)
{
struct ptn5150_info *info = data;
schedule_work(&info->irq_work);
return IRQ_HANDLED;
}
static int ptn5150_init_dev_type(struct ptn5150_info *info)
{
unsigned int reg_data, vendor_id, version_id;
int ret;
	ret = regmap_read(info->regmap, PTN5150_REG_DEVICE_ID, &reg_data);
if (ret) {
dev_err(info->dev, "failed to read DEVICE_ID %d\n", ret);
return -EINVAL;
}
vendor_id = FIELD_GET(PTN5150_REG_DEVICE_ID_VENDOR, reg_data);
version_id = FIELD_GET(PTN5150_REG_DEVICE_ID_VERSION, reg_data);
dev_dbg(info->dev, "Device type: version: 0x%x, vendor: 0x%x\n",
version_id, vendor_id);
/* Clear any existing interrupts */
	ret = regmap_read(info->regmap, PTN5150_REG_INT_STATUS, &reg_data);
if (ret) {
dev_err(info->dev,
"failed to read PTN5150_REG_INT_STATUS %d\n",
ret);
return -EINVAL;
}
	ret = regmap_read(info->regmap, PTN5150_REG_INT_REG_STATUS, &reg_data);
if (ret) {
dev_err(info->dev,
"failed to read PTN5150_REG_INT_REG_STATUS %d\n", ret);
return -EINVAL;
}
return 0;
}
static void ptn5150_work_sync_and_put(void *data)
{
struct ptn5150_info *info = data;
cancel_work_sync(&info->irq_work);
usb_role_switch_put(info->role_sw);
}
static int ptn5150_i2c_probe(struct i2c_client *i2c)
{
struct device *dev = &i2c->dev;
struct device_node *np = i2c->dev.of_node;
struct ptn5150_info *info;
int ret;
if (!np)
return -EINVAL;
info = devm_kzalloc(&i2c->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
i2c_set_clientdata(i2c, info);
info->dev = &i2c->dev;
info->i2c = i2c;
info->vbus_gpiod = devm_gpiod_get(&i2c->dev, "vbus", GPIOD_OUT_LOW);
if (IS_ERR(info->vbus_gpiod)) {
ret = PTR_ERR(info->vbus_gpiod);
if (ret == -ENOENT) {
dev_info(dev, "No VBUS GPIO, ignoring VBUS control\n");
info->vbus_gpiod = NULL;
} else {
return dev_err_probe(dev, ret, "failed to get VBUS GPIO\n");
}
}
mutex_init(&info->mutex);
INIT_WORK(&info->irq_work, ptn5150_irq_work);
info->regmap = devm_regmap_init_i2c(i2c, &ptn5150_regmap_config);
if (IS_ERR(info->regmap)) {
return dev_err_probe(info->dev, PTR_ERR(info->regmap),
"failed to allocate register map\n");
}
if (i2c->irq > 0) {
info->irq = i2c->irq;
} else {
info->int_gpiod = devm_gpiod_get(&i2c->dev, "int", GPIOD_IN);
if (IS_ERR(info->int_gpiod)) {
return dev_err_probe(dev, PTR_ERR(info->int_gpiod),
"failed to get INT GPIO\n");
}
info->irq = gpiod_to_irq(info->int_gpiod);
if (info->irq < 0) {
dev_err(dev, "failed to get INTB IRQ\n");
return info->irq;
}
}
ret = devm_request_threaded_irq(dev, info->irq, NULL,
ptn5150_irq_handler,
IRQF_TRIGGER_FALLING |
IRQF_ONESHOT,
i2c->name, info);
if (ret < 0) {
dev_err(dev, "failed to request handler for INTB IRQ\n");
return ret;
}
/* Allocate extcon device */
info->edev = devm_extcon_dev_allocate(info->dev, ptn5150_extcon_cable);
if (IS_ERR(info->edev)) {
dev_err(info->dev, "failed to allocate memory for extcon\n");
return -ENOMEM;
}
/* Register extcon device */
ret = devm_extcon_dev_register(info->dev, info->edev);
if (ret) {
dev_err(info->dev, "failed to register extcon device\n");
return ret;
}
extcon_set_property_capability(info->edev, EXTCON_USB,
EXTCON_PROP_USB_VBUS);
extcon_set_property_capability(info->edev, EXTCON_USB_HOST,
EXTCON_PROP_USB_VBUS);
extcon_set_property_capability(info->edev, EXTCON_USB_HOST,
EXTCON_PROP_USB_TYPEC_POLARITY);
/* Initialize PTN5150 device and print vendor id and version id */
ret = ptn5150_init_dev_type(info);
if (ret)
return -EINVAL;
info->role_sw = usb_role_switch_get(info->dev);
if (IS_ERR(info->role_sw))
return dev_err_probe(info->dev, PTR_ERR(info->role_sw),
"failed to get role switch\n");
ret = devm_add_action_or_reset(dev, ptn5150_work_sync_and_put, info);
if (ret)
return ret;
/*
* Update current extcon state if for example OTG connection was there
* before the probe
*/
mutex_lock(&info->mutex);
ptn5150_check_state(info);
mutex_unlock(&info->mutex);
return 0;
}
static const struct of_device_id ptn5150_dt_match[] = {
{ .compatible = "nxp,ptn5150" },
{ },
};
MODULE_DEVICE_TABLE(of, ptn5150_dt_match);
static const struct i2c_device_id ptn5150_i2c_id[] = {
{ "ptn5150", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ptn5150_i2c_id);
static struct i2c_driver ptn5150_i2c_driver = {
.driver = {
.name = "ptn5150",
.of_match_table = ptn5150_dt_match,
},
.probe = ptn5150_i2c_probe,
.id_table = ptn5150_i2c_id,
};
module_i2c_driver(ptn5150_i2c_driver);
MODULE_DESCRIPTION("NXP PTN5150 CC logic Extcon driver");
MODULE_AUTHOR("Vijai Kumar K <[email protected]>");
MODULE_AUTHOR("Krzysztof Kozlowski <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/extcon/extcon-ptn5150.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/extcon/extcon-tusb320.c - TUSB320 extcon driver
*
* Copyright (C) 2020 National Instruments Corporation
* Author: Michael Auchter <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/extcon-provider.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/usb/typec.h>
#include <linux/usb/typec_altmode.h>
#include <linux/usb/role.h>
#define TUSB320_REG8 0x8
#define TUSB320_REG8_CURRENT_MODE_ADVERTISE GENMASK(7, 6)
#define TUSB320_REG8_CURRENT_MODE_ADVERTISE_USB 0x0
#define TUSB320_REG8_CURRENT_MODE_ADVERTISE_15A 0x1
#define TUSB320_REG8_CURRENT_MODE_ADVERTISE_30A 0x2
#define TUSB320_REG8_CURRENT_MODE_DETECT GENMASK(5, 4)
#define TUSB320_REG8_CURRENT_MODE_DETECT_DEF 0x0
#define TUSB320_REG8_CURRENT_MODE_DETECT_MED 0x1
#define TUSB320_REG8_CURRENT_MODE_DETECT_ACC 0x2
#define TUSB320_REG8_CURRENT_MODE_DETECT_HI 0x3
#define TUSB320_REG8_ACCESSORY_CONNECTED GENMASK(3, 1)
#define TUSB320_REG8_ACCESSORY_CONNECTED_NONE 0x0
#define TUSB320_REG8_ACCESSORY_CONNECTED_AUDIO 0x4
#define TUSB320_REG8_ACCESSORY_CONNECTED_ACHRG 0x5
#define TUSB320_REG8_ACCESSORY_CONNECTED_DBGDFP 0x6
#define TUSB320_REG8_ACCESSORY_CONNECTED_DBGUFP 0x7
#define TUSB320_REG8_ACTIVE_CABLE_DETECTION BIT(0)
#define TUSB320_REG9 0x9
#define TUSB320_REG9_ATTACHED_STATE GENMASK(7, 6)
#define TUSB320_REG9_CABLE_DIRECTION BIT(5)
#define TUSB320_REG9_INTERRUPT_STATUS BIT(4)
#define TUSB320_REGA 0xa
#define TUSB320L_REGA_DISABLE_TERM BIT(0)
#define TUSB320_REGA_I2C_SOFT_RESET BIT(3)
#define TUSB320_REGA_MODE_SELECT_SHIFT 4
#define TUSB320_REGA_MODE_SELECT_MASK 0x3
#define TUSB320L_REGA0_REVISION 0xa0
enum tusb320_attached_state {
TUSB320_ATTACHED_STATE_NONE,
TUSB320_ATTACHED_STATE_DFP,
TUSB320_ATTACHED_STATE_UFP,
TUSB320_ATTACHED_STATE_ACC,
};
enum tusb320_mode {
TUSB320_MODE_PORT,
TUSB320_MODE_UFP,
TUSB320_MODE_DFP,
TUSB320_MODE_DRP,
};
struct tusb320_priv;
struct tusb320_ops {
int (*set_mode)(struct tusb320_priv *priv, enum tusb320_mode mode);
int (*get_revision)(struct tusb320_priv *priv, unsigned int *revision);
};
struct tusb320_priv {
struct device *dev;
struct regmap *regmap;
struct extcon_dev *edev;
struct tusb320_ops *ops;
enum tusb320_attached_state state;
struct typec_port *port;
struct typec_capability cap;
enum typec_port_type port_type;
enum typec_pwr_opmode pwr_opmode;
struct fwnode_handle *connector_fwnode;
struct usb_role_switch *role_sw;
};
static const char * const tusb_attached_states[] = {
[TUSB320_ATTACHED_STATE_NONE] = "not attached",
[TUSB320_ATTACHED_STATE_DFP] = "downstream facing port",
[TUSB320_ATTACHED_STATE_UFP] = "upstream facing port",
[TUSB320_ATTACHED_STATE_ACC] = "accessory",
};
static const unsigned int tusb320_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
EXTCON_NONE,
};
static int tusb320_check_signature(struct tusb320_priv *priv)
{
static const char sig[] = { '\0', 'T', 'U', 'S', 'B', '3', '2', '0' };
unsigned val;
int i, ret;
for (i = 0; i < sizeof(sig); i++) {
ret = regmap_read(priv->regmap, sizeof(sig) - 1 - i, &val);
if (ret < 0)
return ret;
if (val != sig[i]) {
dev_err(priv->dev, "signature mismatch!\n");
return -ENODEV;
}
}
return 0;
}
static int tusb320_set_mode(struct tusb320_priv *priv, enum tusb320_mode mode)
{
int ret;
/* Mode cannot be changed while cable is attached */
if (priv->state != TUSB320_ATTACHED_STATE_NONE)
return -EBUSY;
/* Write mode */
ret = regmap_write_bits(priv->regmap, TUSB320_REGA,
TUSB320_REGA_MODE_SELECT_MASK << TUSB320_REGA_MODE_SELECT_SHIFT,
mode << TUSB320_REGA_MODE_SELECT_SHIFT);
if (ret) {
dev_err(priv->dev, "failed to write mode: %d\n", ret);
return ret;
}
return 0;
}
static int tusb320l_set_mode(struct tusb320_priv *priv, enum tusb320_mode mode)
{
int ret;
/* Disable CC state machine */
ret = regmap_write_bits(priv->regmap, TUSB320_REGA,
TUSB320L_REGA_DISABLE_TERM, 1);
if (ret) {
dev_err(priv->dev,
"failed to disable CC state machine: %d\n", ret);
return ret;
}
/* Write mode */
ret = regmap_write_bits(priv->regmap, TUSB320_REGA,
TUSB320_REGA_MODE_SELECT_MASK << TUSB320_REGA_MODE_SELECT_SHIFT,
mode << TUSB320_REGA_MODE_SELECT_SHIFT);
if (ret) {
dev_err(priv->dev, "failed to write mode: %d\n", ret);
goto err;
}
msleep(5);
err:
/* Re-enable CC state machine */
ret = regmap_write_bits(priv->regmap, TUSB320_REGA,
TUSB320L_REGA_DISABLE_TERM, 0);
if (ret)
dev_err(priv->dev,
"failed to re-enable CC state machine: %d\n", ret);
return ret;
}
static int tusb320_reset(struct tusb320_priv *priv)
{
int ret;
/* Set mode to default (follow PORT pin) */
ret = priv->ops->set_mode(priv, TUSB320_MODE_PORT);
if (ret && ret != -EBUSY) {
dev_err(priv->dev,
"failed to set mode to PORT: %d\n", ret);
return ret;
}
/* Perform soft reset */
ret = regmap_write_bits(priv->regmap, TUSB320_REGA,
TUSB320_REGA_I2C_SOFT_RESET, 1);
if (ret) {
dev_err(priv->dev,
"failed to write soft reset bit: %d\n", ret);
return ret;
}
/* Wait for chip to go through reset */
msleep(95);
return 0;
}
static int tusb320l_get_revision(struct tusb320_priv *priv, unsigned int *revision)
{
return regmap_read(priv->regmap, TUSB320L_REGA0_REVISION, revision);
}
static struct tusb320_ops tusb320_ops = {
.set_mode = tusb320_set_mode,
};
static struct tusb320_ops tusb320l_ops = {
.set_mode = tusb320l_set_mode,
.get_revision = tusb320l_get_revision,
};
static int tusb320_set_adv_pwr_mode(struct tusb320_priv *priv)
{
u8 mode;
if (priv->pwr_opmode == TYPEC_PWR_MODE_USB)
mode = TUSB320_REG8_CURRENT_MODE_ADVERTISE_USB;
else if (priv->pwr_opmode == TYPEC_PWR_MODE_1_5A)
mode = TUSB320_REG8_CURRENT_MODE_ADVERTISE_15A;
else if (priv->pwr_opmode == TYPEC_PWR_MODE_3_0A)
mode = TUSB320_REG8_CURRENT_MODE_ADVERTISE_30A;
else /* No other mode is supported. */
return -EINVAL;
return regmap_write_bits(priv->regmap, TUSB320_REG8,
TUSB320_REG8_CURRENT_MODE_ADVERTISE,
FIELD_PREP(TUSB320_REG8_CURRENT_MODE_ADVERTISE,
mode));
}
static int tusb320_port_type_set(struct typec_port *port,
enum typec_port_type type)
{
struct tusb320_priv *priv = typec_get_drvdata(port);
if (type == TYPEC_PORT_SRC)
return priv->ops->set_mode(priv, TUSB320_MODE_DFP);
else if (type == TYPEC_PORT_SNK)
return priv->ops->set_mode(priv, TUSB320_MODE_UFP);
else if (type == TYPEC_PORT_DRP)
return priv->ops->set_mode(priv, TUSB320_MODE_DRP);
else
return priv->ops->set_mode(priv, TUSB320_MODE_PORT);
}
static const struct typec_operations tusb320_typec_ops = {
.port_type_set = tusb320_port_type_set,
};
static void tusb320_extcon_irq_handler(struct tusb320_priv *priv, u8 reg)
{
int state, polarity;
state = FIELD_GET(TUSB320_REG9_ATTACHED_STATE, reg);
polarity = !!(reg & TUSB320_REG9_CABLE_DIRECTION);
dev_dbg(priv->dev, "attached state: %s, polarity: %d\n",
tusb_attached_states[state], polarity);
extcon_set_state(priv->edev, EXTCON_USB,
state == TUSB320_ATTACHED_STATE_UFP);
extcon_set_state(priv->edev, EXTCON_USB_HOST,
state == TUSB320_ATTACHED_STATE_DFP);
extcon_set_property(priv->edev, EXTCON_USB,
EXTCON_PROP_USB_TYPEC_POLARITY,
(union extcon_property_value)polarity);
extcon_set_property(priv->edev, EXTCON_USB_HOST,
EXTCON_PROP_USB_TYPEC_POLARITY,
(union extcon_property_value)polarity);
extcon_sync(priv->edev, EXTCON_USB);
extcon_sync(priv->edev, EXTCON_USB_HOST);
priv->state = state;
}
static void tusb320_typec_irq_handler(struct tusb320_priv *priv, u8 reg9)
{
struct typec_port *port = priv->port;
struct device *dev = priv->dev;
int typec_mode;
enum usb_role usb_role;
enum typec_role pwr_role;
enum typec_data_role data_role;
u8 state, mode, accessory;
int ret, reg8;
bool ori;
ret = regmap_read(priv->regmap, TUSB320_REG8, ®8);
if (ret) {
dev_err(dev, "error during reg8 i2c read, ret=%d!\n", ret);
return;
}
ori = reg9 & TUSB320_REG9_CABLE_DIRECTION;
typec_set_orientation(port, ori ? TYPEC_ORIENTATION_REVERSE :
TYPEC_ORIENTATION_NORMAL);
state = FIELD_GET(TUSB320_REG9_ATTACHED_STATE, reg9);
accessory = FIELD_GET(TUSB320_REG8_ACCESSORY_CONNECTED, reg8);
switch (state) {
case TUSB320_ATTACHED_STATE_DFP:
typec_mode = TYPEC_MODE_USB2;
usb_role = USB_ROLE_HOST;
pwr_role = TYPEC_SOURCE;
data_role = TYPEC_HOST;
break;
case TUSB320_ATTACHED_STATE_UFP:
typec_mode = TYPEC_MODE_USB2;
usb_role = USB_ROLE_DEVICE;
pwr_role = TYPEC_SINK;
data_role = TYPEC_DEVICE;
break;
case TUSB320_ATTACHED_STATE_ACC:
/*
* Accessory detected. For debug accessories, just make some
* qualified guesses as to the role for lack of a better option.
*/
if (accessory == TUSB320_REG8_ACCESSORY_CONNECTED_AUDIO ||
accessory == TUSB320_REG8_ACCESSORY_CONNECTED_ACHRG) {
typec_mode = TYPEC_MODE_AUDIO;
usb_role = USB_ROLE_NONE;
pwr_role = TYPEC_SINK;
data_role = TYPEC_DEVICE;
break;
} else if (accessory ==
TUSB320_REG8_ACCESSORY_CONNECTED_DBGDFP) {
typec_mode = TYPEC_MODE_DEBUG;
pwr_role = TYPEC_SOURCE;
usb_role = USB_ROLE_HOST;
data_role = TYPEC_HOST;
break;
} else if (accessory ==
TUSB320_REG8_ACCESSORY_CONNECTED_DBGUFP) {
typec_mode = TYPEC_MODE_DEBUG;
pwr_role = TYPEC_SINK;
usb_role = USB_ROLE_DEVICE;
data_role = TYPEC_DEVICE;
break;
}
dev_warn(priv->dev, "unexpected ACCESSORY_CONNECTED state %d\n",
accessory);
fallthrough;
default:
typec_mode = TYPEC_MODE_USB2;
usb_role = USB_ROLE_NONE;
pwr_role = TYPEC_SINK;
data_role = TYPEC_DEVICE;
break;
}
typec_set_vconn_role(port, pwr_role);
typec_set_pwr_role(port, pwr_role);
typec_set_data_role(port, data_role);
typec_set_mode(port, typec_mode);
usb_role_switch_set_role(priv->role_sw, usb_role);
mode = FIELD_GET(TUSB320_REG8_CURRENT_MODE_DETECT, reg8);
if (mode == TUSB320_REG8_CURRENT_MODE_DETECT_DEF)
typec_set_pwr_opmode(port, TYPEC_PWR_MODE_USB);
else if (mode == TUSB320_REG8_CURRENT_MODE_DETECT_MED)
typec_set_pwr_opmode(port, TYPEC_PWR_MODE_1_5A);
else if (mode == TUSB320_REG8_CURRENT_MODE_DETECT_HI)
typec_set_pwr_opmode(port, TYPEC_PWR_MODE_3_0A);
else /* Charge through accessory */
typec_set_pwr_opmode(port, TYPEC_PWR_MODE_USB);
}
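/*
 * Common state-update path used both by the interrupt handler and by probe:
 * read REG9 once, bail out early if no interrupt is pending (unless
 * force_update is set), run the extcon handler and, if a Type-C port was
 * registered, the Type-C handler, then write REG9 back to clear the latched
 * interrupt status bit.
 */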
static irqreturn_t tusb320_state_update_handler(struct tusb320_priv *priv,
bool force_update)
{
unsigned int reg;
if (regmap_read(priv->regmap, TUSB320_REG9, ®)) {
dev_err(priv->dev, "error during i2c read!\n");
return IRQ_NONE;
}
if (!force_update && !(reg & TUSB320_REG9_INTERRUPT_STATUS))
return IRQ_NONE;
tusb320_extcon_irq_handler(priv, reg);
/*
* Type-C support is optional. Only call the Type-C handler if a
	 * port has been registered previously.
*/
if (priv->port)
tusb320_typec_irq_handler(priv, reg);
regmap_write(priv->regmap, TUSB320_REG9, reg);
return IRQ_HANDLED;
}
static irqreturn_t tusb320_irq_handler(int irq, void *dev_id)
{
struct tusb320_priv *priv = dev_id;
return tusb320_state_update_handler(priv, false);
}
static const struct regmap_config tusb320_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
static int tusb320_extcon_probe(struct tusb320_priv *priv)
{
int ret;
priv->edev = devm_extcon_dev_allocate(priv->dev, tusb320_extcon_cable);
if (IS_ERR(priv->edev)) {
dev_err(priv->dev, "failed to allocate extcon device\n");
return PTR_ERR(priv->edev);
}
ret = devm_extcon_dev_register(priv->dev, priv->edev);
if (ret < 0) {
dev_err(priv->dev, "failed to register extcon device\n");
return ret;
}
extcon_set_property_capability(priv->edev, EXTCON_USB,
EXTCON_PROP_USB_TYPEC_POLARITY);
extcon_set_property_capability(priv->edev, EXTCON_USB_HOST,
EXTCON_PROP_USB_TYPEC_POLARITY);
return 0;
}
static int tusb320_typec_probe(struct i2c_client *client,
struct tusb320_priv *priv)
{
struct fwnode_handle *connector;
const char *cap_str;
int ret;
/* The Type-C connector is optional, for backward compatibility. */
connector = device_get_named_child_node(&client->dev, "connector");
if (!connector)
return 0;
/* Type-C connector found. */
ret = typec_get_fw_cap(&priv->cap, connector);
if (ret)
goto err_put;
priv->port_type = priv->cap.type;
/* This goes into register 0x8 field CURRENT_MODE_ADVERTISE */
ret = fwnode_property_read_string(connector, "typec-power-opmode", &cap_str);
if (ret)
goto err_put;
ret = typec_find_pwr_opmode(cap_str);
if (ret < 0)
goto err_put;
priv->pwr_opmode = ret;
/* Initialize the hardware with the devicetree settings. */
ret = tusb320_set_adv_pwr_mode(priv);
if (ret)
goto err_put;
priv->cap.revision = USB_TYPEC_REV_1_1;
priv->cap.accessory[0] = TYPEC_ACCESSORY_AUDIO;
priv->cap.accessory[1] = TYPEC_ACCESSORY_DEBUG;
priv->cap.orientation_aware = true;
priv->cap.driver_data = priv;
priv->cap.ops = &tusb320_typec_ops;
priv->cap.fwnode = connector;
priv->port = typec_register_port(&client->dev, &priv->cap);
if (IS_ERR(priv->port)) {
ret = PTR_ERR(priv->port);
goto err_put;
}
/* Find any optional USB role switch that needs reporting to */
priv->role_sw = fwnode_usb_role_switch_get(connector);
if (IS_ERR(priv->role_sw)) {
ret = PTR_ERR(priv->role_sw);
goto err_unreg;
}
priv->connector_fwnode = connector;
return 0;
err_unreg:
typec_unregister_port(priv->port);
err_put:
fwnode_handle_put(connector);
return ret;
}
static void tusb320_typec_remove(struct tusb320_priv *priv)
{
usb_role_switch_put(priv->role_sw);
typec_unregister_port(priv->port);
fwnode_handle_put(priv->connector_fwnode);
}
static int tusb320_probe(struct i2c_client *client)
{
struct tusb320_priv *priv;
const void *match_data;
unsigned int revision;
int ret;
priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = &client->dev;
i2c_set_clientdata(client, priv);
priv->regmap = devm_regmap_init_i2c(client, &tusb320_regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
ret = tusb320_check_signature(priv);
if (ret)
return ret;
match_data = device_get_match_data(&client->dev);
if (!match_data)
return -EINVAL;
	priv->ops = (struct tusb320_ops *)match_data;
if (priv->ops->get_revision) {
ret = priv->ops->get_revision(priv, &revision);
if (ret)
dev_warn(priv->dev,
"failed to read revision register: %d\n", ret);
else
dev_info(priv->dev, "chip revision %d\n", revision);
}
ret = tusb320_extcon_probe(priv);
if (ret)
return ret;
ret = tusb320_typec_probe(client, priv);
if (ret)
return ret;
/* update initial state */
tusb320_state_update_handler(priv, true);
/* Reset chip to its default state */
ret = tusb320_reset(priv);
if (ret)
dev_warn(priv->dev, "failed to reset chip: %d\n", ret);
else
/*
* State and polarity might change after a reset, so update
* them again and make sure the interrupt status bit is cleared.
*/
tusb320_state_update_handler(priv, true);
ret = devm_request_threaded_irq(priv->dev, client->irq, NULL,
tusb320_irq_handler,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
client->name, priv);
if (ret)
tusb320_typec_remove(priv);
return ret;
}
static void tusb320_remove(struct i2c_client *client)
{
struct tusb320_priv *priv = i2c_get_clientdata(client);
tusb320_typec_remove(priv);
}
static const struct of_device_id tusb320_extcon_dt_match[] = {
{ .compatible = "ti,tusb320", .data = &tusb320_ops, },
{ .compatible = "ti,tusb320l", .data = &tusb320l_ops, },
{ }
};
MODULE_DEVICE_TABLE(of, tusb320_extcon_dt_match);
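/*
 * Illustrative devicetree snippet only; the I2C address, interrupt lines and
 * the optional "connector" child node (generic usb-c-connector binding) are
 * assumptions for the sake of example, not something this file defines:
 *
 *	tusb320@61 {
 *		compatible = "ti,tusb320";
 *		reg = <0x61>;
 *		interrupt-parent = <&gpio>;
 *		interrupts = <27 IRQ_TYPE_EDGE_FALLING>;
 *
 *		connector {
 *			compatible = "usb-c-connector";
 *			power-role = "dual";
 *			typec-power-opmode = "default";
 *		};
 *	};
 */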
static struct i2c_driver tusb320_extcon_driver = {
.probe = tusb320_probe,
.remove = tusb320_remove,
.driver = {
.name = "extcon-tusb320",
.of_match_table = tusb320_extcon_dt_match,
},
};
static int __init tusb320_init(void)
{
return i2c_add_driver(&tusb320_extcon_driver);
}
subsys_initcall(tusb320_init);
static void __exit tusb320_exit(void)
{
i2c_del_driver(&tusb320_extcon_driver);
}
module_exit(tusb320_exit);
MODULE_AUTHOR("Michael Auchter <[email protected]>");
MODULE_DESCRIPTION("TI TUSB320 extcon driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/extcon/extcon-usbc-tusb320.c |
// SPDX-License-Identifier: GPL-2.0+
//
// extcon-max77693.c - MAX77693 extcon driver to support MAX77693 MUIC
//
// Copyright (C) 2012 Samsung Electronics
// Chanwoo Choi <[email protected]>
#include <linux/devm-helpers.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/mfd/max77693.h>
#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77693-private.h>
#include <linux/extcon-provider.h>
#include <linux/regmap.h>
#include <linux/irqdomain.h>
#define DEV_NAME "max77693-muic"
#define DELAY_MS_DEFAULT 20000 /* unit: millisecond */
/*
 * Default values of the MAX77693 registers to bring up the MUIC device.
 * If the user doesn't provide initial values for the MUIC device through
 * platform data, the extcon-max77693 driver uses 'default_init_data' to
 * bring up basic operation of the MAX77693 MUIC device.
*/
static struct max77693_reg_data default_init_data[] = {
{
/* STATUS2 - [3]ChgDetRun */
.addr = MAX77693_MUIC_REG_STATUS2,
.data = MAX77693_STATUS2_CHGDETRUN_MASK,
}, {
/* INTMASK1 - Unmask [3]ADC1KM,[0]ADCM */
.addr = MAX77693_MUIC_REG_INTMASK1,
.data = INTMASK1_ADC1K_MASK
| INTMASK1_ADC_MASK,
}, {
/* INTMASK2 - Unmask [0]ChgTypM */
.addr = MAX77693_MUIC_REG_INTMASK2,
.data = INTMASK2_CHGTYP_MASK,
}, {
/* INTMASK3 - Mask all of interrupts */
.addr = MAX77693_MUIC_REG_INTMASK3,
.data = 0x0,
}, {
/* CDETCTRL2 */
.addr = MAX77693_MUIC_REG_CDETCTRL2,
.data = CDETCTRL2_VIDRMEN_MASK
| CDETCTRL2_DXOVPEN_MASK,
},
};
enum max77693_muic_adc_debounce_time {
ADC_DEBOUNCE_TIME_5MS = 0,
ADC_DEBOUNCE_TIME_10MS,
ADC_DEBOUNCE_TIME_25MS,
ADC_DEBOUNCE_TIME_38_62MS,
};
struct max77693_muic_info {
struct device *dev;
struct max77693_dev *max77693;
struct extcon_dev *edev;
int prev_cable_type;
int prev_cable_type_gnd;
int prev_chg_type;
int prev_button_type;
u8 status[2];
int irq;
struct work_struct irq_work;
struct mutex mutex;
/*
	 * Use a delayed workqueue to detect the cable state and then
	 * notify it to the notifiee/platform through a uevent. The extcon
	 * provider driver should only notify the cable state to the upper
	 * layer after the platform has finished booting.
*/
struct delayed_work wq_detcable;
/* Button of dock device */
struct input_dev *dock;
/*
* Default usb/uart path whether UART/USB or AUX_UART/AUX_USB
* h/w path of COMP2/COMN1 on CONTROL1 register.
*/
int path_usb;
int path_uart;
};
enum max77693_muic_cable_group {
MAX77693_CABLE_GROUP_ADC = 0,
MAX77693_CABLE_GROUP_ADC_GND,
MAX77693_CABLE_GROUP_CHG,
MAX77693_CABLE_GROUP_VBVOLT,
};
enum max77693_muic_charger_type {
MAX77693_CHARGER_TYPE_NONE = 0,
MAX77693_CHARGER_TYPE_USB,
MAX77693_CHARGER_TYPE_DOWNSTREAM_PORT,
MAX77693_CHARGER_TYPE_DEDICATED_CHG,
MAX77693_CHARGER_TYPE_APPLE_500MA,
MAX77693_CHARGER_TYPE_APPLE_1A_2A,
MAX77693_CHARGER_TYPE_DEAD_BATTERY = 7,
};
/**
 * struct max77693_muic_irq - MUIC interrupt descriptor
 * @irq: the index into the irq list of the MUIC device.
 * @name: the name of the irq.
 * @virq: the virtual irq provided by the irq domain
*/
struct max77693_muic_irq {
unsigned int irq;
const char *name;
unsigned int virq;
};
static struct max77693_muic_irq muic_irqs[] = {
{ MAX77693_MUIC_IRQ_INT1_ADC, "muic-ADC" },
{ MAX77693_MUIC_IRQ_INT1_ADC_LOW, "muic-ADCLOW" },
{ MAX77693_MUIC_IRQ_INT1_ADC_ERR, "muic-ADCError" },
{ MAX77693_MUIC_IRQ_INT1_ADC1K, "muic-ADC1K" },
{ MAX77693_MUIC_IRQ_INT2_CHGTYP, "muic-CHGTYP" },
{ MAX77693_MUIC_IRQ_INT2_CHGDETREUN, "muic-CHGDETREUN" },
{ MAX77693_MUIC_IRQ_INT2_DCDTMR, "muic-DCDTMR" },
{ MAX77693_MUIC_IRQ_INT2_DXOVP, "muic-DXOVP" },
{ MAX77693_MUIC_IRQ_INT2_VBVOLT, "muic-VBVOLT" },
{ MAX77693_MUIC_IRQ_INT2_VIDRM, "muic-VIDRM" },
{ MAX77693_MUIC_IRQ_INT3_EOC, "muic-EOC" },
{ MAX77693_MUIC_IRQ_INT3_CGMBC, "muic-CGMBC" },
{ MAX77693_MUIC_IRQ_INT3_OVP, "muic-OVP" },
{ MAX77693_MUIC_IRQ_INT3_MBCCHG_ERR, "muic-MBCCHG_ERR" },
{ MAX77693_MUIC_IRQ_INT3_CHG_ENABLED, "muic-CHG_ENABLED" },
{ MAX77693_MUIC_IRQ_INT3_BAT_DET, "muic-BAT_DET" },
};
/* Define supported accessory type */
enum max77693_muic_acc_type {
MAX77693_MUIC_ADC_GROUND = 0x0,
MAX77693_MUIC_ADC_SEND_END_BUTTON,
MAX77693_MUIC_ADC_REMOTE_S1_BUTTON,
MAX77693_MUIC_ADC_REMOTE_S2_BUTTON,
MAX77693_MUIC_ADC_REMOTE_S3_BUTTON,
MAX77693_MUIC_ADC_REMOTE_S4_BUTTON,
MAX77693_MUIC_ADC_REMOTE_S5_BUTTON,
MAX77693_MUIC_ADC_REMOTE_S6_BUTTON,
MAX77693_MUIC_ADC_REMOTE_S7_BUTTON,
MAX77693_MUIC_ADC_REMOTE_S8_BUTTON,
MAX77693_MUIC_ADC_REMOTE_S9_BUTTON,
MAX77693_MUIC_ADC_REMOTE_S10_BUTTON,
MAX77693_MUIC_ADC_REMOTE_S11_BUTTON,
MAX77693_MUIC_ADC_REMOTE_S12_BUTTON,
MAX77693_MUIC_ADC_RESERVED_ACC_1,
MAX77693_MUIC_ADC_RESERVED_ACC_2,
MAX77693_MUIC_ADC_RESERVED_ACC_3,
MAX77693_MUIC_ADC_RESERVED_ACC_4,
MAX77693_MUIC_ADC_RESERVED_ACC_5,
MAX77693_MUIC_ADC_CEA936_AUDIO,
MAX77693_MUIC_ADC_PHONE_POWERED_DEV,
MAX77693_MUIC_ADC_TTY_CONVERTER,
MAX77693_MUIC_ADC_UART_CABLE,
MAX77693_MUIC_ADC_CEA936A_TYPE1_CHG,
MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF,
MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON,
MAX77693_MUIC_ADC_AV_CABLE_NOLOAD,
MAX77693_MUIC_ADC_CEA936A_TYPE2_CHG,
MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF,
MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON,
MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE,
MAX77693_MUIC_ADC_OPEN,
/*
	 * The accessories below share the same ADC value, so the ADCLow and
	 * ADC1K bits are used to distinguish the specific accessory.
*/
						/* ADC|VBVolt|ADCLow|ADC1K| */
MAX77693_MUIC_GND_USB_HOST = 0x100, /* 0x0| 0| 0| 0| */
MAX77693_MUIC_GND_USB_HOST_VB = 0x104, /* 0x0| 1| 0| 0| */
MAX77693_MUIC_GND_AV_CABLE_LOAD = 0x102,/* 0x0| 0| 1| 0| */
MAX77693_MUIC_GND_MHL = 0x103, /* 0x0| 0| 1| 1| */
MAX77693_MUIC_GND_MHL_VB = 0x107, /* 0x0| 1| 1| 1| */
};
/*
 * The MAX77693 MUIC device supports the list of accessories (external
 * connectors) below.
*/
static const unsigned int max77693_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_DCP,
EXTCON_CHG_USB_FAST,
EXTCON_CHG_USB_SLOW,
EXTCON_CHG_USB_CDP,
EXTCON_DISP_MHL,
EXTCON_JIG,
EXTCON_DOCK,
EXTCON_NONE,
};
/*
* max77693_muic_set_debounce_time - Set the debounce time of ADC
* @info: the instance including private data of max77693 MUIC
* @time: the debounce time of ADC
*/
static int max77693_muic_set_debounce_time(struct max77693_muic_info *info,
enum max77693_muic_adc_debounce_time time)
{
int ret;
switch (time) {
case ADC_DEBOUNCE_TIME_5MS:
case ADC_DEBOUNCE_TIME_10MS:
case ADC_DEBOUNCE_TIME_25MS:
case ADC_DEBOUNCE_TIME_38_62MS:
/*
		 * Don't touch BTLDset and JIGset when changing the ADC
		 * debounce time. If anything other than 0 is written to
		 * BTLDset/JIGset, the MUIC device will be reset and lose
		 * its current state.
*/
ret = regmap_write(info->max77693->regmap_muic,
MAX77693_MUIC_REG_CTRL3,
time << MAX77693_CONTROL3_ADCDBSET_SHIFT);
if (ret) {
dev_err(info->dev, "failed to set ADC debounce time\n");
return ret;
}
break;
default:
dev_err(info->dev, "invalid ADC debounce time\n");
return -EINVAL;
}
return 0;
};
/*
* max77693_muic_set_path - Set hardware line according to attached cable
* @info: the instance including private data of max77693 MUIC
 * @val: the path according to attached cable
* @attached: the state of cable (true:attached, false:detached)
*
 * The max77693 MUIC device shares its external H/W lines among a variety of
 * cables, so this function sets the internal H/W line path according to the
 * type of attached cable.
*/
static int max77693_muic_set_path(struct max77693_muic_info *info,
u8 val, bool attached)
{
int ret;
unsigned int ctrl1, ctrl2 = 0;
if (attached)
ctrl1 = val;
else
ctrl1 = MAX77693_CONTROL1_SW_OPEN;
ret = regmap_update_bits(info->max77693->regmap_muic,
MAX77693_MUIC_REG_CTRL1, COMP_SW_MASK, ctrl1);
if (ret < 0) {
dev_err(info->dev, "failed to update MUIC register\n");
return ret;
}
if (attached)
ctrl2 |= MAX77693_CONTROL2_CPEN_MASK; /* LowPwr=0, CPEn=1 */
else
ctrl2 |= MAX77693_CONTROL2_LOWPWR_MASK; /* LowPwr=1, CPEn=0 */
ret = regmap_update_bits(info->max77693->regmap_muic,
MAX77693_MUIC_REG_CTRL2,
MAX77693_CONTROL2_LOWPWR_MASK | MAX77693_CONTROL2_CPEN_MASK,
ctrl2);
if (ret < 0) {
dev_err(info->dev, "failed to update MUIC register\n");
return ret;
}
dev_info(info->dev,
"CONTROL1 : 0x%02x, CONTROL2 : 0x%02x, state : %s\n",
ctrl1, ctrl2, attached ? "attached" : "detached");
return 0;
}
/*
* max77693_muic_get_cable_type - Return cable type and check cable state
* @info: the instance including private data of max77693 MUIC
* @group: the path according to attached cable
* @attached: store cable state and return
*
 * This function checks whether the cable is attached or detached, and then
 * determines the precise cable type according to the cable group.
* - MAX77693_CABLE_GROUP_ADC
* - MAX77693_CABLE_GROUP_ADC_GND
* - MAX77693_CABLE_GROUP_CHG
* - MAX77693_CABLE_GROUP_VBVOLT
*/
static int max77693_muic_get_cable_type(struct max77693_muic_info *info,
enum max77693_muic_cable_group group, bool *attached)
{
int cable_type = 0;
int adc;
int adc1k;
int adclow;
int vbvolt;
int chg_type;
switch (group) {
case MAX77693_CABLE_GROUP_ADC:
/*
* Read ADC value to check cable type and decide cable state
* according to cable type
*/
adc = info->status[0] & MAX77693_STATUS1_ADC_MASK;
adc >>= MAX77693_STATUS1_ADC_SHIFT;
/*
* Check current cable state/cable type and store cable type
* (info->prev_cable_type) for handling cable when cable is
* detached.
*/
if (adc == MAX77693_MUIC_ADC_OPEN) {
*attached = false;
cable_type = info->prev_cable_type;
info->prev_cable_type = MAX77693_MUIC_ADC_OPEN;
} else {
*attached = true;
cable_type = info->prev_cable_type = adc;
}
break;
case MAX77693_CABLE_GROUP_ADC_GND:
/*
* Read ADC value to check cable type and decide cable state
* according to cable type
*/
adc = info->status[0] & MAX77693_STATUS1_ADC_MASK;
adc >>= MAX77693_STATUS1_ADC_SHIFT;
/*
* Check current cable state/cable type and store cable type
* (info->prev_cable_type/_gnd) for handling cable when cable
* is detached.
*/
if (adc == MAX77693_MUIC_ADC_OPEN) {
*attached = false;
cable_type = info->prev_cable_type_gnd;
info->prev_cable_type_gnd = MAX77693_MUIC_ADC_OPEN;
} else {
*attached = true;
adclow = info->status[0] & MAX77693_STATUS1_ADCLOW_MASK;
adclow >>= MAX77693_STATUS1_ADCLOW_SHIFT;
adc1k = info->status[0] & MAX77693_STATUS1_ADC1K_MASK;
adc1k >>= MAX77693_STATUS1_ADC1K_SHIFT;
vbvolt = info->status[1] & MAX77693_STATUS2_VBVOLT_MASK;
vbvolt >>= MAX77693_STATUS2_VBVOLT_SHIFT;
			/*
* [0x1|VBVolt|ADCLow|ADC1K]
* [0x1| 0| 0| 0] USB_HOST
			 * [0x1| 1| 0| 0] USB_HOST_VB
* [0x1| 0| 1| 0] Audio Video cable with load
* [0x1| 0| 1| 1] MHL without charging cable
* [0x1| 1| 1| 1] MHL with charging cable
*/
cable_type = ((0x1 << 8)
| (vbvolt << 2)
| (adclow << 1)
| adc1k);
info->prev_cable_type = adc;
info->prev_cable_type_gnd = cable_type;
}
break;
case MAX77693_CABLE_GROUP_CHG:
/*
* Read charger type to check cable type and decide cable state
* according to type of charger cable.
*/
chg_type = info->status[1] & MAX77693_STATUS2_CHGTYP_MASK;
chg_type >>= MAX77693_STATUS2_CHGTYP_SHIFT;
if (chg_type == MAX77693_CHARGER_TYPE_NONE) {
*attached = false;
cable_type = info->prev_chg_type;
info->prev_chg_type = MAX77693_CHARGER_TYPE_NONE;
} else {
*attached = true;
/*
* Check current cable state/cable type and store cable
* type(info->prev_chg_type) for handling cable when
* charger cable is detached.
*/
cable_type = info->prev_chg_type = chg_type;
}
break;
case MAX77693_CABLE_GROUP_VBVOLT:
/*
* Read ADC value to check cable type and decide cable state
* according to cable type
*/
adc = info->status[0] & MAX77693_STATUS1_ADC_MASK;
adc >>= MAX77693_STATUS1_ADC_SHIFT;
chg_type = info->status[1] & MAX77693_STATUS2_CHGTYP_MASK;
chg_type >>= MAX77693_STATUS2_CHGTYP_SHIFT;
if (adc == MAX77693_MUIC_ADC_OPEN
&& chg_type == MAX77693_CHARGER_TYPE_NONE)
*attached = false;
else
*attached = true;
/*
* Read vbvolt field, if vbvolt is 1,
* this cable is used for charging.
*/
vbvolt = info->status[1] & MAX77693_STATUS2_VBVOLT_MASK;
vbvolt >>= MAX77693_STATUS2_VBVOLT_SHIFT;
cable_type = vbvolt;
break;
default:
dev_err(info->dev, "Unknown cable group (%d)\n", group);
cable_type = -EINVAL;
break;
}
return cable_type;
}
static int max77693_muic_dock_handler(struct max77693_muic_info *info,
int cable_type, bool attached)
{
int ret = 0;
int vbvolt;
bool cable_attached;
unsigned int dock_id;
dev_info(info->dev,
"external connector is %s (adc:0x%02x)\n",
attached ? "attached" : "detached", cable_type);
switch (cable_type) {
case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */
/*
		 * Check whether the power cable is attached or detached.
		 * The Dock-Smart device strictly requires an external power
		 * supply. If a power cable (USB/TA) isn't connected to the
		 * Dock device, the user can't use Dock-Smart for desktop mode.
*/
vbvolt = max77693_muic_get_cable_type(info,
MAX77693_CABLE_GROUP_VBVOLT, &cable_attached);
if (attached && !vbvolt) {
dev_warn(info->dev,
"Cannot detect external power supply\n");
return 0;
}
/*
		 * Notify Dock/MHL state.
		 * - The Dock device includes three types of cables: HDMI,
		 * USB for mouse/keyboard, and a micro-USB port for a USB/TA
		 * cable. The Dock device always needs an external power
		 * supply (USB/TA cable through the micro-USB port). The Dock
		 * device supports screen output from the target to a
		 * separate monitor and mouse/keyboard for desktop mode.
		 *
		 * Features of 'USB/TA cable with Dock device':
		 * - Supports MHL
		 * - Supports external audio output
		 * - Supports charging through the micro-USB port without a
		 *   data connection if a TA cable is connected to the target.
		 * - Supports charging and a data connection through the
		 *   micro-USB port if a USB cable is connected between the
		 *   target and the host device.
		 * - Supports OTG (On-The-Go) devices (e.g. mouse/keyboard)
*/
ret = max77693_muic_set_path(info, info->path_usb, attached);
if (ret < 0)
return ret;
extcon_set_state_sync(info->edev, EXTCON_DOCK, attached);
extcon_set_state_sync(info->edev, EXTCON_DISP_MHL, attached);
goto out;
case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE: /* Dock-Desk */
dock_id = EXTCON_DOCK;
break;
case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
dock_id = EXTCON_DOCK;
if (!attached) {
extcon_set_state_sync(info->edev, EXTCON_USB, false);
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SDP,
false);
}
break;
default:
dev_err(info->dev, "failed to detect %s dock device\n",
attached ? "attached" : "detached");
return -EINVAL;
}
/* Dock-Car/Desk/Audio, PATH:AUDIO */
ret = max77693_muic_set_path(info, MAX77693_CONTROL1_SW_AUDIO,
attached);
if (ret < 0)
return ret;
extcon_set_state_sync(info->edev, dock_id, attached);
out:
return 0;
}
static int max77693_muic_dock_button_handler(struct max77693_muic_info *info,
int button_type, bool attached)
{
struct input_dev *dock = info->dock;
unsigned int code;
switch (button_type) {
case MAX77693_MUIC_ADC_REMOTE_S3_BUTTON-1
... MAX77693_MUIC_ADC_REMOTE_S3_BUTTON+1:
/* DOCK_KEY_PREV */
code = KEY_PREVIOUSSONG;
break;
case MAX77693_MUIC_ADC_REMOTE_S7_BUTTON-1
... MAX77693_MUIC_ADC_REMOTE_S7_BUTTON+1:
/* DOCK_KEY_NEXT */
code = KEY_NEXTSONG;
break;
case MAX77693_MUIC_ADC_REMOTE_S9_BUTTON:
/* DOCK_VOL_DOWN */
code = KEY_VOLUMEDOWN;
break;
case MAX77693_MUIC_ADC_REMOTE_S10_BUTTON:
/* DOCK_VOL_UP */
code = KEY_VOLUMEUP;
break;
case MAX77693_MUIC_ADC_REMOTE_S12_BUTTON-1
... MAX77693_MUIC_ADC_REMOTE_S12_BUTTON+1:
/* DOCK_KEY_PLAY_PAUSE */
code = KEY_PLAYPAUSE;
break;
default:
dev_err(info->dev,
"failed to detect %s key (adc:0x%x)\n",
attached ? "pressed" : "released", button_type);
return -EINVAL;
}
input_event(dock, EV_KEY, code, attached);
input_sync(dock);
return 0;
}
static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info)
{
int cable_type_gnd;
int ret = 0;
bool attached;
cable_type_gnd = max77693_muic_get_cable_type(info,
MAX77693_CABLE_GROUP_ADC_GND, &attached);
switch (cable_type_gnd) {
case MAX77693_MUIC_GND_USB_HOST:
case MAX77693_MUIC_GND_USB_HOST_VB:
/* USB_HOST, PATH: AP_USB */
ret = max77693_muic_set_path(info, MAX77693_CONTROL1_SW_USB,
attached);
if (ret < 0)
return ret;
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, attached);
break;
case MAX77693_MUIC_GND_AV_CABLE_LOAD:
/* Audio Video Cable with load, PATH:AUDIO */
ret = max77693_muic_set_path(info, MAX77693_CONTROL1_SW_AUDIO,
attached);
if (ret < 0)
return ret;
extcon_set_state_sync(info->edev, EXTCON_USB, attached);
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SDP,
attached);
break;
case MAX77693_MUIC_GND_MHL:
case MAX77693_MUIC_GND_MHL_VB:
/* MHL or MHL with USB/TA cable */
extcon_set_state_sync(info->edev, EXTCON_DISP_MHL, attached);
break;
default:
dev_err(info->dev, "failed to detect %s cable of gnd type\n",
attached ? "attached" : "detached");
return -EINVAL;
}
return 0;
}
static int max77693_muic_jig_handler(struct max77693_muic_info *info,
int cable_type, bool attached)
{
int ret = 0;
u8 path = MAX77693_CONTROL1_SW_OPEN;
dev_info(info->dev,
"external connector is %s (adc:0x%02x)\n",
attached ? "attached" : "detached", cable_type);
switch (cable_type) {
case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF: /* ADC_JIG_USB_OFF */
case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON: /* ADC_JIG_USB_ON */
/* PATH:AP_USB */
path = MAX77693_CONTROL1_SW_USB;
break;
case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF: /* ADC_JIG_UART_OFF */
case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON: /* ADC_JIG_UART_ON */
/* PATH:AP_UART */
path = MAX77693_CONTROL1_SW_UART;
break;
default:
dev_err(info->dev, "failed to detect %s jig cable\n",
attached ? "attached" : "detached");
return -EINVAL;
}
ret = max77693_muic_set_path(info, path, attached);
if (ret < 0)
return ret;
extcon_set_state_sync(info->edev, EXTCON_JIG, attached);
return 0;
}
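/*
 * Dispatcher for ADC-type events: look up the cable type from the ADC group
 * and hand it to the ground/JIG/dock/dock-button handlers above.
 */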
static int max77693_muic_adc_handler(struct max77693_muic_info *info)
{
int cable_type;
int button_type;
bool attached;
int ret = 0;
/* Check accessory state which is either detached or attached */
cable_type = max77693_muic_get_cable_type(info,
MAX77693_CABLE_GROUP_ADC, &attached);
dev_info(info->dev,
"external connector is %s (adc:0x%02x, prev_adc:0x%x)\n",
attached ? "attached" : "detached", cable_type,
info->prev_cable_type);
switch (cable_type) {
case MAX77693_MUIC_ADC_GROUND:
/* USB_HOST/MHL/Audio */
max77693_muic_adc_ground_handler(info);
break;
case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF:
case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON:
case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF:
case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON:
/* JIG */
ret = max77693_muic_jig_handler(info, cable_type, attached);
if (ret < 0)
return ret;
break;
case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */
case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE: /* Dock-Desk */
case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
/*
* DOCK device
*
		 * The MAX77693 MUIC device can detect 34 cable types in total,
		 * excluding charger cables. The MUIC device doesn't define a
		 * specific role for cables in the ADC value range 0x01 to
		 * 0x12, so cables with no role can be used/defined according
		 * to the hardware board schema.
*/
ret = max77693_muic_dock_handler(info, cable_type, attached);
if (ret < 0)
return ret;
break;
case MAX77693_MUIC_ADC_REMOTE_S3_BUTTON: /* DOCK_KEY_PREV */
case MAX77693_MUIC_ADC_REMOTE_S7_BUTTON: /* DOCK_KEY_NEXT */
case MAX77693_MUIC_ADC_REMOTE_S9_BUTTON: /* DOCK_VOL_DOWN */
case MAX77693_MUIC_ADC_REMOTE_S10_BUTTON: /* DOCK_VOL_UP */
case MAX77693_MUIC_ADC_REMOTE_S12_BUTTON: /* DOCK_KEY_PLAY_PAUSE */
/*
* Button of DOCK device
* - the Prev/Next/Volume Up/Volume Down/Play-Pause button
*
		 * The MAX77693 MUIC device can detect 34 cable types in total,
		 * excluding charger cables. The MUIC device doesn't define a
		 * specific role for cables in the ADC value range 0x01 to
		 * 0x12, so cables with no role can be used/defined according
		 * to the hardware board schema.
*/
if (attached)
button_type = info->prev_button_type = cable_type;
else
button_type = info->prev_button_type;
ret = max77693_muic_dock_button_handler(info, button_type,
attached);
if (ret < 0)
return ret;
break;
case MAX77693_MUIC_ADC_SEND_END_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S1_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S2_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S4_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S5_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S6_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S8_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S11_BUTTON:
case MAX77693_MUIC_ADC_RESERVED_ACC_1:
case MAX77693_MUIC_ADC_RESERVED_ACC_2:
case MAX77693_MUIC_ADC_RESERVED_ACC_4:
case MAX77693_MUIC_ADC_RESERVED_ACC_5:
case MAX77693_MUIC_ADC_CEA936_AUDIO:
case MAX77693_MUIC_ADC_PHONE_POWERED_DEV:
case MAX77693_MUIC_ADC_TTY_CONVERTER:
case MAX77693_MUIC_ADC_UART_CABLE:
case MAX77693_MUIC_ADC_CEA936A_TYPE1_CHG:
case MAX77693_MUIC_ADC_CEA936A_TYPE2_CHG:
/*
		 * This accessory isn't used in the general case. If detecting
		 * an additional accessory is specifically required, proper
		 * handling should be implemented for when this accessory is
		 * attached/detached.
*/
dev_info(info->dev,
"accessory is %s but it isn't used (adc:0x%x)\n",
attached ? "attached" : "detached", cable_type);
return -EAGAIN;
default:
dev_err(info->dev,
"failed to detect %s accessory (adc:0x%x)\n",
attached ? "attached" : "detached", cable_type);
return -EINVAL;
}
return 0;
}
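/*
 * Dispatcher for charger-type events. For the SDP/dedicated-charger/none
 * cases the ADC groups are checked as well, because MHL and dock cables
 * carry their own charging port and generate both ADC and CHGTYP events.
 */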
static int max77693_muic_chg_handler(struct max77693_muic_info *info)
{
int chg_type;
int cable_type_gnd;
int cable_type;
bool attached;
bool cable_attached;
int ret = 0;
chg_type = max77693_muic_get_cable_type(info,
MAX77693_CABLE_GROUP_CHG, &attached);
dev_info(info->dev,
"external connector is %s(chg_type:0x%x, prev_chg_type:0x%x)\n",
attached ? "attached" : "detached",
chg_type, info->prev_chg_type);
switch (chg_type) {
case MAX77693_CHARGER_TYPE_USB:
case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
case MAX77693_CHARGER_TYPE_NONE:
/* Check MAX77693_CABLE_GROUP_ADC_GND type */
cable_type_gnd = max77693_muic_get_cable_type(info,
MAX77693_CABLE_GROUP_ADC_GND,
&cable_attached);
switch (cable_type_gnd) {
case MAX77693_MUIC_GND_MHL:
case MAX77693_MUIC_GND_MHL_VB:
/*
			 * MHL cable with USB/TA cable
			 * - The MHL cable includes two ports (an HDMI line
			 * and a separate micro-USB port). When the target is
			 * connected to an MHL cable, the extcon driver checks
			 * whether a USB/TA cable is connected. If so, the
			 * extcon driver notifies the notifiee so the battery
			 * can be charged.
			 *
			 * Features of 'USB/TA with MHL cable':
			 * - Supports MHL
			 * - Supports charging through the micro-USB port
			 *   without a data connection
*/
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP,
attached);
extcon_set_state_sync(info->edev, EXTCON_DISP_MHL,
cable_attached);
break;
}
/* Check MAX77693_CABLE_GROUP_ADC type */
cable_type = max77693_muic_get_cable_type(info,
MAX77693_CABLE_GROUP_ADC,
&cable_attached);
switch (cable_type) {
case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
/*
			 * Dock-Audio device with USB/TA cable
			 * - The Dock device includes two ports (Dock-Audio and
			 * a micro-USB port). When the target is connected to a
			 * Dock-Audio device, the extcon driver checks whether
			 * a USB/TA cable is connected. If so, the extcon
			 * driver notifies the notifiee so the battery can be
			 * charged.
			 *
			 * Features of 'USB/TA cable with Dock-Audio device':
			 * - Supports external audio output.
			 * - Supports charging through the micro-USB port
			 *   without a data connection.
*/
extcon_set_state_sync(info->edev, EXTCON_USB,
attached);
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SDP,
attached);
if (!cable_attached)
extcon_set_state_sync(info->edev, EXTCON_DOCK,
cable_attached);
break;
case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */
/*
			 * Dock-Smart device with USB/TA cable
			 * - The Dock-Smart device includes three types of
			 * cables: HDMI, USB for mouse/keyboard, and a
			 * micro-USB port for a USB/TA cable. The Dock-Smart
			 * device always needs an external power supply
			 * (USB/TA cable through the micro-USB port). The
			 * Dock-Smart device supports screen output from the
			 * target to a separate monitor and mouse/keyboard for
			 * desktop mode.
			 *
			 * Features of 'USB/TA cable with Dock-Smart device':
			 * - Supports MHL
			 * - Supports external audio output
			 * - Supports charging through the micro-USB port
			 *   without a data connection if a TA cable is
			 *   connected to the target.
			 * - Supports charging and a data connection through
			 *   the micro-USB port if a USB cable is connected
			 *   between the target and the host device
			 * - Supports OTG (On-The-Go) devices (e.g. mouse/keyboard)
*/
ret = max77693_muic_set_path(info, info->path_usb,
attached);
if (ret < 0)
return ret;
extcon_set_state_sync(info->edev, EXTCON_DOCK,
attached);
extcon_set_state_sync(info->edev, EXTCON_DISP_MHL,
attached);
break;
}
/* Check MAX77693_CABLE_GROUP_CHG type */
switch (chg_type) {
case MAX77693_CHARGER_TYPE_NONE:
/*
			 * When MHL (with a USB/TA cable) or Dock-Audio with a
			 * USB/TA cable is attached, the MUIC device raises the
			 * two interrupts below:
			 * - 'MAX77693_MUIC_IRQ_INT1_ADC' for detecting
			 *    MHL/Dock-Audio.
			 * - 'MAX77693_MUIC_IRQ_INT2_CHGTYP' for detecting a
			 *    USB/TA cable connected to MHL or Dock-Audio.
			 * The MAX77693_MUIC_IRQ_INT1_ADC irq always fires
			 * earlier than the MAX77693_MUIC_IRQ_INT2_CHGTYP irq.
			 *
			 * If the user attaches MHL (with a USB/TA cable) and
			 * immediately detaches it before the MAX77693_MUIC_IRQ
			 * _INT2_CHGTYP irq fires, the USB/TA cable would be
			 * left in the connected state on the target even
			 * though it is no longer connected, confusing the
			 * user. So the driver should handle this situation
			 * even though the previous charger type is N/A.
*/
break;
case MAX77693_CHARGER_TYPE_USB:
/* Only USB cable, PATH:AP_USB */
ret = max77693_muic_set_path(info, info->path_usb,
attached);
if (ret < 0)
return ret;
extcon_set_state_sync(info->edev, EXTCON_USB,
attached);
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SDP,
attached);
break;
case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
/* Only TA cable */
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP,
attached);
break;
}
break;
case MAX77693_CHARGER_TYPE_DOWNSTREAM_PORT:
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_CDP,
attached);
break;
case MAX77693_CHARGER_TYPE_APPLE_500MA:
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SLOW,
attached);
break;
case MAX77693_CHARGER_TYPE_APPLE_1A_2A:
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_FAST,
attached);
break;
case MAX77693_CHARGER_TYPE_DEAD_BATTERY:
break;
default:
dev_err(info->dev,
"failed to detect %s accessory (chg_type:0x%x)\n",
attached ? "attached" : "detached", chg_type);
return -EINVAL;
}
return 0;
}
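/*
 * Workqueue bottom half for the MUIC interrupts: map the virq back to the
 * MUIC interrupt type, take a fresh snapshot of STATUS1/STATUS2 and run the
 * ADC or charger handler accordingly.
 */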
static void max77693_muic_irq_work(struct work_struct *work)
{
struct max77693_muic_info *info = container_of(work,
struct max77693_muic_info, irq_work);
int irq_type = -1;
int i, ret = 0;
if (!info->edev)
return;
mutex_lock(&info->mutex);
for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
if (info->irq == muic_irqs[i].virq)
irq_type = muic_irqs[i].irq;
ret = regmap_bulk_read(info->max77693->regmap_muic,
MAX77693_MUIC_REG_STATUS1, info->status, 2);
if (ret) {
dev_err(info->dev, "failed to read MUIC register\n");
mutex_unlock(&info->mutex);
return;
}
switch (irq_type) {
case MAX77693_MUIC_IRQ_INT1_ADC:
case MAX77693_MUIC_IRQ_INT1_ADC_LOW:
case MAX77693_MUIC_IRQ_INT1_ADC_ERR:
case MAX77693_MUIC_IRQ_INT1_ADC1K:
/*
* Handle all of accessory except for
* type of charger accessory.
*/
ret = max77693_muic_adc_handler(info);
break;
case MAX77693_MUIC_IRQ_INT2_CHGTYP:
case MAX77693_MUIC_IRQ_INT2_CHGDETREUN:
case MAX77693_MUIC_IRQ_INT2_DCDTMR:
case MAX77693_MUIC_IRQ_INT2_DXOVP:
case MAX77693_MUIC_IRQ_INT2_VBVOLT:
case MAX77693_MUIC_IRQ_INT2_VIDRM:
/* Handle charger accessory */
ret = max77693_muic_chg_handler(info);
break;
case MAX77693_MUIC_IRQ_INT3_EOC:
case MAX77693_MUIC_IRQ_INT3_CGMBC:
case MAX77693_MUIC_IRQ_INT3_OVP:
case MAX77693_MUIC_IRQ_INT3_MBCCHG_ERR:
case MAX77693_MUIC_IRQ_INT3_CHG_ENABLED:
case MAX77693_MUIC_IRQ_INT3_BAT_DET:
break;
default:
dev_err(info->dev, "muic interrupt: irq %d occurred\n",
irq_type);
mutex_unlock(&info->mutex);
return;
}
if (ret < 0)
dev_err(info->dev, "failed to handle MUIC interrupt\n");
mutex_unlock(&info->mutex);
}
static irqreturn_t max77693_muic_irq_handler(int irq, void *data)
{
struct max77693_muic_info *info = data;
info->irq = irq;
schedule_work(&info->irq_work);
return IRQ_HANDLED;
}
static const struct regmap_config max77693_muic_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
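/*
 * One-shot detection used from the delayed work at boot: read STATUS1 and
 * STATUS2 once and run the ADC and charger handlers for anything that is
 * already plugged in.
 */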
static int max77693_muic_detect_accessory(struct max77693_muic_info *info)
{
int ret = 0;
int adc;
int chg_type;
bool attached;
mutex_lock(&info->mutex);
/* Read STATUSx register to detect accessory */
ret = regmap_bulk_read(info->max77693->regmap_muic,
MAX77693_MUIC_REG_STATUS1, info->status, 2);
if (ret) {
dev_err(info->dev, "failed to read MUIC register\n");
mutex_unlock(&info->mutex);
return ret;
}
adc = max77693_muic_get_cable_type(info, MAX77693_CABLE_GROUP_ADC,
&attached);
if (attached && adc != MAX77693_MUIC_ADC_OPEN) {
ret = max77693_muic_adc_handler(info);
if (ret < 0) {
dev_err(info->dev, "Cannot detect accessory\n");
mutex_unlock(&info->mutex);
return ret;
}
}
chg_type = max77693_muic_get_cable_type(info, MAX77693_CABLE_GROUP_CHG,
&attached);
if (attached && chg_type != MAX77693_CHARGER_TYPE_NONE) {
ret = max77693_muic_chg_handler(info);
if (ret < 0) {
dev_err(info->dev, "Cannot detect charger accessory\n");
mutex_unlock(&info->mutex);
return ret;
}
}
mutex_unlock(&info->mutex);
return 0;
}
static void max77693_muic_detect_cable_wq(struct work_struct *work)
{
struct max77693_muic_info *info = container_of(to_delayed_work(work),
struct max77693_muic_info, wq_detcable);
max77693_muic_detect_accessory(info);
}
static int max77693_muic_probe(struct platform_device *pdev)
{
struct max77693_dev *max77693 = dev_get_drvdata(pdev->dev.parent);
struct max77693_platform_data *pdata = dev_get_platdata(max77693->dev);
struct max77693_muic_info *info;
struct max77693_reg_data *init_data;
int num_init_data;
int delay_jiffies;
int cable_type;
bool attached;
int ret;
int i;
unsigned int id;
info = devm_kzalloc(&pdev->dev, sizeof(struct max77693_muic_info),
GFP_KERNEL);
if (!info)
return -ENOMEM;
info->dev = &pdev->dev;
info->max77693 = max77693;
if (info->max77693->regmap_muic) {
dev_dbg(&pdev->dev, "allocate register map\n");
} else {
info->max77693->regmap_muic = devm_regmap_init_i2c(
info->max77693->i2c_muic,
&max77693_muic_regmap_config);
if (IS_ERR(info->max77693->regmap_muic)) {
ret = PTR_ERR(info->max77693->regmap_muic);
dev_err(max77693->dev,
"failed to allocate register map: %d\n", ret);
return ret;
}
}
/* Register input device for button of dock device */
info->dock = devm_input_allocate_device(&pdev->dev);
if (!info->dock) {
dev_err(&pdev->dev, "%s: failed to allocate input\n", __func__);
return -ENOMEM;
}
info->dock->name = "max77693-muic/dock";
info->dock->phys = "max77693-muic/extcon";
info->dock->dev.parent = &pdev->dev;
__set_bit(EV_REP, info->dock->evbit);
input_set_capability(info->dock, EV_KEY, KEY_VOLUMEUP);
input_set_capability(info->dock, EV_KEY, KEY_VOLUMEDOWN);
input_set_capability(info->dock, EV_KEY, KEY_PLAYPAUSE);
input_set_capability(info->dock, EV_KEY, KEY_PREVIOUSSONG);
input_set_capability(info->dock, EV_KEY, KEY_NEXTSONG);
ret = input_register_device(info->dock);
if (ret < 0) {
dev_err(&pdev->dev, "Cannot register input device error(%d)\n",
ret);
return ret;
}
platform_set_drvdata(pdev, info);
mutex_init(&info->mutex);
ret = devm_work_autocancel(&pdev->dev, &info->irq_work,
max77693_muic_irq_work);
if (ret)
return ret;
/* Support irq domain for MAX77693 MUIC device */
for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
struct max77693_muic_irq *muic_irq = &muic_irqs[i];
int virq;
virq = regmap_irq_get_virq(max77693->irq_data_muic,
muic_irq->irq);
if (virq <= 0)
return -EINVAL;
muic_irq->virq = virq;
ret = devm_request_threaded_irq(&pdev->dev, virq, NULL,
max77693_muic_irq_handler,
IRQF_NO_SUSPEND,
muic_irq->name, info);
if (ret) {
dev_err(&pdev->dev,
"failed: irq request (IRQ: %d, error :%d)\n",
muic_irq->irq, ret);
return ret;
}
}
/* Initialize extcon device */
info->edev = devm_extcon_dev_allocate(&pdev->dev,
max77693_extcon_cable);
if (IS_ERR(info->edev)) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
return PTR_ERR(info->edev);
}
ret = devm_extcon_dev_register(&pdev->dev, info->edev);
if (ret) {
dev_err(&pdev->dev, "failed to register extcon device\n");
return ret;
}
/* Initialize MUIC register by using platform data or default data */
if (pdata && pdata->muic_data) {
init_data = pdata->muic_data->init_data;
num_init_data = pdata->muic_data->num_init_data;
} else {
init_data = default_init_data;
num_init_data = ARRAY_SIZE(default_init_data);
}
for (i = 0; i < num_init_data; i++) {
regmap_write(info->max77693->regmap_muic,
init_data[i].addr,
init_data[i].data);
}
if (pdata && pdata->muic_data) {
struct max77693_muic_platform_data *muic_pdata
= pdata->muic_data;
/*
* Default usb/uart path whether UART/USB or AUX_UART/AUX_USB
* h/w path of COMP2/COMN1 on CONTROL1 register.
*/
if (muic_pdata->path_uart)
info->path_uart = muic_pdata->path_uart;
else
info->path_uart = MAX77693_CONTROL1_SW_UART;
if (muic_pdata->path_usb)
info->path_usb = muic_pdata->path_usb;
else
info->path_usb = MAX77693_CONTROL1_SW_USB;
/*
* Default delay time for detecting cable state
* after certain time.
*/
if (muic_pdata->detcable_delay_ms)
delay_jiffies =
msecs_to_jiffies(muic_pdata->detcable_delay_ms);
else
delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
} else {
info->path_usb = MAX77693_CONTROL1_SW_USB;
info->path_uart = MAX77693_CONTROL1_SW_UART;
delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
}
/* Set initial path for UART when JIG is connected to get serial logs */
ret = regmap_bulk_read(info->max77693->regmap_muic,
MAX77693_MUIC_REG_STATUS1, info->status, 2);
if (ret) {
dev_err(info->dev, "failed to read MUIC register\n");
return ret;
}
cable_type = max77693_muic_get_cable_type(info,
MAX77693_CABLE_GROUP_ADC, &attached);
if (attached && (cable_type == MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON ||
cable_type == MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF))
max77693_muic_set_path(info, info->path_uart, true);
	/* Check revision number of MUIC device */
ret = regmap_read(info->max77693->regmap_muic,
MAX77693_MUIC_REG_ID, &id);
if (ret < 0) {
dev_err(&pdev->dev, "failed to read revision number\n");
return ret;
}
dev_info(info->dev, "device ID : 0x%x\n", id);
/* Set ADC debounce time */
max77693_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS);
/*
* Detect accessory after completing the initialization of platform
*
* - Use delayed workqueue to detect cable state and then
* notify cable state to notifiee/platform through uevent.
* After completing the booting of platform, the extcon provider
* driver should notify cable state to upper layer.
*/
INIT_DELAYED_WORK(&info->wq_detcable, max77693_muic_detect_cable_wq);
queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
delay_jiffies);
return ret;
}
static struct platform_driver max77693_muic_driver = {
.driver = {
.name = DEV_NAME,
},
.probe = max77693_muic_probe,
};
module_platform_driver(max77693_muic_driver);
MODULE_DESCRIPTION("Maxim MAX77693 Extcon driver");
MODULE_AUTHOR("Chanwoo Choi <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:max77693-muic");
| linux-master | drivers/extcon/extcon-max77693.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Extcon charger detection driver for Intel Cherrytrail Whiskey Cove PMIC
* Copyright (C) 2017 Hans de Goede <[email protected]>
*
 * Based on various non-upstream patches to support the CHT Whiskey Cove PMIC:
* Copyright (C) 2013-2015 Intel Corporation. All rights reserved.
*/
#include <linux/extcon-provider.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/usb/role.h>
#include "extcon-intel.h"
#define CHT_WC_PHYCTRL 0x5e07
#define CHT_WC_CHGRCTRL0 0x5e16
#define CHT_WC_CHGRCTRL0_CHGRRESET BIT(0)
#define CHT_WC_CHGRCTRL0_EMRGCHREN BIT(1)
#define CHT_WC_CHGRCTRL0_EXTCHRDIS BIT(2)
#define CHT_WC_CHGRCTRL0_SWCONTROL BIT(3)
#define CHT_WC_CHGRCTRL0_TTLCK BIT(4)
#define CHT_WC_CHGRCTRL0_CCSM_OFF BIT(5)
#define CHT_WC_CHGRCTRL0_DBPOFF BIT(6)
#define CHT_WC_CHGRCTRL0_CHR_WDT_NOKICK BIT(7)
#define CHT_WC_CHGRCTRL1 0x5e17
#define CHT_WC_CHGRCTRL1_FUSB_INLMT_100 BIT(0)
#define CHT_WC_CHGRCTRL1_FUSB_INLMT_150 BIT(1)
#define CHT_WC_CHGRCTRL1_FUSB_INLMT_500 BIT(2)
#define CHT_WC_CHGRCTRL1_FUSB_INLMT_900 BIT(3)
#define CHT_WC_CHGRCTRL1_FUSB_INLMT_1500 BIT(4)
#define CHT_WC_CHGRCTRL1_FTEMP_EVENT BIT(5)
#define CHT_WC_CHGRCTRL1_OTGMODE BIT(6)
#define CHT_WC_CHGRCTRL1_DBPEN BIT(7)
#define CHT_WC_USBSRC 0x5e29
#define CHT_WC_USBSRC_STS_MASK GENMASK(1, 0)
#define CHT_WC_USBSRC_STS_SUCCESS 2
#define CHT_WC_USBSRC_STS_FAIL 3
#define CHT_WC_USBSRC_TYPE_SHIFT 2
#define CHT_WC_USBSRC_TYPE_MASK GENMASK(5, 2)
#define CHT_WC_USBSRC_TYPE_NONE 0
#define CHT_WC_USBSRC_TYPE_SDP 1
#define CHT_WC_USBSRC_TYPE_DCP 2
#define CHT_WC_USBSRC_TYPE_CDP 3
#define CHT_WC_USBSRC_TYPE_ACA 4
#define CHT_WC_USBSRC_TYPE_SE1 5
#define CHT_WC_USBSRC_TYPE_MHL 6
#define CHT_WC_USBSRC_TYPE_FLOATING 7
#define CHT_WC_USBSRC_TYPE_OTHER 8
#define CHT_WC_USBSRC_TYPE_DCP_EXTPHY 9
#define CHT_WC_CHGDISCTRL 0x5e2f
#define CHT_WC_CHGDISCTRL_OUT BIT(0)
/* 0 - open drain, 1 - regular push-pull output */
#define CHT_WC_CHGDISCTRL_DRV BIT(4)
/* 0 - pin is controlled by SW, 1 - by HW */
#define CHT_WC_CHGDISCTRL_FN BIT(6)
#define CHT_WC_PWRSRC_IRQ 0x6e03
#define CHT_WC_PWRSRC_IRQ_MASK 0x6e0f
#define CHT_WC_PWRSRC_STS 0x6e1e
#define CHT_WC_PWRSRC_VBUS BIT(0)
#define CHT_WC_PWRSRC_DC BIT(1)
#define CHT_WC_PWRSRC_BATT BIT(2)
#define CHT_WC_PWRSRC_USBID_MASK GENMASK(4, 3)
#define CHT_WC_PWRSRC_USBID_SHIFT 3
#define CHT_WC_PWRSRC_RID_ACA 0
#define CHT_WC_PWRSRC_RID_GND 1
#define CHT_WC_PWRSRC_RID_FLOAT 2
#define CHT_WC_VBUS_GPIO_CTLO 0x6e2d
#define CHT_WC_VBUS_GPIO_CTLO_OUTPUT BIT(0)
#define CHT_WC_VBUS_GPIO_CTLO_DRV_OD BIT(4)
#define CHT_WC_VBUS_GPIO_CTLO_DIR_OUT BIT(5)
enum cht_wc_mux_select {
MUX_SEL_PMIC = 0,
MUX_SEL_SOC,
};
static const unsigned int cht_wc_extcon_cables[] = {
EXTCON_USB,
EXTCON_USB_HOST,
EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_CDP,
EXTCON_CHG_USB_DCP,
EXTCON_CHG_USB_ACA,
EXTCON_NONE,
};
struct cht_wc_extcon_data {
struct device *dev;
struct regmap *regmap;
struct extcon_dev *edev;
struct usb_role_switch *role_sw;
struct regulator *vbus_boost;
struct power_supply *psy;
enum power_supply_usb_type usb_type;
unsigned int previous_cable;
bool usb_host;
bool vbus_boost_enabled;
};
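/* Decode the USB ID pin state from PWRSRC_STS into the intel extcon ID codes */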
static int cht_wc_extcon_get_id(struct cht_wc_extcon_data *ext, int pwrsrc_sts)
{
switch ((pwrsrc_sts & CHT_WC_PWRSRC_USBID_MASK) >> CHT_WC_PWRSRC_USBID_SHIFT) {
case CHT_WC_PWRSRC_RID_GND:
return INTEL_USB_ID_GND;
case CHT_WC_PWRSRC_RID_FLOAT:
return INTEL_USB_ID_FLOAT;
/*
	 * According to the spec we should read the USB-ID pin ADC value here
	 * to determine the resistance of the used pull-down resistor and then
	 * return RID_A / RID_B / RID_C based on this. But all "Accessory
	 * Charger Adapter"s (ACAs) which users can actually buy always use
	 * a combination of a charging port with one or more USB-A ports, so
	 * they should always use a resistor indicating RID_A. But the spec
	 * is hard to read / badly worded, so some of them actually indicate
	 * they are a RID_B ACA even though they clearly are a RID_A ACA.
	 * To work around this, simply always return INTEL_USB_RID_A, which
* matches all the ACAs which users can actually buy.
*/
case CHT_WC_PWRSRC_RID_ACA:
return INTEL_USB_RID_A;
default:
return INTEL_USB_ID_FLOAT;
}
}
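/*
 * Poll USBSRC until the charger-detection state machine reports success or
 * failure (800ms max), fall back to SDP on errors, and translate the result
 * into the matching extcon charger cable id while updating ext->usb_type.
 */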
static int cht_wc_extcon_get_charger(struct cht_wc_extcon_data *ext,
bool ignore_errors)
{
int ret, usbsrc, status;
unsigned long timeout;
	/* Charger detection can take up to 600ms, wait 800ms max. */
timeout = jiffies + msecs_to_jiffies(800);
do {
ret = regmap_read(ext->regmap, CHT_WC_USBSRC, &usbsrc);
if (ret) {
dev_err(ext->dev, "Error reading usbsrc: %d\n", ret);
return ret;
}
status = usbsrc & CHT_WC_USBSRC_STS_MASK;
if (status == CHT_WC_USBSRC_STS_SUCCESS ||
status == CHT_WC_USBSRC_STS_FAIL)
break;
msleep(50); /* Wait a bit before retrying */
} while (time_before(jiffies, timeout));
if (status != CHT_WC_USBSRC_STS_SUCCESS) {
if (!ignore_errors) {
if (status == CHT_WC_USBSRC_STS_FAIL)
dev_warn(ext->dev, "Could not detect charger type\n");
else
dev_warn(ext->dev, "Timeout detecting charger type\n");
}
/* Safe fallback */
usbsrc = CHT_WC_USBSRC_TYPE_SDP << CHT_WC_USBSRC_TYPE_SHIFT;
}
usbsrc = (usbsrc & CHT_WC_USBSRC_TYPE_MASK) >> CHT_WC_USBSRC_TYPE_SHIFT;
switch (usbsrc) {
default:
dev_warn(ext->dev,
"Unhandled charger type %d, defaulting to SDP\n",
			 usbsrc);
ext->usb_type = POWER_SUPPLY_USB_TYPE_SDP;
return EXTCON_CHG_USB_SDP;
case CHT_WC_USBSRC_TYPE_SDP:
case CHT_WC_USBSRC_TYPE_FLOATING:
case CHT_WC_USBSRC_TYPE_OTHER:
ext->usb_type = POWER_SUPPLY_USB_TYPE_SDP;
return EXTCON_CHG_USB_SDP;
case CHT_WC_USBSRC_TYPE_CDP:
ext->usb_type = POWER_SUPPLY_USB_TYPE_CDP;
return EXTCON_CHG_USB_CDP;
case CHT_WC_USBSRC_TYPE_DCP:
case CHT_WC_USBSRC_TYPE_DCP_EXTPHY:
	case CHT_WC_USBSRC_TYPE_MHL: /* MHL2+ delivers up to 2A, treat as DCP */
ext->usb_type = POWER_SUPPLY_USB_TYPE_DCP;
return EXTCON_CHG_USB_DCP;
case CHT_WC_USBSRC_TYPE_ACA:
ext->usb_type = POWER_SUPPLY_USB_TYPE_ACA;
return EXTCON_CHG_USB_ACA;
}
}
static void cht_wc_extcon_set_phymux(struct cht_wc_extcon_data *ext, u8 state)
{
int ret;
ret = regmap_write(ext->regmap, CHT_WC_PHYCTRL, state);
if (ret)
dev_err(ext->dev, "Error writing phyctrl: %d\n", ret);
}
static void cht_wc_extcon_set_5v_boost(struct cht_wc_extcon_data *ext,
bool enable)
{
int ret, val;
/*
	 * The 5V boost converter is enabled through a gpio on the PMIC. Since
	 * there currently is no gpio driver, we access the gpio reg directly.
*/
val = CHT_WC_VBUS_GPIO_CTLO_DRV_OD | CHT_WC_VBUS_GPIO_CTLO_DIR_OUT;
if (enable)
val |= CHT_WC_VBUS_GPIO_CTLO_OUTPUT;
ret = regmap_write(ext->regmap, CHT_WC_VBUS_GPIO_CTLO, val);
if (ret)
dev_err(ext->dev, "Error writing Vbus GPIO CTLO: %d\n", ret);
}
static void cht_wc_extcon_set_otgmode(struct cht_wc_extcon_data *ext,
bool enable)
{
unsigned int val = enable ? CHT_WC_CHGRCTRL1_OTGMODE : 0;
int ret;
ret = regmap_update_bits(ext->regmap, CHT_WC_CHGRCTRL1,
CHT_WC_CHGRCTRL1_OTGMODE, val);
if (ret)
dev_err(ext->dev, "Error updating CHGRCTRL1 reg: %d\n", ret);
if (ext->vbus_boost && ext->vbus_boost_enabled != enable) {
if (enable)
ret = regulator_enable(ext->vbus_boost);
else
ret = regulator_disable(ext->vbus_boost);
if (ret)
dev_err(ext->dev, "Error updating Vbus boost regulator: %d\n", ret);
else
ext->vbus_boost_enabled = enable;
}
}
static void cht_wc_extcon_enable_charging(struct cht_wc_extcon_data *ext,
bool enable)
{
unsigned int val = enable ? 0 : CHT_WC_CHGDISCTRL_OUT;
int ret;
ret = regmap_update_bits(ext->regmap, CHT_WC_CHGDISCTRL,
CHT_WC_CHGDISCTRL_OUT, val);
if (ret)
dev_err(ext->dev, "Error updating CHGDISCTRL reg: %d\n", ret);
}
/* Small helper to sync EXTCON_CHG_USB_SDP and EXTCON_USB state */
static void cht_wc_extcon_set_state(struct cht_wc_extcon_data *ext,
unsigned int cable, bool state)
{
extcon_set_state_sync(ext->edev, cable, state);
if (cable == EXTCON_CHG_USB_SDP)
extcon_set_state_sync(ext->edev, EXTCON_USB, state);
}
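/*
 * Central power-source event handler: evaluate the ID pin, set OTG mode,
 * charging and the D+/D- mux accordingly, run charger detection when Vbus
 * is present, and push the result out through extcon, the USB role switch
 * and the power-supply device.
 */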
static void cht_wc_extcon_pwrsrc_event(struct cht_wc_extcon_data *ext)
{
int ret, pwrsrc_sts, id;
unsigned int cable = EXTCON_NONE;
/* Ignore errors in host mode, as the 5v boost converter is on then */
bool ignore_get_charger_errors = ext->usb_host;
enum usb_role role;
ext->usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
ret = regmap_read(ext->regmap, CHT_WC_PWRSRC_STS, &pwrsrc_sts);
if (ret) {
dev_err(ext->dev, "Error reading pwrsrc status: %d\n", ret);
return;
}
id = cht_wc_extcon_get_id(ext, pwrsrc_sts);
if (id == INTEL_USB_ID_GND) {
cht_wc_extcon_enable_charging(ext, false);
cht_wc_extcon_set_otgmode(ext, true);
/* The 5v boost causes a false VBUS / SDP detect, skip */
goto charger_det_done;
}
cht_wc_extcon_set_otgmode(ext, false);
cht_wc_extcon_enable_charging(ext, true);
/* Plugged into a host/charger or not connected? */
if (!(pwrsrc_sts & CHT_WC_PWRSRC_VBUS)) {
/* Route D+ and D- to PMIC for future charger detection */
cht_wc_extcon_set_phymux(ext, MUX_SEL_PMIC);
goto set_state;
}
ret = cht_wc_extcon_get_charger(ext, ignore_get_charger_errors);
if (ret >= 0)
cable = ret;
charger_det_done:
/* Route D+ and D- to SoC for the host or gadget controller */
cht_wc_extcon_set_phymux(ext, MUX_SEL_SOC);
set_state:
if (cable != ext->previous_cable) {
cht_wc_extcon_set_state(ext, cable, true);
cht_wc_extcon_set_state(ext, ext->previous_cable, false);
ext->previous_cable = cable;
}
ext->usb_host = ((id == INTEL_USB_ID_GND) || (id == INTEL_USB_RID_A));
extcon_set_state_sync(ext->edev, EXTCON_USB_HOST, ext->usb_host);
if (ext->usb_host)
role = USB_ROLE_HOST;
else if (pwrsrc_sts & CHT_WC_PWRSRC_VBUS)
role = USB_ROLE_DEVICE;
else
role = USB_ROLE_NONE;
/* Note: this is a no-op when ext->role_sw is NULL */
ret = usb_role_switch_set_role(ext->role_sw, role);
if (ret)
dev_err(ext->dev, "Error setting USB-role: %d\n", ret);
if (ext->psy)
power_supply_changed(ext->psy);
}
static irqreturn_t cht_wc_extcon_isr(int irq, void *data)
{
struct cht_wc_extcon_data *ext = data;
int ret, irqs;
ret = regmap_read(ext->regmap, CHT_WC_PWRSRC_IRQ, &irqs);
if (ret) {
dev_err(ext->dev, "Error reading irqs: %d\n", ret);
return IRQ_NONE;
}
cht_wc_extcon_pwrsrc_event(ext);
ret = regmap_write(ext->regmap, CHT_WC_PWRSRC_IRQ, irqs);
if (ret) {
dev_err(ext->dev, "Error writing irqs: %d\n", ret);
return IRQ_NONE;
}
return IRQ_HANDLED;
}
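/*
 * Move the CHGDIS pin function and the CHGRCTRL0 SWCONTROL/CCSM_OFF bits
 * under software control (or hand them back to the hardware) so the driver
 * can do its own charger and OTG handling.
 */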
static int cht_wc_extcon_sw_control(struct cht_wc_extcon_data *ext, bool enable)
{
int ret, mask, val;
val = enable ? 0 : CHT_WC_CHGDISCTRL_FN;
ret = regmap_update_bits(ext->regmap, CHT_WC_CHGDISCTRL,
CHT_WC_CHGDISCTRL_FN, val);
if (ret)
dev_err(ext->dev,
"Error setting sw control for CHGDIS pin: %d\n",
ret);
mask = CHT_WC_CHGRCTRL0_SWCONTROL | CHT_WC_CHGRCTRL0_CCSM_OFF;
val = enable ? mask : 0;
ret = regmap_update_bits(ext->regmap, CHT_WC_CHGRCTRL0, mask, val);
if (ret)
dev_err(ext->dev, "Error setting sw control: %d\n", ret);
return ret;
}
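/*
* Look up the USB role-switch registered for the "intel-xhci-usb-sw"
* software node, deferring probe until it has been registered.
*/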
static int cht_wc_extcon_find_role_sw(struct cht_wc_extcon_data *ext)
{
const struct software_node *swnode;
struct fwnode_handle *fwnode;
swnode = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
if (!swnode)
return -EPROBE_DEFER;
fwnode = software_node_fwnode(swnode);
ext->role_sw = usb_role_switch_find_by_fwnode(fwnode);
fwnode_handle_put(fwnode);
return ext->role_sw ? 0 : -EPROBE_DEFER;
}
static void cht_wc_extcon_put_role_sw(void *data)
{
struct cht_wc_extcon_data *ext = data;
usb_role_switch_put(ext->role_sw);
}
/* Some boards require controlling the role-sw and Vbus based on the id-pin */
static int cht_wc_extcon_get_role_sw_and_regulator(struct cht_wc_extcon_data *ext)
{
int ret;
ret = cht_wc_extcon_find_role_sw(ext);
if (ret)
return ret;
ret = devm_add_action_or_reset(ext->dev, cht_wc_extcon_put_role_sw, ext);
if (ret)
return ret;
/*
* On x86/ACPI platforms the regulator <-> consumer link is provided
* by platform_data passed to the regulator driver. This means that
* this info is not available before the regulator driver has bound.
* Use devm_regulator_get_optional() to avoid getting a dummy
* regulator and wait for the regulator to show up if necessary.
*/
ext->vbus_boost = devm_regulator_get_optional(ext->dev, "vbus");
if (IS_ERR(ext->vbus_boost)) {
ret = PTR_ERR(ext->vbus_boost);
if (ret == -ENODEV)
ret = -EPROBE_DEFER;
return dev_err_probe(ext->dev, ret, "getting Vbus regulator");
}
return 0;
}
static int cht_wc_extcon_psy_get_prop(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct cht_wc_extcon_data *ext = power_supply_get_drvdata(psy);
switch (psp) {
case POWER_SUPPLY_PROP_USB_TYPE:
val->intval = ext->usb_type;
break;
case POWER_SUPPLY_PROP_ONLINE:
val->intval = ext->usb_type ? 1 : 0;
break;
default:
return -EINVAL;
}
return 0;
}
static const enum power_supply_usb_type cht_wc_extcon_psy_usb_types[] = {
POWER_SUPPLY_USB_TYPE_SDP,
POWER_SUPPLY_USB_TYPE_CDP,
POWER_SUPPLY_USB_TYPE_DCP,
POWER_SUPPLY_USB_TYPE_ACA,
POWER_SUPPLY_USB_TYPE_UNKNOWN,
};
static const enum power_supply_property cht_wc_extcon_psy_props[] = {
POWER_SUPPLY_PROP_USB_TYPE,
POWER_SUPPLY_PROP_ONLINE,
};
static const struct power_supply_desc cht_wc_extcon_psy_desc = {
.name = "cht_wcove_pwrsrc",
.type = POWER_SUPPLY_TYPE_USB,
.usb_types = cht_wc_extcon_psy_usb_types,
.num_usb_types = ARRAY_SIZE(cht_wc_extcon_psy_usb_types),
.properties = cht_wc_extcon_psy_props,
.num_properties = ARRAY_SIZE(cht_wc_extcon_psy_props),
.get_property = cht_wc_extcon_psy_get_prop,
};
static int cht_wc_extcon_register_psy(struct cht_wc_extcon_data *ext)
{
struct power_supply_config psy_cfg = { .drv_data = ext };
ext->psy = devm_power_supply_register(ext->dev,
&cht_wc_extcon_psy_desc,
&psy_cfg);
return PTR_ERR_OR_ZERO(ext->psy);
}
static int cht_wc_extcon_probe(struct platform_device *pdev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
struct cht_wc_extcon_data *ext;
unsigned long mask = ~(CHT_WC_PWRSRC_VBUS | CHT_WC_PWRSRC_USBID_MASK);
int pwrsrc_sts, id;
int irq, ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ext = devm_kzalloc(&pdev->dev, sizeof(*ext), GFP_KERNEL);
if (!ext)
return -ENOMEM;
ext->dev = &pdev->dev;
ext->regmap = pmic->regmap;
ext->previous_cable = EXTCON_NONE;
/* Initialize extcon device */
ext->edev = devm_extcon_dev_allocate(ext->dev, cht_wc_extcon_cables);
if (IS_ERR(ext->edev))
return PTR_ERR(ext->edev);
switch (pmic->cht_wc_model) {
case INTEL_CHT_WC_GPD_WIN_POCKET:
/*
* When a host-cable is detected the BIOS enables an external 5v boost
* converter to power connected devices. There are 2 problems with this:
* 1) This gets seen by the external battery charger as a valid Vbus
* supply and it then tries to feed Vsys from this, creating a
* feedback loop which causes approx. 300 mA extra battery drain
* (and unless we drive the external-charger-disable pin high it
* also tries to charge the battery, causing even more feedback).
* 2) This gets seen by the pwrsrc block as an SDP USB Vbus supply.
* Since the external battery charger has its own 5v boost converter
* which does not have these issues, we simply turn the separate
* external 5v boost converter off and leave it off entirely.
*/
cht_wc_extcon_set_5v_boost(ext, false);
break;
case INTEL_CHT_WC_LENOVO_YOGABOOK1:
case INTEL_CHT_WC_LENOVO_YT3_X90:
/* Do this first, as it may very well return -EPROBE_DEFER. */
ret = cht_wc_extcon_get_role_sw_and_regulator(ext);
if (ret)
return ret;
/*
* The bq25890 used here relies on this driver's BC-1.2 charger
* detection, and the bq25890 driver expects this info to be
* available through a parent power_supply class device which
* models the detected charger (idem to how the Type-C TCPM code
* registers a power_supply classdev for the connected charger).
*/
ret = cht_wc_extcon_register_psy(ext);
if (ret)
return ret;
break;
case INTEL_CHT_WC_XIAOMI_MIPAD2:
ret = cht_wc_extcon_get_role_sw_and_regulator(ext);
if (ret)
return ret;
break;
default:
break;
}
/* Enable sw control */
ret = cht_wc_extcon_sw_control(ext, true);
if (ret)
goto disable_sw_control;
/* Disable charging by external battery charger */
cht_wc_extcon_enable_charging(ext, false);
/* Register extcon device */
ret = devm_extcon_dev_register(ext->dev, ext->edev);
if (ret) {
dev_err(ext->dev, "Error registering extcon device: %d\n", ret);
goto disable_sw_control;
}
ret = regmap_read(ext->regmap, CHT_WC_PWRSRC_STS, &pwrsrc_sts);
if (ret) {
dev_err(ext->dev, "Error reading pwrsrc status: %d\n", ret);
goto disable_sw_control;
}
/*
* If no USB host or device connected, route D+ and D- to PMIC for
* initial charger detection
*/
id = cht_wc_extcon_get_id(ext, pwrsrc_sts);
if (id != INTEL_USB_ID_GND)
cht_wc_extcon_set_phymux(ext, MUX_SEL_PMIC);
/* Get initial state */
cht_wc_extcon_pwrsrc_event(ext);
ret = devm_request_threaded_irq(ext->dev, irq, NULL, cht_wc_extcon_isr,
IRQF_ONESHOT, pdev->name, ext);
if (ret) {
dev_err(ext->dev, "Error requesting interrupt: %d\n", ret);
goto disable_sw_control;
}
/* Unmask irqs */
ret = regmap_write(ext->regmap, CHT_WC_PWRSRC_IRQ_MASK, mask);
if (ret) {
dev_err(ext->dev, "Error writing irq-mask: %d\n", ret);
goto disable_sw_control;
}
platform_set_drvdata(pdev, ext);
return 0;
disable_sw_control:
cht_wc_extcon_sw_control(ext, false);
return ret;
}
static int cht_wc_extcon_remove(struct platform_device *pdev)
{
struct cht_wc_extcon_data *ext = platform_get_drvdata(pdev);
cht_wc_extcon_sw_control(ext, false);
return 0;
}
static const struct platform_device_id cht_wc_extcon_table[] = {
{ .name = "cht_wcove_pwrsrc" },
{},
};
MODULE_DEVICE_TABLE(platform, cht_wc_extcon_table);
static struct platform_driver cht_wc_extcon_driver = {
.probe = cht_wc_extcon_probe,
.remove = cht_wc_extcon_remove,
.id_table = cht_wc_extcon_table,
.driver = {
.name = "cht_wcove_pwrsrc",
},
};
module_platform_driver(cht_wc_extcon_driver);
MODULE_DESCRIPTION("Intel Cherrytrail Whiskey Cove PMIC extcon driver");
MODULE_AUTHOR("Hans de Goede <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/extcon/extcon-intel-cht-wc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Intel INT3496 ACPI device extcon driver
*
* Copyright (c) 2016 Hans de Goede <[email protected]>
*
* Based on android x86 kernel code which is:
*
* Copyright (c) 2014, Intel Corporation.
* Author: David Cohen <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/devm-helpers.h>
#include <linux/extcon-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#define INT3496_GPIO_USB_ID 0
#define INT3496_GPIO_VBUS_EN 1
#define INT3496_GPIO_USB_MUX 2
#define DEBOUNCE_TIME msecs_to_jiffies(50)
struct int3496_data {
struct device *dev;
struct extcon_dev *edev;
struct delayed_work work;
struct gpio_desc *gpio_usb_id;
struct gpio_desc *gpio_vbus_en;
struct gpio_desc *gpio_usb_mux;
struct regulator *vbus_boost;
int usb_id_irq;
bool vbus_boost_enabled;
};
static const unsigned int int3496_cable[] = {
EXTCON_USB_HOST,
EXTCON_NONE,
};
static const struct acpi_gpio_params id_gpios = { INT3496_GPIO_USB_ID, 0, false };
static const struct acpi_gpio_params vbus_gpios = { INT3496_GPIO_VBUS_EN, 0, false };
static const struct acpi_gpio_params mux_gpios = { INT3496_GPIO_USB_MUX, 0, false };
static const struct acpi_gpio_mapping acpi_int3496_default_gpios[] = {
/*
* Some platforms have a bug in the ACPI GPIO description, marking the
* IRQ GPIO as output-only. Ask the GPIO core to ignore this limitation.
*/
{ "id-gpios", &id_gpios, 1, ACPI_GPIO_QUIRK_NO_IO_RESTRICTION },
{ "vbus-gpios", &vbus_gpios, 1 },
{ "mux-gpios", &mux_gpios, 1 },
{ },
};
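/*
* Enable/disable the optional Vbus boost regulator, tracking the current
* state so that regulator enable/disable calls stay balanced.
*/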
static void int3496_set_vbus_boost(struct int3496_data *data, bool enable)
{
int ret;
if (IS_ERR_OR_NULL(data->vbus_boost))
return;
if (data->vbus_boost_enabled == enable)
return;
if (enable)
ret = regulator_enable(data->vbus_boost);
else
ret = regulator_disable(data->vbus_boost);
if (ret == 0)
data->vbus_boost_enabled = enable;
else
dev_err(data->dev, "Error updating Vbus boost regulator: %d\n", ret);
}
static void int3496_do_usb_id(struct work_struct *work)
{
struct int3496_data *data =
container_of(work, struct int3496_data, work.work);
int id = gpiod_get_value_cansleep(data->gpio_usb_id);
/* id == 1: PERIPHERAL, id == 0: HOST */
dev_dbg(data->dev, "Connected %s cable\n", id ? "PERIPHERAL" : "HOST");
/*
* Peripheral: set USB mux to peripheral and disable VBUS
* Host: set USB mux to host and enable VBUS
*/
if (!IS_ERR(data->gpio_usb_mux))
gpiod_direction_output(data->gpio_usb_mux, id);
if (!IS_ERR(data->gpio_vbus_en))
gpiod_direction_output(data->gpio_vbus_en, !id);
else
int3496_set_vbus_boost(data, !id);
extcon_set_state_sync(data->edev, EXTCON_USB_HOST, !id);
}
static irqreturn_t int3496_thread_isr(int irq, void *priv)
{
struct int3496_data *data = priv;
/* Let the pin settle before processing it */
mod_delayed_work(system_wq, &data->work, DEBOUNCE_TIME);
return IRQ_HANDLED;
}
static int int3496_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct int3496_data *data;
int ret;
if (has_acpi_companion(dev)) {
ret = devm_acpi_dev_add_driver_gpios(dev, acpi_int3496_default_gpios);
if (ret) {
dev_err(dev, "can't add GPIO ACPI mapping\n");
return ret;
}
}
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->dev = dev;
ret = devm_delayed_work_autocancel(dev, &data->work, int3496_do_usb_id);
if (ret)
return ret;
data->gpio_usb_id =
devm_gpiod_get(dev, "id", GPIOD_IN | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
if (IS_ERR(data->gpio_usb_id)) {
ret = PTR_ERR(data->gpio_usb_id);
dev_err(dev, "can't request USB ID GPIO: %d\n", ret);
return ret;
}
data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id);
if (data->usb_id_irq < 0) {
dev_err(dev, "can't get USB ID IRQ: %d\n", data->usb_id_irq);
return data->usb_id_irq;
}
data->gpio_vbus_en = devm_gpiod_get(dev, "vbus", GPIOD_ASIS);
if (IS_ERR(data->gpio_vbus_en)) {
dev_dbg(dev, "can't request VBUS EN GPIO\n");
data->vbus_boost = devm_regulator_get_optional(dev, "vbus");
}
data->gpio_usb_mux = devm_gpiod_get(dev, "mux", GPIOD_ASIS);
if (IS_ERR(data->gpio_usb_mux))
dev_dbg(dev, "can't request USB MUX GPIO\n");
/* register extcon device */
data->edev = devm_extcon_dev_allocate(dev, int3496_cable);
if (IS_ERR(data->edev))
return -ENOMEM;
ret = devm_extcon_dev_register(dev, data->edev);
if (ret < 0) {
dev_err(dev, "can't register extcon device: %d\n", ret);
return ret;
}
ret = devm_request_threaded_irq(dev, data->usb_id_irq,
NULL, int3496_thread_isr,
IRQF_SHARED | IRQF_ONESHOT |
IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING,
dev_name(dev), data);
if (ret < 0) {
dev_err(dev, "can't request IRQ for USB ID GPIO: %d\n", ret);
return ret;
}
/* process id-pin so that we start with the right status */
queue_delayed_work(system_wq, &data->work, 0);
flush_delayed_work(&data->work);
platform_set_drvdata(pdev, data);
return 0;
}
static const struct acpi_device_id int3496_acpi_match[] = {
{ "INT3496" },
{ }
};
MODULE_DEVICE_TABLE(acpi, int3496_acpi_match);
static const struct platform_device_id int3496_ids[] = {
{ .name = "intel-int3496" },
{},
};
MODULE_DEVICE_TABLE(platform, int3496_ids);
static struct platform_driver int3496_driver = {
.driver = {
.name = "intel-int3496",
.acpi_match_table = int3496_acpi_match,
},
.probe = int3496_probe,
.id_table = int3496_ids,
};
module_platform_driver(int3496_driver);
MODULE_AUTHOR("Hans de Goede <[email protected]>");
MODULE_DESCRIPTION("Intel INT3496 ACPI device extcon driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/extcon/extcon-intel-int3496.c |
// SPDX-License-Identifier: GPL-2.0
/*
* extcon driver for Basin Cove PMIC
*
* Copyright (c) 2019, Intel Corporation.
* Author: Andy Shevchenko <[email protected]>
*/
#include <linux/extcon-provider.h>
#include <linux/interrupt.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/mfd/intel_soc_pmic_mrfld.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "extcon-intel.h"
#define BCOVE_USBIDCTRL 0x19
#define BCOVE_USBIDCTRL_ID BIT(0)
#define BCOVE_USBIDCTRL_ACA BIT(1)
#define BCOVE_USBIDCTRL_ALL (BCOVE_USBIDCTRL_ID | BCOVE_USBIDCTRL_ACA)
#define BCOVE_USBIDSTS 0x1a
#define BCOVE_USBIDSTS_GND BIT(0)
#define BCOVE_USBIDSTS_RARBRC_MASK GENMASK(2, 1)
#define BCOVE_USBIDSTS_RARBRC_SHIFT 1
#define BCOVE_USBIDSTS_NO_ACA 0
#define BCOVE_USBIDSTS_R_ID_A 1
#define BCOVE_USBIDSTS_R_ID_B 2
#define BCOVE_USBIDSTS_R_ID_C 3
#define BCOVE_USBIDSTS_FLOAT BIT(3)
#define BCOVE_USBIDSTS_SHORT BIT(4)
#define BCOVE_CHGRIRQ_ALL (BCOVE_CHGRIRQ_VBUSDET | BCOVE_CHGRIRQ_DCDET | \
BCOVE_CHGRIRQ_BATTDET | BCOVE_CHGRIRQ_USBIDDET)
#define BCOVE_CHGRCTRL0 0x4b
#define BCOVE_CHGRCTRL0_CHGRRESET BIT(0)
#define BCOVE_CHGRCTRL0_EMRGCHREN BIT(1)
#define BCOVE_CHGRCTRL0_EXTCHRDIS BIT(2)
#define BCOVE_CHGRCTRL0_SWCONTROL BIT(3)
#define BCOVE_CHGRCTRL0_TTLCK BIT(4)
#define BCOVE_CHGRCTRL0_BIT_5 BIT(5)
#define BCOVE_CHGRCTRL0_BIT_6 BIT(6)
#define BCOVE_CHGRCTRL0_CHR_WDT_NOKICK BIT(7)
struct mrfld_extcon_data {
struct device *dev;
struct regmap *regmap;
struct extcon_dev *edev;
unsigned int status;
unsigned int id;
};
static const unsigned int mrfld_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_CDP,
EXTCON_CHG_USB_DCP,
EXTCON_CHG_USB_ACA,
EXTCON_NONE,
};
static int mrfld_extcon_clear(struct mrfld_extcon_data *data, unsigned int reg,
unsigned int mask)
{
return regmap_update_bits(data->regmap, reg, mask, 0x00);
}
static int mrfld_extcon_set(struct mrfld_extcon_data *data, unsigned int reg,
unsigned int mask)
{
return regmap_update_bits(data->regmap, reg, mask, 0xff);
}
static int mrfld_extcon_sw_control(struct mrfld_extcon_data *data, bool enable)
{
unsigned int mask = BCOVE_CHGRCTRL0_SWCONTROL;
struct device *dev = data->dev;
int ret;
if (enable)
ret = mrfld_extcon_set(data, BCOVE_CHGRCTRL0, mask);
else
ret = mrfld_extcon_clear(data, BCOVE_CHGRCTRL0, mask);
if (ret)
dev_err(dev, "can't set SW control: %d\n", ret);
return ret;
}
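/*
* Translate the USBIDSTS register contents into an INTEL_USB_* ID value,
* taking the ACA resistor ranges and the A0/B0 GND polarity into account.
*/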
static int mrfld_extcon_get_id(struct mrfld_extcon_data *data)
{
struct regmap *regmap = data->regmap;
unsigned int id;
bool ground;
int ret;
ret = regmap_read(regmap, BCOVE_USBIDSTS, &id);
if (ret)
return ret;
if (id & BCOVE_USBIDSTS_FLOAT)
return INTEL_USB_ID_FLOAT;
switch ((id & BCOVE_USBIDSTS_RARBRC_MASK) >> BCOVE_USBIDSTS_RARBRC_SHIFT) {
case BCOVE_USBIDSTS_R_ID_A:
return INTEL_USB_RID_A;
case BCOVE_USBIDSTS_R_ID_B:
return INTEL_USB_RID_B;
case BCOVE_USBIDSTS_R_ID_C:
return INTEL_USB_RID_C;
}
/*
* PMIC A0 reports USBIDSTS_GND = 1 for ID_GND,
* but PMIC B0 reports USBIDSTS_GND = 0 for ID_GND.
* Thus we must check this bit last.
*/
ground = id & BCOVE_USBIDSTS_GND;
switch ('A' + BCOVE_MAJOR(data->id)) {
case 'A':
return ground ? INTEL_USB_ID_GND : INTEL_USB_ID_FLOAT;
case 'B':
return ground ? INTEL_USB_ID_FLOAT : INTEL_USB_ID_GND;
}
/* Unknown or unsupported type */
return INTEL_USB_ID_FLOAT;
}
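/* Report host mode when the ID pin is grounded or an ACA RID_A is detected */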
static int mrfld_extcon_role_detect(struct mrfld_extcon_data *data)
{
unsigned int id;
bool usb_host;
int ret;
ret = mrfld_extcon_get_id(data);
if (ret < 0)
return ret;
id = ret;
usb_host = (id == INTEL_USB_ID_GND) || (id == INTEL_USB_RID_A);
extcon_set_state_sync(data->edev, EXTCON_USB_HOST, usb_host);
return 0;
}
static int mrfld_extcon_cable_detect(struct mrfld_extcon_data *data)
{
struct regmap *regmap = data->regmap;
unsigned int status, change;
int ret;
/*
* It seems SCU firmware clears the content of BCOVE_CHGRIRQ1
* and makes it useless for the OS. Instead we compare a previously
* stored status to the current one, provided by BCOVE_SCHGRIRQ1.
*/
ret = regmap_read(regmap, BCOVE_SCHGRIRQ1, &status);
if (ret)
return ret;
change = status ^ data->status;
if (!change)
return -ENODATA;
if (change & BCOVE_CHGRIRQ_USBIDDET) {
ret = mrfld_extcon_role_detect(data);
if (ret)
return ret;
}
data->status = status;
return 0;
}
static irqreturn_t mrfld_extcon_interrupt(int irq, void *dev_id)
{
struct mrfld_extcon_data *data = dev_id;
int ret;
ret = mrfld_extcon_cable_detect(data);
mrfld_extcon_clear(data, BCOVE_MIRQLVL1, BCOVE_LVL1_CHGR);
return ret ? IRQ_NONE : IRQ_HANDLED;
}
static int mrfld_extcon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
struct regmap *regmap = pmic->regmap;
struct mrfld_extcon_data *data;
unsigned int status;
unsigned int id;
int irq, ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->dev = dev;
data->regmap = regmap;
data->edev = devm_extcon_dev_allocate(dev, mrfld_extcon_cable);
if (IS_ERR(data->edev))
return -ENOMEM;
ret = devm_extcon_dev_register(dev, data->edev);
if (ret < 0) {
dev_err(dev, "can't register extcon device: %d\n", ret);
return ret;
}
ret = devm_request_threaded_irq(dev, irq, NULL, mrfld_extcon_interrupt,
IRQF_ONESHOT | IRQF_SHARED, pdev->name,
data);
if (ret) {
dev_err(dev, "can't register IRQ handler: %d\n", ret);
return ret;
}
ret = regmap_read(regmap, BCOVE_ID, &id);
if (ret) {
dev_err(dev, "can't read PMIC ID: %d\n", ret);
return ret;
}
data->id = id;
ret = mrfld_extcon_sw_control(data, true);
if (ret)
return ret;
/* Get initial state */
mrfld_extcon_role_detect(data);
/*
* The cached status value is used for cable detection (see the comments
* in mrfld_extcon_cable_detect()), so we need to sync the cached value
* with the real state of the hardware.
*/
regmap_read(regmap, BCOVE_SCHGRIRQ1, &status);
data->status = status;
mrfld_extcon_clear(data, BCOVE_MIRQLVL1, BCOVE_LVL1_CHGR);
mrfld_extcon_clear(data, BCOVE_MCHGRIRQ1, BCOVE_CHGRIRQ_ALL);
mrfld_extcon_set(data, BCOVE_USBIDCTRL, BCOVE_USBIDCTRL_ALL);
platform_set_drvdata(pdev, data);
return 0;
}
static int mrfld_extcon_remove(struct platform_device *pdev)
{
struct mrfld_extcon_data *data = platform_get_drvdata(pdev);
mrfld_extcon_sw_control(data, false);
return 0;
}
static const struct platform_device_id mrfld_extcon_id_table[] = {
{ .name = "mrfld_bcove_pwrsrc" },
{}
};
MODULE_DEVICE_TABLE(platform, mrfld_extcon_id_table);
static struct platform_driver mrfld_extcon_driver = {
.driver = {
.name = "mrfld_bcove_pwrsrc",
},
.probe = mrfld_extcon_probe,
.remove = mrfld_extcon_remove,
.id_table = mrfld_extcon_id_table,
};
module_platform_driver(mrfld_extcon_driver);
MODULE_AUTHOR("Andy Shevchenko <[email protected]>");
MODULE_DESCRIPTION("extcon driver for Intel Merrifield Basin Cove PMIC");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/extcon/extcon-intel-mrfld.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/extcon/extcon-adc-jack.c
*
* Analog Jack extcon driver with ADC-based detection capability.
*
* Copyright (C) 2016 Samsung Electronics
* Chanwoo Choi <[email protected]>
*
* Copyright (C) 2012 Samsung Electronics
* MyungJoo Ham <[email protected]>
*
* Modified for calling to IIO to get adc by <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/iio/consumer.h>
#include <linux/extcon/extcon-adc-jack.h>
#include <linux/extcon-provider.h>
/**
* struct adc_jack_data - internal data for adc_jack device driver
* @dev: device that owns this adc_jack instance.
* @edev: extcon device.
* @cable_names: list of supported cables.
* @adc_conditions: list of adc value conditions.
* @num_conditions: size of adc_conditions.
* @irq: irq number of attach/detach event (0 if it does not exist).
* @handling_delay: interrupt handler will schedule extcon event
* handling after handling_delay jiffies.
* @handler: extcon event handler called by interrupt handler.
* @chan: iio channel being queried.
* @wakeup_source: whether the device may wake up the system from suspend.
*/
struct adc_jack_data {
struct device *dev;
struct extcon_dev *edev;
const unsigned int **cable_names;
struct adc_jack_cond *adc_conditions;
int num_conditions;
int irq;
unsigned long handling_delay; /* in jiffies */
struct delayed_work handler;
struct iio_channel *chan;
bool wakeup_source;
};
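/*
* Deferred work: read the raw ADC value and report the first cable whose
* [min_adc, max_adc] range matches, or mark all cables detached otherwise.
*/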
static void adc_jack_handler(struct work_struct *work)
{
struct adc_jack_data *data = container_of(to_delayed_work(work),
struct adc_jack_data,
handler);
struct adc_jack_cond *def;
int ret, adc_val;
int i;
ret = iio_read_channel_raw(data->chan, &adc_val);
if (ret < 0) {
dev_err(data->dev, "read channel() error: %d\n", ret);
return;
}
/* Get state from adc value with adc_conditions */
for (i = 0; i < data->num_conditions; i++) {
def = &data->adc_conditions[i];
if (def->min_adc <= adc_val && def->max_adc >= adc_val) {
extcon_set_state_sync(data->edev, def->id, true);
return;
}
}
/* Set the detached state if adc value is not included in the range */
for (i = 0; i < data->num_conditions; i++) {
def = &data->adc_conditions[i];
extcon_set_state_sync(data->edev, def->id, false);
}
}
static irqreturn_t adc_jack_irq_thread(int irq, void *_data)
{
struct adc_jack_data *data = _data;
queue_delayed_work(system_power_efficient_wq,
&data->handler, data->handling_delay);
return IRQ_HANDLED;
}
static int adc_jack_probe(struct platform_device *pdev)
{
struct adc_jack_data *data;
struct adc_jack_pdata *pdata = dev_get_platdata(&pdev->dev);
int i, err = 0;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
if (!pdata->cable_names) {
dev_err(&pdev->dev, "error: cable_names not defined.\n");
return -EINVAL;
}
data->dev = &pdev->dev;
data->edev = devm_extcon_dev_allocate(&pdev->dev, pdata->cable_names);
if (IS_ERR(data->edev)) {
dev_err(&pdev->dev, "failed to allocate extcon device\n");
return -ENOMEM;
}
if (!pdata->adc_conditions) {
dev_err(&pdev->dev, "error: adc_conditions not defined.\n");
return -EINVAL;
}
data->adc_conditions = pdata->adc_conditions;
/* Check the length of array and set num_conditions */
for (i = 0; data->adc_conditions[i].id != EXTCON_NONE; i++);
data->num_conditions = i;
data->chan = devm_iio_channel_get(&pdev->dev, pdata->consumer_channel);
if (IS_ERR(data->chan))
return PTR_ERR(data->chan);
data->handling_delay = msecs_to_jiffies(pdata->handling_delay_ms);
data->wakeup_source = pdata->wakeup_source;
INIT_DEFERRABLE_WORK(&data->handler, adc_jack_handler);
platform_set_drvdata(pdev, data);
err = devm_extcon_dev_register(&pdev->dev, data->edev);
if (err)
return err;
data->irq = platform_get_irq(pdev, 0);
if (data->irq < 0)
return -ENODEV;
err = request_any_context_irq(data->irq, adc_jack_irq_thread,
pdata->irq_flags, pdata->name, data);
if (err < 0) {
dev_err(&pdev->dev, "error: irq %d\n", data->irq);
return err;
}
if (data->wakeup_source)
device_init_wakeup(&pdev->dev, 1);
adc_jack_handler(&data->handler.work);
return 0;
}
static int adc_jack_remove(struct platform_device *pdev)
{
struct adc_jack_data *data = platform_get_drvdata(pdev);
free_irq(data->irq, data);
cancel_work_sync(&data->handler.work);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int adc_jack_suspend(struct device *dev)
{
struct adc_jack_data *data = dev_get_drvdata(dev);
cancel_delayed_work_sync(&data->handler);
if (device_may_wakeup(data->dev))
enable_irq_wake(data->irq);
return 0;
}
static int adc_jack_resume(struct device *dev)
{
struct adc_jack_data *data = dev_get_drvdata(dev);
if (device_may_wakeup(data->dev))
disable_irq_wake(data->irq);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(adc_jack_pm_ops,
adc_jack_suspend, adc_jack_resume);
static struct platform_driver adc_jack_driver = {
.probe = adc_jack_probe,
.remove = adc_jack_remove,
.driver = {
.name = "adc-jack",
.pm = &adc_jack_pm_ops,
},
};
module_platform_driver(adc_jack_driver);
MODULE_AUTHOR("MyungJoo Ham <[email protected]>");
MODULE_DESCRIPTION("ADC Jack extcon driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/extcon/extcon-adc-jack.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* smssdio.c - Siano 1xxx SDIO interface driver
*
* Copyright 2008 Pierre Ossman
*
* Based on code by Siano Mobile Silicon, Inc.,
* Copyright (C) 2006-2008, Uri Shkolnik
*
* This hardware is a bit odd in that all transfers should be done
* to/from the SMSSDIO_DATA register, yet the "increase address" bit
* always needs to be set.
*
* Also, buffers from the card are always aligned to 128-byte
* boundaries.
*/
/*
* General cleanup notes:
*
* - only typedefs should be named *_t
*
* - use ERR_PTR and friends for smscore_register_device()
*
* - smscore_getbuffer should zero fields
*
* - Fix stop command
*/
#include "smscoreapi.h"
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/module.h>
#include "sms-cards.h"
#include "smsendian.h"
/* Registers */
#define SMSSDIO_DATA 0x00
#define SMSSDIO_INT 0x04
#define SMSSDIO_BLOCK_SIZE 128
static const struct sdio_device_id smssdio_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_STELLAR),
.driver_data = SMS1XXX_BOARD_SIANO_STELLAR},
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_NOVA_A0),
.driver_data = SMS1XXX_BOARD_SIANO_NOVA_A},
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_NOVA_B0),
.driver_data = SMS1XXX_BOARD_SIANO_NOVA_B},
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_VEGA_A0),
.driver_data = SMS1XXX_BOARD_SIANO_VEGA},
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_VENICE),
.driver_data = SMS1XXX_BOARD_SIANO_VEGA},
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_MING),
.driver_data = SMS1XXX_BOARD_SIANO_MING},
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_PELE),
.driver_data = SMS1XXX_BOARD_SIANO_PELE},
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_RIO),
.driver_data = SMS1XXX_BOARD_SIANO_RIO},
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_DENVER_2160),
.driver_data = SMS1XXX_BOARD_SIANO_DENVER_2160},
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_DENVER_1530),
.driver_data = SMS1XXX_BOARD_SIANO_DENVER_1530},
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, smssdio_ids);
struct smssdio_device {
struct sdio_func *func;
struct smscore_device_t *coredev;
struct smscore_buffer_t *split_cb;
};
/*******************************************************************/
/* Siano core callbacks */
/*******************************************************************/
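/*
* Siano core TX callback: fix up endianness and push the message to the
* SMSSDIO_DATA register, full blocks first and then any remaining bytes.
*/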
static int smssdio_sendrequest(void *context, void *buffer, size_t size)
{
int ret = 0;
struct smssdio_device *smsdev;
smsdev = context;
sdio_claim_host(smsdev->func);
smsendian_handle_tx_message((struct sms_msg_data *) buffer);
while (size >= smsdev->func->cur_blksize) {
ret = sdio_memcpy_toio(smsdev->func, SMSSDIO_DATA,
buffer, smsdev->func->cur_blksize);
if (ret)
goto out;
buffer += smsdev->func->cur_blksize;
size -= smsdev->func->cur_blksize;
}
if (size) {
ret = sdio_memcpy_toio(smsdev->func, SMSSDIO_DATA,
buffer, size);
}
out:
sdio_release_host(smsdev->func);
return ret;
}
/*******************************************************************/
/* SDIO callbacks */
/*******************************************************************/
static void smssdio_interrupt(struct sdio_func *func)
{
int ret;
struct smssdio_device *smsdev;
struct smscore_buffer_t *cb;
struct sms_msg_hdr *hdr;
size_t size;
smsdev = sdio_get_drvdata(func);
/*
* The interrupt register has no defined meaning. It is just
* a way of turning off the level-triggered interrupt.
*/
(void)sdio_readb(func, SMSSDIO_INT, &ret);
if (ret) {
pr_err("Unable to read interrupt register!\n");
return;
}
if (smsdev->split_cb == NULL) {
cb = smscore_getbuffer(smsdev->coredev);
if (!cb) {
pr_err("Unable to allocate data buffer!\n");
return;
}
ret = sdio_memcpy_fromio(smsdev->func,
cb->p,
SMSSDIO_DATA,
SMSSDIO_BLOCK_SIZE);
if (ret) {
pr_err("Error %d reading initial block!\n", ret);
return;
}
hdr = cb->p;
if (hdr->msg_flags & MSG_HDR_FLAG_SPLIT_MSG) {
smsdev->split_cb = cb;
return;
}
if (hdr->msg_length > smsdev->func->cur_blksize)
size = hdr->msg_length - smsdev->func->cur_blksize;
else
size = 0;
} else {
cb = smsdev->split_cb;
hdr = cb->p;
size = hdr->msg_length - sizeof(struct sms_msg_hdr);
smsdev->split_cb = NULL;
}
if (size) {
void *buffer;
buffer = cb->p + (hdr->msg_length - size);
size = ALIGN(size, SMSSDIO_BLOCK_SIZE);
BUG_ON(smsdev->func->cur_blksize != SMSSDIO_BLOCK_SIZE);
/*
* First attempt to transfer all of it in one go...
*/
ret = sdio_memcpy_fromio(smsdev->func,
buffer,
SMSSDIO_DATA,
size);
if (ret && ret != -EINVAL) {
smscore_putbuffer(smsdev->coredev, cb);
pr_err("Error %d reading data from card!\n", ret);
return;
}
/*
* ..then fall back to one block at a time if that is
* not possible...
*
* (we have to do this manually because of the
* problem with the "increase address" bit)
*/
if (ret == -EINVAL) {
while (size) {
ret = sdio_memcpy_fromio(smsdev->func,
buffer, SMSSDIO_DATA,
smsdev->func->cur_blksize);
if (ret) {
smscore_putbuffer(smsdev->coredev, cb);
pr_err("Error %d reading data from card!\n",
ret);
return;
}
buffer += smsdev->func->cur_blksize;
if (size > smsdev->func->cur_blksize)
size -= smsdev->func->cur_blksize;
else
size = 0;
}
}
}
cb->size = hdr->msg_length;
cb->offset = 0;
smsendian_handle_rx_message((struct sms_msg_data *) cb->p);
smscore_onresponse(smsdev->coredev, cb);
}
static int smssdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
int ret;
int board_id;
struct smssdio_device *smsdev;
struct smsdevice_params_t params;
board_id = id->driver_data;
smsdev = kzalloc(sizeof(struct smssdio_device), GFP_KERNEL);
if (!smsdev)
return -ENOMEM;
smsdev->func = func;
memset(&params, 0, sizeof(struct smsdevice_params_t));
params.device = &func->dev;
params.buffer_size = 0x5000; /* ?? */
params.num_buffers = 22; /* ?? */
params.context = smsdev;
snprintf(params.devpath, sizeof(params.devpath),
"sdio\\%s", sdio_func_id(func));
params.sendrequest_handler = smssdio_sendrequest;
params.device_type = sms_get_board(board_id)->type;
if (params.device_type != SMS_STELLAR)
params.flags |= SMS_DEVICE_FAMILY2;
else {
/*
* FIXME: Stellar needs special handling...
*/
ret = -ENODEV;
goto free;
}
ret = smscore_register_device(&params, &smsdev->coredev, GFP_DMA, NULL);
if (ret < 0)
goto free;
smscore_set_board_id(smsdev->coredev, board_id);
sdio_claim_host(func);
ret = sdio_enable_func(func);
if (ret)
goto release;
ret = sdio_set_block_size(func, SMSSDIO_BLOCK_SIZE);
if (ret)
goto disable;
ret = sdio_claim_irq(func, smssdio_interrupt);
if (ret)
goto disable;
sdio_set_drvdata(func, smsdev);
sdio_release_host(func);
ret = smscore_start_device(smsdev->coredev);
if (ret < 0)
goto reclaim;
return 0;
reclaim:
sdio_claim_host(func);
sdio_release_irq(func);
disable:
sdio_disable_func(func);
release:
sdio_release_host(func);
smscore_unregister_device(smsdev->coredev);
free:
kfree(smsdev);
return ret;
}
static void smssdio_remove(struct sdio_func *func)
{
struct smssdio_device *smsdev;
smsdev = sdio_get_drvdata(func);
/* FIXME: racy! */
if (smsdev->split_cb)
smscore_putbuffer(smsdev->coredev, smsdev->split_cb);
smscore_unregister_device(smsdev->coredev);
sdio_claim_host(func);
sdio_release_irq(func);
sdio_disable_func(func);
sdio_release_host(func);
kfree(smsdev);
}
static struct sdio_driver smssdio_driver = {
.name = "smssdio",
.id_table = smssdio_ids,
.probe = smssdio_probe,
.remove = smssdio_remove,
};
/*******************************************************************/
/* Module functions */
/*******************************************************************/
static int __init smssdio_module_init(void)
{
int ret = 0;
printk(KERN_INFO "smssdio: Siano SMS1xxx SDIO driver\n");
printk(KERN_INFO "smssdio: Copyright Pierre Ossman\n");
ret = sdio_register_driver(&smssdio_driver);
return ret;
}
static void __exit smssdio_module_exit(void)
{
sdio_unregister_driver(&smssdio_driver);
}
module_init(smssdio_module_init);
module_exit(smssdio_module_exit);
MODULE_DESCRIPTION("Siano SMS1xxx SDIO driver");
MODULE_AUTHOR("Pierre Ossman");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/mmc/siano/smssdio.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for the Conexant CX25821 PCIe bridge
*
* Copyright (C) 2009 Conexant Systems Inc.
* Authors <[email protected]>, <[email protected]>
* Based on Steven Toth <[email protected]> cx23885 driver
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "cx25821.h"
/* board config info */
struct cx25821_board cx25821_boards[] = {
[UNKNOWN_BOARD] = {
.name = "UNKNOWN/GENERIC",
/* Ensure safe default for unknown boards */
.clk_freq = 0,
},
[CX25821_BOARD] = {
.name = "CX25821",
.portb = CX25821_RAW,
.portc = CX25821_264,
},
};
| linux-master | drivers/media/pci/cx25821/cx25821-cards.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for the Conexant CX25821 PCIe bridge
*
* Copyright (C) 2009 Conexant Systems Inc.
* Authors <[email protected]>, <[email protected]>
* Based on Steven Toth <[email protected]> cx23885 driver
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/i2c.h>
#include <linux/slab.h>
#include "cx25821.h"
#include "cx25821-sram.h"
#include "cx25821-video.h"
MODULE_DESCRIPTION("Driver for Athena cards");
MODULE_AUTHOR("Shu Lin - Hiep Huynh");
MODULE_LICENSE("GPL");
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");
static unsigned int card[] = {[0 ... (CX25821_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");
const struct sram_channel cx25821_sram_channels[] = {
[SRAM_CH00] = {
.i = SRAM_CH00,
.name = "VID A",
.cmds_start = VID_A_DOWN_CMDS,
.ctrl_start = VID_A_IQ,
.cdt = VID_A_CDT,
.fifo_start = VID_A_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA1_PTR1,
.ptr2_reg = DMA1_PTR2,
.cnt1_reg = DMA1_CNT1,
.cnt2_reg = DMA1_CNT2,
.int_msk = VID_A_INT_MSK,
.int_stat = VID_A_INT_STAT,
.int_mstat = VID_A_INT_MSTAT,
.dma_ctl = VID_DST_A_DMA_CTL,
.gpcnt_ctl = VID_DST_A_GPCNT_CTL,
.gpcnt = VID_DST_A_GPCNT,
.vip_ctl = VID_DST_A_VIP_CTL,
.pix_frmt = VID_DST_A_PIX_FRMT,
},
[SRAM_CH01] = {
.i = SRAM_CH01,
.name = "VID B",
.cmds_start = VID_B_DOWN_CMDS,
.ctrl_start = VID_B_IQ,
.cdt = VID_B_CDT,
.fifo_start = VID_B_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA2_PTR1,
.ptr2_reg = DMA2_PTR2,
.cnt1_reg = DMA2_CNT1,
.cnt2_reg = DMA2_CNT2,
.int_msk = VID_B_INT_MSK,
.int_stat = VID_B_INT_STAT,
.int_mstat = VID_B_INT_MSTAT,
.dma_ctl = VID_DST_B_DMA_CTL,
.gpcnt_ctl = VID_DST_B_GPCNT_CTL,
.gpcnt = VID_DST_B_GPCNT,
.vip_ctl = VID_DST_B_VIP_CTL,
.pix_frmt = VID_DST_B_PIX_FRMT,
},
[SRAM_CH02] = {
.i = SRAM_CH02,
.name = "VID C",
.cmds_start = VID_C_DOWN_CMDS,
.ctrl_start = VID_C_IQ,
.cdt = VID_C_CDT,
.fifo_start = VID_C_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA3_PTR1,
.ptr2_reg = DMA3_PTR2,
.cnt1_reg = DMA3_CNT1,
.cnt2_reg = DMA3_CNT2,
.int_msk = VID_C_INT_MSK,
.int_stat = VID_C_INT_STAT,
.int_mstat = VID_C_INT_MSTAT,
.dma_ctl = VID_DST_C_DMA_CTL,
.gpcnt_ctl = VID_DST_C_GPCNT_CTL,
.gpcnt = VID_DST_C_GPCNT,
.vip_ctl = VID_DST_C_VIP_CTL,
.pix_frmt = VID_DST_C_PIX_FRMT,
},
[SRAM_CH03] = {
.i = SRAM_CH03,
.name = "VID D",
.cmds_start = VID_D_DOWN_CMDS,
.ctrl_start = VID_D_IQ,
.cdt = VID_D_CDT,
.fifo_start = VID_D_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA4_PTR1,
.ptr2_reg = DMA4_PTR2,
.cnt1_reg = DMA4_CNT1,
.cnt2_reg = DMA4_CNT2,
.int_msk = VID_D_INT_MSK,
.int_stat = VID_D_INT_STAT,
.int_mstat = VID_D_INT_MSTAT,
.dma_ctl = VID_DST_D_DMA_CTL,
.gpcnt_ctl = VID_DST_D_GPCNT_CTL,
.gpcnt = VID_DST_D_GPCNT,
.vip_ctl = VID_DST_D_VIP_CTL,
.pix_frmt = VID_DST_D_PIX_FRMT,
},
[SRAM_CH04] = {
.i = SRAM_CH04,
.name = "VID E",
.cmds_start = VID_E_DOWN_CMDS,
.ctrl_start = VID_E_IQ,
.cdt = VID_E_CDT,
.fifo_start = VID_E_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA5_PTR1,
.ptr2_reg = DMA5_PTR2,
.cnt1_reg = DMA5_CNT1,
.cnt2_reg = DMA5_CNT2,
.int_msk = VID_E_INT_MSK,
.int_stat = VID_E_INT_STAT,
.int_mstat = VID_E_INT_MSTAT,
.dma_ctl = VID_DST_E_DMA_CTL,
.gpcnt_ctl = VID_DST_E_GPCNT_CTL,
.gpcnt = VID_DST_E_GPCNT,
.vip_ctl = VID_DST_E_VIP_CTL,
.pix_frmt = VID_DST_E_PIX_FRMT,
},
[SRAM_CH05] = {
.i = SRAM_CH05,
.name = "VID F",
.cmds_start = VID_F_DOWN_CMDS,
.ctrl_start = VID_F_IQ,
.cdt = VID_F_CDT,
.fifo_start = VID_F_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA6_PTR1,
.ptr2_reg = DMA6_PTR2,
.cnt1_reg = DMA6_CNT1,
.cnt2_reg = DMA6_CNT2,
.int_msk = VID_F_INT_MSK,
.int_stat = VID_F_INT_STAT,
.int_mstat = VID_F_INT_MSTAT,
.dma_ctl = VID_DST_F_DMA_CTL,
.gpcnt_ctl = VID_DST_F_GPCNT_CTL,
.gpcnt = VID_DST_F_GPCNT,
.vip_ctl = VID_DST_F_VIP_CTL,
.pix_frmt = VID_DST_F_PIX_FRMT,
},
[SRAM_CH06] = {
.i = SRAM_CH06,
.name = "VID G",
.cmds_start = VID_G_DOWN_CMDS,
.ctrl_start = VID_G_IQ,
.cdt = VID_G_CDT,
.fifo_start = VID_G_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA7_PTR1,
.ptr2_reg = DMA7_PTR2,
.cnt1_reg = DMA7_CNT1,
.cnt2_reg = DMA7_CNT2,
.int_msk = VID_G_INT_MSK,
.int_stat = VID_G_INT_STAT,
.int_mstat = VID_G_INT_MSTAT,
.dma_ctl = VID_DST_G_DMA_CTL,
.gpcnt_ctl = VID_DST_G_GPCNT_CTL,
.gpcnt = VID_DST_G_GPCNT,
.vip_ctl = VID_DST_G_VIP_CTL,
.pix_frmt = VID_DST_G_PIX_FRMT,
},
[SRAM_CH07] = {
.i = SRAM_CH07,
.name = "VID H",
.cmds_start = VID_H_DOWN_CMDS,
.ctrl_start = VID_H_IQ,
.cdt = VID_H_CDT,
.fifo_start = VID_H_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA8_PTR1,
.ptr2_reg = DMA8_PTR2,
.cnt1_reg = DMA8_CNT1,
.cnt2_reg = DMA8_CNT2,
.int_msk = VID_H_INT_MSK,
.int_stat = VID_H_INT_STAT,
.int_mstat = VID_H_INT_MSTAT,
.dma_ctl = VID_DST_H_DMA_CTL,
.gpcnt_ctl = VID_DST_H_GPCNT_CTL,
.gpcnt = VID_DST_H_GPCNT,
.vip_ctl = VID_DST_H_VIP_CTL,
.pix_frmt = VID_DST_H_PIX_FRMT,
},
[SRAM_CH08] = {
.name = "audio from",
.cmds_start = AUD_A_DOWN_CMDS,
.ctrl_start = AUD_A_IQ,
.cdt = AUD_A_CDT,
.fifo_start = AUD_A_DOWN_CLUSTER_1,
.fifo_size = AUDIO_CLUSTER_SIZE * 3,
.ptr1_reg = DMA17_PTR1,
.ptr2_reg = DMA17_PTR2,
.cnt1_reg = DMA17_CNT1,
.cnt2_reg = DMA17_CNT2,
},
[SRAM_CH09] = {
.i = SRAM_CH09,
.name = "VID Upstream I",
.cmds_start = VID_I_UP_CMDS,
.ctrl_start = VID_I_IQ,
.cdt = VID_I_CDT,
.fifo_start = VID_I_UP_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA15_PTR1,
.ptr2_reg = DMA15_PTR2,
.cnt1_reg = DMA15_CNT1,
.cnt2_reg = DMA15_CNT2,
.int_msk = VID_I_INT_MSK,
.int_stat = VID_I_INT_STAT,
.int_mstat = VID_I_INT_MSTAT,
.dma_ctl = VID_SRC_I_DMA_CTL,
.gpcnt_ctl = VID_SRC_I_GPCNT_CTL,
.gpcnt = VID_SRC_I_GPCNT,
.vid_fmt_ctl = VID_SRC_I_FMT_CTL,
.vid_active_ctl1 = VID_SRC_I_ACTIVE_CTL1,
.vid_active_ctl2 = VID_SRC_I_ACTIVE_CTL2,
.vid_cdt_size = VID_SRC_I_CDT_SZ,
.irq_bit = 8,
},
[SRAM_CH10] = {
.i = SRAM_CH10,
.name = "VID Upstream J",
.cmds_start = VID_J_UP_CMDS,
.ctrl_start = VID_J_IQ,
.cdt = VID_J_CDT,
.fifo_start = VID_J_UP_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA16_PTR1,
.ptr2_reg = DMA16_PTR2,
.cnt1_reg = DMA16_CNT1,
.cnt2_reg = DMA16_CNT2,
.int_msk = VID_J_INT_MSK,
.int_stat = VID_J_INT_STAT,
.int_mstat = VID_J_INT_MSTAT,
.dma_ctl = VID_SRC_J_DMA_CTL,
.gpcnt_ctl = VID_SRC_J_GPCNT_CTL,
.gpcnt = VID_SRC_J_GPCNT,
.vid_fmt_ctl = VID_SRC_J_FMT_CTL,
.vid_active_ctl1 = VID_SRC_J_ACTIVE_CTL1,
.vid_active_ctl2 = VID_SRC_J_ACTIVE_CTL2,
.vid_cdt_size = VID_SRC_J_CDT_SZ,
.irq_bit = 9,
},
[SRAM_CH11] = {
.i = SRAM_CH11,
.name = "Audio Upstream Channel B",
.cmds_start = AUD_B_UP_CMDS,
.ctrl_start = AUD_B_IQ,
.cdt = AUD_B_CDT,
.fifo_start = AUD_B_UP_CLUSTER_1,
.fifo_size = (AUDIO_CLUSTER_SIZE * 3),
.ptr1_reg = DMA22_PTR1,
.ptr2_reg = DMA22_PTR2,
.cnt1_reg = DMA22_CNT1,
.cnt2_reg = DMA22_CNT2,
.int_msk = AUD_B_INT_MSK,
.int_stat = AUD_B_INT_STAT,
.int_mstat = AUD_B_INT_MSTAT,
.dma_ctl = AUD_INT_DMA_CTL,
.gpcnt_ctl = AUD_B_GPCNT_CTL,
.gpcnt = AUD_B_GPCNT,
.aud_length = AUD_B_LNGTH,
.aud_cfg = AUD_B_CFG,
.fld_aud_fifo_en = FLD_AUD_SRC_B_FIFO_EN,
.fld_aud_risc_en = FLD_AUD_SRC_B_RISC_EN,
.irq_bit = 11,
},
};
EXPORT_SYMBOL(cx25821_sram_channels);
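/*
* Pretty-print a single RISC instruction and return the number of dwords
* it occupies, so callers can step through a program word by word.
*/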
static int cx25821_risc_decode(u32 risc)
{
static const char * const instr[16] = {
[RISC_SYNC >> 28] = "sync",
[RISC_WRITE >> 28] = "write",
[RISC_WRITEC >> 28] = "writec",
[RISC_READ >> 28] = "read",
[RISC_READC >> 28] = "readc",
[RISC_JUMP >> 28] = "jump",
[RISC_SKIP >> 28] = "skip",
[RISC_WRITERM >> 28] = "writerm",
[RISC_WRITECM >> 28] = "writecm",
[RISC_WRITECR >> 28] = "writecr",
};
static const int incr[16] = {
[RISC_WRITE >> 28] = 3,
[RISC_JUMP >> 28] = 3,
[RISC_SKIP >> 28] = 1,
[RISC_SYNC >> 28] = 1,
[RISC_WRITERM >> 28] = 3,
[RISC_WRITECM >> 28] = 3,
[RISC_WRITECR >> 28] = 4,
};
static const char * const bits[] = {
"12", "13", "14", "resync",
"cnt0", "cnt1", "18", "19",
"20", "21", "22", "23",
"irq1", "irq2", "eol", "sol",
};
int i;
pr_cont("0x%08x [ %s",
risc, instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--) {
if (risc & (1 << (i + 12)))
pr_cont(" %s", bits[i]);
}
pr_cont(" count=%d ]\n", risc & 0xfff);
return incr[risc >> 28] ? incr[risc >> 28] : 1;
}
static void cx25821_registers_init(struct cx25821_dev *dev)
{
u32 tmp;
/* enable RUN_RISC in Pecos */
cx_write(DEV_CNTRL2, 0x20);
/* Set the master PCI interrupt masks to enable video, audio, MBIF,
* and GPIO interrupts.
* I2C interrupt masking is handled by the I2C objects themselves. */
cx_write(PCI_INT_MSK, 0x2001FFFF);
tmp = cx_read(RDR_TLCTL0);
tmp &= ~FLD_CFG_RCB_CK_EN; /* Clear the RCB_CK_EN bit */
cx_write(RDR_TLCTL0, tmp);
/* PLL-A setting for the Audio Master Clock */
cx_write(PLL_A_INT_FRAC, 0x9807A58B);
/* PLL_A_POST = 0x1C, PLL_A_OUT_TO_PIN = 0x1 */
cx_write(PLL_A_POST_STAT_BIST, 0x8000019C);
/* clear reset bit [31] */
tmp = cx_read(PLL_A_INT_FRAC);
cx_write(PLL_A_INT_FRAC, tmp & 0x7FFFFFFF);
/* PLL-B setting for Mobilygen Host Bus Interface */
cx_write(PLL_B_INT_FRAC, 0x9883A86F);
/* PLL_B_POST = 0xD, PLL_B_OUT_TO_PIN = 0x0 */
cx_write(PLL_B_POST_STAT_BIST, 0x8000018D);
/* clear reset bit [31] */
tmp = cx_read(PLL_B_INT_FRAC);
cx_write(PLL_B_INT_FRAC, tmp & 0x7FFFFFFF);
/* PLL-C setting for video upstream channel */
cx_write(PLL_C_INT_FRAC, 0x96A0EA3F);
/* PLL_C_POST = 0x3, PLL_C_OUT_TO_PIN = 0x0 */
cx_write(PLL_C_POST_STAT_BIST, 0x80000103);
/* clear reset bit [31] */
tmp = cx_read(PLL_C_INT_FRAC);
cx_write(PLL_C_INT_FRAC, tmp & 0x7FFFFFFF);
/* PLL-D setting for audio upstream channel */
cx_write(PLL_D_INT_FRAC, 0x98757F5B);
/* PLL_D_POST = 0x13, PLL_D_OUT_TO_PIN = 0x0 */
cx_write(PLL_D_POST_STAT_BIST, 0x80000113);
/* clear reset bit [31] */
tmp = cx_read(PLL_D_INT_FRAC);
cx_write(PLL_D_INT_FRAC, tmp & 0x7FFFFFFF);
/* This selects the PLL C clock source for the video upstream channels
* I and J */
tmp = cx_read(VID_CH_CLK_SEL);
cx_write(VID_CH_CLK_SEL, (tmp & 0x00FFFFFF) | 0x24000000);
/* 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for
* channel A-C
* select 656/VIP DST for downstream Channel A - C */
tmp = cx_read(VID_CH_MODE_SEL);
/* cx_write( VID_CH_MODE_SEL, tmp | 0x1B0001FF); */
cx_write(VID_CH_MODE_SEL, tmp & 0xFFFFFE00);
/* enables 656 port I and J as output */
tmp = cx_read(CLK_RST);
/* use external ALT_PLL_REF pin as its reference clock instead */
tmp |= FLD_USE_ALT_PLL_REF;
cx_write(CLK_RST, tmp & ~(FLD_VID_I_CLK_NOE | FLD_VID_J_CLK_NOE));
msleep(100);
}
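/*
* Program the cluster descriptor table (CDT) and CMDS block in SRAM for a
* downstream video channel and point the DMA pointer/count registers at them.
*/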
int cx25821_sram_channel_setup(struct cx25821_dev *dev,
const struct sram_channel *ch,
unsigned int bpl, u32 risc)
{
unsigned int i, lines;
u32 cdt;
if (ch->cmds_start == 0) {
cx_write(ch->ptr1_reg, 0);
cx_write(ch->ptr2_reg, 0);
cx_write(ch->cnt2_reg, 0);
cx_write(ch->cnt1_reg, 0);
return 0;
}
bpl = (bpl + 7) & ~7; /* alignment */
cdt = ch->cdt;
lines = ch->fifo_size / bpl;
if (lines > 4)
lines = 4;
BUG_ON(lines < 2);
cx_write(8 + 0, RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
cx_write(8 + 4, 8);
cx_write(8 + 8, 0);
/* write CDT */
for (i = 0; i < lines; i++) {
cx_write(cdt + 16 * i, ch->fifo_start + bpl * i);
cx_write(cdt + 16 * i + 4, 0);
cx_write(cdt + 16 * i + 8, 0);
cx_write(cdt + 16 * i + 12, 0);
}
/* init the first cdt buffer */
for (i = 0; i < 128; i++)
cx_write(ch->fifo_start + 4 * i, i);
/* write CMDS */
if (ch->jumponly)
cx_write(ch->cmds_start + 0, 8);
else
cx_write(ch->cmds_start + 0, risc);
cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
cx_write(ch->cmds_start + 8, cdt);
cx_write(ch->cmds_start + 12, (lines * 16) >> 3);
cx_write(ch->cmds_start + 16, ch->ctrl_start);
if (ch->jumponly)
cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
else
cx_write(ch->cmds_start + 20, 64 >> 2);
for (i = 24; i < 80; i += 4)
cx_write(ch->cmds_start + i, 0);
/* fill registers */
cx_write(ch->ptr1_reg, ch->fifo_start);
cx_write(ch->ptr2_reg, cdt);
cx_write(ch->cnt2_reg, (lines * 16) >> 3);
cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
return 0;
}
int cx25821_sram_channel_setup_audio(struct cx25821_dev *dev,
const struct sram_channel *ch,
unsigned int bpl, u32 risc)
{
unsigned int i, lines;
u32 cdt;
if (ch->cmds_start == 0) {
cx_write(ch->ptr1_reg, 0);
cx_write(ch->ptr2_reg, 0);
cx_write(ch->cnt2_reg, 0);
cx_write(ch->cnt1_reg, 0);
return 0;
}
bpl = (bpl + 7) & ~7; /* alignment */
cdt = ch->cdt;
lines = ch->fifo_size / bpl;
if (lines > 3)
lines = 3; /* for AUDIO */
BUG_ON(lines < 2);
cx_write(8 + 0, RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
cx_write(8 + 4, 8);
cx_write(8 + 8, 0);
/* write CDT */
for (i = 0; i < lines; i++) {
cx_write(cdt + 16 * i, ch->fifo_start + bpl * i);
cx_write(cdt + 16 * i + 4, 0);
cx_write(cdt + 16 * i + 8, 0);
cx_write(cdt + 16 * i + 12, 0);
}
/* write CMDS */
if (ch->jumponly)
cx_write(ch->cmds_start + 0, 8);
else
cx_write(ch->cmds_start + 0, risc);
cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
cx_write(ch->cmds_start + 8, cdt);
cx_write(ch->cmds_start + 12, (lines * 16) >> 3);
cx_write(ch->cmds_start + 16, ch->ctrl_start);
/* IQ size */
if (ch->jumponly)
cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
else
cx_write(ch->cmds_start + 20, 64 >> 2);
/* zero out */
for (i = 24; i < 80; i += 4)
cx_write(ch->cmds_start + i, 0);
/* fill registers */
cx_write(ch->ptr1_reg, ch->fifo_start);
cx_write(ch->ptr2_reg, cdt);
cx_write(ch->cnt2_reg, (lines * 16) >> 3);
cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
return 0;
}
EXPORT_SYMBOL(cx25821_sram_channel_setup_audio);
void cx25821_sram_channel_dump(struct cx25821_dev *dev, const struct sram_channel *ch)
{
static const char * const name[] = {
"init risc lo",
"init risc hi",
"cdt base",
"cdt size",
"iq base",
"iq size",
"risc pc lo",
"risc pc hi",
"iq wr ptr",
"iq rd ptr",
"cdt current",
"pci target lo",
"pci target hi",
"line / byte",
};
u32 risc;
unsigned int i, j, n;
pr_warn("%s: %s - dma channel status dump\n", dev->name, ch->name);
for (i = 0; i < ARRAY_SIZE(name); i++)
pr_warn("cmds + 0x%2x: %-15s: 0x%08x\n",
i * 4, name[i], cx_read(ch->cmds_start + 4 * i));
j = i * 4;
for (i = 0; i < 4;) {
risc = cx_read(ch->cmds_start + 4 * (i + 14));
pr_warn("cmds + 0x%2x: risc%d: ", j + i * 4, i);
i += cx25821_risc_decode(risc);
}
for (i = 0; i < (64 >> 2); i += n) {
risc = cx_read(ch->ctrl_start + 4 * i);
/* No consideration for bits 63-32 */
pr_warn("ctrl + 0x%2x (0x%08x): iq %x: ",
i * 4, ch->ctrl_start + 4 * i, i);
n = cx25821_risc_decode(risc);
for (j = 1; j < n; j++) {
risc = cx_read(ch->ctrl_start + 4 * (i + j));
pr_warn("ctrl + 0x%2x : iq %x: 0x%08x [ arg #%d ]\n",
4 * (i + j), i + j, risc, j);
}
}
pr_warn(" : fifo: 0x%08x -> 0x%x\n",
ch->fifo_start, ch->fifo_start + ch->fifo_size);
pr_warn(" : ctrl: 0x%08x -> 0x%x\n",
ch->ctrl_start, ch->ctrl_start + 6 * 16);
pr_warn(" : ptr1_reg: 0x%08x\n",
cx_read(ch->ptr1_reg));
pr_warn(" : ptr2_reg: 0x%08x\n",
cx_read(ch->ptr2_reg));
pr_warn(" : cnt1_reg: 0x%08x\n",
cx_read(ch->cnt1_reg));
pr_warn(" : cnt2_reg: 0x%08x\n",
cx_read(ch->cnt2_reg));
}
void cx25821_sram_channel_dump_audio(struct cx25821_dev *dev,
const struct sram_channel *ch)
{
static const char * const name[] = {
"init risc lo",
"init risc hi",
"cdt base",
"cdt size",
"iq base",
"iq size",
"risc pc lo",
"risc pc hi",
"iq wr ptr",
"iq rd ptr",
"cdt current",
"pci target lo",
"pci target hi",
"line / byte",
};
u32 risc, value, tmp;
unsigned int i, j, n;
pr_info("\n%s: %s - dma Audio channel status dump\n",
dev->name, ch->name);
for (i = 0; i < ARRAY_SIZE(name); i++)
pr_info("%s: cmds + 0x%2x: %-15s: 0x%08x\n",
dev->name, i * 4, name[i],
cx_read(ch->cmds_start + 4 * i));
j = i * 4;
for (i = 0; i < 4;) {
risc = cx_read(ch->cmds_start + 4 * (i + 14));
pr_warn("cmds + 0x%2x: risc%d: ", j + i * 4, i);
i += cx25821_risc_decode(risc);
}
for (i = 0; i < (64 >> 2); i += n) {
risc = cx_read(ch->ctrl_start + 4 * i);
/* No consideration for bits 63-32 */
pr_warn("ctrl + 0x%2x (0x%08x): iq %x: ",
i * 4, ch->ctrl_start + 4 * i, i);
n = cx25821_risc_decode(risc);
for (j = 1; j < n; j++) {
risc = cx_read(ch->ctrl_start + 4 * (i + j));
pr_warn("ctrl + 0x%2x : iq %x: 0x%08x [ arg #%d ]\n",
4 * (i + j), i + j, risc, j);
}
}
pr_warn(" : fifo: 0x%08x -> 0x%x\n",
ch->fifo_start, ch->fifo_start + ch->fifo_size);
pr_warn(" : ctrl: 0x%08x -> 0x%x\n",
ch->ctrl_start, ch->ctrl_start + 6 * 16);
pr_warn(" : ptr1_reg: 0x%08x\n",
cx_read(ch->ptr1_reg));
pr_warn(" : ptr2_reg: 0x%08x\n",
cx_read(ch->ptr2_reg));
pr_warn(" : cnt1_reg: 0x%08x\n",
cx_read(ch->cnt1_reg));
pr_warn(" : cnt2_reg: 0x%08x\n",
cx_read(ch->cnt2_reg));
for (i = 0; i < 4; i++) {
risc = cx_read(ch->cmds_start + 56 + (i * 4));
pr_warn("instruction %d = 0x%x\n", i, risc);
}
/* read data from the first cdt buffer */
risc = cx_read(AUD_A_CDT);
pr_warn("\nread cdt loc=0x%x\n", risc);
for (i = 0; i < 8; i++) {
n = cx_read(risc + i * 4);
pr_cont("0x%x ", n);
}
pr_cont("\n\n");
value = cx_read(CLK_RST);
CX25821_INFO(" CLK_RST = 0x%x\n\n", value);
value = cx_read(PLL_A_POST_STAT_BIST);
CX25821_INFO(" PLL_A_POST_STAT_BIST = 0x%x\n\n", value);
value = cx_read(PLL_A_INT_FRAC);
CX25821_INFO(" PLL_A_INT_FRAC = 0x%x\n\n", value);
value = cx_read(PLL_B_POST_STAT_BIST);
CX25821_INFO(" PLL_B_POST_STAT_BIST = 0x%x\n\n", value);
value = cx_read(PLL_B_INT_FRAC);
CX25821_INFO(" PLL_B_INT_FRAC = 0x%x\n\n", value);
value = cx_read(PLL_C_POST_STAT_BIST);
CX25821_INFO(" PLL_C_POST_STAT_BIST = 0x%x\n\n", value);
value = cx_read(PLL_C_INT_FRAC);
CX25821_INFO(" PLL_C_INT_FRAC = 0x%x\n\n", value);
value = cx_read(PLL_D_POST_STAT_BIST);
CX25821_INFO(" PLL_D_POST_STAT_BIST = 0x%x\n\n", value);
value = cx_read(PLL_D_INT_FRAC);
CX25821_INFO(" PLL_D_INT_FRAC = 0x%x\n\n", value);
value = cx25821_i2c_read(&dev->i2c_bus[0], AFE_AB_DIAG_CTRL, &tmp);
CX25821_INFO(" AFE_AB_DIAG_CTRL (0x10900090) = 0x%x\n\n", value);
}
EXPORT_SYMBOL(cx25821_sram_channel_dump_audio);
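/*
* Quiesce the device: stop the RISC controller, disable video/audio DMA,
* the serial port and all interrupt masks.
*/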
static void cx25821_shutdown(struct cx25821_dev *dev)
{
int i;
/* disable RISC controller */
cx_write(DEV_CNTRL2, 0);
/* Disable Video A/B activity */
for (i = 0; i < VID_CHANNEL_NUM; i++) {
cx_write(dev->channels[i].sram_channels->dma_ctl, 0);
cx_write(dev->channels[i].sram_channels->int_msk, 0);
}
for (i = VID_UPSTREAM_SRAM_CHANNEL_I;
i <= VID_UPSTREAM_SRAM_CHANNEL_J; i++) {
cx_write(dev->channels[i].sram_channels->dma_ctl, 0);
cx_write(dev->channels[i].sram_channels->int_msk, 0);
}
/* Disable Audio activity */
cx_write(AUD_INT_DMA_CTL, 0);
/* Disable Serial port */
cx_write(UART_CTL, 0);
/* Disable Interrupts */
cx_write(PCI_INT_MSK, 0);
cx_write(AUD_A_INT_MSK, 0);
}
void cx25821_set_pixel_format(struct cx25821_dev *dev, int channel_select,
u32 format)
{
/* Only channels 0-7 have a pixel-format register; ignore out-of-range requests */
if (channel_select < 0 || channel_select > 7)
return;
cx_write(dev->channels[channel_select].sram_channels->pix_frmt, format);
dev->channels[channel_select].pixel_formats = format;
}
static void cx25821_set_vip_mode(struct cx25821_dev *dev,
const struct sram_channel *ch)
{
cx_write(ch->pix_frmt, PIXEL_FRMT_422);
cx_write(ch->vip_ctl, PIXEL_ENGINE_VIP1);
}
static void cx25821_initialize(struct cx25821_dev *dev)
{
int i;
dprintk(1, "%s()\n", __func__);
cx25821_shutdown(dev);
cx_write(PCI_INT_STAT, 0xffffffff);
for (i = 0; i < VID_CHANNEL_NUM; i++)
cx_write(dev->channels[i].sram_channels->int_stat, 0xffffffff);
cx_write(AUD_A_INT_STAT, 0xffffffff);
cx_write(AUD_B_INT_STAT, 0xffffffff);
cx_write(AUD_C_INT_STAT, 0xffffffff);
cx_write(AUD_D_INT_STAT, 0xffffffff);
cx_write(AUD_E_INT_STAT, 0xffffffff);
cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
cx_write(PAD_CTRL, 0x12); /* for I2C */
cx25821_registers_init(dev); /* init Pecos registers */
msleep(100);
for (i = 0; i < VID_CHANNEL_NUM; i++) {
cx25821_set_vip_mode(dev, dev->channels[i].sram_channels);
cx25821_sram_channel_setup(dev, dev->channels[i].sram_channels,
1440, 0);
dev->channels[i].pixel_formats = PIXEL_FRMT_422;
dev->channels[i].use_cif_resolution = 0;
}
/* Probably only affects Downstream */
for (i = VID_UPSTREAM_SRAM_CHANNEL_I;
i <= VID_UPSTREAM_SRAM_CHANNEL_J; i++) {
dev->channels[i].pixel_formats = PIXEL_FRMT_422;
cx25821_set_vip_mode(dev, dev->channels[i].sram_channels);
}
cx25821_sram_channel_setup_audio(dev,
dev->channels[SRAM_CH08].sram_channels, 128, 0);
cx25821_gpio_init(dev);
}
static int cx25821_get_resources(struct cx25821_dev *dev)
{
if (request_mem_region(pci_resource_start(dev->pci, 0),
pci_resource_len(dev->pci, 0), dev->name))
return 0;
pr_err("%s: can't get MMIO memory @ 0x%llx\n",
dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
return -EBUSY;
}
static void cx25821_dev_checkrevision(struct cx25821_dev *dev)
{
dev->hwrevision = cx_read(RDR_CFG2) & 0xff;
pr_info("Hardware revision = 0x%02x\n", dev->hwrevision);
}
static void cx25821_iounmap(struct cx25821_dev *dev)
{
if (dev == NULL)
return;
/* Releasing IO memory */
if (dev->lmmio != NULL) {
iounmap(dev->lmmio);
dev->lmmio = NULL;
}
}
static int cx25821_dev_setup(struct cx25821_dev *dev)
{
static unsigned int cx25821_devcount;
int i;
mutex_init(&dev->lock);
dev->nr = ++cx25821_devcount;
sprintf(dev->name, "cx25821[%d]", dev->nr);
if (dev->nr >= ARRAY_SIZE(card)) {
CX25821_INFO("dev->nr >= %zd", ARRAY_SIZE(card));
return -ENODEV;
}
if (dev->pci->device != 0x8210) {
pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
__func__, dev->pci->device);
return -ENODEV;
}
pr_info("Athena Hardware device = 0x%02x\n", dev->pci->device);
/* Apply a sensible clock frequency for the PCIe bridge */
dev->clk_freq = 28000000;
for (i = 0; i < MAX_VID_CHANNEL_NUM; i++) {
dev->channels[i].dev = dev;
dev->channels[i].id = i;
dev->channels[i].sram_channels = &cx25821_sram_channels[i];
}
/* board config */
dev->board = 1; /* card[dev->nr]; */
dev->_max_num_decoders = MAX_DECODERS;
dev->pci_bus = dev->pci->bus->number;
dev->pci_slot = PCI_SLOT(dev->pci->devfn);
dev->pci_irqmask = 0x001f00;
/* External Master 1 Bus */
dev->i2c_bus[0].nr = 0;
dev->i2c_bus[0].dev = dev;
dev->i2c_bus[0].reg_stat = I2C1_STAT;
dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
dev->i2c_bus[0].reg_addr = I2C1_ADDR;
dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
dev->i2c_bus[0].i2c_period = (0x07 << 24); /* 1.95MHz */
if (cx25821_get_resources(dev) < 0) {
pr_err("%s: No more PCIe resources for subsystem: %04x:%04x\n",
dev->name, dev->pci->subsystem_vendor,
dev->pci->subsystem_device);
cx25821_devcount--;
return -EBUSY;
}
/* PCIe stuff */
dev->base_io_addr = pci_resource_start(dev->pci, 0);
if (!dev->base_io_addr) {
CX25821_ERR("No PCI Memory resources, exiting!\n");
return -ENODEV;
}
dev->lmmio = ioremap(dev->base_io_addr, pci_resource_len(dev->pci, 0));
if (!dev->lmmio) {
CX25821_ERR("ioremap failed, maybe increasing __VMALLOC_RESERVE in page.h\n");
cx25821_iounmap(dev);
return -ENOMEM;
}
dev->bmmio = (u8 __iomem *) dev->lmmio;
pr_info("%s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
dev->name, dev->pci->subsystem_vendor,
dev->pci->subsystem_device, cx25821_boards[dev->board].name,
dev->board, card[dev->nr] == dev->board ?
"insmod option" : "autodetected");
/* init hardware */
cx25821_initialize(dev);
cx25821_i2c_register(&dev->i2c_bus[0]);
/* cx25821_i2c_register(&dev->i2c_bus[1]);
* cx25821_i2c_register(&dev->i2c_bus[2]); */
if (medusa_video_init(dev) < 0)
CX25821_ERR("%s(): Failed to initialize medusa!\n", __func__);
cx25821_video_register(dev);
cx25821_dev_checkrevision(dev);
return 0;
}
void cx25821_dev_unregister(struct cx25821_dev *dev)
{
int i;
if (!dev->base_io_addr)
return;
release_mem_region(dev->base_io_addr, pci_resource_len(dev->pci, 0));
for (i = 0; i < MAX_VID_CAP_CHANNEL_NUM - 1; i++) {
if (i == SRAM_CH08) /* audio channel */
continue;
/*
* TODO: enable when video output is properly
* supported.
if (i == SRAM_CH09 || i == SRAM_CH10)
cx25821_free_mem_upstream(&dev->channels[i]);
*/
cx25821_video_unregister(dev, i);
}
cx25821_i2c_unregister(&dev->i2c_bus[0]);
cx25821_iounmap(dev);
}
EXPORT_SYMBOL(cx25821_dev_unregister);
int cx25821_riscmem_alloc(struct pci_dev *pci,
struct cx25821_riscmem *risc,
unsigned int size)
{
__le32 *cpu;
dma_addr_t dma = 0;
if (risc->cpu && risc->size < size) {
dma_free_coherent(&pci->dev, risc->size, risc->cpu, risc->dma);
risc->cpu = NULL;
}
if (NULL == risc->cpu) {
cpu = dma_alloc_coherent(&pci->dev, size, &dma, GFP_KERNEL);
if (NULL == cpu)
return -ENOMEM;
risc->cpu = cpu;
risc->dma = dma;
risc->size = size;
}
return 0;
}
EXPORT_SYMBOL(cx25821_riscmem_alloc);
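/*
 * Build the RISC program for one video field. Write and jump instructions
 * are three dwords each (opcode/flags plus byte count, the low 32 bits of
 * the DMA address, and the upper 32 bits which are always 0 here); the
 * optional resync instruction is a single dword. Scanlines that straddle a
 * scatterlist chunk boundary are split into a RISC_SOL write, any number of
 * plain writes, and a final RISC_EOL write.
 */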
static __le32 *cx25821_risc_field(__le32 *rp, struct scatterlist *sglist,
unsigned int offset, u32 sync_line,
unsigned int bpl, unsigned int padding,
unsigned int lines, bool jump)
{
struct scatterlist *sg;
unsigned int line, todo;
if (jump) {
*(rp++) = cpu_to_le32(RISC_JUMP);
*(rp++) = cpu_to_le32(0);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
}
/* sync instruction */
if (sync_line != NO_SYNC_LINE)
*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
/* scan lines */
sg = sglist;
for (line = 0; line < lines; line++) {
while (offset && offset >= sg_dma_len(sg)) {
offset -= sg_dma_len(sg);
sg = sg_next(sg);
}
if (bpl <= sg_dma_len(sg) - offset) {
/* fits into current chunk */
*(rp++) = cpu_to_le32(RISC_WRITE | RISC_SOL | RISC_EOL |
bpl);
*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
offset += bpl;
} else {
/* scanline needs to be split */
todo = bpl;
*(rp++) = cpu_to_le32(RISC_WRITE | RISC_SOL |
(sg_dma_len(sg) - offset));
*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
todo -= (sg_dma_len(sg) - offset);
offset = 0;
sg = sg_next(sg);
while (todo > sg_dma_len(sg)) {
*(rp++) = cpu_to_le32(RISC_WRITE |
sg_dma_len(sg));
*(rp++) = cpu_to_le32(sg_dma_address(sg));
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
todo -= sg_dma_len(sg);
sg = sg_next(sg);
}
*(rp++) = cpu_to_le32(RISC_WRITE | RISC_EOL | todo);
*(rp++) = cpu_to_le32(sg_dma_address(sg));
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
offset += todo;
}
offset += padding;
}
return rp;
}
int cx25821_risc_buffer(struct pci_dev *pci, struct cx25821_riscmem *risc,
struct scatterlist *sglist, unsigned int top_offset,
unsigned int bottom_offset, unsigned int bpl,
unsigned int padding, unsigned int lines)
{
u32 instructions;
u32 fields;
__le32 *rp;
int rc;
fields = 0;
if (UNSET != top_offset)
fields++;
if (UNSET != bottom_offset)
fields++;
/* estimate risc mem: worst case is one write per page border +
one write per scan line + syncs + jump (all 3 dwords). Padding
can cause next bpl to start close to a page border. First DMA
region may be smaller than PAGE_SIZE */
/* write and jump need an extra dword */
instructions = fields * (1 + ((bpl + padding) * lines) / PAGE_SIZE +
lines);
instructions += 5;
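/*
 * Illustrative sizing only: for a single 480-line field with bpl = 1440,
 * no padding and PAGE_SIZE = 4096, this evaluates to
 * 1 * (1 + (1440 * 480) / 4096 + 480) + 5 = 654 instructions, i.e. roughly
 * 7.7 KiB of RISC program memory at 12 bytes per instruction.
 */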
rc = cx25821_riscmem_alloc(pci, risc, instructions * 12);
if (rc < 0)
return rc;
/* write risc instructions */
rp = risc->cpu;
if (UNSET != top_offset) {
rp = cx25821_risc_field(rp, sglist, top_offset, 0, bpl, padding,
lines, true);
}
if (UNSET != bottom_offset) {
rp = cx25821_risc_field(rp, sglist, bottom_offset, 0x200, bpl,
padding, lines, UNSET == top_offset);
}
/* save pointer to jmp instruction address */
risc->jmp = rp;
BUG_ON((risc->jmp - risc->cpu + 3) * sizeof(*risc->cpu) > risc->size);
return 0;
}
static __le32 *cx25821_risc_field_audio(__le32 * rp, struct scatterlist *sglist,
unsigned int offset, u32 sync_line,
unsigned int bpl, unsigned int padding,
unsigned int lines, unsigned int lpi)
{
struct scatterlist *sg;
unsigned int line, todo, sol;
/* sync instruction */
if (sync_line != NO_SYNC_LINE)
*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
/* scan lines */
sg = sglist;
for (line = 0; line < lines; line++) {
while (offset && offset >= sg_dma_len(sg)) {
offset -= sg_dma_len(sg);
sg = sg_next(sg);
}
if (lpi && line > 0 && !(line % lpi))
sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
else
sol = RISC_SOL;
if (bpl <= sg_dma_len(sg) - offset) {
/* fits into current chunk */
*(rp++) = cpu_to_le32(RISC_WRITE | sol | RISC_EOL |
bpl);
*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
offset += bpl;
} else {
/* scanline needs to be split */
todo = bpl;
*(rp++) = cpu_to_le32(RISC_WRITE | sol |
(sg_dma_len(sg) - offset));
*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
todo -= (sg_dma_len(sg) - offset);
offset = 0;
sg = sg_next(sg);
while (todo > sg_dma_len(sg)) {
*(rp++) = cpu_to_le32(RISC_WRITE |
sg_dma_len(sg));
*(rp++) = cpu_to_le32(sg_dma_address(sg));
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
todo -= sg_dma_len(sg);
sg = sg_next(sg);
}
*(rp++) = cpu_to_le32(RISC_WRITE | RISC_EOL | todo);
*(rp++) = cpu_to_le32(sg_dma_address(sg));
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
offset += todo;
}
offset += padding;
}
return rp;
}
int cx25821_risc_databuffer_audio(struct pci_dev *pci,
struct cx25821_riscmem *risc,
struct scatterlist *sglist,
unsigned int bpl,
unsigned int lines, unsigned int lpi)
{
u32 instructions;
__le32 *rp;
int rc;
/* estimate risc mem: worst case is one write per page border +
one write per scan line + syncs + jump (all 2 dwords). Here
there is no padding and no sync. First DMA region may be smaller
than PAGE_SIZE */
/* Jump and write need an extra dword */
instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
instructions += 1;
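/*
 * Illustrative sizing only: for a 16-line audio buffer with bpl = 128 and
 * PAGE_SIZE = 4096 this is 1 + (128 * 16) / 4096 + 16 + 1 = 18 instructions,
 * i.e. 216 bytes of RISC program memory at 12 bytes per instruction.
 */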
rc = cx25821_riscmem_alloc(pci, risc, instructions * 12);
if (rc < 0)
return rc;
/* write risc instructions */
rp = risc->cpu;
rp = cx25821_risc_field_audio(rp, sglist, 0, NO_SYNC_LINE, bpl, 0,
lines, lpi);
/* save pointer to jmp instruction address */
risc->jmp = rp;
BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
return 0;
}
EXPORT_SYMBOL(cx25821_risc_databuffer_audio);
void cx25821_free_buffer(struct cx25821_dev *dev, struct cx25821_buffer *buf)
{
if (WARN_ON(buf->risc.size == 0))
return;
dma_free_coherent(&dev->pci->dev, buf->risc.size, buf->risc.cpu,
buf->risc.dma);
memset(&buf->risc, 0, sizeof(buf->risc));
}
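/*
 * Top-level PCI interrupt handler. Each video capture channel owns one bit
 * in PCI_INT_STAT; for every pending bit the per-channel SRAM status
 * register is read and handed to cx25821_video_irq(), after which the PCI
 * status bit is acknowledged by writing it back.
 */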
static irqreturn_t cx25821_irq(int irq, void *dev_id)
{
struct cx25821_dev *dev = dev_id;
u32 pci_status;
u32 vid_status;
int i, handled = 0;
u32 mask[8] = { 1, 2, 4, 8, 16, 32, 64, 128 };
pci_status = cx_read(PCI_INT_STAT);
if (pci_status == 0)
goto out;
for (i = 0; i < VID_CHANNEL_NUM; i++) {
if (pci_status & mask[i]) {
vid_status = cx_read(dev->channels[i].
sram_channels->int_stat);
if (vid_status)
handled += cx25821_video_irq(dev, i,
vid_status);
cx_write(PCI_INT_STAT, mask[i]);
}
}
out:
return IRQ_RETVAL(handled);
}
void cx25821_print_irqbits(char *name, char *tag, char **strings,
int len, u32 bits, u32 mask)
{
unsigned int i;
printk(KERN_DEBUG pr_fmt("%s: %s [0x%x]"), name, tag, bits);
for (i = 0; i < len; i++) {
if (!(bits & (1 << i)))
continue;
if (strings[i])
pr_cont(" %s", strings[i]);
else
pr_cont(" %d", i);
if (!(mask & (1 << i)))
continue;
pr_cont("*");
}
pr_cont("\n");
}
EXPORT_SYMBOL(cx25821_print_irqbits);
struct cx25821_dev *cx25821_dev_get(struct pci_dev *pci)
{
struct cx25821_dev *dev = pci_get_drvdata(pci);
return dev;
}
EXPORT_SYMBOL(cx25821_dev_get);
static int cx25821_initdev(struct pci_dev *pci_dev,
const struct pci_device_id *pci_id)
{
struct cx25821_dev *dev;
int err = 0;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (NULL == dev)
return -ENOMEM;
err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
if (err < 0)
goto fail_free;
/* pci init */
dev->pci = pci_dev;
if (pci_enable_device(pci_dev)) {
err = -EIO;
pr_info("pci enable failed!\n");
goto fail_unregister_device;
}
err = cx25821_dev_setup(dev);
if (err)
goto fail_unregister_pci;
/* print pci info */
pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &dev->pci_rev);
pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
pr_info("%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
dev->pci_lat, (unsigned long long)dev->base_io_addr);
pci_set_master(pci_dev);
err = dma_set_mask(&pci_dev->dev, 0xffffffff);
if (err) {
pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
err = -EIO;
goto fail_irq;
}
err = request_irq(pci_dev->irq, cx25821_irq,
IRQF_SHARED, dev->name, dev);
if (err < 0) {
pr_err("%s: can't get IRQ %d\n", dev->name, pci_dev->irq);
goto fail_irq;
}
return 0;
fail_irq:
pr_info("cx25821_initdev() can't get IRQ !\n");
cx25821_dev_unregister(dev);
fail_unregister_pci:
pci_disable_device(pci_dev);
fail_unregister_device:
v4l2_device_unregister(&dev->v4l2_dev);
fail_free:
kfree(dev);
return err;
}
static void cx25821_finidev(struct pci_dev *pci_dev)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
struct cx25821_dev *dev = get_cx25821(v4l2_dev);
cx25821_shutdown(dev);
/* unregister stuff */
if (pci_dev->irq)
free_irq(pci_dev->irq, dev);
pci_disable_device(pci_dev);
cx25821_dev_unregister(dev);
v4l2_device_unregister(v4l2_dev);
kfree(dev);
}
static const struct pci_device_id cx25821_pci_tbl[] = {
{
/* CX25821 Athena */
.vendor = 0x14f1,
.device = 0x8210,
.subvendor = 0x14f1,
.subdevice = 0x0920,
}, {
/* CX25821 No Brand */
.vendor = 0x14f1,
.device = 0x8210,
.subvendor = 0x0000,
.subdevice = 0x0000,
}, {
/* --- end of list --- */
}
};
MODULE_DEVICE_TABLE(pci, cx25821_pci_tbl);
static struct pci_driver cx25821_pci_driver = {
.name = "cx25821",
.id_table = cx25821_pci_tbl,
.probe = cx25821_initdev,
.remove = cx25821_finidev,
};
static int __init cx25821_init(void)
{
pr_info("driver loaded\n");
return pci_register_driver(&cx25821_pci_driver);
}
static void __exit cx25821_fini(void)
{
pci_unregister_driver(&cx25821_pci_driver);
}
module_init(cx25821_init);
module_exit(cx25821_fini);
| linux-master | drivers/media/pci/cx25821/cx25821-core.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for the Conexant CX25821 PCIe bridge
*
* Copyright (C) 2009 Conexant Systems Inc.
* Authors <[email protected]>, <[email protected]>
* Based on Steven Toth <[email protected]> cx25821 driver
* Parts adapted/taken from Eduardo Moscoso Rubino
* Copyright (C) 2009 Eduardo Moscoso Rubino <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "cx25821-video.h"
MODULE_DESCRIPTION("v4l2 driver module for cx25821 based TV cards");
MODULE_AUTHOR("Hiep Huynh <[email protected]>");
MODULE_LICENSE("GPL");
static unsigned int video_nr[] = {[0 ... (CX25821_MAXBOARDS - 1)] = UNSET };
module_param_array(video_nr, int, NULL, 0444);
MODULE_PARM_DESC(video_nr, "video device numbers");
static unsigned int video_debug = VIDEO_DEBUG;
module_param(video_debug, int, 0644);
MODULE_PARM_DESC(video_debug, "enable debug messages [video]");
static unsigned int irq_debug;
module_param(irq_debug, int, 0644);
MODULE_PARM_DESC(irq_debug, "enable debug messages [IRQ handler]");
#define FORMAT_FLAGS_PACKED 0x01
static const struct cx25821_fmt formats[] = {
{
.fourcc = V4L2_PIX_FMT_Y41P,
.depth = 12,
.flags = FORMAT_FLAGS_PACKED,
}, {
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
},
};
static const struct cx25821_fmt *cx25821_format_by_fourcc(unsigned int fourcc)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(formats); i++)
if (formats[i].fourcc == fourcc)
return formats + i;
return NULL;
}
int cx25821_start_video_dma(struct cx25821_dev *dev,
struct cx25821_dmaqueue *q,
struct cx25821_buffer *buf,
const struct sram_channel *channel)
{
int tmp = 0;
/* setup fifo + format */
cx25821_sram_channel_setup(dev, channel, buf->bpl, buf->risc.dma);
/* reset counter */
cx_write(channel->gpcnt_ctl, 3);
/* enable irq */
cx_set(PCI_INT_MSK, cx_read(PCI_INT_MSK) | (1 << channel->i));
cx_set(channel->int_msk, 0x11);
/* start dma */
cx_write(channel->dma_ctl, 0x11); /* FIFO and RISC enable */
/* make sure any upstream setting is reversed */
tmp = cx_read(VID_CH_MODE_SEL);
cx_write(VID_CH_MODE_SEL, tmp & 0xFFFFFE00);
return 0;
}
int cx25821_video_irq(struct cx25821_dev *dev, int chan_num, u32 status)
{
int handled = 0;
u32 mask;
const struct sram_channel *channel = dev->channels[chan_num].sram_channels;
mask = cx_read(channel->int_msk);
if (0 == (status & mask))
return handled;
cx_write(channel->int_stat, status);
/* risc op code error */
if (status & (1 << 16)) {
pr_warn("%s, %s: video risc op code error\n",
dev->name, channel->name);
cx_clear(channel->dma_ctl, 0x11);
cx25821_sram_channel_dump(dev, channel);
}
/* risc1 y */
if (status & FLD_VID_DST_RISC1) {
struct cx25821_dmaqueue *dmaq =
&dev->channels[channel->i].dma_vidq;
struct cx25821_buffer *buf;
spin_lock(&dev->slock);
if (!list_empty(&dmaq->active)) {
buf = list_entry(dmaq->active.next,
struct cx25821_buffer, queue);
buf->vb.vb2_buf.timestamp = ktime_get_ns();
buf->vb.sequence = dmaq->count++;
list_del(&buf->queue);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
spin_unlock(&dev->slock);
handled++;
}
return handled;
}
static int cx25821_queue_setup(struct vb2_queue *q,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], struct device *alloc_devs[])
{
struct cx25821_channel *chan = q->drv_priv;
unsigned size = (chan->fmt->depth * chan->width * chan->height) >> 3;
if (*num_planes)
return sizes[0] < size ? -EINVAL : 0;
*num_planes = 1;
sizes[0] = size;
return 0;
}
static int cx25821_buffer_prepare(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx25821_channel *chan = vb->vb2_queue->drv_priv;
struct cx25821_dev *dev = chan->dev;
struct cx25821_buffer *buf =
container_of(vbuf, struct cx25821_buffer, vb);
struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
u32 line0_offset;
int bpl_local = LINE_SIZE_D1;
int ret;
if (chan->pixel_formats == PIXEL_FRMT_411)
buf->bpl = (chan->fmt->depth * chan->width) >> 3;
else
buf->bpl = (chan->fmt->depth >> 3) * chan->width;
if (vb2_plane_size(vb, 0) < chan->height * buf->bpl)
return -EINVAL;
vb2_set_plane_payload(vb, 0, chan->height * buf->bpl);
buf->vb.field = chan->field;
if (chan->pixel_formats == PIXEL_FRMT_411) {
bpl_local = buf->bpl;
} else {
bpl_local = buf->bpl; /* Default */
if (chan->use_cif_resolution) {
if (dev->tvnorm & V4L2_STD_625_50)
bpl_local = 352 << 1;
else
bpl_local = chan->cif_width << 1;
}
}
switch (chan->field) {
case V4L2_FIELD_TOP:
ret = cx25821_risc_buffer(dev->pci, &buf->risc,
sgt->sgl, 0, UNSET,
buf->bpl, 0, chan->height);
break;
case V4L2_FIELD_BOTTOM:
ret = cx25821_risc_buffer(dev->pci, &buf->risc,
sgt->sgl, UNSET, 0,
buf->bpl, 0, chan->height);
break;
case V4L2_FIELD_INTERLACED:
/* All other formats are top field first */
line0_offset = 0;
dprintk(1, "top field first\n");
ret = cx25821_risc_buffer(dev->pci, &buf->risc,
sgt->sgl, line0_offset,
bpl_local, bpl_local, bpl_local,
chan->height >> 1);
break;
case V4L2_FIELD_SEQ_TB:
ret = cx25821_risc_buffer(dev->pci, &buf->risc,
sgt->sgl,
0, buf->bpl * (chan->height >> 1),
buf->bpl, 0, chan->height >> 1);
break;
case V4L2_FIELD_SEQ_BT:
ret = cx25821_risc_buffer(dev->pci, &buf->risc,
sgt->sgl,
buf->bpl * (chan->height >> 1), 0,
buf->bpl, 0, chan->height >> 1);
break;
default:
WARN_ON(1);
ret = -EINVAL;
break;
}
dprintk(2, "[%p/%d] buffer_prep - %dx%d %dbpp 0x%08x - dma=0x%08lx\n",
buf, buf->vb.vb2_buf.index, chan->width, chan->height,
chan->fmt->depth, chan->fmt->fourcc,
(unsigned long)buf->risc.dma);
return ret;
}
static void cx25821_buffer_finish(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx25821_buffer *buf =
container_of(vbuf, struct cx25821_buffer, vb);
struct cx25821_channel *chan = vb->vb2_queue->drv_priv;
struct cx25821_dev *dev = chan->dev;
cx25821_free_buffer(dev, buf);
}
static void cx25821_buffer_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx25821_buffer *buf =
container_of(vbuf, struct cx25821_buffer, vb);
struct cx25821_channel *chan = vb->vb2_queue->drv_priv;
struct cx25821_dev *dev = chan->dev;
struct cx25821_buffer *prev;
struct cx25821_dmaqueue *q = &dev->channels[chan->id].dma_vidq;
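/*
 * The per-buffer RISC program starts with a jump and ends with a counting
 * jump: below, both are pointed at the first real instruction (dma + 12),
 * so a lone buffer simply loops on itself. When another buffer is already
 * active, the new program's entry jump additionally raises IRQ1 and the
 * previous buffer's terminating jump is re-pointed at this program,
 * chaining the two together.
 */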
buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
if (list_empty(&q->active)) {
list_add_tail(&buf->queue, &q->active);
} else {
buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
prev = list_entry(q->active.prev, struct cx25821_buffer,
queue);
list_add_tail(&buf->queue, &q->active);
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
}
}
static int cx25821_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct cx25821_channel *chan = q->drv_priv;
struct cx25821_dev *dev = chan->dev;
struct cx25821_dmaqueue *dmaq = &dev->channels[chan->id].dma_vidq;
struct cx25821_buffer *buf = list_entry(dmaq->active.next,
struct cx25821_buffer, queue);
dmaq->count = 0;
cx25821_start_video_dma(dev, dmaq, buf, chan->sram_channels);
return 0;
}
static void cx25821_stop_streaming(struct vb2_queue *q)
{
struct cx25821_channel *chan = q->drv_priv;
struct cx25821_dev *dev = chan->dev;
struct cx25821_dmaqueue *dmaq = &dev->channels[chan->id].dma_vidq;
unsigned long flags;
cx_write(chan->sram_channels->dma_ctl, 0); /* FIFO and RISC disable */
spin_lock_irqsave(&dev->slock, flags);
while (!list_empty(&dmaq->active)) {
struct cx25821_buffer *buf = list_entry(dmaq->active.next,
struct cx25821_buffer, queue);
list_del(&buf->queue);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&dev->slock, flags);
}
static const struct vb2_ops cx25821_video_qops = {
.queue_setup = cx25821_queue_setup,
.buf_prepare = cx25821_buffer_prepare,
.buf_finish = cx25821_buffer_finish,
.buf_queue = cx25821_buffer_queue,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
.start_streaming = cx25821_start_streaming,
.stop_streaming = cx25821_stop_streaming,
};
/* VIDEO IOCTLS */
static int cx25821_vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
if (unlikely(f->index >= ARRAY_SIZE(formats)))
return -EINVAL;
f->pixelformat = formats[f->index].fourcc;
return 0;
}
static int cx25821_vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx25821_channel *chan = video_drvdata(file);
f->fmt.pix.width = chan->width;
f->fmt.pix.height = chan->height;
f->fmt.pix.field = chan->field;
f->fmt.pix.pixelformat = chan->fmt->fourcc;
f->fmt.pix.bytesperline = (chan->width * chan->fmt->depth) >> 3;
f->fmt.pix.sizeimage = chan->height * f->fmt.pix.bytesperline;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
}
static int cx25821_vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx25821_channel *chan = video_drvdata(file);
struct cx25821_dev *dev = chan->dev;
const struct cx25821_fmt *fmt;
enum v4l2_field field = f->fmt.pix.field;
unsigned int maxh;
unsigned w;
fmt = cx25821_format_by_fourcc(f->fmt.pix.pixelformat);
if (NULL == fmt)
return -EINVAL;
maxh = (dev->tvnorm & V4L2_STD_625_50) ? 576 : 480;
w = f->fmt.pix.width;
if (field != V4L2_FIELD_BOTTOM)
field = V4L2_FIELD_TOP;
if (w < 352) {
w = 176;
f->fmt.pix.height = maxh / 4;
} else if (w < 720) {
w = 352;
f->fmt.pix.height = maxh / 2;
} else {
w = 720;
f->fmt.pix.height = maxh;
field = V4L2_FIELD_INTERLACED;
}
f->fmt.pix.field = field;
f->fmt.pix.width = w;
f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
}
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx25821_channel *chan = video_drvdata(file);
struct cx25821_dev *dev = chan->dev;
int pix_format = PIXEL_FRMT_422;
int err;
err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
chan->fmt = cx25821_format_by_fourcc(f->fmt.pix.pixelformat);
chan->field = f->fmt.pix.field;
chan->width = f->fmt.pix.width;
chan->height = f->fmt.pix.height;
if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_Y41P)
pix_format = PIXEL_FRMT_411;
else
pix_format = PIXEL_FRMT_422;
cx25821_set_pixel_format(dev, SRAM_CH00, pix_format);
/* check if cif resolution */
if (chan->width == 320 || chan->width == 352)
chan->use_cif_resolution = 1;
else
chan->use_cif_resolution = 0;
chan->cif_width = chan->width;
medusa_set_resolution(dev, chan->width, SRAM_CH00);
return 0;
}
static int vidioc_log_status(struct file *file, void *priv)
{
struct cx25821_channel *chan = video_drvdata(file);
struct cx25821_dev *dev = chan->dev;
const struct sram_channel *sram_ch = chan->sram_channels;
u32 tmp = 0;
tmp = cx_read(sram_ch->dma_ctl);
pr_info("Video input 0 is %s\n",
(tmp & 0x11) ? "streaming" : "stopped");
return 0;
}
static int cx25821_vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct cx25821_channel *chan = video_drvdata(file);
struct cx25821_dev *dev = chan->dev;
strscpy(cap->driver, "cx25821", sizeof(cap->driver));
strscpy(cap->card, cx25821_boards[dev->board].name, sizeof(cap->card));
sprintf(cap->bus_info, "PCIe:%s", pci_name(dev->pci));
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
V4L2_CAP_DEVICE_CAPS;
return 0;
}
static int cx25821_vidioc_g_std(struct file *file, void *priv, v4l2_std_id *tvnorms)
{
struct cx25821_channel *chan = video_drvdata(file);
*tvnorms = chan->dev->tvnorm;
return 0;
}
static int cx25821_vidioc_s_std(struct file *file, void *priv,
v4l2_std_id tvnorms)
{
struct cx25821_channel *chan = video_drvdata(file);
struct cx25821_dev *dev = chan->dev;
if (dev->tvnorm == tvnorms)
return 0;
dev->tvnorm = tvnorms;
chan->width = 720;
chan->height = (dev->tvnorm & V4L2_STD_625_50) ? 576 : 480;
medusa_set_videostandard(dev);
return 0;
}
static int cx25821_vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *i)
{
if (i->index)
return -EINVAL;
i->type = V4L2_INPUT_TYPE_CAMERA;
i->std = CX25821_NORMS;
strscpy(i->name, "Composite", sizeof(i->name));
return 0;
}
static int cx25821_vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
*i = 0;
return 0;
}
static int cx25821_vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
return i ? -EINVAL : 0;
}
static int cx25821_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct cx25821_channel *chan =
container_of(ctrl->handler, struct cx25821_channel, hdl);
struct cx25821_dev *dev = chan->dev;
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
medusa_set_brightness(dev, ctrl->val, chan->id);
break;
case V4L2_CID_HUE:
medusa_set_hue(dev, ctrl->val, chan->id);
break;
case V4L2_CID_CONTRAST:
medusa_set_contrast(dev, ctrl->val, chan->id);
break;
case V4L2_CID_SATURATION:
medusa_set_saturation(dev, ctrl->val, chan->id);
break;
default:
return -EINVAL;
}
return 0;
}
static int cx25821_vidioc_enum_output(struct file *file, void *priv,
struct v4l2_output *o)
{
if (o->index)
return -EINVAL;
o->type = V4L2_OUTPUT_TYPE_ANALOG;
o->std = CX25821_NORMS;
strscpy(o->name, "Composite", sizeof(o->name));
return 0;
}
static int cx25821_vidioc_g_output(struct file *file, void *priv, unsigned int *o)
{
*o = 0;
return 0;
}
static int cx25821_vidioc_s_output(struct file *file, void *priv, unsigned int o)
{
return o ? -EINVAL : 0;
}
static int cx25821_vidioc_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx25821_channel *chan = video_drvdata(file);
struct cx25821_dev *dev = chan->dev;
const struct cx25821_fmt *fmt;
fmt = cx25821_format_by_fourcc(f->fmt.pix.pixelformat);
if (NULL == fmt)
return -EINVAL;
f->fmt.pix.width = 720;
f->fmt.pix.height = (dev->tvnorm & V4L2_STD_625_50) ? 576 : 480;
f->fmt.pix.field = V4L2_FIELD_INTERLACED;
f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
}
static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx25821_channel *chan = video_drvdata(file);
int err;
err = cx25821_vidioc_try_fmt_vid_out(file, priv, f);
if (0 != err)
return err;
chan->fmt = cx25821_format_by_fourcc(f->fmt.pix.pixelformat);
chan->field = f->fmt.pix.field;
chan->width = f->fmt.pix.width;
chan->height = f->fmt.pix.height;
if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_Y41P)
chan->pixel_formats = PIXEL_FRMT_411;
else
chan->pixel_formats = PIXEL_FRMT_422;
return 0;
}
static const struct v4l2_ctrl_ops cx25821_ctrl_ops = {
.s_ctrl = cx25821_s_ctrl,
};
static const struct v4l2_file_operations video_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
.release = vb2_fop_release,
.read = vb2_fop_read,
.poll = vb2_fop_poll,
.unlocked_ioctl = video_ioctl2,
.mmap = vb2_fop_mmap,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_querycap = cx25821_vidioc_querycap,
.vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_g_std = cx25821_vidioc_g_std,
.vidioc_s_std = cx25821_vidioc_s_std,
.vidioc_enum_input = cx25821_vidioc_enum_input,
.vidioc_g_input = cx25821_vidioc_g_input,
.vidioc_s_input = cx25821_vidioc_s_input,
.vidioc_log_status = vidioc_log_status,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static const struct video_device cx25821_video_device = {
.name = "cx25821-video",
.fops = &video_fops,
.release = video_device_release_empty,
.minor = -1,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = CX25821_NORMS,
.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING,
};
static const struct v4l2_file_operations video_out_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
.release = vb2_fop_release,
.write = vb2_fop_write,
.poll = vb2_fop_poll,
.unlocked_ioctl = video_ioctl2,
.mmap = vb2_fop_mmap,
};
static const struct v4l2_ioctl_ops video_out_ioctl_ops = {
.vidioc_querycap = cx25821_vidioc_querycap,
.vidioc_enum_fmt_vid_out = cx25821_vidioc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_out = cx25821_vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_out = cx25821_vidioc_try_fmt_vid_out,
.vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
.vidioc_g_std = cx25821_vidioc_g_std,
.vidioc_s_std = cx25821_vidioc_s_std,
.vidioc_enum_output = cx25821_vidioc_enum_output,
.vidioc_g_output = cx25821_vidioc_g_output,
.vidioc_s_output = cx25821_vidioc_s_output,
.vidioc_log_status = vidioc_log_status,
};
static const struct video_device cx25821_video_out_device = {
.name = "cx25821-video",
.fops = &video_out_fops,
.release = video_device_release_empty,
.minor = -1,
.ioctl_ops = &video_out_ioctl_ops,
.tvnorms = CX25821_NORMS,
.device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_READWRITE,
};
void cx25821_video_unregister(struct cx25821_dev *dev, int chan_num)
{
cx_clear(PCI_INT_MSK, 1);
if (video_is_registered(&dev->channels[chan_num].vdev)) {
video_unregister_device(&dev->channels[chan_num].vdev);
v4l2_ctrl_handler_free(&dev->channels[chan_num].hdl);
}
}
int cx25821_video_register(struct cx25821_dev *dev)
{
int err;
int i;
/* initial device configuration */
dev->tvnorm = V4L2_STD_NTSC_M;
spin_lock_init(&dev->slock);
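/*
 * Walk all SRAM channels: channels below SRAM_CH08 are capture devices and
 * get a control handler plus an initialized vb2 queue, SRAM_CH08 is the
 * audio channel and is skipped, and the remaining channels are registered
 * as output devices (VFL_DIR_TX) whose vb2 queue is not initialized here.
 */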
for (i = 0; i < MAX_VID_CAP_CHANNEL_NUM - 1; ++i) {
struct cx25821_channel *chan = &dev->channels[i];
struct video_device *vdev = &chan->vdev;
struct v4l2_ctrl_handler *hdl = &chan->hdl;
struct vb2_queue *q;
bool is_output = i > SRAM_CH08;
if (i == SRAM_CH08) /* audio channel */
continue;
if (!is_output) {
v4l2_ctrl_handler_init(hdl, 4);
v4l2_ctrl_new_std(hdl, &cx25821_ctrl_ops,
V4L2_CID_BRIGHTNESS, 0, 10000, 1, 6200);
v4l2_ctrl_new_std(hdl, &cx25821_ctrl_ops,
V4L2_CID_CONTRAST, 0, 10000, 1, 5000);
v4l2_ctrl_new_std(hdl, &cx25821_ctrl_ops,
V4L2_CID_SATURATION, 0, 10000, 1, 5000);
v4l2_ctrl_new_std(hdl, &cx25821_ctrl_ops,
V4L2_CID_HUE, 0, 10000, 1, 5000);
if (hdl->error) {
err = hdl->error;
goto fail_unreg;
}
err = v4l2_ctrl_handler_setup(hdl);
if (err)
goto fail_unreg;
} else {
chan->out = &dev->vid_out_data[i - SRAM_CH09];
chan->out->chan = chan;
}
chan->sram_channels = &cx25821_sram_channels[i];
chan->width = 720;
chan->field = V4L2_FIELD_INTERLACED;
if (dev->tvnorm & V4L2_STD_625_50)
chan->height = 576;
else
chan->height = 480;
if (chan->pixel_formats == PIXEL_FRMT_411)
chan->fmt = cx25821_format_by_fourcc(V4L2_PIX_FMT_Y41P);
else
chan->fmt = cx25821_format_by_fourcc(V4L2_PIX_FMT_YUYV);
cx_write(chan->sram_channels->int_stat, 0xffffffff);
INIT_LIST_HEAD(&chan->dma_vidq.active);
q = &chan->vidq;
q->type = is_output ? V4L2_BUF_TYPE_VIDEO_OUTPUT :
V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
q->io_modes |= is_output ? VB2_WRITE : VB2_READ;
q->gfp_flags = GFP_DMA32;
q->min_buffers_needed = 2;
q->drv_priv = chan;
q->buf_struct_size = sizeof(struct cx25821_buffer);
q->ops = &cx25821_video_qops;
q->mem_ops = &vb2_dma_sg_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &dev->lock;
q->dev = &dev->pci->dev;
if (!is_output) {
err = vb2_queue_init(q);
if (err < 0)
goto fail_unreg;
}
/* register v4l devices */
*vdev = is_output ? cx25821_video_out_device : cx25821_video_device;
vdev->v4l2_dev = &dev->v4l2_dev;
if (!is_output)
vdev->ctrl_handler = hdl;
else
vdev->vfl_dir = VFL_DIR_TX;
vdev->lock = &dev->lock;
vdev->queue = q;
snprintf(vdev->name, sizeof(vdev->name), "%s #%d", dev->name, i);
video_set_drvdata(vdev, chan);
err = video_register_device(vdev, VFL_TYPE_VIDEO,
video_nr[dev->nr]);
if (err < 0)
goto fail_unreg;
}
/* set PCI interrupt */
cx_set(PCI_INT_MSK, 0xff);
return 0;
fail_unreg:
while (i >= 0)
cx25821_video_unregister(dev, i--);
return err;
}
| linux-master | drivers/media/pci/cx25821/cx25821-video.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for the Conexant CX25821 PCIe bridge
*
* Copyright (C) 2009 Conexant Systems Inc.
* Authors <[email protected]>, <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "cx25821.h"
#include "cx25821-medusa-video.h"
#include "cx25821-biffuncs.h"
/*
* medusa_enable_bluefield_output()
*
 * Enable the generation of blue field output if no video
*
*/
static void medusa_enable_bluefield_output(struct cx25821_dev *dev, int channel,
int enable)
{
u32 value = 0;
u32 tmp = 0;
int out_ctrl = OUT_CTRL1;
int out_ctrl_ns = OUT_CTRL_NS;
switch (channel) {
default:
case VDEC_A:
break;
case VDEC_B:
out_ctrl = VDEC_B_OUT_CTRL1;
out_ctrl_ns = VDEC_B_OUT_CTRL_NS;
break;
case VDEC_C:
out_ctrl = VDEC_C_OUT_CTRL1;
out_ctrl_ns = VDEC_C_OUT_CTRL_NS;
break;
case VDEC_D:
out_ctrl = VDEC_D_OUT_CTRL1;
out_ctrl_ns = VDEC_D_OUT_CTRL_NS;
break;
case VDEC_E:
out_ctrl = VDEC_E_OUT_CTRL1;
out_ctrl_ns = VDEC_E_OUT_CTRL_NS;
return;
case VDEC_F:
out_ctrl = VDEC_F_OUT_CTRL1;
out_ctrl_ns = VDEC_F_OUT_CTRL_NS;
return;
case VDEC_G:
out_ctrl = VDEC_G_OUT_CTRL1;
out_ctrl_ns = VDEC_G_OUT_CTRL_NS;
return;
case VDEC_H:
out_ctrl = VDEC_H_OUT_CTRL1;
out_ctrl_ns = VDEC_H_OUT_CTRL_NS;
return;
}
value = cx25821_i2c_read(&dev->i2c_bus[0], out_ctrl, &tmp);
value &= 0xFFFFFF7F; /* clear BLUE_FIELD_EN */
if (enable)
value |= 0x00000080; /* set BLUE_FIELD_EN */
cx25821_i2c_write(&dev->i2c_bus[0], out_ctrl, value);
value = cx25821_i2c_read(&dev->i2c_bus[0], out_ctrl_ns, &tmp);
value &= 0xFFFFFF7F;
if (enable)
value |= 0x00000080; /* set BLUE_FIELD_EN */
cx25821_i2c_write(&dev->i2c_bus[0], out_ctrl_ns, value);
}
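/*
 * Register layout note: each video decoder's register block is spaced
 * 0x200 bytes apart and each encoder (DENC) block 0x100 bytes apart, which
 * is why the NTSC/PAL initialization below indexes registers with
 * (0x200 * i) and (0x100 * i) offsets.
 */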
static int medusa_initialize_ntsc(struct cx25821_dev *dev)
{
int ret_val = 0;
int i = 0;
u32 value = 0;
u32 tmp = 0;
for (i = 0; i < MAX_DECODERS; i++) {
/* set video format NTSC-M */
value = cx25821_i2c_read(&dev->i2c_bus[0],
MODE_CTRL + (0x200 * i), &tmp);
value &= 0xFFFFFFF0;
/* enable the fast locking mode bit[16] */
value |= 0x10001;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
MODE_CTRL + (0x200 * i), value);
/* resolution NTSC 720x480 */
value = cx25821_i2c_read(&dev->i2c_bus[0],
HORIZ_TIM_CTRL + (0x200 * i), &tmp);
value &= 0x00C00C00;
value |= 0x612D0074;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
HORIZ_TIM_CTRL + (0x200 * i), value);
value = cx25821_i2c_read(&dev->i2c_bus[0],
VERT_TIM_CTRL + (0x200 * i), &tmp);
value &= 0x00C00C00;
value |= 0x1C1E001A; /* vblank_cnt + 2 to get camera ID */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
VERT_TIM_CTRL + (0x200 * i), value);
/* chroma subcarrier step size */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
SC_STEP_SIZE + (0x200 * i), 0x43E00000);
/* enable VIP optional active */
value = cx25821_i2c_read(&dev->i2c_bus[0],
OUT_CTRL_NS + (0x200 * i), &tmp);
value &= 0xFFFBFFFF;
value |= 0x00040000;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
OUT_CTRL_NS + (0x200 * i), value);
/* enable VIP optional active (VIP_OPT_AL) for direct output. */
value = cx25821_i2c_read(&dev->i2c_bus[0],
OUT_CTRL1 + (0x200 * i), &tmp);
value &= 0xFFFBFFFF;
value |= 0x00040000;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
OUT_CTRL1 + (0x200 * i), value);
/*
* clear VPRES_VERT_EN bit, fixes the chroma run away problem
* when the input switching rate < 16 fields
*/
value = cx25821_i2c_read(&dev->i2c_bus[0],
MISC_TIM_CTRL + (0x200 * i), &tmp);
/* disable special play detection */
value = setBitAtPos(value, 14);
value = clearBitAtPos(value, 15);
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
MISC_TIM_CTRL + (0x200 * i), value);
/* set vbi_gate_en to 0 */
value = cx25821_i2c_read(&dev->i2c_bus[0],
DFE_CTRL1 + (0x200 * i), &tmp);
value = clearBitAtPos(value, 29);
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
DFE_CTRL1 + (0x200 * i), value);
/* Enable the generation of blue field output if no video */
medusa_enable_bluefield_output(dev, i, 1);
}
for (i = 0; i < MAX_ENCODERS; i++) {
/* NTSC hclock */
value = cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_1 + (0x100 * i), &tmp);
value &= 0xF000FC00;
value |= 0x06B402D0;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_1 + (0x100 * i), value);
/* burst begin and burst end */
value = cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_2 + (0x100 * i), &tmp);
value &= 0xFF000000;
value |= 0x007E9054;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_2 + (0x100 * i), value);
value = cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_3 + (0x100 * i), &tmp);
value &= 0xFC00FE00;
value |= 0x00EC00F0;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_3 + (0x100 * i), value);
/* set NTSC vblank, no phase alternation, 7.5 IRE pedestal */
value = cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_4 + (0x100 * i), &tmp);
value &= 0x00FCFFFF;
value |= 0x13020000;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_4 + (0x100 * i), value);
value = cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_5 + (0x100 * i), &tmp);
value &= 0xFFFF0000;
value |= 0x0000E575;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_5 + (0x100 * i), value);
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_6 + (0x100 * i), 0x009A89C1);
/* Subcarrier Increment */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_7 + (0x100 * i), 0x21F07C1F);
}
/* set picture resolutions */
/* 0 - 720 */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], HSCALE_CTRL, 0x0);
/* 0 - 480 */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], VSCALE_CTRL, 0x0);
/* set Bypass input format to NTSC 525 lines */
value = cx25821_i2c_read(&dev->i2c_bus[0], BYP_AB_CTRL, &tmp);
value |= 0x00080200;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], BYP_AB_CTRL, value);
return ret_val;
}
static int medusa_PALCombInit(struct cx25821_dev *dev, int dec)
{
int ret_val = -1;
u32 value = 0, tmp = 0;
/* Setup for 2D threshold */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
COMB_2D_HFS_CFG + (0x200 * dec), 0x20002861);
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
COMB_2D_HFD_CFG + (0x200 * dec), 0x20002861);
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
COMB_2D_LF_CFG + (0x200 * dec), 0x200A1023);
/* Setup flat chroma and luma thresholds */
value = cx25821_i2c_read(&dev->i2c_bus[0],
COMB_FLAT_THRESH_CTRL + (0x200 * dec), &tmp);
value &= 0x06230000;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
COMB_FLAT_THRESH_CTRL + (0x200 * dec), value);
/* set comb 2D blend */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
COMB_2D_BLEND + (0x200 * dec), 0x210F0F0F);
/* COMB MISC CONTROL */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
COMB_MISC_CTRL + (0x200 * dec), 0x41120A7F);
return ret_val;
}
static int medusa_initialize_pal(struct cx25821_dev *dev)
{
int ret_val = 0;
int i = 0;
u32 value = 0;
u32 tmp = 0;
for (i = 0; i < MAX_DECODERS; i++) {
/* set video format PAL-BDGHI */
value = cx25821_i2c_read(&dev->i2c_bus[0],
MODE_CTRL + (0x200 * i), &tmp);
value &= 0xFFFFFFF0;
/* enable the fast locking mode bit[16] */
value |= 0x10004;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
MODE_CTRL + (0x200 * i), value);
/* resolution PAL 720x576 */
value = cx25821_i2c_read(&dev->i2c_bus[0],
HORIZ_TIM_CTRL + (0x200 * i), &tmp);
value &= 0x00C00C00;
value |= 0x632D007D;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
HORIZ_TIM_CTRL + (0x200 * i), value);
/* vblank656_cnt=x26, vactive_cnt=240h, vblank_cnt=x24 */
value = cx25821_i2c_read(&dev->i2c_bus[0],
VERT_TIM_CTRL + (0x200 * i), &tmp);
value &= 0x00C00C00;
value |= 0x28240026; /* vblank_cnt + 2 to get camera ID */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
VERT_TIM_CTRL + (0x200 * i), value);
/* chroma subcarrier step size */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
SC_STEP_SIZE + (0x200 * i), 0x5411E2D0);
/* enable VIP optional active */
value = cx25821_i2c_read(&dev->i2c_bus[0],
OUT_CTRL_NS + (0x200 * i), &tmp);
value &= 0xFFFBFFFF;
value |= 0x00040000;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
OUT_CTRL_NS + (0x200 * i), value);
/* enable VIP optional active (VIP_OPT_AL) for direct output. */
value = cx25821_i2c_read(&dev->i2c_bus[0],
OUT_CTRL1 + (0x200 * i), &tmp);
value &= 0xFFFBFFFF;
value |= 0x00040000;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
OUT_CTRL1 + (0x200 * i), value);
/*
* clear VPRES_VERT_EN bit, fixes the chroma run away problem
* when the input switching rate < 16 fields
*/
value = cx25821_i2c_read(&dev->i2c_bus[0],
MISC_TIM_CTRL + (0x200 * i), &tmp);
/* disable special play detection */
value = setBitAtPos(value, 14);
value = clearBitAtPos(value, 15);
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
MISC_TIM_CTRL + (0x200 * i), value);
/* set vbi_gate_en to 0 */
value = cx25821_i2c_read(&dev->i2c_bus[0],
DFE_CTRL1 + (0x200 * i), &tmp);
value = clearBitAtPos(value, 29);
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
DFE_CTRL1 + (0x200 * i), value);
medusa_PALCombInit(dev, i);
/* Enable the generation of blue field output if no video */
medusa_enable_bluefield_output(dev, i, 1);
}
for (i = 0; i < MAX_ENCODERS; i++) {
/* PAL hclock */
value = cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_1 + (0x100 * i), &tmp);
value &= 0xF000FC00;
value |= 0x06C002D0;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_1 + (0x100 * i), value);
/* burst begin and burst end */
value = cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_2 + (0x100 * i), &tmp);
value &= 0xFF000000;
value |= 0x007E9754;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_2 + (0x100 * i), value);
/* hblank and vactive */
value = cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_3 + (0x100 * i), &tmp);
value &= 0xFC00FE00;
value |= 0x00FC0120;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_3 + (0x100 * i), value);
/* set PAL vblank, phase alternation, 0 IRE pedestal */
value = cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_4 + (0x100 * i), &tmp);
value &= 0x00FCFFFF;
value |= 0x14010000;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_4 + (0x100 * i), value);
value = cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_5 + (0x100 * i), &tmp);
value &= 0xFFFF0000;
value |= 0x0000F078;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_5 + (0x100 * i), value);
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_6 + (0x100 * i), 0x00A493CF);
/* Subcarrier Increment */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_7 + (0x100 * i), 0x2A098ACB);
}
/* set picture resolutions */
/* 0 - 720 */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], HSCALE_CTRL, 0x0);
/* 0 - 576 */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], VSCALE_CTRL, 0x0);
/* set Bypass input format to PAL 625 lines */
value = cx25821_i2c_read(&dev->i2c_bus[0], BYP_AB_CTRL, &tmp);
value &= 0xFFF7FDFF;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], BYP_AB_CTRL, value);
return ret_val;
}
int medusa_set_videostandard(struct cx25821_dev *dev)
{
int status = 0;
u32 value = 0, tmp = 0;
if (dev->tvnorm & V4L2_STD_PAL_BG || dev->tvnorm & V4L2_STD_PAL_DK)
status = medusa_initialize_pal(dev);
else
status = medusa_initialize_ntsc(dev);
/* Enable DENC_A output */
value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_A_REG_4, &tmp);
value = setBitAtPos(value, 4);
status = cx25821_i2c_write(&dev->i2c_bus[0], DENC_A_REG_4, value);
/* Enable DENC_B output */
value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_B_REG_4, &tmp);
value = setBitAtPos(value, 4);
status = cx25821_i2c_write(&dev->i2c_bus[0], DENC_B_REG_4, value);
return status;
}
void medusa_set_resolution(struct cx25821_dev *dev, int width,
int decoder_select)
{
int decoder = 0;
int decoder_count = 0;
u32 hscale = 0x0;
u32 vscale = 0x0;
const int MAX_WIDTH = 720;
/* validate the width */
if (width > MAX_WIDTH) {
pr_info("%s(): width %d > MAX_WIDTH %d ! resetting to MAX_WIDTH\n",
__func__, width, MAX_WIDTH);
width = MAX_WIDTH;
}
if (decoder_select <= 7 && decoder_select >= 0) {
decoder = decoder_select;
decoder_count = decoder_select + 1;
} else {
decoder = 0;
decoder_count = dev->_max_num_decoders;
}
switch (width) {
case 320:
hscale = 0x13E34B;
vscale = 0x0;
break;
case 352:
hscale = 0x10A273;
vscale = 0x0;
break;
case 176:
hscale = 0x3115B2;
vscale = 0x1E00;
break;
case 160:
hscale = 0x378D84;
vscale = 0x1E00;
break;
default: /* 720 */
hscale = 0x0;
vscale = 0x0;
break;
}
for (; decoder < decoder_count; decoder++) {
/* write scaling values for each decoder */
cx25821_i2c_write(&dev->i2c_bus[0],
HSCALE_CTRL + (0x200 * decoder), hscale);
cx25821_i2c_write(&dev->i2c_bus[0],
VSCALE_CTRL + (0x200 * decoder), vscale);
}
}
static void medusa_set_decoderduration(struct cx25821_dev *dev, int decoder,
int duration)
{
u32 fld_cnt = 0;
u32 tmp = 0;
u32 disp_cnt_reg = DISP_AB_CNT;
/* no support */
if (decoder < VDEC_A || decoder > VDEC_H) {
return;
}
switch (decoder) {
default:
break;
case VDEC_C:
case VDEC_D:
disp_cnt_reg = DISP_CD_CNT;
break;
case VDEC_E:
case VDEC_F:
disp_cnt_reg = DISP_EF_CNT;
break;
case VDEC_G:
case VDEC_H:
disp_cnt_reg = DISP_GH_CNT;
break;
}
/* update hardware */
fld_cnt = cx25821_i2c_read(&dev->i2c_bus[0], disp_cnt_reg, &tmp);
if (!(decoder % 2)) { /* EVEN decoder */
fld_cnt &= 0xFFFF0000;
fld_cnt |= duration;
} else {
fld_cnt &= 0x0000FFFF;
fld_cnt |= ((u32) duration) << 16;
}
cx25821_i2c_write(&dev->i2c_bus[0], disp_cnt_reg, fld_cnt);
}
/* Map to Medusa register setting */
static int mapM(int srcMin, int srcMax, int srcVal, int dstMin, int dstMax,
int *dstVal)
{
int numerator;
int denominator;
int quotient;
if ((srcMin == srcMax) || (srcVal < srcMin) || (srcVal > srcMax))
return -1;
/*
* This is the overall expression used:
* *dstVal =
* (srcVal - srcMin)*(dstMax - dstMin) / (srcMax - srcMin) + dstMin;
* but we need to account for rounding so below we use the modulus
* operator to find the remainder and increment if necessary.
*/
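/*
 * Worked example (illustrative only, assuming VIDEO_PROCAMP_MIN = 0,
 * VIDEO_PROCAMP_MAX = 10000, SIGNED_BYTE_MIN = -128 and
 * SIGNED_BYTE_MAX = 127): mapM(0, 10000, 6200, -128, 127, &v) gives
 * numerator = 6200 * 255 = 1581000, quotient = 158 with remainder 1000;
 * since 2 * 1000 < 10000 there is no round-up, so *dstVal becomes
 * 158 + (-128) = 30.
 */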
numerator = (srcVal - srcMin) * (dstMax - dstMin);
denominator = srcMax - srcMin;
quotient = numerator / denominator;
if (2 * (numerator % denominator) >= denominator)
quotient++;
*dstVal = quotient + dstMin;
return 0;
}
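/*
 * Return the 8-bit two's complement representation of a (possibly negative)
 * value, e.g. -2 becomes 0xFE. Only the low byte is used and the bits_len
 * argument is currently ignored.
 */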
static unsigned long convert_to_twos(long numeric, unsigned long bits_len)
{
unsigned char temp;
if (numeric >= 0)
return numeric;
else {
temp = ~(abs(numeric) & 0xFF);
temp += 1;
return temp;
}
}
int medusa_set_brightness(struct cx25821_dev *dev, int brightness, int decoder)
{
int ret_val = 0;
int value = 0;
u32 val = 0, tmp = 0;
if ((brightness > VIDEO_PROCAMP_MAX) ||
(brightness < VIDEO_PROCAMP_MIN)) {
return -1;
}
ret_val = mapM(VIDEO_PROCAMP_MIN, VIDEO_PROCAMP_MAX, brightness,
SIGNED_BYTE_MIN, SIGNED_BYTE_MAX, &value);
value = convert_to_twos(value, 8);
val = cx25821_i2c_read(&dev->i2c_bus[0],
VDEC_A_BRITE_CTRL + (0x200 * decoder), &tmp);
val &= 0xFFFFFF00;
ret_val |= cx25821_i2c_write(&dev->i2c_bus[0],
VDEC_A_BRITE_CTRL + (0x200 * decoder), val | value);
return ret_val;
}
int medusa_set_contrast(struct cx25821_dev *dev, int contrast, int decoder)
{
int ret_val = 0;
int value = 0;
u32 val = 0, tmp = 0;
if ((contrast > VIDEO_PROCAMP_MAX) || (contrast < VIDEO_PROCAMP_MIN)) {
return -1;
}
ret_val = mapM(VIDEO_PROCAMP_MIN, VIDEO_PROCAMP_MAX, contrast,
UNSIGNED_BYTE_MIN, UNSIGNED_BYTE_MAX, &value);
val = cx25821_i2c_read(&dev->i2c_bus[0],
VDEC_A_CNTRST_CTRL + (0x200 * decoder), &tmp);
val &= 0xFFFFFF00;
ret_val |= cx25821_i2c_write(&dev->i2c_bus[0],
VDEC_A_CNTRST_CTRL + (0x200 * decoder), val | value);
return ret_val;
}
int medusa_set_hue(struct cx25821_dev *dev, int hue, int decoder)
{
int ret_val = 0;
int value = 0;
u32 val = 0, tmp = 0;
if ((hue > VIDEO_PROCAMP_MAX) || (hue < VIDEO_PROCAMP_MIN)) {
return -1;
}
ret_val = mapM(VIDEO_PROCAMP_MIN, VIDEO_PROCAMP_MAX, hue,
SIGNED_BYTE_MIN, SIGNED_BYTE_MAX, &value);
value = convert_to_twos(value, 8);
val = cx25821_i2c_read(&dev->i2c_bus[0],
VDEC_A_HUE_CTRL + (0x200 * decoder), &tmp);
val &= 0xFFFFFF00;
ret_val |= cx25821_i2c_write(&dev->i2c_bus[0],
VDEC_A_HUE_CTRL + (0x200 * decoder), val | value);
return ret_val;
}
int medusa_set_saturation(struct cx25821_dev *dev, int saturation, int decoder)
{
int ret_val = 0;
int value = 0;
u32 val = 0, tmp = 0;
if ((saturation > VIDEO_PROCAMP_MAX) ||
(saturation < VIDEO_PROCAMP_MIN)) {
return -1;
}
ret_val = mapM(VIDEO_PROCAMP_MIN, VIDEO_PROCAMP_MAX, saturation,
UNSIGNED_BYTE_MIN, UNSIGNED_BYTE_MAX, &value);
val = cx25821_i2c_read(&dev->i2c_bus[0],
VDEC_A_USAT_CTRL + (0x200 * decoder), &tmp);
val &= 0xFFFFFF00;
ret_val |= cx25821_i2c_write(&dev->i2c_bus[0],
VDEC_A_USAT_CTRL + (0x200 * decoder), val | value);
val = cx25821_i2c_read(&dev->i2c_bus[0],
VDEC_A_VSAT_CTRL + (0x200 * decoder), &tmp);
val &= 0xFFFFFF00;
ret_val |= cx25821_i2c_write(&dev->i2c_bus[0],
VDEC_A_VSAT_CTRL + (0x200 * decoder), val | value);
return ret_val;
}
/* Program the display sequence and monitor output. */
int medusa_video_init(struct cx25821_dev *dev)
{
u32 value = 0, tmp = 0;
int ret_val = 0;
int i = 0;
/* disable Auto source selection on all video decoders */
value = cx25821_i2c_read(&dev->i2c_bus[0], MON_A_CTRL, &tmp);
value &= 0xFFFFF0FF;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], MON_A_CTRL, value);
if (ret_val < 0)
goto error;
/* Turn off Master source switch enable */
value = cx25821_i2c_read(&dev->i2c_bus[0], MON_A_CTRL, &tmp);
value &= 0xFFFFFFDF;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], MON_A_CTRL, value);
if (ret_val < 0)
goto error;
/*
* FIXME: due to a coding bug the duration was always 0. It's
* likely that it really should be something else, but due to the
* lack of documentation I have no idea what it should be. For
* now just fill in 0 as the duration.
*/
for (i = 0; i < dev->_max_num_decoders; i++)
medusa_set_decoderduration(dev, i, 0);
/* Select monitor as DENC A input, power up the DAC */
value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_AB_CTRL, &tmp);
value &= 0xFF70FF70;
value |= 0x00090008; /* set en_active */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], DENC_AB_CTRL, value);
if (ret_val < 0)
goto error;
/* enable input is VIP/656 */
value = cx25821_i2c_read(&dev->i2c_bus[0], BYP_AB_CTRL, &tmp);
value |= 0x00040100; /* enable VIP */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], BYP_AB_CTRL, value);
if (ret_val < 0)
goto error;
/* select AFE clock to output mode */
value = cx25821_i2c_read(&dev->i2c_bus[0], AFE_AB_DIAG_CTRL, &tmp);
value &= 0x83FFFFFF;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], AFE_AB_DIAG_CTRL,
value | 0x10000000);
if (ret_val < 0)
goto error;
/* Turn on all of the data out and control output pins. */
value = cx25821_i2c_read(&dev->i2c_bus[0], PIN_OE_CTRL, &tmp);
value &= 0xFEF0FE00;
if (dev->_max_num_decoders == MAX_DECODERS) {
/*
 * Note: The octal board does not support control pins (bits 16-19);
 * these bits are ignored on the octal board.
*
* disable VDEC A-C port, default to Mobilygen Interface
*/
value |= 0x010001F8;
} else {
/* disable VDEC A-C port, default to Mobilygen Interface */
value |= 0x010F0108;
}
value |= 7;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], PIN_OE_CTRL, value);
if (ret_val < 0)
goto error;
ret_val = medusa_set_videostandard(dev);
error:
return ret_val;
}
| linux-master | drivers/media/pci/cx25821/cx25821-medusa-video.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for the Conexant CX25821 PCIe bridge
*
* Copyright (C) 2009 Conexant Systems Inc.
* Authors <[email protected]>, <[email protected]>
*/
#include <linux/module.h>
#include "cx25821.h"
/********************* GPIO stuffs *********************/
void cx25821_set_gpiopin_direction(struct cx25821_dev *dev,
int pin_number, int pin_logic_value)
{
int bit = pin_number;
u32 gpio_oe_reg = GPIO_LO_OE;
u32 gpio_register = 0;
u32 value = 0;
/* Check for valid pinNumber */
if (pin_number >= 47)
return;
if (pin_number > 31) {
bit = pin_number - 31;
gpio_oe_reg = GPIO_HI_OE;
}
/* Only modify the requested pin's bit in the GPIO output-enable register;
 * keep the rest as is */
gpio_register = cx_read(gpio_oe_reg);
if (pin_logic_value == 1)
value = gpio_register | Set_GPIO_Bit(bit);
else
value = gpio_register & Clear_GPIO_Bit(bit);
cx_write(gpio_oe_reg, value);
}
EXPORT_SYMBOL(cx25821_set_gpiopin_direction);
static void cx25821_set_gpiopin_logicvalue(struct cx25821_dev *dev,
int pin_number, int pin_logic_value)
{
int bit = pin_number;
u32 gpio_reg = GPIO_LO;
u32 value = 0;
/* Check for valid pinNumber */
if (pin_number >= 47)
return;
/* change to output direction */
cx25821_set_gpiopin_direction(dev, pin_number, 0);
if (pin_number > 31) {
bit = pin_number - 31;
gpio_reg = GPIO_HI;
}
value = cx_read(gpio_reg);
if (pin_logic_value == 0)
value &= Clear_GPIO_Bit(bit);
else
value |= Set_GPIO_Bit(bit);
cx_write(gpio_reg, value);
}
void cx25821_gpio_init(struct cx25821_dev *dev)
{
if (dev == NULL)
return;
switch (dev->board) {
case CX25821_BOARD_CONEXANT_ATHENA10:
default:
/* set GPIO 5 to select the path for Medusa/Athena */
cx25821_set_gpiopin_logicvalue(dev, 5, 1);
msleep(20);
break;
}
}
| linux-master | drivers/media/pci/cx25821/cx25821-gpio.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for the Conexant CX25821 PCIe bridge
*
* Copyright (C) 2009 Conexant Systems Inc.
* Authors <[email protected]>, <[email protected]>
* Based on Steven Toth <[email protected]> cx23885 driver
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/i2c.h>
#include "cx25821.h"
static unsigned int i2c_debug;
module_param(i2c_debug, int, 0644);
MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
static unsigned int i2c_scan;
module_param(i2c_scan, int, 0444);
MODULE_PARM_DESC(i2c_scan, "scan i2c bus at insmod time");
#define dprintk(level, fmt, arg...) \
do { \
if (i2c_debug >= level) \
printk(KERN_DEBUG "%s/0: " fmt, dev->name, ##arg); \
} while (0)
#define I2C_WAIT_DELAY 32
#define I2C_WAIT_RETRY 64
#define I2C_EXTEND (1 << 3)
#define I2C_NOSTOP (1 << 4)
static inline int i2c_slave_did_ack(struct i2c_adapter *i2c_adap)
{
struct cx25821_i2c *bus = i2c_adap->algo_data;
struct cx25821_dev *dev = bus->dev;
return cx_read(bus->reg_stat) & 0x01;
}
static inline int i2c_is_busy(struct i2c_adapter *i2c_adap)
{
struct cx25821_i2c *bus = i2c_adap->algo_data;
struct cx25821_dev *dev = bus->dev;
return cx_read(bus->reg_stat) & 0x02 ? 1 : 0;
}
static int i2c_wait_done(struct i2c_adapter *i2c_adap)
{
int count;
for (count = 0; count < I2C_WAIT_RETRY; count++) {
if (!i2c_is_busy(i2c_adap))
break;
udelay(I2C_WAIT_DELAY);
}
if (I2C_WAIT_RETRY == count)
return 0;
return 1;
}
static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
const struct i2c_msg *msg, int joined_rlen)
{
struct cx25821_i2c *bus = i2c_adap->algo_data;
struct cx25821_dev *dev = bus->dev;
u32 wdata, addr, ctrl;
int retval, cnt;
if (joined_rlen)
dprintk(1, "%s(msg->wlen=%d, nextmsg->rlen=%d)\n", __func__,
msg->len, joined_rlen);
else
dprintk(1, "%s(msg->len=%d)\n", __func__, msg->len);
/* Deal with i2c probe functions with zero payload */
if (msg->len == 0) {
cx_write(bus->reg_addr, msg->addr << 25);
cx_write(bus->reg_ctrl, bus->i2c_period | (1 << 2));
if (!i2c_wait_done(i2c_adap))
return -EIO;
if (!i2c_slave_did_ack(i2c_adap))
return -EIO;
dprintk(1, "%s(): returns 0\n", __func__);
return 0;
}
/* dev, reg + first byte */
addr = (msg->addr << 25) | msg->buf[0];
wdata = msg->buf[0];
ctrl = bus->i2c_period | (1 << 12) | (1 << 2);
if (msg->len > 1)
ctrl |= I2C_NOSTOP | I2C_EXTEND;
else if (joined_rlen)
ctrl |= I2C_NOSTOP;
cx_write(bus->reg_addr, addr);
cx_write(bus->reg_wdata, wdata);
cx_write(bus->reg_ctrl, ctrl);
retval = i2c_wait_done(i2c_adap);
if (retval < 0)
goto err;
if (retval == 0)
goto eio;
if (i2c_debug) {
if (!(ctrl & I2C_NOSTOP))
printk(" >\n");
}
for (cnt = 1; cnt < msg->len; cnt++) {
/* following bytes */
wdata = msg->buf[cnt];
ctrl = bus->i2c_period | (1 << 12) | (1 << 2);
if (cnt < msg->len - 1)
ctrl |= I2C_NOSTOP | I2C_EXTEND;
else if (joined_rlen)
ctrl |= I2C_NOSTOP;
cx_write(bus->reg_addr, addr);
cx_write(bus->reg_wdata, wdata);
cx_write(bus->reg_ctrl, ctrl);
retval = i2c_wait_done(i2c_adap);
if (retval < 0)
goto err;
if (retval == 0)
goto eio;
if (i2c_debug) {
dprintk(1, " %02x", msg->buf[cnt]);
if (!(ctrl & I2C_NOSTOP))
dprintk(1, " >\n");
}
}
return msg->len;
eio:
retval = -EIO;
err:
if (i2c_debug)
pr_err(" ERR: %d\n", retval);
return retval;
}
static int i2c_readbytes(struct i2c_adapter *i2c_adap,
const struct i2c_msg *msg, int joined)
{
struct cx25821_i2c *bus = i2c_adap->algo_data;
struct cx25821_dev *dev = bus->dev;
u32 ctrl, cnt;
int retval;
if (i2c_debug && !joined)
dprintk(1, "6-%s(msg->len=%d)\n", __func__, msg->len);
/* Deal with i2c probe functions with zero payload */
if (msg->len == 0) {
cx_write(bus->reg_addr, msg->addr << 25);
cx_write(bus->reg_ctrl, bus->i2c_period | (1 << 2) | 1);
if (!i2c_wait_done(i2c_adap))
return -EIO;
if (!i2c_slave_did_ack(i2c_adap))
return -EIO;
dprintk(1, "%s(): returns 0\n", __func__);
return 0;
}
if (i2c_debug) {
if (joined)
dprintk(1, " R");
else
dprintk(1, " <R %02x", (msg->addr << 1) + 1);
}
for (cnt = 0; cnt < msg->len; cnt++) {
ctrl = bus->i2c_period | (1 << 12) | (1 << 2) | 1;
if (cnt < msg->len - 1)
ctrl |= I2C_NOSTOP | I2C_EXTEND;
cx_write(bus->reg_addr, msg->addr << 25);
cx_write(bus->reg_ctrl, ctrl);
retval = i2c_wait_done(i2c_adap);
if (retval < 0)
goto err;
if (retval == 0)
goto eio;
msg->buf[cnt] = cx_read(bus->reg_rdata) & 0xff;
if (i2c_debug) {
dprintk(1, " %02x", msg->buf[cnt]);
if (!(ctrl & I2C_NOSTOP))
dprintk(1, " >\n");
}
}
return msg->len;
eio:
retval = -EIO;
err:
if (i2c_debug)
pr_err(" ERR: %d\n", retval);
return retval;
}
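/*
 * Dispatch a list of i2c messages: plain reads and writes go straight to
 * the helpers above, while a write that is immediately followed by a read
 * from the same slave address is issued as a combined repeated-start
 * transaction by passing the read length into i2c_sendbytes() and then
 * completing the transfer with i2c_readbytes().
 */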
static int i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num)
{
struct cx25821_i2c *bus = i2c_adap->algo_data;
struct cx25821_dev *dev = bus->dev;
int i, retval = 0;
dprintk(1, "%s(num = %d)\n", __func__, num);
for (i = 0; i < num; i++) {
dprintk(1, "%s(num = %d) addr = 0x%02x len = 0x%x\n",
__func__, num, msgs[i].addr, msgs[i].len);
if (msgs[i].flags & I2C_M_RD) {
/* read */
retval = i2c_readbytes(i2c_adap, &msgs[i], 0);
} else if (i + 1 < num && (msgs[i + 1].flags & I2C_M_RD) &&
msgs[i].addr == msgs[i + 1].addr) {
/* write then read from same address */
retval = i2c_sendbytes(i2c_adap, &msgs[i],
msgs[i + 1].len);
if (retval < 0)
goto err;
i++;
retval = i2c_readbytes(i2c_adap, &msgs[i], 1);
} else {
/* write */
retval = i2c_sendbytes(i2c_adap, &msgs[i], 0);
}
if (retval < 0)
goto err;
}
return num;
err:
return retval;
}
static u32 cx25821_functionality(struct i2c_adapter *adap)
{
return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C | I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_READ_WORD_DATA | I2C_FUNC_SMBUS_WRITE_WORD_DATA;
}
static const struct i2c_algorithm cx25821_i2c_algo_template = {
.master_xfer = i2c_xfer,
.functionality = cx25821_functionality,
#ifdef NEED_ALGO_CONTROL
.algo_control = dummy_algo_control,
#endif
};
static const struct i2c_adapter cx25821_i2c_adap_template = {
.name = "cx25821",
.owner = THIS_MODULE,
.algo = &cx25821_i2c_algo_template,
};
static const struct i2c_client cx25821_i2c_client_template = {
.name = "cx25821 internal",
};
/* init + register i2c adapter */
int cx25821_i2c_register(struct cx25821_i2c *bus)
{
struct cx25821_dev *dev = bus->dev;
dprintk(1, "%s(bus = %d)\n", __func__, bus->nr);
bus->i2c_adap = cx25821_i2c_adap_template;
bus->i2c_client = cx25821_i2c_client_template;
bus->i2c_adap.dev.parent = &dev->pci->dev;
strscpy(bus->i2c_adap.name, bus->dev->name, sizeof(bus->i2c_adap.name));
bus->i2c_adap.algo_data = bus;
i2c_set_adapdata(&bus->i2c_adap, &dev->v4l2_dev);
i2c_add_adapter(&bus->i2c_adap);
bus->i2c_client.adapter = &bus->i2c_adap;
	/* set up the I2C client address (0x88 >> 1 == 0x44) */
bus->i2c_client.addr = (0x88 >> 1);
return bus->i2c_rc;
}
int cx25821_i2c_unregister(struct cx25821_i2c *bus)
{
i2c_del_adapter(&bus->i2c_adap);
return 0;
}
#if 0 /* Currently unused */
static void cx25821_av_clk(struct cx25821_dev *dev, int enable)
{
	/* write 0 to bus 2 addr 0x144 via i2c_xfer() */
char buffer[3];
struct i2c_msg msg;
dprintk(1, "%s(enabled = %d)\n", __func__, enable);
/* Register 0x144 */
buffer[0] = 0x01;
buffer[1] = 0x44;
if (enable == 1)
buffer[2] = 0x05;
else
buffer[2] = 0x00;
msg.addr = 0x44;
msg.flags = I2C_M_TEN;
msg.len = 3;
msg.buf = buffer;
i2c_xfer(&dev->i2c_bus[0].i2c_adap, &msg, 1);
}
#endif
int cx25821_i2c_read(struct cx25821_i2c *bus, u16 reg_addr, int *value)
{
struct i2c_client *client = &bus->i2c_client;
int v = 0;
u8 addr[2] = { 0, 0 };
u8 buf[4] = { 0, 0, 0, 0 };
struct i2c_msg msgs[2] = {
{
.addr = client->addr,
.flags = 0,
.len = 2,
.buf = addr,
}, {
.addr = client->addr,
.flags = I2C_M_RD,
.len = 4,
.buf = buf,
}
};
addr[0] = (reg_addr >> 8);
addr[1] = (reg_addr & 0xff);
msgs[0].addr = 0x44;
msgs[1].addr = 0x44;
i2c_xfer(client->adapter, msgs, 2);
v = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
*value = v;
return v;
}
int cx25821_i2c_write(struct cx25821_i2c *bus, u16 reg_addr, int value)
{
struct i2c_client *client = &bus->i2c_client;
int retval = 0;
u8 buf[6] = { 0, 0, 0, 0, 0, 0 };
struct i2c_msg msgs[1] = {
{
.addr = client->addr,
.flags = 0,
.len = 6,
.buf = buf,
}
};
buf[0] = reg_addr >> 8;
buf[1] = reg_addr & 0xff;
buf[5] = (value >> 24) & 0xff;
buf[4] = (value >> 16) & 0xff;
buf[3] = (value >> 8) & 0xff;
buf[2] = value & 0xff;
client->flags = 0;
msgs[0].addr = 0x44;
retval = i2c_xfer(client->adapter, msgs, 1);
return retval;
}
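/*
 * Illustrative sketch (not part of the original driver): how the two register
 * helpers above could be combined for a read-modify-write of a subdevice
 * register.  The function name and the register offset 0x040 are purely
 * hypothetical and only serve as an example.
 */
#if 0
static void cx25821_i2c_rmw_example(struct cx25821_i2c *bus)
{
	int val;

	/* fetch the current 32-bit value, set bit 0, write it back */
	cx25821_i2c_read(bus, 0x040, &val);
	cx25821_i2c_write(bus, 0x040, val | 0x1);
}
#endif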
| linux-master | drivers/media/pci/cx25821/cx25821-i2c.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for the Conexant CX25821 PCIe bridge
*
* Copyright (C) 2009 Conexant Systems Inc.
* Authors <[email protected]>, <[email protected]>
* Based on SAA713x ALSA driver and CX88 driver
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/control.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include "cx25821.h"
#include "cx25821-reg.h"
#define AUDIO_SRAM_CHANNEL SRAM_CH08
#define dprintk(level, fmt, arg...) \
do { \
if (debug >= level) \
pr_info("%s/1: " fmt, chip->dev->name, ##arg); \
} while (0)
#define dprintk_core(level, fmt, arg...) \
do { \
if (debug >= level) \
printk(KERN_DEBUG "%s/1: " fmt, chip->dev->name, ##arg); \
} while (0)
/****************************************************************************
	Data type declarations - Can be moved to a header file later
****************************************************************************/
static int devno;
struct cx25821_audio_buffer {
unsigned int bpl;
struct cx25821_riscmem risc;
void *vaddr;
struct scatterlist *sglist;
int sglen;
unsigned long nr_pages;
};
struct cx25821_audio_dev {
struct cx25821_dev *dev;
struct cx25821_dmaqueue q;
/* pci i/o */
struct pci_dev *pci;
/* audio controls */
int irq;
struct snd_card *card;
unsigned long iobase;
spinlock_t reg_lock;
atomic_t count;
unsigned int dma_size;
unsigned int period_size;
unsigned int num_periods;
struct cx25821_audio_buffer *buf;
struct snd_pcm_substream *substream;
};
/****************************************************************************
Module global static vars
****************************************************************************/
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable cx25821 soundcard. default enabled.");
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for cx25821 capture interface(s).");
/****************************************************************************
Module macros
****************************************************************************/
MODULE_DESCRIPTION("ALSA driver module for cx25821 based capture cards");
MODULE_AUTHOR("Hiep Huynh");
MODULE_LICENSE("GPL");
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");
/****************************************************************************
Module specific functions
****************************************************************************/
/* Constants taken from cx88-reg.h */
#define AUD_INT_DN_RISCI1 (1 << 0)
#define AUD_INT_UP_RISCI1 (1 << 1)
#define AUD_INT_RDS_DN_RISCI1 (1 << 2)
#define AUD_INT_DN_RISCI2 (1 << 4) /* yes, 3 is skipped */
#define AUD_INT_UP_RISCI2 (1 << 5)
#define AUD_INT_RDS_DN_RISCI2 (1 << 6)
#define AUD_INT_DN_SYNC (1 << 12)
#define AUD_INT_UP_SYNC (1 << 13)
#define AUD_INT_RDS_DN_SYNC (1 << 14)
#define AUD_INT_OPC_ERR (1 << 16)
#define AUD_INT_BER_IRQ (1 << 20)
#define AUD_INT_MCHG_IRQ (1 << 21)
#define GP_COUNT_CONTROL_RESET 0x3
#define PCI_MSK_AUD_EXT (1 << 4)
#define PCI_MSK_AUD_INT (1 << 3)
static int cx25821_alsa_dma_init(struct cx25821_audio_dev *chip,
unsigned long nr_pages)
{
struct cx25821_audio_buffer *buf = chip->buf;
struct page *pg;
int i;
buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
if (NULL == buf->vaddr) {
dprintk(1, "vmalloc_32(%lu pages) failed\n", nr_pages);
return -ENOMEM;
}
dprintk(1, "vmalloc is at addr 0x%p, size=%lu\n",
buf->vaddr,
nr_pages << PAGE_SHIFT);
memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
buf->nr_pages = nr_pages;
buf->sglist = vzalloc(array_size(sizeof(*buf->sglist), buf->nr_pages));
if (NULL == buf->sglist)
goto vzalloc_err;
sg_init_table(buf->sglist, buf->nr_pages);
for (i = 0; i < buf->nr_pages; i++) {
pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
if (NULL == pg)
goto vmalloc_to_page_err;
sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
}
return 0;
vmalloc_to_page_err:
vfree(buf->sglist);
buf->sglist = NULL;
vzalloc_err:
vfree(buf->vaddr);
buf->vaddr = NULL;
return -ENOMEM;
}
static int cx25821_alsa_dma_map(struct cx25821_audio_dev *dev)
{
struct cx25821_audio_buffer *buf = dev->buf;
buf->sglen = dma_map_sg(&dev->pci->dev, buf->sglist,
buf->nr_pages, DMA_FROM_DEVICE);
if (0 == buf->sglen) {
pr_warn("%s: cx25821_alsa_map_sg failed\n", __func__);
return -ENOMEM;
}
return 0;
}
static int cx25821_alsa_dma_unmap(struct cx25821_audio_dev *dev)
{
struct cx25821_audio_buffer *buf = dev->buf;
if (!buf->sglen)
return 0;
dma_unmap_sg(&dev->pci->dev, buf->sglist, buf->nr_pages, DMA_FROM_DEVICE);
buf->sglen = 0;
return 0;
}
static int cx25821_alsa_dma_free(struct cx25821_audio_buffer *buf)
{
vfree(buf->sglist);
buf->sglist = NULL;
vfree(buf->vaddr);
buf->vaddr = NULL;
return 0;
}
/*
* BOARD Specific: Sets audio DMA
*/
static int _cx25821_start_audio_dma(struct cx25821_audio_dev *chip)
{
struct cx25821_audio_buffer *buf = chip->buf;
struct cx25821_dev *dev = chip->dev;
const struct sram_channel *audio_ch =
&cx25821_sram_channels[AUDIO_SRAM_CHANNEL];
u32 tmp = 0;
/* enable output on the GPIO 0 for the MCLK ADC (Audio) */
cx25821_set_gpiopin_direction(chip->dev, 0, 0);
/* Make sure RISC/FIFO are off before changing FIFO/RISC settings */
cx_clear(AUD_INT_DMA_CTL,
FLD_AUD_DST_A_RISC_EN | FLD_AUD_DST_A_FIFO_EN);
/* setup fifo + format - out channel */
cx25821_sram_channel_setup_audio(chip->dev, audio_ch, buf->bpl,
buf->risc.dma);
/* sets bpl size */
cx_write(AUD_A_LNGTH, buf->bpl);
/* reset counter */
/* GP_COUNT_CONTROL_RESET = 0x3 */
cx_write(AUD_A_GPCNT_CTL, GP_COUNT_CONTROL_RESET);
atomic_set(&chip->count, 0);
/* Set the input mode to 16-bit */
tmp = cx_read(AUD_A_CFG);
cx_write(AUD_A_CFG, tmp | FLD_AUD_DST_PK_MODE | FLD_AUD_DST_ENABLE |
FLD_AUD_CLK_ENABLE);
/*
pr_info("DEBUG: Start audio DMA, %d B/line, cmds_start(0x%x)= %d lines/FIFO, %d periods, %d byte buffer\n",
buf->bpl, audio_ch->cmds_start,
cx_read(audio_ch->cmds_start + 12)>>1,
chip->num_periods, buf->bpl * chip->num_periods);
*/
/* Enables corresponding bits at AUD_INT_STAT */
cx_write(AUD_A_INT_MSK, FLD_AUD_DST_RISCI1 | FLD_AUD_DST_OF |
FLD_AUD_DST_SYNC | FLD_AUD_DST_OPC_ERR);
/* Clean any pending interrupt bits already set */
cx_write(AUD_A_INT_STAT, ~0);
/* enable audio irqs */
cx_set(PCI_INT_MSK, chip->dev->pci_irqmask | PCI_MSK_AUD_INT);
/* Turn on audio downstream fifo and risc enable 0x101 */
tmp = cx_read(AUD_INT_DMA_CTL);
cx_set(AUD_INT_DMA_CTL, tmp |
(FLD_AUD_DST_A_RISC_EN | FLD_AUD_DST_A_FIFO_EN));
mdelay(100);
return 0;
}
/*
* BOARD Specific: Resets audio DMA
*/
static int _cx25821_stop_audio_dma(struct cx25821_audio_dev *chip)
{
struct cx25821_dev *dev = chip->dev;
/* stop dma */
cx_clear(AUD_INT_DMA_CTL,
FLD_AUD_DST_A_RISC_EN | FLD_AUD_DST_A_FIFO_EN);
/* disable irqs */
cx_clear(PCI_INT_MSK, PCI_MSK_AUD_INT);
cx_clear(AUD_A_INT_MSK, AUD_INT_OPC_ERR | AUD_INT_DN_SYNC |
AUD_INT_DN_RISCI2 | AUD_INT_DN_RISCI1);
return 0;
}
#define MAX_IRQ_LOOP 50
/*
* BOARD Specific: IRQ dma bits
*/
static char *cx25821_aud_irqs[32] = {
"dn_risci1", "up_risci1", "rds_dn_risc1", /* 0-2 */
NULL, /* reserved */
"dn_risci2", "up_risci2", "rds_dn_risc2", /* 4-6 */
NULL, /* reserved */
"dnf_of", "upf_uf", "rds_dnf_uf", /* 8-10 */
NULL, /* reserved */
"dn_sync", "up_sync", "rds_dn_sync", /* 12-14 */
NULL, /* reserved */
"opc_err", "par_err", "rip_err", /* 16-18 */
"pci_abort", "ber_irq", "mchg_irq" /* 19-21 */
};
/*
 * BOARD Specific: Handles audio-specific IRQ calls
*/
static void cx25821_aud_irq(struct cx25821_audio_dev *chip, u32 status,
u32 mask)
{
struct cx25821_dev *dev = chip->dev;
if (0 == (status & mask))
return;
cx_write(AUD_A_INT_STAT, status);
if (debug > 1 || (status & mask & ~0xff))
cx25821_print_irqbits(dev->name, "irq aud", cx25821_aud_irqs,
ARRAY_SIZE(cx25821_aud_irqs), status, mask);
/* risc op code error */
if (status & AUD_INT_OPC_ERR) {
pr_warn("WARNING %s/1: Audio risc op code error\n", dev->name);
cx_clear(AUD_INT_DMA_CTL,
FLD_AUD_DST_A_RISC_EN | FLD_AUD_DST_A_FIFO_EN);
cx25821_sram_channel_dump_audio(dev,
&cx25821_sram_channels[AUDIO_SRAM_CHANNEL]);
}
if (status & AUD_INT_DN_SYNC) {
pr_warn("WARNING %s: Downstream sync error!\n", dev->name);
cx_write(AUD_A_GPCNT_CTL, GP_COUNT_CONTROL_RESET);
return;
}
/* risc1 downstream */
if (status & AUD_INT_DN_RISCI1) {
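		/* latch the GP counter; snd_cx25821_pointer() derives the hw pointer from it */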
atomic_set(&chip->count, cx_read(AUD_A_GPCNT));
snd_pcm_period_elapsed(chip->substream);
}
}
/*
* BOARD Specific: Handles IRQ calls
*/
static irqreturn_t cx25821_irq(int irq, void *dev_id)
{
struct cx25821_audio_dev *chip = dev_id;
struct cx25821_dev *dev = chip->dev;
u32 status, pci_status;
u32 audint_status, audint_mask;
int loop, handled = 0;
audint_status = cx_read(AUD_A_INT_STAT);
audint_mask = cx_read(AUD_A_INT_MSK);
status = cx_read(PCI_INT_STAT);
for (loop = 0; loop < 1; loop++) {
status = cx_read(PCI_INT_STAT);
if (0 == status) {
status = cx_read(PCI_INT_STAT);
audint_status = cx_read(AUD_A_INT_STAT);
audint_mask = cx_read(AUD_A_INT_MSK);
if (status) {
handled = 1;
cx_write(PCI_INT_STAT, status);
cx25821_aud_irq(chip, audint_status,
audint_mask);
break;
} else {
goto out;
}
}
handled = 1;
cx_write(PCI_INT_STAT, status);
cx25821_aud_irq(chip, audint_status, audint_mask);
}
pci_status = cx_read(PCI_INT_STAT);
if (handled)
cx_write(PCI_INT_STAT, pci_status);
out:
return IRQ_RETVAL(handled);
}
static int dsp_buffer_free(struct cx25821_audio_dev *chip)
{
struct cx25821_riscmem *risc = &chip->buf->risc;
BUG_ON(!chip->dma_size);
dprintk(2, "Freeing buffer\n");
cx25821_alsa_dma_unmap(chip);
cx25821_alsa_dma_free(chip->buf);
dma_free_coherent(&chip->pci->dev, risc->size, risc->cpu, risc->dma);
kfree(chip->buf);
chip->buf = NULL;
chip->dma_size = 0;
return 0;
}
/****************************************************************************
ALSA PCM Interface
****************************************************************************/
/*
* Digital hardware definition
*/
#define DEFAULT_FIFO_SIZE 384
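/*
 * With the default 384-byte FIFO split over the 3 audio clusters this gives
 * 128-byte periods, i.e. 32 stereo S16_LE frames per period.
 */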
static const struct snd_pcm_hardware snd_cx25821_digital_hw = {
.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_48000,
.rate_min = 48000,
.rate_max = 48000,
.channels_min = 2,
.channels_max = 2,
/* Analog audio output will be full of clicks and pops if there
are not exactly four lines in the SRAM FIFO buffer. */
.period_bytes_min = DEFAULT_FIFO_SIZE / 3,
.period_bytes_max = DEFAULT_FIFO_SIZE / 3,
.periods_min = 1,
.periods_max = AUDIO_LINE_SIZE,
/* 128 * 128 = 16384 = 1024 * 16 */
.buffer_bytes_max = (AUDIO_LINE_SIZE * AUDIO_LINE_SIZE),
};
/*
* audio pcm capture open callback
*/
static int snd_cx25821_pcm_open(struct snd_pcm_substream *substream)
{
struct cx25821_audio_dev *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
unsigned int bpl = 0;
if (!chip) {
pr_err("DEBUG: cx25821 can't find device struct. Can't proceed with open\n");
return -ENODEV;
}
err = snd_pcm_hw_constraint_pow2(runtime, 0,
SNDRV_PCM_HW_PARAM_PERIODS);
if (err < 0)
goto _error;
chip->substream = substream;
runtime->hw = snd_cx25821_digital_hw;
if (cx25821_sram_channels[AUDIO_SRAM_CHANNEL].fifo_size !=
DEFAULT_FIFO_SIZE) {
/* since there are 3 audio Clusters */
bpl = cx25821_sram_channels[AUDIO_SRAM_CHANNEL].fifo_size / 3;
bpl &= ~7; /* must be multiple of 8 */
if (bpl > AUDIO_LINE_SIZE)
bpl = AUDIO_LINE_SIZE;
runtime->hw.period_bytes_min = bpl;
runtime->hw.period_bytes_max = bpl;
}
return 0;
_error:
dprintk(1, "Error opening PCM!\n");
return err;
}
/*
* audio close callback
*/
static int snd_cx25821_close(struct snd_pcm_substream *substream)
{
return 0;
}
/*
* hw_params callback
*/
static int snd_cx25821_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct cx25821_audio_dev *chip = snd_pcm_substream_chip(substream);
struct cx25821_audio_buffer *buf;
int ret;
if (substream->runtime->dma_area) {
dsp_buffer_free(chip);
substream->runtime->dma_area = NULL;
}
chip->period_size = params_period_bytes(hw_params);
chip->num_periods = params_periods(hw_params);
chip->dma_size = chip->period_size * params_periods(hw_params);
BUG_ON(!chip->dma_size);
BUG_ON(chip->num_periods & (chip->num_periods - 1));
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (NULL == buf)
return -ENOMEM;
if (chip->period_size > AUDIO_LINE_SIZE)
chip->period_size = AUDIO_LINE_SIZE;
buf->bpl = chip->period_size;
chip->buf = buf;
ret = cx25821_alsa_dma_init(chip,
(PAGE_ALIGN(chip->dma_size) >> PAGE_SHIFT));
if (ret < 0)
goto error;
ret = cx25821_alsa_dma_map(chip);
if (ret < 0)
goto error;
ret = cx25821_risc_databuffer_audio(chip->pci, &buf->risc, buf->sglist,
chip->period_size, chip->num_periods, 1);
if (ret < 0) {
pr_info("DEBUG: ERROR after cx25821_risc_databuffer_audio()\n");
goto error;
}
/* Loop back to start of program */
buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
substream->runtime->dma_area = chip->buf->vaddr;
substream->runtime->dma_bytes = chip->dma_size;
substream->runtime->dma_addr = 0;
return 0;
error:
chip->buf = NULL;
kfree(buf);
return ret;
}
/*
* hw free callback
*/
static int snd_cx25821_hw_free(struct snd_pcm_substream *substream)
{
struct cx25821_audio_dev *chip = snd_pcm_substream_chip(substream);
if (substream->runtime->dma_area) {
dsp_buffer_free(chip);
substream->runtime->dma_area = NULL;
}
return 0;
}
/*
* prepare callback
*/
static int snd_cx25821_prepare(struct snd_pcm_substream *substream)
{
return 0;
}
/*
* trigger callback
*/
static int snd_cx25821_card_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct cx25821_audio_dev *chip = snd_pcm_substream_chip(substream);
int err = 0;
/* Local interrupts are already disabled by ALSA */
spin_lock(&chip->reg_lock);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
err = _cx25821_start_audio_dma(chip);
break;
case SNDRV_PCM_TRIGGER_STOP:
err = _cx25821_stop_audio_dma(chip);
break;
default:
err = -EINVAL;
break;
}
spin_unlock(&chip->reg_lock);
return err;
}
/*
* pointer callback
*/
static snd_pcm_uframes_t snd_cx25821_pointer(struct snd_pcm_substream
*substream)
{
struct cx25821_audio_dev *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
u16 count;
count = atomic_read(&chip->count);
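	/*
	 * periods is constrained to a power of two (see the pow2 constraint in
	 * snd_cx25821_pcm_open), so masking with (periods - 1) wraps the counter.
	 */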
return runtime->period_size * (count & (runtime->periods - 1));
}
/*
* page callback (needed for mmap)
*/
static struct page *snd_cx25821_page(struct snd_pcm_substream *substream,
unsigned long offset)
{
void *pageptr = substream->runtime->dma_area + offset;
return vmalloc_to_page(pageptr);
}
/*
* operators
*/
static const struct snd_pcm_ops snd_cx25821_pcm_ops = {
.open = snd_cx25821_pcm_open,
.close = snd_cx25821_close,
.hw_params = snd_cx25821_hw_params,
.hw_free = snd_cx25821_hw_free,
.prepare = snd_cx25821_prepare,
.trigger = snd_cx25821_card_trigger,
.pointer = snd_cx25821_pointer,
.page = snd_cx25821_page,
};
/*
* ALSA create a PCM device: Called when initializing the board.
* Sets up the name and hooks up the callbacks
*/
static int snd_cx25821_pcm(struct cx25821_audio_dev *chip, int device,
char *name)
{
struct snd_pcm *pcm;
int err;
err = snd_pcm_new(chip->card, name, device, 0, 1, &pcm);
if (err < 0) {
pr_info("ERROR: FAILED snd_pcm_new() in %s\n", __func__);
return err;
}
pcm->private_data = chip;
pcm->info_flags = 0;
strscpy(pcm->name, name, sizeof(pcm->name));
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_cx25821_pcm_ops);
return 0;
}
/****************************************************************************
Basic Flow for Sound Devices
****************************************************************************/
/*
 * PCI ID Table - matches the 14f1:0920 audio function listed below.
* Only boards with eeprom and byte 1 at eeprom=1 have it
*/
static const struct pci_device_id __maybe_unused cx25821_audio_pci_tbl[] = {
{0x14f1, 0x0920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
};
MODULE_DEVICE_TABLE(pci, cx25821_audio_pci_tbl);
/*
* Alsa Constructor - Component probe
*/
static int cx25821_audio_initdev(struct cx25821_dev *dev)
{
struct snd_card *card;
struct cx25821_audio_dev *chip;
int err;
if (devno >= SNDRV_CARDS) {
pr_info("DEBUG ERROR: devno >= SNDRV_CARDS %s\n", __func__);
return -ENODEV;
}
if (!enable[devno]) {
++devno;
pr_info("DEBUG ERROR: !enable[devno] %s\n", __func__);
return -ENOENT;
}
err = snd_card_new(&dev->pci->dev, index[devno], id[devno],
THIS_MODULE,
sizeof(struct cx25821_audio_dev), &card);
if (err < 0) {
pr_info("DEBUG ERROR: cannot create snd_card_new in %s\n",
__func__);
return err;
}
strscpy(card->driver, "cx25821", sizeof(card->driver));
/* Card "creation" */
chip = card->private_data;
spin_lock_init(&chip->reg_lock);
chip->dev = dev;
chip->card = card;
chip->pci = dev->pci;
chip->iobase = pci_resource_start(dev->pci, 0);
chip->irq = dev->pci->irq;
err = devm_request_irq(&dev->pci->dev, dev->pci->irq, cx25821_irq,
IRQF_SHARED, chip->dev->name, chip);
if (err < 0) {
pr_err("ERROR %s: can't get IRQ %d for ALSA\n", chip->dev->name,
dev->pci->irq);
goto error;
}
err = snd_cx25821_pcm(chip, 0, "cx25821 Digital");
if (err < 0) {
pr_info("DEBUG ERROR: cannot create snd_cx25821_pcm %s\n",
__func__);
goto error;
}
strscpy(card->shortname, "cx25821", sizeof(card->shortname));
sprintf(card->longname, "%s at 0x%lx irq %d", chip->dev->name,
chip->iobase, chip->irq);
strscpy(card->mixername, "CX25821", sizeof(card->mixername));
pr_info("%s/%i: ALSA support for cx25821 boards\n", card->driver,
devno);
err = snd_card_register(card);
if (err < 0) {
pr_info("DEBUG ERROR: cannot register sound card %s\n",
__func__);
goto error;
}
dev->card = card;
devno++;
return 0;
error:
snd_card_free(card);
return err;
}
/****************************************************************************
LINUX MODULE INIT
****************************************************************************/
static int cx25821_alsa_exit_callback(struct device *dev, void *data)
{
struct v4l2_device *v4l2_dev = dev_get_drvdata(dev);
struct cx25821_dev *cxdev = get_cx25821(v4l2_dev);
snd_card_free(cxdev->card);
return 0;
}
static void cx25821_audio_fini(void)
{
struct device_driver *drv = driver_find("cx25821", &pci_bus_type);
int ret;
ret = driver_for_each_device(drv, NULL, NULL, cx25821_alsa_exit_callback);
if (ret)
pr_err("%s failed to find a cx25821 driver.\n", __func__);
}
static int cx25821_alsa_init_callback(struct device *dev, void *data)
{
struct v4l2_device *v4l2_dev = dev_get_drvdata(dev);
struct cx25821_dev *cxdev = get_cx25821(v4l2_dev);
cx25821_audio_initdev(cxdev);
return 0;
}
/*
* Module initializer
*
 * Loops through present cx25821 cards, and assigns an ALSA device
* to each one
*
*/
static int cx25821_alsa_init(void)
{
struct device_driver *drv = driver_find("cx25821", &pci_bus_type);
return driver_for_each_device(drv, NULL, NULL, cx25821_alsa_init_callback);
}
late_initcall(cx25821_alsa_init);
module_exit(cx25821_audio_fini);
| linux-master | drivers/media/pci/cx25821/cx25821-alsa.c |
// SPDX-License-Identifier: GPL-2.0
/* Author: Dan Scally <[email protected]> */
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/mei_cl_bus.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <media/ipu-bridge.h>
#include <media/v4l2-fwnode.h>
/*
* 92335fcf-3203-4472-af93-7b4453ac29da
*
 * Used to build the MEI CSI device name to look up the MEI CSI device by
 * device_find_child_by_name().
*/
#define MEI_CSI_UUID \
UUID_LE(0x92335FCF, 0x3203, 0x4472, \
0xAF, 0x93, 0x7B, 0x44, 0x53, 0xAC, 0x29, 0xDA)
/*
* IVSC device name
*
* Used to match IVSC device by ipu_bridge_match_ivsc_dev()
*/
#define IVSC_DEV_NAME "intel_vsc"
/*
* Extend this array with ACPI Hardware IDs of devices known to be working
* plus the number of link-frequencies expected by their drivers, along with
* the frequency values in hertz. This is somewhat opportunistic way of adding
* support for this for now in the hopes of a better source for the information
* (possibly some encoded value in the SSDB buffer that we're unaware of)
* becoming apparent in the future.
*
* Do not add an entry for a sensor that is not actually supported.
*/
static const struct ipu_sensor_config ipu_supported_sensors[] = {
/* Omnivision OV5693 */
IPU_SENSOR_CONFIG("INT33BE", 1, 419200000),
/* Omnivision OV8865 */
IPU_SENSOR_CONFIG("INT347A", 1, 360000000),
/* Omnivision OV7251 */
IPU_SENSOR_CONFIG("INT347E", 1, 319200000),
/* Omnivision OV2680 */
IPU_SENSOR_CONFIG("OVTI2680", 1, 331200000),
/* Omnivision ov8856 */
IPU_SENSOR_CONFIG("OVTI8856", 3, 180000000, 360000000, 720000000),
/* Omnivision ov2740 */
IPU_SENSOR_CONFIG("INT3474", 1, 360000000),
/* Hynix hi556 */
IPU_SENSOR_CONFIG("INT3537", 1, 437000000),
/* Omnivision ov13b10 */
IPU_SENSOR_CONFIG("OVTIDB10", 1, 560000000),
/* GalaxyCore GC0310 */
IPU_SENSOR_CONFIG("INT0310", 0),
};
static const struct ipu_property_names prop_names = {
.clock_frequency = "clock-frequency",
.rotation = "rotation",
.orientation = "orientation",
.bus_type = "bus-type",
.data_lanes = "data-lanes",
.remote_endpoint = "remote-endpoint",
.link_frequencies = "link-frequencies",
};
static const char * const ipu_vcm_types[] = {
"ad5823",
"dw9714",
"ad5816",
"dw9719",
"dw9718",
"dw9806b",
"wv517s",
"lc898122xa",
"lc898212axb",
};
/*
 * Used by ipu_bridge_get_ivsc_acpi_dev() to figure out the IVSC ACPI device,
 * instead of relying on a device/driver match to probe the IVSC device.
*/
static const struct acpi_device_id ivsc_acpi_ids[] = {
{ "INTC1059" },
{ "INTC1095" },
{ "INTC100A" },
{ "INTC10CF" },
};
static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev)
{
acpi_handle handle = acpi_device_handle(adev);
struct acpi_device *consumer, *ivsc_adev;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ivsc_acpi_ids); i++) {
const struct acpi_device_id *acpi_id = &ivsc_acpi_ids[i];
for_each_acpi_dev_match(ivsc_adev, acpi_id->id, NULL, -1)
		/* the camera sensor depends on IVSC in the DSDT, if it exists */
for_each_acpi_consumer_dev(ivsc_adev, consumer)
if (consumer->handle == handle)
return ivsc_adev;
}
return NULL;
}
static int ipu_bridge_match_ivsc_dev(struct device *dev, const void *adev)
{
if (ACPI_COMPANION(dev) != adev)
return 0;
if (!sysfs_streq(dev_name(dev), IVSC_DEV_NAME))
return 0;
return 1;
}
static struct device *ipu_bridge_get_ivsc_csi_dev(struct acpi_device *adev)
{
struct device *dev, *csi_dev;
uuid_le uuid = MEI_CSI_UUID;
char name[64];
/* IVSC device on platform bus */
dev = bus_find_device(&platform_bus_type, NULL, adev,
ipu_bridge_match_ivsc_dev);
if (dev) {
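		/*
		 * The MEI CSI child is looked up by name, "<ivsc device>-<uuid>",
		 * e.g. "intel_vsc-92335fcf-3203-4472-af93-7b4453ac29da".
		 */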
snprintf(name, sizeof(name), "%s-%pUl", dev_name(dev), &uuid);
csi_dev = device_find_child_by_name(dev, name);
put_device(dev);
return csi_dev;
}
return NULL;
}
static int ipu_bridge_check_ivsc_dev(struct ipu_sensor *sensor,
struct acpi_device *sensor_adev)
{
struct acpi_device *adev;
struct device *csi_dev;
adev = ipu_bridge_get_ivsc_acpi_dev(sensor_adev);
if (adev) {
csi_dev = ipu_bridge_get_ivsc_csi_dev(adev);
if (!csi_dev) {
acpi_dev_put(adev);
dev_err(&adev->dev, "Failed to find MEI CSI dev\n");
return -ENODEV;
}
sensor->csi_dev = csi_dev;
sensor->ivsc_adev = adev;
}
return 0;
}
static int ipu_bridge_read_acpi_buffer(struct acpi_device *adev, char *id,
void *data, u32 size)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
int ret = 0;
status = acpi_evaluate_object(adev->handle, id, NULL, &buffer);
if (ACPI_FAILURE(status))
return -ENODEV;
obj = buffer.pointer;
if (!obj) {
dev_err(&adev->dev, "Couldn't locate ACPI buffer\n");
return -ENODEV;
}
if (obj->type != ACPI_TYPE_BUFFER) {
dev_err(&adev->dev, "Not an ACPI buffer\n");
ret = -ENODEV;
goto out_free_buff;
}
if (obj->buffer.length > size) {
dev_err(&adev->dev, "Given buffer is too small\n");
ret = -EINVAL;
goto out_free_buff;
}
memcpy(data, obj->buffer.pointer, obj->buffer.length);
out_free_buff:
kfree(buffer.pointer);
return ret;
}
static u32 ipu_bridge_parse_rotation(struct acpi_device *adev,
struct ipu_sensor_ssdb *ssdb)
{
switch (ssdb->degree) {
case IPU_SENSOR_ROTATION_NORMAL:
return 0;
case IPU_SENSOR_ROTATION_INVERTED:
return 180;
default:
dev_warn(&adev->dev,
"Unknown rotation %d. Assume 0 degree rotation\n",
ssdb->degree);
return 0;
}
}
static enum v4l2_fwnode_orientation ipu_bridge_parse_orientation(struct acpi_device *adev)
{
enum v4l2_fwnode_orientation orientation;
struct acpi_pld_info *pld;
acpi_status status;
status = acpi_get_physical_device_location(adev->handle, &pld);
if (ACPI_FAILURE(status)) {
dev_warn(&adev->dev, "_PLD call failed, using default orientation\n");
return V4L2_FWNODE_ORIENTATION_EXTERNAL;
}
switch (pld->panel) {
case ACPI_PLD_PANEL_FRONT:
orientation = V4L2_FWNODE_ORIENTATION_FRONT;
break;
case ACPI_PLD_PANEL_BACK:
orientation = V4L2_FWNODE_ORIENTATION_BACK;
break;
case ACPI_PLD_PANEL_TOP:
case ACPI_PLD_PANEL_LEFT:
case ACPI_PLD_PANEL_RIGHT:
case ACPI_PLD_PANEL_UNKNOWN:
orientation = V4L2_FWNODE_ORIENTATION_EXTERNAL;
break;
default:
dev_warn(&adev->dev, "Unknown _PLD panel val %d\n", pld->panel);
orientation = V4L2_FWNODE_ORIENTATION_EXTERNAL;
break;
}
ACPI_FREE(pld);
return orientation;
}
int ipu_bridge_parse_ssdb(struct acpi_device *adev, struct ipu_sensor *sensor)
{
struct ipu_sensor_ssdb ssdb = {};
int ret;
ret = ipu_bridge_read_acpi_buffer(adev, "SSDB", &ssdb, sizeof(ssdb));
if (ret)
return ret;
if (ssdb.vcmtype > ARRAY_SIZE(ipu_vcm_types)) {
dev_warn(&adev->dev, "Unknown VCM type %d\n", ssdb.vcmtype);
ssdb.vcmtype = 0;
}
if (ssdb.lanes > IPU_MAX_LANES) {
dev_err(&adev->dev, "Number of lanes in SSDB is invalid\n");
return -EINVAL;
}
sensor->link = ssdb.link;
sensor->lanes = ssdb.lanes;
sensor->mclkspeed = ssdb.mclkspeed;
sensor->rotation = ipu_bridge_parse_rotation(adev, &ssdb);
sensor->orientation = ipu_bridge_parse_orientation(adev);
if (ssdb.vcmtype)
sensor->vcm_type = ipu_vcm_types[ssdb.vcmtype - 1];
return 0;
}
EXPORT_SYMBOL_NS_GPL(ipu_bridge_parse_ssdb, INTEL_IPU_BRIDGE);
static void ipu_bridge_create_fwnode_properties(
struct ipu_sensor *sensor,
struct ipu_bridge *bridge,
const struct ipu_sensor_config *cfg)
{
struct ipu_property_names *names = &sensor->prop_names;
struct software_node *nodes = sensor->swnodes;
sensor->prop_names = prop_names;
if (sensor->csi_dev) {
sensor->local_ref[0] =
SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_IVSC_SENSOR_ENDPOINT]);
sensor->remote_ref[0] =
SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_IVSC_IPU_ENDPOINT]);
sensor->ivsc_sensor_ref[0] =
SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_SENSOR_ENDPOINT]);
sensor->ivsc_ipu_ref[0] =
SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_IPU_ENDPOINT]);
sensor->ivsc_sensor_ep_properties[0] =
PROPERTY_ENTRY_U32(names->bus_type,
V4L2_FWNODE_BUS_TYPE_CSI2_DPHY);
sensor->ivsc_sensor_ep_properties[1] =
PROPERTY_ENTRY_U32_ARRAY_LEN(names->data_lanes,
bridge->data_lanes,
sensor->lanes);
sensor->ivsc_sensor_ep_properties[2] =
PROPERTY_ENTRY_REF_ARRAY(names->remote_endpoint,
sensor->ivsc_sensor_ref);
sensor->ivsc_ipu_ep_properties[0] =
PROPERTY_ENTRY_U32(names->bus_type,
V4L2_FWNODE_BUS_TYPE_CSI2_DPHY);
sensor->ivsc_ipu_ep_properties[1] =
PROPERTY_ENTRY_U32_ARRAY_LEN(names->data_lanes,
bridge->data_lanes,
sensor->lanes);
sensor->ivsc_ipu_ep_properties[2] =
PROPERTY_ENTRY_REF_ARRAY(names->remote_endpoint,
sensor->ivsc_ipu_ref);
} else {
sensor->local_ref[0] =
SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_IPU_ENDPOINT]);
sensor->remote_ref[0] =
SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_SENSOR_ENDPOINT]);
}
sensor->dev_properties[0] = PROPERTY_ENTRY_U32(
sensor->prop_names.clock_frequency,
sensor->mclkspeed);
sensor->dev_properties[1] = PROPERTY_ENTRY_U32(
sensor->prop_names.rotation,
sensor->rotation);
sensor->dev_properties[2] = PROPERTY_ENTRY_U32(
sensor->prop_names.orientation,
sensor->orientation);
if (sensor->vcm_type) {
sensor->vcm_ref[0] =
SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_VCM]);
sensor->dev_properties[3] =
PROPERTY_ENTRY_REF_ARRAY("lens-focus", sensor->vcm_ref);
}
sensor->ep_properties[0] = PROPERTY_ENTRY_U32(
sensor->prop_names.bus_type,
V4L2_FWNODE_BUS_TYPE_CSI2_DPHY);
sensor->ep_properties[1] = PROPERTY_ENTRY_U32_ARRAY_LEN(
sensor->prop_names.data_lanes,
bridge->data_lanes, sensor->lanes);
sensor->ep_properties[2] = PROPERTY_ENTRY_REF_ARRAY(
sensor->prop_names.remote_endpoint,
sensor->local_ref);
if (cfg->nr_link_freqs > 0)
sensor->ep_properties[3] = PROPERTY_ENTRY_U64_ARRAY_LEN(
sensor->prop_names.link_frequencies,
cfg->link_freqs,
cfg->nr_link_freqs);
sensor->ipu_properties[0] = PROPERTY_ENTRY_U32_ARRAY_LEN(
sensor->prop_names.data_lanes,
bridge->data_lanes, sensor->lanes);
sensor->ipu_properties[1] = PROPERTY_ENTRY_REF_ARRAY(
sensor->prop_names.remote_endpoint,
sensor->remote_ref);
}
static void ipu_bridge_init_swnode_names(struct ipu_sensor *sensor)
{
snprintf(sensor->node_names.remote_port,
sizeof(sensor->node_names.remote_port),
SWNODE_GRAPH_PORT_NAME_FMT, sensor->link);
snprintf(sensor->node_names.port,
sizeof(sensor->node_names.port),
SWNODE_GRAPH_PORT_NAME_FMT, 0); /* Always port 0 */
snprintf(sensor->node_names.endpoint,
sizeof(sensor->node_names.endpoint),
SWNODE_GRAPH_ENDPOINT_NAME_FMT, 0); /* And endpoint 0 */
if (sensor->vcm_type) {
/* append link to distinguish nodes with same model VCM */
snprintf(sensor->node_names.vcm, sizeof(sensor->node_names.vcm),
"%s-%u", sensor->vcm_type, sensor->link);
}
if (sensor->csi_dev) {
snprintf(sensor->node_names.ivsc_sensor_port,
sizeof(sensor->node_names.ivsc_sensor_port),
SWNODE_GRAPH_PORT_NAME_FMT, 0);
snprintf(sensor->node_names.ivsc_ipu_port,
sizeof(sensor->node_names.ivsc_ipu_port),
SWNODE_GRAPH_PORT_NAME_FMT, 1);
}
}
static void ipu_bridge_init_swnode_group(struct ipu_sensor *sensor)
{
struct software_node *nodes = sensor->swnodes;
sensor->group[SWNODE_SENSOR_HID] = &nodes[SWNODE_SENSOR_HID];
sensor->group[SWNODE_SENSOR_PORT] = &nodes[SWNODE_SENSOR_PORT];
sensor->group[SWNODE_SENSOR_ENDPOINT] = &nodes[SWNODE_SENSOR_ENDPOINT];
sensor->group[SWNODE_IPU_PORT] = &nodes[SWNODE_IPU_PORT];
sensor->group[SWNODE_IPU_ENDPOINT] = &nodes[SWNODE_IPU_ENDPOINT];
if (sensor->vcm_type)
sensor->group[SWNODE_VCM] = &nodes[SWNODE_VCM];
if (sensor->csi_dev) {
sensor->group[SWNODE_IVSC_HID] =
&nodes[SWNODE_IVSC_HID];
sensor->group[SWNODE_IVSC_SENSOR_PORT] =
&nodes[SWNODE_IVSC_SENSOR_PORT];
sensor->group[SWNODE_IVSC_SENSOR_ENDPOINT] =
&nodes[SWNODE_IVSC_SENSOR_ENDPOINT];
sensor->group[SWNODE_IVSC_IPU_PORT] =
&nodes[SWNODE_IVSC_IPU_PORT];
sensor->group[SWNODE_IVSC_IPU_ENDPOINT] =
&nodes[SWNODE_IVSC_IPU_ENDPOINT];
if (sensor->vcm_type)
sensor->group[SWNODE_VCM] = &nodes[SWNODE_VCM];
} else {
if (sensor->vcm_type)
sensor->group[SWNODE_IVSC_HID] = &nodes[SWNODE_VCM];
}
}
static void ipu_bridge_create_connection_swnodes(struct ipu_bridge *bridge,
struct ipu_sensor *sensor)
{
struct ipu_node_names *names = &sensor->node_names;
struct software_node *nodes = sensor->swnodes;
ipu_bridge_init_swnode_names(sensor);
nodes[SWNODE_SENSOR_HID] = NODE_SENSOR(sensor->name,
sensor->dev_properties);
nodes[SWNODE_SENSOR_PORT] = NODE_PORT(sensor->node_names.port,
&nodes[SWNODE_SENSOR_HID]);
nodes[SWNODE_SENSOR_ENDPOINT] = NODE_ENDPOINT(
sensor->node_names.endpoint,
&nodes[SWNODE_SENSOR_PORT],
sensor->ep_properties);
nodes[SWNODE_IPU_PORT] = NODE_PORT(sensor->node_names.remote_port,
&bridge->ipu_hid_node);
nodes[SWNODE_IPU_ENDPOINT] = NODE_ENDPOINT(
sensor->node_names.endpoint,
&nodes[SWNODE_IPU_PORT],
sensor->ipu_properties);
if (sensor->csi_dev) {
snprintf(sensor->ivsc_name, sizeof(sensor->ivsc_name), "%s-%u",
acpi_device_hid(sensor->ivsc_adev), sensor->link);
nodes[SWNODE_IVSC_HID] = NODE_SENSOR(sensor->ivsc_name,
sensor->ivsc_properties);
nodes[SWNODE_IVSC_SENSOR_PORT] =
NODE_PORT(names->ivsc_sensor_port,
&nodes[SWNODE_IVSC_HID]);
nodes[SWNODE_IVSC_SENSOR_ENDPOINT] =
NODE_ENDPOINT(names->endpoint,
&nodes[SWNODE_IVSC_SENSOR_PORT],
sensor->ivsc_sensor_ep_properties);
nodes[SWNODE_IVSC_IPU_PORT] =
NODE_PORT(names->ivsc_ipu_port,
&nodes[SWNODE_IVSC_HID]);
nodes[SWNODE_IVSC_IPU_ENDPOINT] =
NODE_ENDPOINT(names->endpoint,
&nodes[SWNODE_IVSC_IPU_PORT],
sensor->ivsc_ipu_ep_properties);
}
nodes[SWNODE_VCM] = NODE_VCM(sensor->node_names.vcm);
ipu_bridge_init_swnode_group(sensor);
}
/*
* The actual instantiation must be done from a workqueue to avoid
* a deadlock on taking list_lock from v4l2-async twice.
*/
struct ipu_bridge_instantiate_vcm_work_data {
struct work_struct work;
struct device *sensor;
char name[16];
struct i2c_board_info board_info;
};
static void ipu_bridge_instantiate_vcm_work(struct work_struct *work)
{
struct ipu_bridge_instantiate_vcm_work_data *data =
container_of(work, struct ipu_bridge_instantiate_vcm_work_data,
work);
struct acpi_device *adev = ACPI_COMPANION(data->sensor);
struct i2c_client *vcm_client;
bool put_fwnode = true;
int ret;
/*
 * The client may get probed before the device_link gets added below;
 * make sure the sensor is powered up during probe.
*/
ret = pm_runtime_get_sync(data->sensor);
if (ret < 0) {
dev_err(data->sensor, "Error %d runtime-resuming sensor, cannot instantiate VCM\n",
ret);
goto out_pm_put;
}
/*
* Note the client is created only once and then kept around
* even after a rmmod, just like the software-nodes.
*/
vcm_client = i2c_acpi_new_device_by_fwnode(acpi_fwnode_handle(adev),
1, &data->board_info);
if (IS_ERR(vcm_client)) {
dev_err(data->sensor, "Error instantiating VCM client: %ld\n",
PTR_ERR(vcm_client));
goto out_pm_put;
}
device_link_add(&vcm_client->dev, data->sensor, DL_FLAG_PM_RUNTIME);
dev_info(data->sensor, "Instantiated %s VCM\n", data->board_info.type);
put_fwnode = false; /* Ownership has passed to the i2c-client */
out_pm_put:
pm_runtime_put(data->sensor);
put_device(data->sensor);
if (put_fwnode)
fwnode_handle_put(data->board_info.fwnode);
kfree(data);
}
int ipu_bridge_instantiate_vcm(struct device *sensor)
{
struct ipu_bridge_instantiate_vcm_work_data *data;
struct fwnode_handle *vcm_fwnode;
struct i2c_client *vcm_client;
struct acpi_device *adev;
char *sep;
adev = ACPI_COMPANION(sensor);
if (!adev)
return 0;
vcm_fwnode = fwnode_find_reference(dev_fwnode(sensor), "lens-focus", 0);
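	/* no "lens-focus" reference means this sensor has no VCM to instantiate */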
if (IS_ERR(vcm_fwnode))
return 0;
/* When reloading modules the client will already exist */
vcm_client = i2c_find_device_by_fwnode(vcm_fwnode);
if (vcm_client) {
fwnode_handle_put(vcm_fwnode);
put_device(&vcm_client->dev);
return 0;
}
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
fwnode_handle_put(vcm_fwnode);
return -ENOMEM;
}
INIT_WORK(&data->work, ipu_bridge_instantiate_vcm_work);
data->sensor = get_device(sensor);
snprintf(data->name, sizeof(data->name), "%s-VCM",
acpi_dev_name(adev));
data->board_info.dev_name = data->name;
data->board_info.fwnode = vcm_fwnode;
snprintf(data->board_info.type, sizeof(data->board_info.type),
"%pfwP", vcm_fwnode);
/* Strip "-<link>" postfix */
sep = strchrnul(data->board_info.type, '-');
*sep = 0;
queue_work(system_long_wq, &data->work);
return 0;
}
EXPORT_SYMBOL_NS_GPL(ipu_bridge_instantiate_vcm, INTEL_IPU_BRIDGE);
static int ipu_bridge_instantiate_ivsc(struct ipu_sensor *sensor)
{
struct fwnode_handle *fwnode;
if (!sensor->csi_dev)
return 0;
fwnode = software_node_fwnode(&sensor->swnodes[SWNODE_IVSC_HID]);
if (!fwnode)
return -ENODEV;
set_secondary_fwnode(sensor->csi_dev, fwnode);
return 0;
}
static void ipu_bridge_unregister_sensors(struct ipu_bridge *bridge)
{
struct ipu_sensor *sensor;
unsigned int i;
for (i = 0; i < bridge->n_sensors; i++) {
sensor = &bridge->sensors[i];
software_node_unregister_node_group(sensor->group);
acpi_dev_put(sensor->adev);
put_device(sensor->csi_dev);
acpi_dev_put(sensor->ivsc_adev);
}
}
static int ipu_bridge_connect_sensor(const struct ipu_sensor_config *cfg,
struct ipu_bridge *bridge)
{
struct fwnode_handle *fwnode, *primary;
struct ipu_sensor *sensor;
struct acpi_device *adev;
int ret;
for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
if (!adev->status.enabled)
continue;
if (bridge->n_sensors >= IPU_MAX_PORTS) {
acpi_dev_put(adev);
dev_err(bridge->dev, "Exceeded available IPU ports\n");
return -EINVAL;
}
sensor = &bridge->sensors[bridge->n_sensors];
ret = bridge->parse_sensor_fwnode(adev, sensor);
if (ret)
goto err_put_adev;
snprintf(sensor->name, sizeof(sensor->name), "%s-%u",
cfg->hid, sensor->link);
ret = ipu_bridge_check_ivsc_dev(sensor, adev);
if (ret)
goto err_put_adev;
ipu_bridge_create_fwnode_properties(sensor, bridge, cfg);
ipu_bridge_create_connection_swnodes(bridge, sensor);
ret = software_node_register_node_group(sensor->group);
if (ret)
goto err_put_ivsc;
fwnode = software_node_fwnode(&sensor->swnodes[
SWNODE_SENSOR_HID]);
if (!fwnode) {
ret = -ENODEV;
goto err_free_swnodes;
}
sensor->adev = acpi_dev_get(adev);
primary = acpi_fwnode_handle(adev);
primary->secondary = fwnode;
ret = ipu_bridge_instantiate_ivsc(sensor);
if (ret)
goto err_free_swnodes;
dev_info(bridge->dev, "Found supported sensor %s\n",
acpi_dev_name(adev));
bridge->n_sensors++;
}
return 0;
err_free_swnodes:
software_node_unregister_node_group(sensor->group);
err_put_ivsc:
put_device(sensor->csi_dev);
acpi_dev_put(sensor->ivsc_adev);
err_put_adev:
acpi_dev_put(adev);
return ret;
}
static int ipu_bridge_connect_sensors(struct ipu_bridge *bridge)
{
unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(ipu_supported_sensors); i++) {
const struct ipu_sensor_config *cfg =
&ipu_supported_sensors[i];
ret = ipu_bridge_connect_sensor(cfg, bridge);
if (ret)
goto err_unregister_sensors;
}
return 0;
err_unregister_sensors:
ipu_bridge_unregister_sensors(bridge);
return ret;
}
static int ipu_bridge_ivsc_is_ready(void)
{
struct acpi_device *sensor_adev, *adev;
struct device *csi_dev;
bool ready = true;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ipu_supported_sensors); i++) {
const struct ipu_sensor_config *cfg =
&ipu_supported_sensors[i];
for_each_acpi_dev_match(sensor_adev, cfg->hid, NULL, -1) {
if (!sensor_adev->status.enabled)
continue;
adev = ipu_bridge_get_ivsc_acpi_dev(sensor_adev);
if (!adev)
continue;
csi_dev = ipu_bridge_get_ivsc_csi_dev(adev);
if (!csi_dev)
ready = false;
put_device(csi_dev);
acpi_dev_put(adev);
}
}
return ready;
}
int ipu_bridge_init(struct device *dev,
ipu_parse_sensor_fwnode_t parse_sensor_fwnode)
{
struct fwnode_handle *fwnode;
struct ipu_bridge *bridge;
unsigned int i;
int ret;
if (!ipu_bridge_ivsc_is_ready())
return -EPROBE_DEFER;
bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
if (!bridge)
return -ENOMEM;
strscpy(bridge->ipu_node_name, IPU_HID,
sizeof(bridge->ipu_node_name));
bridge->ipu_hid_node.name = bridge->ipu_node_name;
bridge->dev = dev;
bridge->parse_sensor_fwnode = parse_sensor_fwnode;
ret = software_node_register(&bridge->ipu_hid_node);
if (ret < 0) {
dev_err(dev, "Failed to register the IPU HID node\n");
goto err_free_bridge;
}
/*
* Map the lane arrangement, which is fixed for the IPU3 (meaning we
* only need one, rather than one per sensor). We include it as a
* member of the struct ipu_bridge rather than a global variable so
* that it survives if the module is unloaded along with the rest of
* the struct.
*/
for (i = 0; i < IPU_MAX_LANES; i++)
bridge->data_lanes[i] = i + 1;
ret = ipu_bridge_connect_sensors(bridge);
if (ret || bridge->n_sensors == 0)
goto err_unregister_ipu;
dev_info(dev, "Connected %d cameras\n", bridge->n_sensors);
fwnode = software_node_fwnode(&bridge->ipu_hid_node);
if (!fwnode) {
dev_err(dev, "Error getting fwnode from ipu software_node\n");
ret = -ENODEV;
goto err_unregister_sensors;
}
set_secondary_fwnode(dev, fwnode);
return 0;
err_unregister_sensors:
ipu_bridge_unregister_sensors(bridge);
err_unregister_ipu:
software_node_unregister(&bridge->ipu_hid_node);
err_free_bridge:
kfree(bridge);
return ret;
}
EXPORT_SYMBOL_NS_GPL(ipu_bridge_init, INTEL_IPU_BRIDGE);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel IPU Sensors Bridge driver");
| linux-master | drivers/media/pci/intel/ipu-bridge.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2023 Intel Corporation. All rights reserved.
* Intel Visual Sensing Controller ACE Linux driver
*/
/*
 * To set ownership of the camera sensor, there is a specific command,
 * which is sent via the MEI protocol. That's a two-step scheme where the
 * firmware first acks receipt of the command and later responds that the
 * command was executed. The command sending function uses a "completion"
 * as the synchronization mechanism. The notification for a command is
 * received via a MEI callback which wakes up the caller. There can be
 * only one outstanding command at a time.
 *
 * The power line of the camera sensor is connected directly to IVSC
 * instead of the host; when camera sensor ownership is switched to the
 * host, the sensor has already been powered up by the firmware.
*/
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/mei_cl_bus.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>
#define MEI_ACE_DRIVER_NAME "ivsc_ace"
/* indicating driver message */
#define ACE_DRV_MSG 1
/* indicating set command */
#define ACE_CMD_SET 4
/* command timeout determined experimentally */
#define ACE_CMD_TIMEOUT (5 * HZ)
/* indicating the first command block */
#define ACE_CMD_INIT_BLOCK 1
/* indicating the last command block */
#define ACE_CMD_FINAL_BLOCK 1
/* size of camera status notification content */
#define ACE_CAMERA_STATUS_SIZE 5
/* UUID used to get firmware id */
#define ACE_GET_FW_ID_UUID UUID_LE(0x6167DCFB, 0x72F1, 0x4584, 0xBF, \
0xE3, 0x84, 0x17, 0x71, 0xAA, 0x79, 0x0B)
/* UUID used to get csi device */
#define MEI_CSI_UUID UUID_LE(0x92335FCF, 0x3203, 0x4472, \
0xAF, 0x93, 0x7b, 0x44, 0x53, 0xAC, 0x29, 0xDA)
/* identify firmware event type */
enum ace_event_type {
/* firmware ready */
ACE_FW_READY = 0x8,
/* command response */
ACE_CMD_RESPONSE = 0x10,
};
/* identify camera sensor ownership */
enum ace_camera_owner {
ACE_CAMERA_IVSC,
ACE_CAMERA_HOST,
};
/* identify the command id supported by firmware IPC */
enum ace_cmd_id {
/* used to switch camera sensor to host */
ACE_SWITCH_CAMERA_TO_HOST = 0x13,
/* used to switch camera sensor to IVSC */
ACE_SWITCH_CAMERA_TO_IVSC = 0x14,
/* used to get firmware id */
ACE_GET_FW_ID = 0x1A,
};
/* ACE command header structure */
struct ace_cmd_hdr {
u32 firmware_id : 16;
u32 instance_id : 8;
u32 type : 5;
u32 rsp : 1;
u32 msg_tgt : 1;
u32 _hw_rsvd_1 : 1;
u32 param_size : 20;
u32 cmd_id : 8;
u32 final_block : 1;
u32 init_block : 1;
u32 _hw_rsvd_2 : 2;
} __packed;
/* ACE command parameter structure */
union ace_cmd_param {
uuid_le uuid;
u32 param;
};
/* ACE command structure */
struct ace_cmd {
struct ace_cmd_hdr hdr;
union ace_cmd_param param;
} __packed;
/* ACE notification header */
union ace_notif_hdr {
struct _confirm {
u32 status : 24;
u32 type : 5;
u32 rsp : 1;
u32 msg_tgt : 1;
u32 _hw_rsvd_1 : 1;
u32 param_size : 20;
u32 cmd_id : 8;
u32 final_block : 1;
u32 init_block : 1;
u32 _hw_rsvd_2 : 2;
} __packed ack;
struct _event {
u32 rsvd1 : 16;
u32 event_type : 8;
u32 type : 5;
u32 ack : 1;
u32 msg_tgt : 1;
u32 _hw_rsvd_1 : 1;
u32 rsvd2 : 30;
u32 _hw_rsvd_2 : 2;
} __packed event;
struct _response {
u32 event_id : 16;
u32 notif_type : 8;
u32 type : 5;
u32 rsp : 1;
u32 msg_tgt : 1;
u32 _hw_rsvd_1 : 1;
u32 event_data_size : 16;
u32 request_target : 1;
u32 request_type : 5;
u32 cmd_id : 8;
u32 _hw_rsvd_2 : 2;
} __packed response;
};
/* ACE notification content */
union ace_notif_cont {
u16 firmware_id;
u8 state_notif;
u8 camera_status[ACE_CAMERA_STATUS_SIZE];
};
/* ACE notification structure */
struct ace_notif {
union ace_notif_hdr hdr;
union ace_notif_cont cont;
} __packed;
struct mei_ace {
struct mei_cl_device *cldev;
/* command ack */
struct ace_notif cmd_ack;
/* command response */
struct ace_notif cmd_response;
/* used to wait for command ack and response */
struct completion cmd_completion;
	/* lock used to prevent multiple calls to send command */
struct mutex lock;
/* used to construct command */
u16 firmware_id;
struct device *csi_dev;
/* runtime PM link from ace to csi */
struct device_link *csi_link;
struct work_struct work;
};
static inline void init_cmd_hdr(struct ace_cmd_hdr *hdr)
{
memset(hdr, 0, sizeof(struct ace_cmd_hdr));
hdr->type = ACE_CMD_SET;
hdr->msg_tgt = ACE_DRV_MSG;
hdr->init_block = ACE_CMD_INIT_BLOCK;
hdr->final_block = ACE_CMD_FINAL_BLOCK;
}
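/*
 * Build a command into *cmd; returns the total number of bytes to send,
 * or -EINVAL for an unknown command id.
 */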
static int construct_command(struct mei_ace *ace, struct ace_cmd *cmd,
enum ace_cmd_id cmd_id)
{
union ace_cmd_param *param = &cmd->param;
struct ace_cmd_hdr *hdr = &cmd->hdr;
init_cmd_hdr(hdr);
hdr->cmd_id = cmd_id;
switch (cmd_id) {
case ACE_GET_FW_ID:
param->uuid = ACE_GET_FW_ID_UUID;
hdr->param_size = sizeof(param->uuid);
break;
case ACE_SWITCH_CAMERA_TO_IVSC:
param->param = 0;
hdr->firmware_id = ace->firmware_id;
hdr->param_size = sizeof(param->param);
break;
case ACE_SWITCH_CAMERA_TO_HOST:
hdr->firmware_id = ace->firmware_id;
break;
default:
return -EINVAL;
}
return hdr->param_size + sizeof(cmd->hdr);
}
/* send command to firmware */
static int mei_ace_send(struct mei_ace *ace, struct ace_cmd *cmd,
size_t len, bool only_ack)
{
union ace_notif_hdr *resp_hdr = &ace->cmd_response.hdr;
union ace_notif_hdr *ack_hdr = &ace->cmd_ack.hdr;
struct ace_cmd_hdr *cmd_hdr = &cmd->hdr;
int ret;
mutex_lock(&ace->lock);
reinit_completion(&ace->cmd_completion);
ret = mei_cldev_send(ace->cldev, (u8 *)cmd, len);
if (ret < 0)
goto out;
ret = wait_for_completion_killable_timeout(&ace->cmd_completion,
ACE_CMD_TIMEOUT);
if (ret < 0) {
goto out;
} else if (!ret) {
ret = -ETIMEDOUT;
goto out;
}
if (ack_hdr->ack.cmd_id != cmd_hdr->cmd_id) {
ret = -EINVAL;
goto out;
}
/* command ack status */
ret = ack_hdr->ack.status;
if (ret) {
ret = -EIO;
goto out;
}
if (only_ack)
goto out;
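	/* second step: wait for the notification that the command was actually executed */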
ret = wait_for_completion_killable_timeout(&ace->cmd_completion,
ACE_CMD_TIMEOUT);
if (ret < 0) {
goto out;
} else if (!ret) {
ret = -ETIMEDOUT;
goto out;
} else {
ret = 0;
}
if (resp_hdr->response.cmd_id != cmd_hdr->cmd_id)
ret = -EINVAL;
out:
mutex_unlock(&ace->lock);
return ret;
}
static int ace_set_camera_owner(struct mei_ace *ace,
enum ace_camera_owner owner)
{
enum ace_cmd_id cmd_id;
struct ace_cmd cmd;
int cmd_size;
int ret;
if (owner == ACE_CAMERA_IVSC)
cmd_id = ACE_SWITCH_CAMERA_TO_IVSC;
else
cmd_id = ACE_SWITCH_CAMERA_TO_HOST;
cmd_size = construct_command(ace, &cmd, cmd_id);
if (cmd_size >= 0)
ret = mei_ace_send(ace, &cmd, cmd_size, false);
else
ret = cmd_size;
return ret;
}
/* the first command sent down to the firmware */
static inline int ace_get_firmware_id(struct mei_ace *ace)
{
struct ace_cmd cmd;
int cmd_size;
int ret;
cmd_size = construct_command(ace, &cmd, ACE_GET_FW_ID);
if (cmd_size >= 0)
ret = mei_ace_send(ace, &cmd, cmd_size, true);
else
ret = cmd_size;
return ret;
}
static void handle_command_response(struct mei_ace *ace,
struct ace_notif *resp, int len)
{
union ace_notif_hdr *hdr = &resp->hdr;
switch (hdr->response.cmd_id) {
case ACE_SWITCH_CAMERA_TO_IVSC:
case ACE_SWITCH_CAMERA_TO_HOST:
memcpy(&ace->cmd_response, resp, len);
complete(&ace->cmd_completion);
break;
case ACE_GET_FW_ID:
break;
default:
break;
}
}
static void handle_command_ack(struct mei_ace *ace,
struct ace_notif *ack, int len)
{
union ace_notif_hdr *hdr = &ack->hdr;
switch (hdr->ack.cmd_id) {
case ACE_GET_FW_ID:
ace->firmware_id = ack->cont.firmware_id;
fallthrough;
case ACE_SWITCH_CAMERA_TO_IVSC:
case ACE_SWITCH_CAMERA_TO_HOST:
memcpy(&ace->cmd_ack, ack, len);
complete(&ace->cmd_completion);
break;
default:
break;
}
}
/* callback for receive */
static void mei_ace_rx(struct mei_cl_device *cldev)
{
struct mei_ace *ace = mei_cldev_get_drvdata(cldev);
struct ace_notif event;
union ace_notif_hdr *hdr = &event.hdr;
int ret;
ret = mei_cldev_recv(cldev, (u8 *)&event, sizeof(event));
if (ret < 0) {
dev_err(&cldev->dev, "recv error: %d\n", ret);
return;
}
if (hdr->event.ack) {
handle_command_ack(ace, &event, ret);
return;
}
switch (hdr->event.event_type) {
case ACE_CMD_RESPONSE:
handle_command_response(ace, &event, ret);
break;
case ACE_FW_READY:
/*
* firmware ready notification sent to driver
* after HECI client connected with firmware.
*/
dev_dbg(&cldev->dev, "firmware ready\n");
break;
default:
break;
}
}
static int mei_ace_setup_dev_link(struct mei_ace *ace)
{
struct device *dev = &ace->cldev->dev;
uuid_le uuid = MEI_CSI_UUID;
struct device *csi_dev;
char name[64];
int ret;
snprintf(name, sizeof(name), "%s-%pUl", dev_name(dev->parent), &uuid);
csi_dev = device_find_child_by_name(dev->parent, name);
if (!csi_dev) {
ret = -EPROBE_DEFER;
goto err;
}
/* setup link between mei_ace and mei_csi */
ace->csi_link = device_link_add(csi_dev, dev, DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE | DL_FLAG_STATELESS);
if (!ace->csi_link) {
ret = -EINVAL;
dev_err(dev, "failed to link to %s\n", dev_name(csi_dev));
goto err_put;
}
ace->csi_dev = csi_dev;
return 0;
err_put:
put_device(csi_dev);
err:
return ret;
}
/* switch camera to host before probing the sensor device */
static void mei_ace_post_probe_work(struct work_struct *work)
{
struct acpi_device *adev;
struct mei_ace *ace;
struct device *dev;
int ret;
ace = container_of(work, struct mei_ace, work);
dev = &ace->cldev->dev;
ret = ace_set_camera_owner(ace, ACE_CAMERA_HOST);
if (ret) {
dev_err(dev, "switch camera to host failed: %d\n", ret);
return;
}
adev = ACPI_COMPANION(dev->parent);
if (!adev)
return;
acpi_dev_clear_dependencies(adev);
}
static int mei_ace_probe(struct mei_cl_device *cldev,
const struct mei_cl_device_id *id)
{
struct device *dev = &cldev->dev;
struct mei_ace *ace;
int ret;
ace = devm_kzalloc(dev, sizeof(struct mei_ace), GFP_KERNEL);
if (!ace)
return -ENOMEM;
ace->cldev = cldev;
mutex_init(&ace->lock);
init_completion(&ace->cmd_completion);
INIT_WORK(&ace->work, mei_ace_post_probe_work);
mei_cldev_set_drvdata(cldev, ace);
ret = mei_cldev_enable(cldev);
if (ret < 0) {
dev_err(dev, "mei_cldev_enable failed: %d\n", ret);
goto destroy_mutex;
}
ret = mei_cldev_register_rx_cb(cldev, mei_ace_rx);
if (ret) {
dev_err(dev, "event cb registration failed: %d\n", ret);
goto err_disable;
}
ret = ace_get_firmware_id(ace);
if (ret) {
dev_err(dev, "get firmware id failed: %d\n", ret);
goto err_disable;
}
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
ret = mei_ace_setup_dev_link(ace);
if (ret)
goto disable_pm;
schedule_work(&ace->work);
return 0;
disable_pm:
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
err_disable:
mei_cldev_disable(cldev);
destroy_mutex:
mutex_destroy(&ace->lock);
return ret;
}
static void mei_ace_remove(struct mei_cl_device *cldev)
{
struct mei_ace *ace = mei_cldev_get_drvdata(cldev);
cancel_work_sync(&ace->work);
device_link_del(ace->csi_link);
put_device(ace->csi_dev);
pm_runtime_disable(&cldev->dev);
pm_runtime_set_suspended(&cldev->dev);
ace_set_camera_owner(ace, ACE_CAMERA_IVSC);
mutex_destroy(&ace->lock);
}
static int __maybe_unused mei_ace_runtime_suspend(struct device *dev)
{
struct mei_ace *ace = dev_get_drvdata(dev);
return ace_set_camera_owner(ace, ACE_CAMERA_IVSC);
}
static int __maybe_unused mei_ace_runtime_resume(struct device *dev)
{
struct mei_ace *ace = dev_get_drvdata(dev);
return ace_set_camera_owner(ace, ACE_CAMERA_HOST);
}
static const struct dev_pm_ops mei_ace_pm_ops = {
SET_RUNTIME_PM_OPS(mei_ace_runtime_suspend,
mei_ace_runtime_resume, NULL)
};
#define MEI_ACE_UUID UUID_LE(0x5DB76CF6, 0x0A68, 0x4ED6, \
0x9B, 0x78, 0x03, 0x61, 0x63, 0x5E, 0x24, 0x47)
static const struct mei_cl_device_id mei_ace_tbl[] = {
{ MEI_ACE_DRIVER_NAME, MEI_ACE_UUID, MEI_CL_VERSION_ANY },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(mei, mei_ace_tbl);
static struct mei_cl_driver mei_ace_driver = {
.id_table = mei_ace_tbl,
.name = MEI_ACE_DRIVER_NAME,
.probe = mei_ace_probe,
.remove = mei_ace_remove,
.driver = {
.pm = &mei_ace_pm_ops,
},
};
module_mei_cl_driver(mei_ace_driver);
MODULE_AUTHOR("Wentong Wu <[email protected]>");
MODULE_AUTHOR("Zhifeng Wang <[email protected]>");
MODULE_DESCRIPTION("Device driver for IVSC ACE");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/pci/intel/ivsc/mei_ace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2023 Intel Corporation. All rights reserved.
* Intel Visual Sensing Controller CSI Linux driver
*/
/*
 * To set ownership of the CSI-2 link and to configure the link, there
 * are specific commands, which are sent via the MEI protocol. The send
 * command function uses a "completion" as its synchronization mechanism.
 * The response to a command is received via a MEI callback which wakes
 * up the caller. There can be only one outstanding command at a time.
*/
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/mei_cl_bus.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/units.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>
#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
#define MEI_CSI_DRIVER_NAME "ivsc_csi"
#define MEI_CSI_ENTITY_NAME "Intel IVSC CSI"
#define MEI_CSI_LINK_FREQ_400MHZ 400000000ULL
/* the 5s used here is based on experiment */
#define CSI_CMD_TIMEOUT (5 * HZ)
/* an extra delay is needed to set up the CSI-2 link; determined experimentally */
#define CSI_FW_READY_DELAY_MS 100
/* link frequency unit is 100kHz */
#define CSI_LINK_FREQ(x) ((u32)(div_u64(x, 100 * HZ_PER_KHZ)))
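/* e.g. a 400 MHz link frequency is encoded as 4000 */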
/*
* identify the command id supported by firmware
* IPC, as well as the privacy notification id
* used when processing privacy event.
*/
enum csi_cmd_id {
/* used to set csi ownership */
CSI_SET_OWNER = 0,
/* used to configure CSI-2 link */
CSI_SET_CONF = 2,
/* privacy notification id used when privacy state changes */
CSI_PRIVACY_NOTIF = 6,
};
/* CSI-2 link ownership definition */
enum csi_link_owner {
CSI_LINK_IVSC,
CSI_LINK_HOST,
};
/* privacy status definition */
enum ivsc_privacy_status {
CSI_PRIVACY_OFF,
CSI_PRIVACY_ON,
CSI_PRIVACY_MAX,
};
enum csi_pads {
CSI_PAD_SOURCE,
CSI_PAD_SINK,
CSI_NUM_PADS
};
/* configuration of the CSI-2 link between host and IVSC */
struct csi_link_cfg {
/* number of data lanes used on the CSI-2 link */
u32 nr_of_lanes;
/* frequency of the CSI-2 link */
u32 link_freq;
/* for future use */
u32 rsvd[2];
} __packed;
/* CSI command structure */
struct csi_cmd {
u32 cmd_id;
union _cmd_param {
u32 param;
struct csi_link_cfg conf;
} param;
} __packed;
/* CSI notification structure */
struct csi_notif {
u32 cmd_id;
int status;
union _resp_cont {
u32 cont;
struct csi_link_cfg conf;
} cont;
} __packed;
struct mei_csi {
struct mei_cl_device *cldev;
/* command response */
struct csi_notif cmd_response;
/* used to wait for command response from firmware */
struct completion cmd_completion;
/* protect command download */
struct mutex lock;
struct v4l2_subdev subdev;
struct v4l2_subdev *remote;
struct v4l2_async_notifier notifier;
struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_ctrl *freq_ctrl;
struct v4l2_ctrl *privacy_ctrl;
unsigned int remote_pad;
/* start streaming or not */
int streaming;
struct media_pad pads[CSI_NUM_PADS];
struct v4l2_mbus_framefmt format_mbus[CSI_NUM_PADS];
/* number of data lanes used on the CSI-2 link */
u32 nr_of_lanes;
/* frequency of the CSI-2 link */
u64 link_freq;
/* privacy status */
enum ivsc_privacy_status status;
};
static const struct v4l2_mbus_framefmt mei_csi_format_mbus_default = {
.width = 1,
.height = 1,
.code = MEDIA_BUS_FMT_Y8_1X8,
.field = V4L2_FIELD_NONE,
};
static s64 link_freq_menu_items[] = {
MEI_CSI_LINK_FREQ_400MHZ
};
static inline struct mei_csi *notifier_to_csi(struct v4l2_async_notifier *n)
{
return container_of(n, struct mei_csi, notifier);
}
static inline struct mei_csi *sd_to_csi(struct v4l2_subdev *sd)
{
return container_of(sd, struct mei_csi, subdev);
}
static inline struct mei_csi *ctrl_to_csi(struct v4l2_ctrl *ctrl)
{
return container_of(ctrl->handler, struct mei_csi, ctrl_handler);
}
/* send a command to the firmware; the caller must hold csi->lock */
static int mei_csi_send(struct mei_csi *csi, u8 *buf, size_t len)
{
struct csi_cmd *cmd = (struct csi_cmd *)buf;
int ret;
reinit_completion(&csi->cmd_completion);
ret = mei_cldev_send(csi->cldev, buf, len);
if (ret < 0)
goto out;
ret = wait_for_completion_killable_timeout(&csi->cmd_completion,
CSI_CMD_TIMEOUT);
if (ret < 0) {
goto out;
} else if (!ret) {
ret = -ETIMEDOUT;
goto out;
}
/* command response status */
ret = csi->cmd_response.status;
if (ret) {
ret = -EINVAL;
goto out;
}
if (csi->cmd_response.cmd_id != cmd->cmd_id)
ret = -EINVAL;
out:
return ret;
}
/* set CSI-2 link ownership */
static int csi_set_link_owner(struct mei_csi *csi, enum csi_link_owner owner)
{
struct csi_cmd cmd = { 0 };
size_t cmd_size;
int ret;
cmd.cmd_id = CSI_SET_OWNER;
cmd.param.param = owner;
cmd_size = sizeof(cmd.cmd_id) + sizeof(cmd.param.param);
mutex_lock(&csi->lock);
ret = mei_csi_send(csi, (u8 *)&cmd, cmd_size);
mutex_unlock(&csi->lock);
return ret;
}
/* configure CSI-2 link between host and IVSC */
static int csi_set_link_cfg(struct mei_csi *csi)
{
struct csi_cmd cmd = { 0 };
size_t cmd_size;
int ret;
cmd.cmd_id = CSI_SET_CONF;
cmd.param.conf.nr_of_lanes = csi->nr_of_lanes;
cmd.param.conf.link_freq = CSI_LINK_FREQ(csi->link_freq);
cmd_size = sizeof(cmd.cmd_id) + sizeof(cmd.param.conf);
mutex_lock(&csi->lock);
ret = mei_csi_send(csi, (u8 *)&cmd, cmd_size);
/*
 * If the command was sent successfully, wait for the configuration
 * to become ready. The delay is kept under the mutex to make sure
 * the current command flow has completed before a possible new one
 * starts.
 */
if (!ret)
msleep(CSI_FW_READY_DELAY_MS);
mutex_unlock(&csi->lock);
return ret;
}
/* callback for receive */
static void mei_csi_rx(struct mei_cl_device *cldev)
{
struct mei_csi *csi = mei_cldev_get_drvdata(cldev);
struct csi_notif notif = { 0 };
int ret;
ret = mei_cldev_recv(cldev, (u8 *)&notif, sizeof(notif));
if (ret < 0) {
dev_err(&cldev->dev, "recv error: %d\n", ret);
return;
}
switch (notif.cmd_id) {
case CSI_PRIVACY_NOTIF:
if (notif.cont.cont < CSI_PRIVACY_MAX) {
csi->status = notif.cont.cont;
v4l2_ctrl_s_ctrl(csi->privacy_ctrl, csi->status);
}
break;
case CSI_SET_OWNER:
case CSI_SET_CONF:
memcpy(&csi->cmd_response, &notif, ret);
complete(&csi->cmd_completion);
break;
default:
break;
}
}
static int mei_csi_set_stream(struct v4l2_subdev *sd, int enable)
{
struct mei_csi *csi = sd_to_csi(sd);
s64 freq;
int ret;
if (enable && csi->streaming == 0) {
freq = v4l2_get_link_freq(csi->remote->ctrl_handler, 0, 0);
if (freq < 0) {
dev_err(&csi->cldev->dev,
"error %lld, invalid link_freq\n", freq);
ret = freq;
goto err;
}
csi->link_freq = freq;
/* switch CSI-2 link to host */
ret = csi_set_link_owner(csi, CSI_LINK_HOST);
if (ret < 0)
goto err;
/* configure CSI-2 link */
ret = csi_set_link_cfg(csi);
if (ret < 0)
goto err_switch;
ret = v4l2_subdev_call(csi->remote, video, s_stream, 1);
if (ret)
goto err_switch;
} else if (!enable && csi->streaming == 1) {
v4l2_subdev_call(csi->remote, video, s_stream, 0);
/* switch CSI-2 link to IVSC */
ret = csi_set_link_owner(csi, CSI_LINK_IVSC);
if (ret < 0)
dev_warn(&csi->cldev->dev,
"failed to switch CSI2 link: %d\n", ret);
}
csi->streaming = enable;
return 0;
err_switch:
csi_set_link_owner(csi, CSI_LINK_IVSC);
err:
return ret;
}
static struct v4l2_mbus_framefmt *
mei_csi_get_pad_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
struct mei_csi *csi = sd_to_csi(sd);
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
return v4l2_subdev_get_try_format(sd, sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &csi->format_mbus[pad];
default:
return NULL;
}
}
static int mei_csi_init_cfg(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *mbusformat;
struct mei_csi *csi = sd_to_csi(sd);
unsigned int i;
mutex_lock(&csi->lock);
for (i = 0; i < sd->entity.num_pads; i++) {
mbusformat = v4l2_subdev_get_try_format(sd, sd_state, i);
*mbusformat = mei_csi_format_mbus_default;
}
mutex_unlock(&csi->lock);
return 0;
}
static int mei_csi_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mbusformat;
struct mei_csi *csi = sd_to_csi(sd);
mutex_lock(&csi->lock);
mbusformat = mei_csi_get_pad_format(sd, sd_state, format->pad,
format->which);
if (mbusformat)
format->format = *mbusformat;
mutex_unlock(&csi->lock);
return 0;
}
static int mei_csi_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *source_mbusformat;
struct v4l2_mbus_framefmt *mbusformat;
struct mei_csi *csi = sd_to_csi(sd);
struct media_pad *pad;
mbusformat = mei_csi_get_pad_format(sd, sd_state, format->pad,
format->which);
if (!mbusformat)
return -EINVAL;
source_mbusformat = mei_csi_get_pad_format(sd, sd_state, CSI_PAD_SOURCE,
format->which);
if (!source_mbusformat)
return -EINVAL;
v4l_bound_align_image(&format->format.width, 1, 65536, 0,
&format->format.height, 1, 65536, 0, 0);
switch (format->format.code) {
case MEDIA_BUS_FMT_RGB444_1X12:
case MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE:
case MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE:
case MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE:
case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
case MEDIA_BUS_FMT_RGB565_1X16:
case MEDIA_BUS_FMT_BGR565_2X8_BE:
case MEDIA_BUS_FMT_BGR565_2X8_LE:
case MEDIA_BUS_FMT_RGB565_2X8_BE:
case MEDIA_BUS_FMT_RGB565_2X8_LE:
case MEDIA_BUS_FMT_RGB666_1X18:
case MEDIA_BUS_FMT_RBG888_1X24:
case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
case MEDIA_BUS_FMT_BGR888_1X24:
case MEDIA_BUS_FMT_GBR888_1X24:
case MEDIA_BUS_FMT_RGB888_1X24:
case MEDIA_BUS_FMT_RGB888_2X12_BE:
case MEDIA_BUS_FMT_RGB888_2X12_LE:
case MEDIA_BUS_FMT_ARGB8888_1X32:
case MEDIA_BUS_FMT_RGB888_1X32_PADHI:
case MEDIA_BUS_FMT_RGB101010_1X30:
case MEDIA_BUS_FMT_RGB121212_1X36:
case MEDIA_BUS_FMT_RGB161616_1X48:
case MEDIA_BUS_FMT_Y8_1X8:
case MEDIA_BUS_FMT_UV8_1X8:
case MEDIA_BUS_FMT_UYVY8_1_5X8:
case MEDIA_BUS_FMT_VYUY8_1_5X8:
case MEDIA_BUS_FMT_YUYV8_1_5X8:
case MEDIA_BUS_FMT_YVYU8_1_5X8:
case MEDIA_BUS_FMT_UYVY8_2X8:
case MEDIA_BUS_FMT_VYUY8_2X8:
case MEDIA_BUS_FMT_YUYV8_2X8:
case MEDIA_BUS_FMT_YVYU8_2X8:
case MEDIA_BUS_FMT_Y10_1X10:
case MEDIA_BUS_FMT_UYVY10_2X10:
case MEDIA_BUS_FMT_VYUY10_2X10:
case MEDIA_BUS_FMT_YUYV10_2X10:
case MEDIA_BUS_FMT_YVYU10_2X10:
case MEDIA_BUS_FMT_Y12_1X12:
case MEDIA_BUS_FMT_UYVY12_2X12:
case MEDIA_BUS_FMT_VYUY12_2X12:
case MEDIA_BUS_FMT_YUYV12_2X12:
case MEDIA_BUS_FMT_YVYU12_2X12:
case MEDIA_BUS_FMT_UYVY8_1X16:
case MEDIA_BUS_FMT_VYUY8_1X16:
case MEDIA_BUS_FMT_YUYV8_1X16:
case MEDIA_BUS_FMT_YVYU8_1X16:
case MEDIA_BUS_FMT_YDYUYDYV8_1X16:
case MEDIA_BUS_FMT_UYVY10_1X20:
case MEDIA_BUS_FMT_VYUY10_1X20:
case MEDIA_BUS_FMT_YUYV10_1X20:
case MEDIA_BUS_FMT_YVYU10_1X20:
case MEDIA_BUS_FMT_VUY8_1X24:
case MEDIA_BUS_FMT_YUV8_1X24:
case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
case MEDIA_BUS_FMT_UYVY12_1X24:
case MEDIA_BUS_FMT_VYUY12_1X24:
case MEDIA_BUS_FMT_YUYV12_1X24:
case MEDIA_BUS_FMT_YVYU12_1X24:
case MEDIA_BUS_FMT_YUV10_1X30:
case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
case MEDIA_BUS_FMT_AYUV8_1X32:
case MEDIA_BUS_FMT_UYYVYY12_0_5X36:
case MEDIA_BUS_FMT_YUV12_1X36:
case MEDIA_BUS_FMT_YUV16_1X48:
case MEDIA_BUS_FMT_UYYVYY16_0_5X48:
case MEDIA_BUS_FMT_JPEG_1X8:
case MEDIA_BUS_FMT_AHSV8888_1X32:
case MEDIA_BUS_FMT_SBGGR8_1X8:
case MEDIA_BUS_FMT_SGBRG8_1X8:
case MEDIA_BUS_FMT_SGRBG8_1X8:
case MEDIA_BUS_FMT_SRGGB8_1X8:
case MEDIA_BUS_FMT_SBGGR10_1X10:
case MEDIA_BUS_FMT_SGBRG10_1X10:
case MEDIA_BUS_FMT_SGRBG10_1X10:
case MEDIA_BUS_FMT_SRGGB10_1X10:
case MEDIA_BUS_FMT_SBGGR12_1X12:
case MEDIA_BUS_FMT_SGBRG12_1X12:
case MEDIA_BUS_FMT_SGRBG12_1X12:
case MEDIA_BUS_FMT_SRGGB12_1X12:
case MEDIA_BUS_FMT_SBGGR14_1X14:
case MEDIA_BUS_FMT_SGBRG14_1X14:
case MEDIA_BUS_FMT_SGRBG14_1X14:
case MEDIA_BUS_FMT_SRGGB14_1X14:
case MEDIA_BUS_FMT_SBGGR16_1X16:
case MEDIA_BUS_FMT_SGBRG16_1X16:
case MEDIA_BUS_FMT_SGRBG16_1X16:
case MEDIA_BUS_FMT_SRGGB16_1X16:
break;
default:
format->format.code = MEDIA_BUS_FMT_Y8_1X8;
break;
}
if (format->format.field == V4L2_FIELD_ANY)
format->format.field = V4L2_FIELD_NONE;
mutex_lock(&csi->lock);
pad = &csi->pads[format->pad];
if (pad->flags & MEDIA_PAD_FL_SOURCE)
format->format = csi->format_mbus[CSI_PAD_SINK];
*mbusformat = format->format;
if (pad->flags & MEDIA_PAD_FL_SINK)
*source_mbusformat = format->format;
mutex_unlock(&csi->lock);
return 0;
}
static int mei_csi_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
struct mei_csi *csi = ctrl_to_csi(ctrl);
s64 freq;
if (ctrl->id == V4L2_CID_LINK_FREQ) {
if (!csi->remote)
return -EINVAL;
freq = v4l2_get_link_freq(csi->remote->ctrl_handler, 0, 0);
if (freq < 0) {
dev_err(&csi->cldev->dev,
"error %lld, invalid link_freq\n", freq);
return -EINVAL;
}
link_freq_menu_items[0] = freq;
ctrl->val = 0;
return 0;
}
return -EINVAL;
}
static const struct v4l2_ctrl_ops mei_csi_ctrl_ops = {
.g_volatile_ctrl = mei_csi_g_volatile_ctrl,
};
static const struct v4l2_subdev_video_ops mei_csi_video_ops = {
.s_stream = mei_csi_set_stream,
};
static const struct v4l2_subdev_pad_ops mei_csi_pad_ops = {
.init_cfg = mei_csi_init_cfg,
.get_fmt = mei_csi_get_fmt,
.set_fmt = mei_csi_set_fmt,
};
static const struct v4l2_subdev_ops mei_csi_subdev_ops = {
.video = &mei_csi_video_ops,
.pad = &mei_csi_pad_ops,
};
static const struct media_entity_operations mei_csi_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
static int mei_csi_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
struct v4l2_async_connection *asd)
{
struct mei_csi *csi = notifier_to_csi(notifier);
int pad;
pad = media_entity_get_fwnode_pad(&subdev->entity, asd->match.fwnode,
MEDIA_PAD_FL_SOURCE);
if (pad < 0)
return pad;
csi->remote = subdev;
csi->remote_pad = pad;
return media_create_pad_link(&subdev->entity, pad,
&csi->subdev.entity, 1,
MEDIA_LNK_FL_ENABLED |
MEDIA_LNK_FL_IMMUTABLE);
}
static void mei_csi_notify_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
struct v4l2_async_connection *asd)
{
struct mei_csi *csi = notifier_to_csi(notifier);
csi->remote = NULL;
}
static const struct v4l2_async_notifier_operations mei_csi_notify_ops = {
.bound = mei_csi_notify_bound,
.unbind = mei_csi_notify_unbind,
};
static int mei_csi_init_controls(struct mei_csi *csi)
{
u32 max;
int ret;
ret = v4l2_ctrl_handler_init(&csi->ctrl_handler, 2);
if (ret)
return ret;
csi->ctrl_handler.lock = &csi->lock;
max = ARRAY_SIZE(link_freq_menu_items) - 1;
csi->freq_ctrl = v4l2_ctrl_new_int_menu(&csi->ctrl_handler,
&mei_csi_ctrl_ops,
V4L2_CID_LINK_FREQ,
max,
0,
link_freq_menu_items);
if (csi->freq_ctrl)
csi->freq_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY |
V4L2_CTRL_FLAG_VOLATILE;
csi->privacy_ctrl = v4l2_ctrl_new_std(&csi->ctrl_handler, NULL,
V4L2_CID_PRIVACY, 0, 1, 1, 0);
if (csi->privacy_ctrl)
csi->privacy_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
if (csi->ctrl_handler.error)
return csi->ctrl_handler.error;
csi->subdev.ctrl_handler = &csi->ctrl_handler;
return 0;
}
static int mei_csi_parse_firmware(struct mei_csi *csi)
{
struct v4l2_fwnode_endpoint v4l2_ep = {
.bus_type = V4L2_MBUS_CSI2_DPHY,
};
struct device *dev = &csi->cldev->dev;
struct v4l2_async_connection *asd;
struct fwnode_handle *fwnode;
struct fwnode_handle *ep;
int ret;
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0, 0);
if (!ep) {
dev_err(dev, "not connected to subdevice\n");
return -EINVAL;
}
ret = v4l2_fwnode_endpoint_parse(ep, &v4l2_ep);
if (ret) {
dev_err(dev, "could not parse v4l2 endpoint\n");
fwnode_handle_put(ep);
return -EINVAL;
}
fwnode = fwnode_graph_get_remote_endpoint(ep);
fwnode_handle_put(ep);
v4l2_async_subdev_nf_init(&csi->notifier, &csi->subdev);
csi->notifier.ops = &mei_csi_notify_ops;
asd = v4l2_async_nf_add_fwnode(&csi->notifier, fwnode,
struct v4l2_async_connection);
if (IS_ERR(asd)) {
fwnode_handle_put(fwnode);
return PTR_ERR(asd);
}
ret = v4l2_fwnode_endpoint_alloc_parse(fwnode, &v4l2_ep);
fwnode_handle_put(fwnode);
if (ret)
return ret;
csi->nr_of_lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
ret = v4l2_async_nf_register(&csi->notifier);
if (ret)
v4l2_async_nf_cleanup(&csi->notifier);
v4l2_fwnode_endpoint_free(&v4l2_ep);
return ret;
}
static int mei_csi_probe(struct mei_cl_device *cldev,
const struct mei_cl_device_id *id)
{
struct device *dev = &cldev->dev;
struct mei_csi *csi;
int ret;
if (!dev_fwnode(dev))
return -EPROBE_DEFER;
csi = devm_kzalloc(dev, sizeof(struct mei_csi), GFP_KERNEL);
if (!csi)
return -ENOMEM;
csi->cldev = cldev;
mutex_init(&csi->lock);
init_completion(&csi->cmd_completion);
mei_cldev_set_drvdata(cldev, csi);
ret = mei_cldev_enable(cldev);
if (ret < 0) {
dev_err(dev, "mei_cldev_enable failed: %d\n", ret);
goto destroy_mutex;
}
ret = mei_cldev_register_rx_cb(cldev, mei_csi_rx);
if (ret) {
dev_err(dev, "event cb registration failed: %d\n", ret);
goto err_disable;
}
ret = mei_csi_parse_firmware(csi);
if (ret)
goto err_disable;
csi->subdev.dev = &cldev->dev;
v4l2_subdev_init(&csi->subdev, &mei_csi_subdev_ops);
v4l2_set_subdevdata(&csi->subdev, csi);
csi->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE |
V4L2_SUBDEV_FL_HAS_EVENTS;
csi->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
csi->subdev.entity.ops = &mei_csi_entity_ops;
snprintf(csi->subdev.name, sizeof(csi->subdev.name),
MEI_CSI_ENTITY_NAME);
ret = mei_csi_init_controls(csi);
if (ret)
goto err_ctrl_handler;
csi->format_mbus[CSI_PAD_SOURCE] = mei_csi_format_mbus_default;
csi->format_mbus[CSI_PAD_SINK] = mei_csi_format_mbus_default;
csi->pads[CSI_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
csi->pads[CSI_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&csi->subdev.entity, CSI_NUM_PADS,
csi->pads);
if (ret)
goto err_ctrl_handler;
ret = v4l2_subdev_init_finalize(&csi->subdev);
if (ret < 0)
goto err_entity;
ret = v4l2_async_register_subdev(&csi->subdev);
if (ret < 0)
goto err_subdev;
pm_runtime_enable(&cldev->dev);
return 0;
err_subdev:
v4l2_subdev_cleanup(&csi->subdev);
err_entity:
media_entity_cleanup(&csi->subdev.entity);
err_ctrl_handler:
v4l2_ctrl_handler_free(&csi->ctrl_handler);
v4l2_async_nf_unregister(&csi->notifier);
v4l2_async_nf_cleanup(&csi->notifier);
err_disable:
mei_cldev_disable(cldev);
destroy_mutex:
mutex_destroy(&csi->lock);
return ret;
}
static void mei_csi_remove(struct mei_cl_device *cldev)
{
struct mei_csi *csi = mei_cldev_get_drvdata(cldev);
v4l2_async_nf_unregister(&csi->notifier);
v4l2_async_nf_cleanup(&csi->notifier);
v4l2_ctrl_handler_free(&csi->ctrl_handler);
v4l2_async_unregister_subdev(&csi->subdev);
v4l2_subdev_cleanup(&csi->subdev);
media_entity_cleanup(&csi->subdev.entity);
pm_runtime_disable(&cldev->dev);
mutex_destroy(&csi->lock);
}
#define MEI_CSI_UUID UUID_LE(0x92335FCF, 0x3203, 0x4472, \
0xAF, 0x93, 0x7b, 0x44, 0x53, 0xAC, 0x29, 0xDA)
static const struct mei_cl_device_id mei_csi_tbl[] = {
{ MEI_CSI_DRIVER_NAME, MEI_CSI_UUID, MEI_CL_VERSION_ANY },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(mei, mei_csi_tbl);
static struct mei_cl_driver mei_csi_driver = {
.id_table = mei_csi_tbl,
.name = MEI_CSI_DRIVER_NAME,
.probe = mei_csi_probe,
.remove = mei_csi_remove,
};
module_mei_cl_driver(mei_csi_driver);
MODULE_AUTHOR("Wentong Wu <[email protected]>");
MODULE_AUTHOR("Zhifeng Wang <[email protected]>");
MODULE_DESCRIPTION("Device driver for IVSC CSI");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/pci/intel/ivsc/mei_csi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017,2020 Intel Corporation
*
* Based partially on Intel IPU4 driver written by
* Sakari Ailus <[email protected]>
* Samu Onkalo <[email protected]>
* Jouni Högander <[email protected]>
* Jouni Ukkonen <[email protected]>
* Antti Laakso <[email protected]>
* et al.
*/
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/vmalloc.h>
#include <media/ipu-bridge.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-sg.h>
#include "ipu3-cio2.h"
struct ipu3_cio2_fmt {
u32 mbus_code;
u32 fourcc;
u8 mipicode;
u8 bpp;
};
/*
* These are raw formats used in Intel's third generation of
* Image Processing Unit known as IPU3.
 * 10-bit raw Bayer packed: 32 bytes for every 25 pixels,
 * with the last 6 bits (LSBs) of each 32-byte block unused.
*/
static const struct ipu3_cio2_fmt formats[] = {
{ /* put default entry at beginning */
.mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
.fourcc = V4L2_PIX_FMT_IPU3_SGRBG10,
.mipicode = 0x2b,
.bpp = 10,
}, {
.mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
.fourcc = V4L2_PIX_FMT_IPU3_SGBRG10,
.mipicode = 0x2b,
.bpp = 10,
}, {
.mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
.fourcc = V4L2_PIX_FMT_IPU3_SBGGR10,
.mipicode = 0x2b,
.bpp = 10,
}, {
.mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
.fourcc = V4L2_PIX_FMT_IPU3_SRGGB10,
.mipicode = 0x2b,
.bpp = 10,
}, {
.mbus_code = MEDIA_BUS_FMT_Y10_1X10,
.fourcc = V4L2_PIX_FMT_IPU3_Y10,
.mipicode = 0x2b,
.bpp = 10,
},
};
/*
 * cio2_find_format - lookup color format by fourcc and/or media bus code
* @pixelformat: fourcc to match, ignored if null
* @mbus_code: media bus code to match, ignored if null
*/
static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
const u32 *mbus_code)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(formats); i++) {
if (pixelformat && *pixelformat != formats[i].fourcc)
continue;
if (mbus_code && *mbus_code != formats[i].mbus_code)
continue;
return &formats[i];
}
return NULL;
}
static inline u32 cio2_bytesperline(const unsigned int width)
{
/*
 * 64 bytes for every 50 pixels; the line length
 * in bytes is a multiple of 64 (line end alignment).
*/
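/*
 * Illustrative example (not part of the original driver): for a
 * width of 1936 pixels, DIV_ROUND_UP(1936, 50) = 39, so the line
 * length is 39 * 64 = 2496 bytes.
 */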
return DIV_ROUND_UP(width, 50) * 64;
}
/**************** FBPT operations ****************/
static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
{
struct device *dev = &cio2->pci_dev->dev;
if (cio2->dummy_lop) {
dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_lop,
cio2->dummy_lop_bus_addr);
cio2->dummy_lop = NULL;
}
if (cio2->dummy_page) {
dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_page,
cio2->dummy_page_bus_addr);
cio2->dummy_page = NULL;
}
}
static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
{
struct device *dev = &cio2->pci_dev->dev;
unsigned int i;
cio2->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
&cio2->dummy_page_bus_addr,
GFP_KERNEL);
cio2->dummy_lop = dma_alloc_coherent(dev, PAGE_SIZE,
&cio2->dummy_lop_bus_addr,
GFP_KERNEL);
if (!cio2->dummy_page || !cio2->dummy_lop) {
cio2_fbpt_exit_dummy(cio2);
return -ENOMEM;
}
/*
 * List of Pointers (LOP) contains 1024 32-bit pointers, each to a 4KB page.
 * Initialize each entry to the dummy_page bus base address.
*/
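/*
 * Illustrative note (not part of the original driver): with 1024
 * entries, each pointing to a 4 KB page, a single LOP page can map
 * up to 4 MB of buffer memory.
 */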
for (i = 0; i < CIO2_LOP_ENTRIES; i++)
cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
return 0;
}
static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
{
/*
 * The CPU first initializes some fields in the fbpt, then sets
 * the VALID bit; this barrier ensures that the DMA (device)
 * does not see the VALID bit enabled before the other fields are
 * initialized, otherwise it could lead to havoc.
*/
dma_wmb();
/*
* Request interrupts for start and completion
* Valid bit is applicable only to 1st entry
*/
entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
}
/* Initialize fpbt entries to point to dummy frame */
static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
struct cio2_fbpt_entry
entry[CIO2_MAX_LOPS])
{
unsigned int i;
entry[0].first_entry.first_page_offset = 0;
entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;
for (i = 0; i < CIO2_MAX_LOPS; i++)
entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
cio2_fbpt_entry_enable(cio2, entry);
}
/* Initialize fpbt entries to point to a given buffer */
static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
struct cio2_buffer *b,
struct cio2_fbpt_entry
entry[CIO2_MAX_LOPS])
{
struct vb2_buffer *vb = &b->vbb.vb2_buf;
unsigned int length = vb->planes[0].length;
int remaining, i;
entry[0].first_entry.first_page_offset = b->offset;
remaining = length + entry[0].first_entry.first_page_offset;
entry[1].second_entry.num_of_pages = PFN_UP(remaining);
/*
* last_page_available_bytes has the offset of the last byte in the
* last page which is still accessible by DMA. DMA cannot access
* beyond this point. Valid range for this is from 0 to 4095.
* 0 indicates 1st byte in the page is DMA accessible.
* 4095 (PAGE_SIZE - 1) means every single byte in the last page
* is available for DMA transfer.
*/
remaining = offset_in_page(remaining) ?: PAGE_SIZE;
entry[1].second_entry.last_page_available_bytes = remaining - 1;
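/*
 * Illustrative example (not part of the original driver), assuming
 * 4 KiB pages and a buffer of length 10000 bytes starting at page
 * offset 0: remaining = 10000, num_of_pages = PFN_UP(10000) = 3,
 * offset_in_page(10000) = 1808, so last_page_available_bytes = 1807.
 */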
/* Fill FBPT */
remaining = length;
i = 0;
while (remaining > 0) {
entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
entry++;
i++;
}
/*
 * The first FBPT entry past the buffer is not meaningful, but it should still point to a valid LOP
*/
entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
cio2_fbpt_entry_enable(cio2, entry);
}
static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
{
struct device *dev = &cio2->pci_dev->dev;
q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
GFP_KERNEL);
if (!q->fbpt)
return -ENOMEM;
return 0;
}
static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
{
dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
}
/**************** CSI2 hardware setup ****************/
/*
* The CSI2 receiver has several parameters affecting
* the receiver timings. These depend on the MIPI bus frequency
* F in Hz (sensor transmitter rate) as follows:
* register value = (A/1e9 + B * UI) / COUNT_ACC
* where
* UI = 1 / (2 * F) in seconds
* COUNT_ACC = counter accuracy in seconds
* For IPU3 COUNT_ACC = 0.0625
*
* A and B are coefficients from the table below,
* depending whether the register minimum or maximum value is
* calculated.
* Minimum Maximum
* Clock lane A B A B
* reg_rx_csi_dly_cnt_termen_clane 0 0 38 0
* reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16
* Data lanes
* reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4
* reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6
* reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4
* reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6
* reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4
* reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6
* reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4
* reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6
*
* We use the minimum values of both A and B.
*/
/*
* shift for keeping value range suitable for 32-bit integer arithmetic
*/
#define LIMIT_SHIFT 8
static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
{
const u32 accinv = 16; /* invert of counter resolution */
const u32 uiinv = 500000000; /* 1e9 / 2 */
s32 r;
freq >>= LIMIT_SHIFT;
if (WARN_ON(freq <= 0 || freq > S32_MAX))
return def;
/*
* b could be 0, -2 or -8, so |accinv * b| is always
* less than (1 << ds) and thus |r| < 500000000.
*/
r = accinv * b * (uiinv >> LIMIT_SHIFT);
r = r / (s32)freq;
/* max value of a is 95 */
r += accinv * a;
return r;
};
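/*
 * Illustrative example (not part of the original driver): for the
 * clock lane settle minimum (a = 95, b = -8) and a 400 MHz link,
 * UI = 1.25 ns, so r = 16 * (95 + (-8 * 1.25)) = 16 * 85 = 1360,
 * which matches the integer arithmetic above.
 */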
/* Calculate the delay value for termination enable of clock lane HS Rx */
static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
struct cio2_csi2_timing *timing,
unsigned int bpp, unsigned int lanes)
{
struct device *dev = &cio2->pci_dev->dev;
s64 freq;
if (!q->sensor)
return -ENODEV;
freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
if (freq < 0) {
dev_err(dev, "error %lld, invalid link_freq\n", freq);
return freq;
}
timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
freq,
CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
freq,
CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
freq,
CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
freq,
CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);
return 0;
};
static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
{
static const int NUM_VCS = 4;
static const int SID; /* Stream id */
static const int ENTRY;
static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
CIO2_FBPT_SUBENTRY_UNIT);
const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
const struct ipu3_cio2_fmt *fmt;
void __iomem *const base = cio2->base;
u8 lanes, csi2bus = q->csi2.port;
u8 sensor_vc = SENSOR_VIR_CH_DFLT;
struct cio2_csi2_timing timing = { 0 };
int i, r;
fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
if (!fmt)
return -EINVAL;
lanes = q->csi2.lanes;
r = cio2_csi2_calc_timing(cio2, q, &timing, fmt->bpp, lanes);
if (r)
return r;
writel(timing.clk_termen, q->csi_rx_base +
CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
writel(timing.clk_settle, q->csi_rx_base +
CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
for (i = 0; i < lanes; i++) {
writel(timing.dat_termen, q->csi_rx_base +
CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
writel(timing.dat_settle, q->csi_rx_base +
CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
}
writel(CIO2_PBM_WMCTRL1_MIN_2CK |
CIO2_PBM_WMCTRL1_MID1_2CK |
CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
CIO2_PBM_ARB_CTRL_LE_EN |
CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
base + CIO2_REG_PBM_ARB_CTRL);
writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);
writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);
/* Configure MIPI backend */
for (i = 0; i < NUM_VCS; i++)
writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));
/* There are 16 short packet LUT entries */
for (i = 0; i < 16; i++)
writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);
writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);
writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
base + CIO2_REG_INT_EN);
writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
<< CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));
writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
writel(CIO2_CGC_PRIM_TGE |
CIO2_CGC_SIDE_TGE |
CIO2_CGC_XOSC_TGE |
CIO2_CGC_D3I3_TGE |
CIO2_CGC_CSI2_INTERFRAME_TGE |
CIO2_CGC_CSI2_PORT_DCGE |
CIO2_CGC_SIDE_DCGE |
CIO2_CGC_PRIM_DCGE |
CIO2_CGC_ROSC_DCGE |
CIO2_CGC_XOSC_DCGE |
CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
CIO2_CGC_CSI_CLKGATE_HOLDOFF
<< CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
base + CIO2_REG_LTRVAL01);
writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
base + CIO2_REG_LTRVAL23);
for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
writel(0, base + CIO2_REG_CDMABA(i));
writel(0, base + CIO2_REG_CDMAC0(i));
writel(0, base + CIO2_REG_CDMAC1(i));
}
/* Enable DMA */
writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
CIO2_CDMAC0_DMA_INTR_ON_FE |
CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
CIO2_CDMAC0_DMA_EN |
CIO2_CDMAC0_DMA_INTR_ON_FS |
CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));
writel(0, base + CIO2_REG_PBM_FOPN_ABORT);
writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
CIO2_PXM_FRF_CFG_MSK_ECC_RE |
CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));
/* Clear interrupts */
writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
writel(~0, base + CIO2_REG_INT_STS);
/* Enable devices, starting from the last device in the pipe */
writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
return 0;
}
static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
struct device *dev = &cio2->pci_dev->dev;
void __iomem *const base = cio2->base;
unsigned int i;
u32 value;
int ret;
/* Disable CSI receiver and MIPI backend devices */
writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
/* Halt DMA */
writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
value, value & CIO2_CDMAC0_DMA_HALTED,
4000, 2000000);
if (ret)
dev_err(dev, "DMA %i can not be halted\n", CIO2_DMA_CHAN);
for (i = 0; i < CIO2_NUM_PORTS; i++) {
writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
}
}
static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
{
struct device *dev = &cio2->pci_dev->dev;
struct cio2_queue *q = cio2->cur_queue;
struct cio2_fbpt_entry *entry;
u64 ns = ktime_get_ns();
if (dma_chan >= CIO2_QUEUES) {
dev_err(dev, "bad DMA channel %i\n", dma_chan);
return;
}
entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
dev_warn(dev, "no ready buffers found on DMA channel %u\n",
dma_chan);
return;
}
/* Find out which buffer(s) are ready */
do {
struct cio2_buffer *b;
b = q->bufs[q->bufs_first];
if (b) {
unsigned int received = entry[1].second_entry.num_of_bytes;
unsigned long payload =
vb2_get_plane_payload(&b->vbb.vb2_buf, 0);
q->bufs[q->bufs_first] = NULL;
atomic_dec(&q->bufs_queued);
dev_dbg(dev, "buffer %i done\n", b->vbb.vb2_buf.index);
b->vbb.vb2_buf.timestamp = ns;
b->vbb.field = V4L2_FIELD_NONE;
b->vbb.sequence = atomic_read(&q->frame_sequence);
if (payload != received)
dev_warn(dev,
"payload length is %lu, received %u\n",
payload, received);
vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
}
atomic_inc(&q->frame_sequence);
cio2_fbpt_entry_init_dummy(cio2, entry);
q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
} while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
}
static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
{
/*
* For the user space camera control algorithms it is essential
* to know when the reception of a frame has begun. That's often
* the best timing information to get from the hardware.
*/
struct v4l2_event event = {
.type = V4L2_EVENT_FRAME_SYNC,
.u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
};
v4l2_event_queue(q->subdev.devnode, &event);
}
static const char *const cio2_irq_errs[] = {
"single packet header error corrected",
"multiple packet header errors detected",
"payload checksum (CRC) error",
"fifo overflow",
"reserved short packet data type detected",
"reserved long packet data type detected",
"incomplete long packet detected",
"frame sync error",
"line sync error",
"DPHY start of transmission error",
"DPHY synchronization error",
"escape mode error",
"escape mode trigger event",
"escape mode ultra-low power state for data lane(s)",
"escape mode ultra-low power state exit for clock lane",
"inter-frame short packet discarded",
"inter-frame long packet discarded",
"non-matching Long Packet stalled",
};
static void cio2_irq_log_irq_errs(struct device *dev, u8 port, u32 status)
{
unsigned long csi2_status = status;
unsigned int i;
for_each_set_bit(i, &csi2_status, ARRAY_SIZE(cio2_irq_errs))
dev_err(dev, "CSI-2 receiver port %i: %s\n",
port, cio2_irq_errs[i]);
if (fls_long(csi2_status) >= ARRAY_SIZE(cio2_irq_errs))
dev_warn(dev, "unknown CSI2 error 0x%lx on port %i\n",
csi2_status, port);
}
static const char *const cio2_port_errs[] = {
"ECC recoverable",
"DPHY not recoverable",
"ECC not recoverable",
"CRC error",
"INTERFRAMEDATA",
"PKT2SHORT",
"PKT2LONG",
};
static void cio2_irq_log_port_errs(struct device *dev, u8 port, u32 status)
{
unsigned long port_status = status;
unsigned int i;
for_each_set_bit(i, &port_status, ARRAY_SIZE(cio2_port_errs))
dev_err(dev, "port %i error %s\n", port, cio2_port_errs[i]);
}
static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
{
struct device *dev = &cio2->pci_dev->dev;
void __iomem *const base = cio2->base;
if (int_status & CIO2_INT_IOOE) {
/*
* Interrupt on Output Error:
* 1) SRAM is full and FS received, or
* 2) An invalid bit detected by DMA.
*/
u32 oe_status, oe_clear;
oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
oe_status = oe_clear;
if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
dev_err(dev, "DMA output error: 0x%x\n",
(oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
>> CIO2_INT_EXT_OE_DMAOE_SHIFT);
oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
}
if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
(oe_status & CIO2_INT_EXT_OE_OES_MASK)
>> CIO2_INT_EXT_OE_OES_SHIFT);
oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
}
writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
if (oe_status)
dev_warn(dev, "unknown interrupt 0x%x on OE\n",
oe_status);
int_status &= ~CIO2_INT_IOOE;
}
if (int_status & CIO2_INT_IOC_MASK) {
/* DMA IO done -- frame ready */
u32 clr = 0;
unsigned int d;
for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
if (int_status & CIO2_INT_IOC(d)) {
clr |= CIO2_INT_IOC(d);
cio2_buffer_done(cio2, d);
}
int_status &= ~clr;
}
if (int_status & CIO2_INT_IOS_IOLN_MASK) {
/* DMA IO starts or reached specified line */
u32 clr = 0;
unsigned int d;
for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
if (int_status & CIO2_INT_IOS_IOLN(d)) {
clr |= CIO2_INT_IOS_IOLN(d);
if (d == CIO2_DMA_CHAN)
cio2_queue_event_sof(cio2,
cio2->cur_queue);
}
int_status &= ~clr;
}
if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
/* CSI2 receiver (error) interrupt */
unsigned int port;
u32 ie_status;
ie_status = readl(base + CIO2_REG_INT_STS_EXT_IE);
for (port = 0; port < CIO2_NUM_PORTS; port++) {
u32 port_status = (ie_status >> (port * 8)) & 0xff;
cio2_irq_log_port_errs(dev, port, port_status);
if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
void __iomem *csi_rx_base =
base + CIO2_REG_PIPE_BASE(port);
u32 csi2_status;
csi2_status = readl(csi_rx_base +
CIO2_REG_IRQCTRL_STATUS);
cio2_irq_log_irq_errs(dev, port, csi2_status);
writel(csi2_status,
csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
}
}
writel(ie_status, base + CIO2_REG_INT_STS_EXT_IE);
int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
}
if (int_status)
dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
}
static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
{
struct cio2_device *cio2 = cio2_ptr;
void __iomem *const base = cio2->base;
struct device *dev = &cio2->pci_dev->dev;
u32 int_status;
int_status = readl(base + CIO2_REG_INT_STS);
dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
if (!int_status)
return IRQ_NONE;
do {
writel(int_status, base + CIO2_REG_INT_STS);
cio2_irq_handle_once(cio2, int_status);
int_status = readl(base + CIO2_REG_INT_STS);
if (int_status)
dev_dbg(dev, "pending status 0x%x\n", int_status);
} while (int_status);
return IRQ_HANDLED;
}
/**************** Videobuf2 interface ****************/
static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
enum vb2_buffer_state state)
{
unsigned int i;
for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
if (q->bufs[i]) {
atomic_dec(&q->bufs_queued);
vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
state);
q->bufs[i] = NULL;
}
}
}
static int cio2_vb2_queue_setup(struct vb2_queue *vq,
unsigned int *num_buffers,
unsigned int *num_planes,
unsigned int sizes[],
struct device *alloc_devs[])
{
struct cio2_device *cio2 = vb2_get_drv_priv(vq);
struct device *dev = &cio2->pci_dev->dev;
struct cio2_queue *q = vb2q_to_cio2_queue(vq);
unsigned int i;
if (*num_planes && *num_planes < q->format.num_planes)
return -EINVAL;
for (i = 0; i < q->format.num_planes; ++i) {
if (*num_planes && sizes[i] < q->format.plane_fmt[i].sizeimage)
return -EINVAL;
sizes[i] = q->format.plane_fmt[i].sizeimage;
alloc_devs[i] = dev;
}
*num_planes = q->format.num_planes;
*num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
/* Initialize buffer queue */
for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
q->bufs[i] = NULL;
cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
}
atomic_set(&q->bufs_queued, 0);
q->bufs_first = 0;
q->bufs_next = 0;
return 0;
}
/* Called after each buffer is allocated */
static int cio2_vb2_buf_init(struct vb2_buffer *vb)
{
struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
struct device *dev = &cio2->pci_dev->dev;
struct cio2_buffer *b = to_cio2_buffer(vb);
unsigned int pages = PFN_UP(vb->planes[0].length);
unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
struct sg_table *sg;
struct sg_dma_page_iter sg_iter;
unsigned int i, j;
if (lops <= 0 || lops > CIO2_MAX_LOPS) {
dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
vb->planes[0].length);
return -ENOSPC; /* Should never happen */
}
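/*
 * Illustrative example (not part of the original driver), assuming
 * CIO2_LOP_ENTRIES is 1024 (see the LOP comment earlier in this
 * file) and 4 KiB pages: an 8 MiB plane gives pages = 2048, so
 * lops = DIV_ROUND_UP(2048 + 1, 1024) = 3.
 */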
memset(b->lop, 0, sizeof(b->lop));
/* Allocate LOP table */
for (i = 0; i < lops; i++) {
b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
&b->lop_bus_addr[i], GFP_KERNEL);
if (!b->lop[i])
goto fail;
}
/* Fill LOP */
sg = vb2_dma_sg_plane_desc(vb, 0);
if (!sg)
return -ENOMEM;
if (sg->nents && sg->sgl)
b->offset = sg->sgl->offset;
i = j = 0;
for_each_sg_dma_page(sg->sgl, &sg_iter, sg->nents, 0) {
if (!pages--)
break;
b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
j++;
if (j == CIO2_LOP_ENTRIES) {
i++;
j = 0;
}
}
b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
return 0;
fail:
while (i--)
dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
return -ENOMEM;
}
/* Transfer buffer ownership to cio2 */
static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
{
struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
struct device *dev = &cio2->pci_dev->dev;
struct cio2_queue *q =
container_of(vb->vb2_queue, struct cio2_queue, vbq);
struct cio2_buffer *b = to_cio2_buffer(vb);
struct cio2_fbpt_entry *entry;
unsigned long flags;
unsigned int i, j, next = q->bufs_next;
int bufs_queued = atomic_inc_return(&q->bufs_queued);
u32 fbpt_rp;
dev_dbg(dev, "queue buffer %d\n", vb->index);
/*
* This code queues the buffer to the CIO2 DMA engine, which starts
* running once streaming has started. It is possible that this code
* gets pre-empted due to increased CPU load. Upon this, the driver
* does not get an opportunity to queue new buffers to the CIO2 DMA
* engine. When the DMA engine encounters an FBPT entry without the
* VALID bit set, the DMA engine halts, which requires a restart of
* the DMA engine and sensor, to continue streaming.
 * This is not desired and is highly unlikely, given that the DMA
 * engine needs to process 32 FBPT entries before it can run into
 * an FBPT entry without the VALID bit set. We try to mitigate this
* by disabling interrupts for the duration of this queueing.
*/
local_irq_save(flags);
fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
>> CIO2_CDMARI_FBPT_RP_SHIFT)
& CIO2_CDMARI_FBPT_RP_MASK;
/*
* fbpt_rp is the fbpt entry that the dma is currently working
* on, but since it could jump to next entry at any time,
* assume that we might already be there.
*/
fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
if (bufs_queued <= 1 || fbpt_rp == next)
/* Buffers were drained */
next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
/*
* We have allocated CIO2_MAX_BUFFERS circularly for the
* hw, the user has requested N buffer queue. The driver
* ensures N <= CIO2_MAX_BUFFERS and guarantees that whenever
* user queues a buffer, there necessarily is a free buffer.
*/
if (!q->bufs[next]) {
q->bufs[next] = b;
entry = &q->fbpt[next * CIO2_MAX_LOPS];
cio2_fbpt_entry_init_buf(cio2, b, entry);
local_irq_restore(flags);
q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
for (j = 0; j < vb->num_planes; j++)
vb2_set_plane_payload(vb, j,
q->format.plane_fmt[j].sizeimage);
return;
}
dev_dbg(dev, "entry %i was full!\n", next);
next = (next + 1) % CIO2_MAX_BUFFERS;
}
local_irq_restore(flags);
dev_err(dev, "error: all cio2 entries were full!\n");
atomic_dec(&q->bufs_queued);
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
/* Called when each buffer is freed */
static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
{
struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
struct device *dev = &cio2->pci_dev->dev;
struct cio2_buffer *b = to_cio2_buffer(vb);
unsigned int i;
/* Free LOP table */
for (i = 0; i < CIO2_MAX_LOPS; i++) {
if (b->lop[i])
dma_free_coherent(dev, PAGE_SIZE,
b->lop[i], b->lop_bus_addr[i]);
}
}
static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct cio2_queue *q = vb2q_to_cio2_queue(vq);
struct cio2_device *cio2 = vb2_get_drv_priv(vq);
struct device *dev = &cio2->pci_dev->dev;
int r;
cio2->cur_queue = q;
atomic_set(&q->frame_sequence, 0);
r = pm_runtime_resume_and_get(dev);
if (r < 0) {
dev_info(dev, "failed to set power %d\n", r);
return r;
}
r = video_device_pipeline_start(&q->vdev, &q->pipe);
if (r)
goto fail_pipeline;
r = cio2_hw_init(cio2, q);
if (r)
goto fail_hw;
/* Start streaming on sensor */
r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
if (r)
goto fail_csi2_subdev;
cio2->streaming = true;
return 0;
fail_csi2_subdev:
cio2_hw_exit(cio2, q);
fail_hw:
video_device_pipeline_stop(&q->vdev);
fail_pipeline:
dev_dbg(dev, "failed to start streaming (%d)\n", r);
cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
pm_runtime_put(dev);
return r;
}
static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
{
struct cio2_queue *q = vb2q_to_cio2_queue(vq);
struct cio2_device *cio2 = vb2_get_drv_priv(vq);
struct device *dev = &cio2->pci_dev->dev;
if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
dev_err(dev, "failed to stop sensor streaming\n");
cio2_hw_exit(cio2, q);
synchronize_irq(cio2->pci_dev->irq);
cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
video_device_pipeline_stop(&q->vdev);
pm_runtime_put(dev);
cio2->streaming = false;
}
static const struct vb2_ops cio2_vb2_ops = {
.buf_init = cio2_vb2_buf_init,
.buf_queue = cio2_vb2_buf_queue,
.buf_cleanup = cio2_vb2_buf_cleanup,
.queue_setup = cio2_vb2_queue_setup,
.start_streaming = cio2_vb2_start_streaming,
.stop_streaming = cio2_vb2_stop_streaming,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
/**************** V4L2 interface ****************/
static int cio2_v4l2_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
return 0;
}
static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
struct v4l2_fmtdesc *f)
{
if (f->index >= ARRAY_SIZE(formats))
return -EINVAL;
f->pixelformat = formats[f->index].fourcc;
return 0;
}
/* The format is validated in cio2_video_link_validate() */
static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct cio2_queue *q = file_to_cio2_queue(file);
f->fmt.pix_mp = q->format;
return 0;
}
static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
const struct ipu3_cio2_fmt *fmt;
struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;
fmt = cio2_find_format(&mpix->pixelformat, NULL);
if (!fmt)
fmt = &formats[0];
/* Only supports up to 4224x3136 */
if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
mpix->width = CIO2_IMAGE_MAX_WIDTH;
if (mpix->height > CIO2_IMAGE_MAX_HEIGHT)
mpix->height = CIO2_IMAGE_MAX_HEIGHT;
mpix->num_planes = 1;
mpix->pixelformat = fmt->fourcc;
mpix->colorspace = V4L2_COLORSPACE_RAW;
mpix->field = V4L2_FIELD_NONE;
mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
mpix->height;
/* use default */
mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
return 0;
}
static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct cio2_queue *q = file_to_cio2_queue(file);
cio2_v4l2_try_fmt(file, fh, f);
q->format = f->fmt.pix_mp;
return 0;
}
static int
cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
{
if (input->index > 0)
return -EINVAL;
strscpy(input->name, "camera", sizeof(input->name));
input->type = V4L2_INPUT_TYPE_CAMERA;
return 0;
}
static int
cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
{
*input = 0;
return 0;
}
static int
cio2_video_s_input(struct file *file, void *fh, unsigned int input)
{
return input == 0 ? 0 : -EINVAL;
}
static const struct v4l2_file_operations cio2_v4l2_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = video_ioctl2,
.open = v4l2_fh_open,
.release = vb2_fop_release,
.poll = vb2_fop_poll,
.mmap = vb2_fop_mmap,
};
static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
.vidioc_querycap = cio2_v4l2_querycap,
.vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
.vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
.vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
.vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_enum_input = cio2_video_enum_input,
.vidioc_g_input = cio2_video_g_input,
.vidioc_s_input = cio2_video_s_input,
};
static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
struct v4l2_fh *fh,
struct v4l2_event_subscription *sub)
{
if (sub->type != V4L2_EVENT_FRAME_SYNC)
return -EINVAL;
/* Line number. For now only zero accepted. */
if (sub->id != 0)
return -EINVAL;
return v4l2_event_subscribe(fh, sub, 0, NULL);
}
static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *format;
const struct v4l2_mbus_framefmt fmt_default = {
.width = 1936,
.height = 1096,
.code = formats[0].mbus_code,
.field = V4L2_FIELD_NONE,
.colorspace = V4L2_COLORSPACE_RAW,
.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
.quantization = V4L2_QUANTIZATION_DEFAULT,
.xfer_func = V4L2_XFER_FUNC_DEFAULT,
};
/* Initialize try_fmt */
format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SINK);
*format = fmt_default;
/* same as sink */
format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SOURCE);
*format = fmt_default;
return 0;
}
/*
* cio2_subdev_get_fmt - Handle get format by pads subdev method
* @sd : pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
mutex_lock(&q->subdev_lock);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
fmt->pad);
else
fmt->format = q->subdev_fmt;
mutex_unlock(&q->subdev_lock);
return 0;
}
/*
* cio2_subdev_set_fmt - Handle set format by pads subdev method
* @sd : pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
struct v4l2_mbus_framefmt *mbus;
u32 mbus_code = fmt->format.code;
unsigned int i;
/*
* Only allow setting sink pad format;
* source always propagates from sink
*/
if (fmt->pad == CIO2_PAD_SOURCE)
return cio2_subdev_get_fmt(sd, sd_state, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
mbus = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
else
mbus = &q->subdev_fmt;
fmt->format.code = formats[0].mbus_code;
for (i = 0; i < ARRAY_SIZE(formats); i++) {
if (formats[i].mbus_code == mbus_code) {
fmt->format.code = mbus_code;
break;
}
}
fmt->format.width = min(fmt->format.width, CIO2_IMAGE_MAX_WIDTH);
fmt->format.height = min(fmt->format.height, CIO2_IMAGE_MAX_HEIGHT);
fmt->format.field = V4L2_FIELD_NONE;
mutex_lock(&q->subdev_lock);
*mbus = fmt->format;
mutex_unlock(&q->subdev_lock);
return 0;
}
static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(formats))
return -EINVAL;
code->code = formats[code->index].mbus_code;
return 0;
}
static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
struct v4l2_subdev_format *fmt)
{
if (is_media_entity_v4l2_subdev(pad->entity)) {
struct v4l2_subdev *sd =
media_entity_to_v4l2_subdev(pad->entity);
memset(fmt, 0, sizeof(*fmt));
fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
fmt->pad = pad->index;
return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
}
return -EINVAL;
}
static int cio2_video_link_validate(struct media_link *link)
{
struct media_entity *entity = link->sink->entity;
struct video_device *vd = media_entity_to_video_device(entity);
struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
struct cio2_device *cio2 = video_get_drvdata(vd);
struct device *dev = &cio2->pci_dev->dev;
struct v4l2_subdev_format source_fmt;
int ret;
if (!media_pad_remote_pad_first(entity->pads)) {
dev_info(dev, "video node %s pad not connected\n", vd->name);
return -ENOTCONN;
}
ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
if (ret < 0)
return 0;
if (source_fmt.format.width != q->format.width ||
source_fmt.format.height != q->format.height) {
dev_err(dev, "Wrong width or height %ux%u (%ux%u expected)\n",
q->format.width, q->format.height,
source_fmt.format.width, source_fmt.format.height);
return -EINVAL;
}
if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
return -EINVAL;
return 0;
}
static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
.subscribe_event = cio2_subdev_subscribe_event,
.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
.open = cio2_subdev_open,
};
static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
.link_validate = v4l2_subdev_link_validate_default,
.get_fmt = cio2_subdev_get_fmt,
.set_fmt = cio2_subdev_set_fmt,
.enum_mbus_code = cio2_subdev_enum_mbus_code,
};
static const struct v4l2_subdev_ops cio2_subdev_ops = {
.core = &cio2_subdev_core_ops,
.pad = &cio2_subdev_pad_ops,
};
/******* V4L2 sub-device asynchronous registration callbacks***********/
struct sensor_async_subdev {
struct v4l2_async_connection asd;
struct csi2_bus_info csi2;
};
#define to_sensor_asd(__asd) \
container_of_const(__asd, struct sensor_async_subdev, asd)
/* The .bound() notifier callback when a match is found */
static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
struct v4l2_async_connection *asd)
{
struct cio2_device *cio2 = to_cio2_device(notifier);
struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
struct cio2_queue *q;
int ret;
if (cio2->queue[s_asd->csi2.port].sensor)
return -EBUSY;
ret = ipu_bridge_instantiate_vcm(sd->dev);
if (ret)
return ret;
q = &cio2->queue[s_asd->csi2.port];
q->csi2 = s_asd->csi2;
q->sensor = sd;
q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);
return 0;
}
/* The .unbind callback */
static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
struct v4l2_async_connection *asd)
{
struct cio2_device *cio2 = to_cio2_device(notifier);
struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
cio2->queue[s_asd->csi2.port].sensor = NULL;
}
/* .complete() is called after all subdevices have been located */
static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
{
struct cio2_device *cio2 = to_cio2_device(notifier);
struct device *dev = &cio2->pci_dev->dev;
struct sensor_async_subdev *s_asd;
struct v4l2_async_connection *asd;
struct cio2_queue *q;
int ret;
list_for_each_entry(asd, &cio2->notifier.done_list, asc_entry) {
s_asd = to_sensor_asd(asd);
q = &cio2->queue[s_asd->csi2.port];
ret = media_entity_get_fwnode_pad(&q->sensor->entity,
s_asd->asd.match.fwnode,
MEDIA_PAD_FL_SOURCE);
if (ret < 0) {
dev_err(dev, "no pad for endpoint %pfw (%d)\n",
s_asd->asd.match.fwnode, ret);
return ret;
}
ret = media_create_pad_link(&q->sensor->entity, ret,
&q->subdev.entity, CIO2_PAD_SINK,
0);
if (ret) {
dev_err(dev, "failed to create link for %s (endpoint %pfw, error %d)\n",
q->sensor->name, s_asd->asd.match.fwnode, ret);
return ret;
}
}
return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
}
static const struct v4l2_async_notifier_operations cio2_async_ops = {
.bound = cio2_notifier_bound,
.unbind = cio2_notifier_unbind,
.complete = cio2_notifier_complete,
};
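/*
* Walk the firmware graph: for every CSI-2 port that has an endpoint, parse
* the D-PHY bus configuration, add an async connection for the remote
* sensor, and finally register the notifier.
*/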
static int cio2_parse_firmware(struct cio2_device *cio2)
{
struct device *dev = &cio2->pci_dev->dev;
unsigned int i;
int ret;
for (i = 0; i < CIO2_NUM_PORTS; i++) {
struct v4l2_fwnode_endpoint vep = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
struct sensor_async_subdev *s_asd;
struct fwnode_handle *ep;
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
FWNODE_GRAPH_ENDPOINT_NEXT);
if (!ep)
continue;
ret = v4l2_fwnode_endpoint_parse(ep, &vep);
if (ret)
goto err_parse;
s_asd = v4l2_async_nf_add_fwnode_remote(&cio2->notifier, ep,
struct sensor_async_subdev);
if (IS_ERR(s_asd)) {
ret = PTR_ERR(s_asd);
goto err_parse;
}
s_asd->csi2.port = vep.base.port;
s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
fwnode_handle_put(ep);
continue;
err_parse:
fwnode_handle_put(ep);
return ret;
}
/*
* Proceed even without sensors connected to allow the device to
* suspend.
*/
cio2->notifier.ops = &cio2_async_ops;
ret = v4l2_async_nf_register(&cio2->notifier);
if (ret)
dev_err(dev, "failed to register async notifier : %d\n", ret);
return ret;
}
/**************** Queue initialization ****************/
static const struct media_entity_operations cio2_media_ops = {
.link_validate = v4l2_subdev_link_validate,
};
static const struct media_entity_operations cio2_video_entity_ops = {
.link_validate = cio2_video_link_validate,
};
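/*
* Set up a single capture queue: default formats, the frame buffer pointer
* table (FBPT), the CSI-2 receiver subdev, the vb2 queue, the video node
* and the immutable link from the subdev source pad to the video node.
*/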
static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
{
static const u32 default_width = 1936;
static const u32 default_height = 1096;
const struct ipu3_cio2_fmt dflt_fmt = formats[0];
struct device *dev = &cio2->pci_dev->dev;
struct video_device *vdev = &q->vdev;
struct vb2_queue *vbq = &q->vbq;
struct v4l2_subdev *subdev = &q->subdev;
struct v4l2_mbus_framefmt *fmt;
int r;
/* Initialize miscellaneous variables */
mutex_init(&q->lock);
mutex_init(&q->subdev_lock);
/* Initialize formats to default values */
fmt = &q->subdev_fmt;
fmt->width = default_width;
fmt->height = default_height;
fmt->code = dflt_fmt.mbus_code;
fmt->field = V4L2_FIELD_NONE;
q->format.width = default_width;
q->format.height = default_height;
q->format.pixelformat = dflt_fmt.fourcc;
q->format.colorspace = V4L2_COLORSPACE_RAW;
q->format.field = V4L2_FIELD_NONE;
q->format.num_planes = 1;
q->format.plane_fmt[0].bytesperline =
cio2_bytesperline(q->format.width);
q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
q->format.height;
/* Initialize fbpt */
r = cio2_fbpt_init(cio2, q);
if (r)
goto fail_fbpt;
/* Initialize media entities */
q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
MEDIA_PAD_FL_MUST_CONNECT;
q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
subdev->entity.ops = &cio2_media_ops;
subdev->internal_ops = &cio2_subdev_internal_ops;
r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
if (r) {
dev_err(dev, "failed initialize subdev media entity (%d)\n", r);
goto fail_subdev_media_entity;
}
q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
vdev->entity.ops = &cio2_video_entity_ops;
r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
if (r) {
dev_err(dev, "failed initialize videodev media entity (%d)\n",
r);
goto fail_vdev_media_entity;
}
/* Initialize subdev */
v4l2_subdev_init(subdev, &cio2_subdev_ops);
subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
subdev->owner = THIS_MODULE;
snprintf(subdev->name, sizeof(subdev->name),
CIO2_ENTITY_NAME " %td", q - cio2->queue);
subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
v4l2_set_subdevdata(subdev, cio2);
r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
if (r) {
dev_err(dev, "failed initialize subdev (%d)\n", r);
goto fail_subdev;
}
/* Initialize vbq */
vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
vbq->ops = &cio2_vb2_ops;
vbq->mem_ops = &vb2_dma_sg_memops;
vbq->buf_struct_size = sizeof(struct cio2_buffer);
vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
vbq->min_buffers_needed = 1;
vbq->drv_priv = cio2;
vbq->lock = &q->lock;
r = vb2_queue_init(vbq);
if (r) {
dev_err(dev, "failed to initialize videobuf2 queue (%d)\n", r);
goto fail_subdev;
}
/* Initialize vdev */
snprintf(vdev->name, sizeof(vdev->name),
"%s %td", CIO2_NAME, q - cio2->queue);
vdev->release = video_device_release_empty;
vdev->fops = &cio2_v4l2_fops;
vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
vdev->lock = &cio2->lock;
vdev->v4l2_dev = &cio2->v4l2_dev;
vdev->queue = &q->vbq;
vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
video_set_drvdata(vdev, cio2);
r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (r) {
dev_err(dev, "failed to register video device (%d)\n", r);
goto fail_vdev;
}
/* Create link from CIO2 subdev to output node */
r = media_create_pad_link(
&subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
if (r)
goto fail_link;
return 0;
fail_link:
vb2_video_unregister_device(&q->vdev);
fail_vdev:
v4l2_device_unregister_subdev(subdev);
fail_subdev:
media_entity_cleanup(&vdev->entity);
fail_vdev_media_entity:
media_entity_cleanup(&subdev->entity);
fail_subdev_media_entity:
cio2_fbpt_exit(q, dev);
fail_fbpt:
mutex_destroy(&q->subdev_lock);
mutex_destroy(&q->lock);
return r;
}
static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
vb2_video_unregister_device(&q->vdev);
media_entity_cleanup(&q->vdev.entity);
v4l2_device_unregister_subdev(&q->subdev);
media_entity_cleanup(&q->subdev.entity);
cio2_fbpt_exit(q, &cio2->pci_dev->dev);
mutex_destroy(&q->subdev_lock);
mutex_destroy(&q->lock);
}
static int cio2_queues_init(struct cio2_device *cio2)
{
int i, r;
for (i = 0; i < CIO2_QUEUES; i++) {
r = cio2_queue_init(cio2, &cio2->queue[i]);
if (r)
break;
}
if (i == CIO2_QUEUES)
return 0;
for (i--; i >= 0; i--)
cio2_queue_exit(cio2, &cio2->queue[i]);
return r;
}
static void cio2_queues_exit(struct cio2_device *cio2)
{
unsigned int i;
for (i = 0; i < CIO2_QUEUES; i++)
cio2_queue_exit(cio2, &cio2->queue[i]);
}
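/*
* Return 0 if the fwnode (or its software-node secondary) exposes at least
* one graph endpoint, -EINVAL otherwise.
*/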
static int cio2_check_fwnode_graph(struct fwnode_handle *fwnode)
{
struct fwnode_handle *endpoint;
if (IS_ERR_OR_NULL(fwnode))
return -EINVAL;
endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (endpoint) {
fwnode_handle_put(endpoint);
return 0;
}
return cio2_check_fwnode_graph(fwnode->secondary);
}
/**************** PCI interface ****************/
static int cio2_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
struct device *dev = &pci_dev->dev;
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct cio2_device *cio2;
int r;
/*
* On some platforms no connections to sensors are defined in firmware,
* if the device has no endpoints then we can try to build those as
* software_nodes parsed from SSDB.
*/
r = cio2_check_fwnode_graph(fwnode);
if (r) {
if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) {
dev_err(dev, "fwnode graph has no endpoints connected\n");
return -EINVAL;
}
r = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
if (r)
return r;
}
cio2 = devm_kzalloc(dev, sizeof(*cio2), GFP_KERNEL);
if (!cio2)
return -ENOMEM;
cio2->pci_dev = pci_dev;
r = pcim_enable_device(pci_dev);
if (r) {
dev_err(dev, "failed to enable device (%d)\n", r);
return r;
}
dev_info(dev, "device 0x%x (rev: 0x%x)\n",
pci_dev->device, pci_dev->revision);
r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
if (r) {
dev_err(dev, "failed to remap I/O memory (%d)\n", r);
return -ENODEV;
}
cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];
pci_set_drvdata(pci_dev, cio2);
pci_set_master(pci_dev);
r = dma_set_mask(&pci_dev->dev, CIO2_DMA_MASK);
if (r) {
dev_err(dev, "failed to set DMA mask (%d)\n", r);
return -ENODEV;
}
r = pci_enable_msi(pci_dev);
if (r) {
dev_err(dev, "failed to enable MSI (%d)\n", r);
return r;
}
r = cio2_fbpt_init_dummy(cio2);
if (r)
return r;
mutex_init(&cio2->lock);
cio2->media_dev.dev = dev;
strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
sizeof(cio2->media_dev.model));
cio2->media_dev.hw_revision = 0;
media_device_init(&cio2->media_dev);
r = media_device_register(&cio2->media_dev);
if (r < 0)
goto fail_mutex_destroy;
cio2->v4l2_dev.mdev = &cio2->media_dev;
r = v4l2_device_register(dev, &cio2->v4l2_dev);
if (r) {
dev_err(dev, "failed to register V4L2 device (%d)\n", r);
goto fail_media_device_unregister;
}
r = cio2_queues_init(cio2);
if (r)
goto fail_v4l2_device_unregister;
v4l2_async_nf_init(&cio2->notifier, &cio2->v4l2_dev);
/* Register notifier for the subdevices we care about */
r = cio2_parse_firmware(cio2);
if (r)
goto fail_clean_notifier;
r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED,
CIO2_NAME, cio2);
if (r) {
dev_err(dev, "failed to request IRQ (%d)\n", r);
goto fail_clean_notifier;
}
pm_runtime_put_noidle(dev);
pm_runtime_allow(dev);
return 0;
fail_clean_notifier:
v4l2_async_nf_unregister(&cio2->notifier);
v4l2_async_nf_cleanup(&cio2->notifier);
cio2_queues_exit(cio2);
fail_v4l2_device_unregister:
v4l2_device_unregister(&cio2->v4l2_dev);
fail_media_device_unregister:
media_device_unregister(&cio2->media_dev);
media_device_cleanup(&cio2->media_dev);
fail_mutex_destroy:
mutex_destroy(&cio2->lock);
cio2_fbpt_exit_dummy(cio2);
return r;
}
static void cio2_pci_remove(struct pci_dev *pci_dev)
{
struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
media_device_unregister(&cio2->media_dev);
v4l2_async_nf_unregister(&cio2->notifier);
v4l2_async_nf_cleanup(&cio2->notifier);
cio2_queues_exit(cio2);
cio2_fbpt_exit_dummy(cio2);
v4l2_device_unregister(&cio2->v4l2_dev);
media_device_cleanup(&cio2->media_dev);
mutex_destroy(&cio2->lock);
pm_runtime_forbid(&pci_dev->dev);
pm_runtime_get_noresume(&pci_dev->dev);
}
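/*
* Runtime PM: request D0i3 entry from the CIO2 and program the PCI power
* state field in PMCSR to D3; runtime resume reverses both steps.
*/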
static int __maybe_unused cio2_runtime_suspend(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
void __iomem *const base = cio2->base;
u16 pm;
writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
dev_dbg(dev, "cio2 runtime suspend.\n");
pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
pm |= CIO2_PMCSR_D3;
pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
return 0;
}
static int __maybe_unused cio2_runtime_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
void __iomem *const base = cio2->base;
u16 pm;
writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
dev_dbg(dev, "cio2 runtime resume.\n");
pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
return 0;
}
/*
* Helper function to advance all the elements of a circular buffer by "start"
* positions
*/
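/*
* Illustration: with elems = 5 and start = 2 the entries {A B C D E} end up
* as {C D E A B}; the rotation is done in place by repeatedly swapping the
* two out-of-place chunks, so no temporary buffer is needed.
*/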
static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
{
struct {
size_t begin, end;
} arr[2] = {
{ 0, start - 1 },
{ start, elems - 1 },
};
#define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)
/* Loop as long as we have out-of-place entries */
while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
size_t size0, i;
/*
* Find the number of entries that can be arranged on this
* iteration.
*/
size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));
/* Swap the entries in two parts of the array. */
for (i = 0; i < size0; i++) {
u8 *d = ptr + elem_size * (arr[1].begin + i);
u8 *s = ptr + elem_size * (arr[0].begin + i);
size_t j;
for (j = 0; j < elem_size; j++)
swap(d[j], s[j]);
}
if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
/* The end of the first array remains unarranged. */
arr[0].begin += size0;
} else {
/*
* The first array is fully arranged so we proceed
* handling the next one.
*/
arr[0].begin = arr[1].begin;
arr[0].end = arr[1].begin + size0 - 1;
arr[1].begin += size0;
}
}
}
static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
{
unsigned int i, j;
for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
i++, j = (j + 1) % CIO2_MAX_BUFFERS)
if (q->bufs[j])
break;
if (i == CIO2_MAX_BUFFERS)
return;
if (j) {
arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
CIO2_MAX_BUFFERS, j);
arrange(q->bufs, sizeof(struct cio2_buffer *),
CIO2_MAX_BUFFERS, j);
}
/*
* DMA clears the valid bit when accessing the buffer.
* When stopping stream in suspend callback, some of the buffers
* may be in invalid state. After resume, when DMA meets the invalid
* buffer, it will halt and stop receiving new data.
* To avoid DMA halting, set the valid bit for all buffers in FBPT.
*/
for (i = 0; i < CIO2_MAX_BUFFERS; i++)
cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
}
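/*
* System suspend: stop the sensor and the CIO2 hardware, then rotate the
* FBPT so that the buffers still queued are processed first after resume.
*/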
static int __maybe_unused cio2_suspend(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
struct cio2_queue *q = cio2->cur_queue;
int r;
dev_dbg(dev, "cio2 suspend\n");
if (!cio2->streaming)
return 0;
/* Stop stream */
r = v4l2_subdev_call(q->sensor, video, s_stream, 0);
if (r) {
dev_err(dev, "failed to stop sensor streaming\n");
return r;
}
cio2_hw_exit(cio2, q);
synchronize_irq(pci_dev->irq);
pm_runtime_force_suspend(dev);
/*
* Upon resume, hw starts to process the fbpt entries from beginning,
* so relocate the queued buffs to the fbpt head before suspend.
*/
cio2_fbpt_rearrange(cio2, q);
q->bufs_first = 0;
q->bufs_next = 0;
return 0;
}
static int __maybe_unused cio2_resume(struct device *dev)
{
struct cio2_device *cio2 = dev_get_drvdata(dev);
struct cio2_queue *q = cio2->cur_queue;
int r;
dev_dbg(dev, "cio2 resume\n");
if (!cio2->streaming)
return 0;
/* Start stream */
r = pm_runtime_force_resume(dev);
if (r < 0) {
dev_err(dev, "failed to set power %d\n", r);
return r;
}
r = cio2_hw_init(cio2, q);
if (r) {
dev_err(dev, "fail to init cio2 hw\n");
return r;
}
r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
if (r) {
dev_err(dev, "fail to start sensor streaming\n");
cio2_hw_exit(cio2, q);
}
return r;
}
static const struct dev_pm_ops cio2_pm_ops = {
SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
};
static const struct pci_device_id cio2_pci_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
{ }
};
MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
static struct pci_driver cio2_pci_driver = {
.name = CIO2_NAME,
.id_table = cio2_pci_id_table,
.probe = cio2_pci_probe,
.remove = cio2_pci_remove,
.driver = {
.pm = &cio2_pm_ops,
},
};
module_pci_driver(cio2_pci_driver);
MODULE_AUTHOR("Tuukka Toivonen <[email protected]>");
MODULE_AUTHOR("Tianshu Qiu <[email protected]>");
MODULE_AUTHOR("Jian Xu Zheng");
MODULE_AUTHOR("Yuning Pu <[email protected]>");
MODULE_AUTHOR("Yong Zhi <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPU3 CIO2 driver");
MODULE_IMPORT_NS(INTEL_IPU_BRIDGE);
| linux-master | drivers/media/pci/intel/ipu3/ipu3-cio2.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
ioctl system call
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2005-2007 Hans Verkuil <[email protected]>
*/
#include "ivtv-driver.h"
#include "ivtv-version.h"
#include "ivtv-mailbox.h"
#include "ivtv-i2c.h"
#include "ivtv-queue.h"
#include "ivtv-fileops.h"
#include "ivtv-vbi.h"
#include "ivtv-routing.h"
#include "ivtv-streams.h"
#include "ivtv-yuv.h"
#include "ivtv-ioctl.h"
#include "ivtv-gpio.h"
#include "ivtv-controls.h"
#include "ivtv-cards.h"
#include <media/i2c/saa7127.h>
#include <media/tveeprom.h>
#include <media/v4l2-event.h>
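/* Map a V4L2 sliced VBI service to the corresponding ivtv firmware type. */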
u16 ivtv_service2vbi(int type)
{
switch (type) {
case V4L2_SLICED_TELETEXT_B:
return IVTV_SLICED_TYPE_TELETEXT_B;
case V4L2_SLICED_CAPTION_525:
return IVTV_SLICED_TYPE_CAPTION_525;
case V4L2_SLICED_WSS_625:
return IVTV_SLICED_TYPE_WSS_625;
case V4L2_SLICED_VPS:
return IVTV_SLICED_TYPE_VPS;
default:
return 0;
}
}
static int valid_service_line(int field, int line, int is_pal)
{
return (is_pal && line >= 6 && (line != 23 || field == 0)) ||
(!is_pal && line >= 10 && line < 22);
}
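/*
* Pick one service from 'set' that may be captured on the given field and
* line: closed captions on line 21 for 60 Hz, VPS on line 16 and WSS on
* line 23 of the first field for 50 Hz, otherwise the lowest set bit.
*/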
static u16 select_service_from_set(int field, int line, u16 set, int is_pal)
{
u16 valid_set = (is_pal ? V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525);
int i;
set = set & valid_set;
if (set == 0 || !valid_service_line(field, line, is_pal)) {
return 0;
}
if (!is_pal) {
if (line == 21 && (set & V4L2_SLICED_CAPTION_525))
return V4L2_SLICED_CAPTION_525;
}
else {
if (line == 16 && field == 0 && (set & V4L2_SLICED_VPS))
return V4L2_SLICED_VPS;
if (line == 23 && field == 0 && (set & V4L2_SLICED_WSS_625))
return V4L2_SLICED_WSS_625;
if (line == 23)
return 0;
}
for (i = 0; i < 32; i++) {
if (BIT(i) & set)
return BIT(i);
}
return 0;
}
void ivtv_expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal)
{
u16 set = fmt->service_set;
int f, l;
fmt->service_set = 0;
for (f = 0; f < 2; f++) {
for (l = 0; l < 24; l++) {
fmt->service_lines[f][l] = select_service_from_set(f, l, set, is_pal);
}
}
}
static void check_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal)
{
int f, l;
for (f = 0; f < 2; f++) {
for (l = 0; l < 24; l++) {
fmt->service_lines[f][l] = select_service_from_set(f, l, fmt->service_lines[f][l], is_pal);
}
}
}
u16 ivtv_get_service_set(struct v4l2_sliced_vbi_format *fmt)
{
int f, l;
u16 set = 0;
for (f = 0; f < 2; f++) {
for (l = 0; l < 24; l++) {
set |= fmt->service_lines[f][l];
}
}
return set;
}
void ivtv_set_osd_alpha(struct ivtv *itv)
{
ivtv_vapi(itv, CX2341X_OSD_SET_GLOBAL_ALPHA, 3,
itv->osd_global_alpha_state, itv->osd_global_alpha, !itv->osd_local_alpha_state);
ivtv_vapi(itv, CX2341X_OSD_SET_CHROMA_KEY, 2, itv->osd_chroma_key_state, itv->osd_chroma_key);
}
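/*
* Program the decoder playback speed. Speeds are given in 0.1% units of
* normal playback (1000 == 1x), negative values play backwards and
* +1/-1 request single stepping.
*/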
int ivtv_set_speed(struct ivtv *itv, int speed)
{
u32 data[CX2341X_MBOX_MAX_DATA];
int single_step = (speed == 1 || speed == -1);
DEFINE_WAIT(wait);
if (speed == 0) speed = 1000;
/* No change? */
if (speed == itv->speed && !single_step)
return 0;
if (single_step && (speed < 0) == (itv->speed < 0)) {
/* Single step video and no need to change direction */
ivtv_vapi(itv, CX2341X_DEC_STEP_VIDEO, 1, 0);
itv->speed = speed;
return 0;
}
if (single_step)
/* Need to change direction */
speed = speed < 0 ? -1000 : 1000;
data[0] = (speed > 1000 || speed < -1000) ? 0x80000000 : 0;
data[0] |= (speed > 1000 || speed < -1500) ? 0x40000000 : 0;
data[1] = (speed < 0);
data[2] = speed < 0 ? 3 : 7;
data[3] = v4l2_ctrl_g_ctrl(itv->cxhdl.video_b_frames);
data[4] = (speed == 1500 || speed == 500) ? itv->speed_mute_audio : 0;
data[5] = 0;
data[6] = 0;
if (speed == 1500 || speed == -1500) data[0] |= 1;
else if (speed == 2000 || speed == -2000) data[0] |= 2;
else if (speed > -1000 && speed < 0) data[0] |= (-1000 / speed);
else if (speed < 1000 && speed > 0) data[0] |= (1000 / speed);
/* If not decoding, just change speed setting */
if (atomic_read(&itv->decoding) > 0) {
int got_sig = 0;
/* Stop all DMA and decoding activity */
ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1, 0);
/* Wait for any DMA to finish */
mutex_unlock(&itv->serialize_lock);
prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
while (test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
got_sig = signal_pending(current);
if (got_sig)
break;
got_sig = 0;
schedule();
}
finish_wait(&itv->dma_waitq, &wait);
mutex_lock(&itv->serialize_lock);
if (got_sig)
return -EINTR;
/* Change Speed safely */
ivtv_api(itv, CX2341X_DEC_SET_PLAYBACK_SPEED, 7, data);
IVTV_DEBUG_INFO("Setting Speed to 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
data[0], data[1], data[2], data[3], data[4], data[5], data[6]);
}
if (single_step) {
speed = (speed < 0) ? -1 : 1;
ivtv_vapi(itv, CX2341X_DEC_STEP_VIDEO, 1, 0);
}
itv->speed = speed;
return 0;
}
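/*
* Clamp a requested playback speed to the nearest value the firmware can
* actually handle, preserving the direction of the request.
*/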
static int ivtv_validate_speed(int cur_speed, int new_speed)
{
int fact = new_speed < 0 ? -1 : 1;
int s;
if (cur_speed == 0)
cur_speed = 1000;
if (new_speed < 0)
new_speed = -new_speed;
if (cur_speed < 0)
cur_speed = -cur_speed;
if (cur_speed <= new_speed) {
if (new_speed > 1500)
return fact * 2000;
if (new_speed > 1000)
return fact * 1500;
}
else {
if (new_speed >= 2000)
return fact * 2000;
if (new_speed >= 1500)
return fact * 1500;
if (new_speed >= 1000)
return fact * 1000;
}
if (new_speed == 0)
return 1000;
if (new_speed == 1 || new_speed == 1000)
return fact * new_speed;
s = new_speed;
new_speed = 1000 / new_speed;
if (1000 / cur_speed == new_speed)
new_speed += (cur_speed < s) ? -1 : 1;
if (new_speed > 60) return 1000 / (fact * 60);
return 1000 / (fact * new_speed);
}
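/*
* Common handler for VIDIOC_DECODER_CMD and VIDIOC_TRY_DECODER_CMD:
* validate and normalize the command, and unless 'try' is set start, stop,
* pause or resume MPEG decoding accordingly.
*/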
static int ivtv_video_command(struct ivtv *itv, struct ivtv_open_id *id,
struct v4l2_decoder_cmd *dc, int try)
{
struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return -EINVAL;
switch (dc->cmd) {
case V4L2_DEC_CMD_START: {
dc->flags &= V4L2_DEC_CMD_START_MUTE_AUDIO;
dc->start.speed = ivtv_validate_speed(itv->speed, dc->start.speed);
if (dc->start.speed < 0)
dc->start.format = V4L2_DEC_START_FMT_GOP;
else
dc->start.format = V4L2_DEC_START_FMT_NONE;
if (dc->start.speed != 500 && dc->start.speed != 1500)
dc->flags = dc->start.speed == 1000 ? 0 :
V4L2_DEC_CMD_START_MUTE_AUDIO;
if (try) break;
itv->speed_mute_audio = dc->flags & V4L2_DEC_CMD_START_MUTE_AUDIO;
if (ivtv_set_output_mode(itv, OUT_MPG) != OUT_MPG)
return -EBUSY;
if (test_and_clear_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags)) {
/* forces ivtv_set_speed to be called */
itv->speed = 0;
}
return ivtv_start_decoding(id, dc->start.speed);
}
case V4L2_DEC_CMD_STOP:
dc->flags &= V4L2_DEC_CMD_STOP_IMMEDIATELY | V4L2_DEC_CMD_STOP_TO_BLACK;
if (dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY)
dc->stop.pts = 0;
if (try) break;
if (atomic_read(&itv->decoding) == 0)
return 0;
if (itv->output_mode != OUT_MPG)
return -EBUSY;
itv->output_mode = OUT_NONE;
return ivtv_stop_v4l2_decode_stream(s, dc->flags, dc->stop.pts);
case V4L2_DEC_CMD_PAUSE:
dc->flags &= V4L2_DEC_CMD_PAUSE_TO_BLACK;
if (try) break;
if (!atomic_read(&itv->decoding))
return -EPERM;
if (itv->output_mode != OUT_MPG)
return -EBUSY;
if (atomic_read(&itv->decoding) > 0) {
ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1,
(dc->flags & V4L2_DEC_CMD_PAUSE_TO_BLACK) ? 1 : 0);
set_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags);
}
break;
case V4L2_DEC_CMD_RESUME:
dc->flags = 0;
if (try) break;
if (!atomic_read(&itv->decoding))
return -EPERM;
if (itv->output_mode != OUT_MPG)
return -EBUSY;
if (test_and_clear_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags)) {
int speed = itv->speed;
itv->speed = 0;
return ivtv_start_decoding(id, speed);
}
break;
default:
return -EINVAL;
}
return 0;
}
static int ivtv_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct ivtv *itv = fh2id(fh)->itv;
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
vbifmt->reserved[0] = 0;
vbifmt->reserved[1] = 0;
if (!(itv->v4l2_cap & V4L2_CAP_SLICED_VBI_OUTPUT))
return -EINVAL;
vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36;
memset(vbifmt->service_lines, 0, sizeof(vbifmt->service_lines));
if (itv->is_60hz) {
vbifmt->service_lines[0][21] = V4L2_SLICED_CAPTION_525;
vbifmt->service_lines[1][21] = V4L2_SLICED_CAPTION_525;
} else {
vbifmt->service_lines[0][23] = V4L2_SLICED_WSS_625;
vbifmt->service_lines[0][16] = V4L2_SLICED_VPS;
}
vbifmt->service_set = ivtv_get_service_set(vbifmt);
return 0;
}
static int ivtv_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
pixfmt->width = itv->cxhdl.width;
pixfmt->height = itv->cxhdl.height;
pixfmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
pixfmt->field = V4L2_FIELD_INTERLACED;
if (id->type == IVTV_ENC_STREAM_TYPE_YUV) {
pixfmt->pixelformat = V4L2_PIX_FMT_NV12_16L16;
/* YUV size is (Y=(h*720) + UV=(h*(720/2))) */
pixfmt->sizeimage = pixfmt->height * 720 * 3 / 2;
pixfmt->bytesperline = 720;
} else {
pixfmt->pixelformat = V4L2_PIX_FMT_MPEG;
pixfmt->sizeimage = 128 * 1024;
pixfmt->bytesperline = 0;
}
return 0;
}
static int ivtv_g_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct ivtv *itv = fh2id(fh)->itv;
struct v4l2_vbi_format *vbifmt = &fmt->fmt.vbi;
vbifmt->sampling_rate = 27000000;
vbifmt->offset = 248;
vbifmt->samples_per_line = itv->vbi.raw_decoder_line_size - 4;
vbifmt->sample_format = V4L2_PIX_FMT_GREY;
vbifmt->start[0] = itv->vbi.start[0];
vbifmt->start[1] = itv->vbi.start[1];
vbifmt->count[0] = vbifmt->count[1] = itv->vbi.count;
vbifmt->flags = 0;
vbifmt->reserved[0] = 0;
vbifmt->reserved[1] = 0;
return 0;
}
static int ivtv_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
vbifmt->reserved[0] = 0;
vbifmt->reserved[1] = 0;
vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36;
if (id->type == IVTV_DEC_STREAM_TYPE_VBI) {
vbifmt->service_set = itv->is_50hz ? V4L2_SLICED_VBI_625 :
V4L2_SLICED_VBI_525;
ivtv_expand_service_set(vbifmt, itv->is_50hz);
vbifmt->service_set = ivtv_get_service_set(vbifmt);
return 0;
}
v4l2_subdev_call(itv->sd_video, vbi, g_sliced_fmt, vbifmt);
vbifmt->service_set = ivtv_get_service_set(vbifmt);
return 0;
}
static int ivtv_g_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return -EINVAL;
pixfmt->width = itv->main_rect.width;
pixfmt->height = itv->main_rect.height;
pixfmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
pixfmt->field = V4L2_FIELD_INTERLACED;
if (id->type == IVTV_DEC_STREAM_TYPE_YUV) {
switch (itv->yuv_info.lace_mode & IVTV_YUV_MODE_MASK) {
case IVTV_YUV_MODE_INTERLACED:
pixfmt->field = (itv->yuv_info.lace_mode & IVTV_YUV_SYNC_MASK) ?
V4L2_FIELD_INTERLACED_BT : V4L2_FIELD_INTERLACED_TB;
break;
case IVTV_YUV_MODE_PROGRESSIVE:
pixfmt->field = V4L2_FIELD_NONE;
break;
default:
pixfmt->field = V4L2_FIELD_ANY;
break;
}
pixfmt->pixelformat = V4L2_PIX_FMT_NV12_16L16;
pixfmt->bytesperline = 720;
pixfmt->width = itv->yuv_info.v4l2_src_w;
pixfmt->height = itv->yuv_info.v4l2_src_h;
/* YUV size is (Y=(h*w) + UV=(h*(w/2))) */
pixfmt->sizeimage =
1080 * ((pixfmt->height + 31) & ~31);
} else {
pixfmt->pixelformat = V4L2_PIX_FMT_MPEG;
pixfmt->sizeimage = 128 * 1024;
pixfmt->bytesperline = 0;
}
return 0;
}
static int ivtv_g_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct ivtv *itv = fh2id(fh)->itv;
struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
struct v4l2_window *winfmt = &fmt->fmt.win;
if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
return -EINVAL;
if (!itv->osd_video_pbase)
return -EINVAL;
winfmt->chromakey = itv->osd_chroma_key;
winfmt->global_alpha = itv->osd_global_alpha;
winfmt->field = V4L2_FIELD_INTERLACED;
winfmt->clips = NULL;
winfmt->clipcount = 0;
winfmt->bitmap = NULL;
winfmt->w.top = winfmt->w.left = 0;
winfmt->w.width = itv->osd_rect.width;
winfmt->w.height = itv->osd_rect.height;
return 0;
}
static int ivtv_try_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
return ivtv_g_fmt_sliced_vbi_out(file, fh, fmt);
}
static int ivtv_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
int w = fmt->fmt.pix.width;
int h = fmt->fmt.pix.height;
int min_h = 2;
w = min(w, 720);
w = max(w, 2);
if (id->type == IVTV_ENC_STREAM_TYPE_YUV) {
/* YUV height must be a multiple of 32 */
h &= ~0x1f;
min_h = 32;
}
h = min(h, itv->is_50hz ? 576 : 480);
h = max(h, min_h);
ivtv_g_fmt_vid_cap(file, fh, fmt);
fmt->fmt.pix.width = w;
fmt->fmt.pix.height = h;
return 0;
}
static int ivtv_try_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
return ivtv_g_fmt_vbi_cap(file, fh, fmt);
}
static int ivtv_try_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
if (id->type == IVTV_DEC_STREAM_TYPE_VBI)
return ivtv_g_fmt_sliced_vbi_cap(file, fh, fmt);
/* set sliced VBI capture format */
vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36;
vbifmt->reserved[0] = 0;
vbifmt->reserved[1] = 0;
if (vbifmt->service_set)
ivtv_expand_service_set(vbifmt, itv->is_50hz);
check_service_set(vbifmt, itv->is_50hz);
vbifmt->service_set = ivtv_get_service_set(vbifmt);
return 0;
}
static int ivtv_try_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct ivtv_open_id *id = fh2id(fh);
s32 w = fmt->fmt.pix.width;
s32 h = fmt->fmt.pix.height;
int field = fmt->fmt.pix.field;
int ret = ivtv_g_fmt_vid_out(file, fh, fmt);
w = min(w, 720);
w = max(w, 2);
/* Why can the height be 576 even when the output is NTSC?
Internally the buffers of the PVR350 are always set to 720x576. The
decoded video frame will always be placed in the top left corner of
this buffer. For any video which is not 720x576, the buffer will
then be cropped to remove the unused right and lower areas, with
the remaining image being scaled by the hardware to fit the display
area. The video can be scaled both up and down, so a 720x480 video
can be displayed full-screen on PAL and a 720x576 video can be
displayed without cropping on NTSC.
Note that the scaling only occurs on the video stream, the osd
resolution is locked to the broadcast standard and not scaled.
Thanks to Ian Armstrong for this explanation. */
h = min(h, 576);
h = max(h, 2);
if (id->type == IVTV_DEC_STREAM_TYPE_YUV)
fmt->fmt.pix.field = field;
fmt->fmt.pix.width = w;
fmt->fmt.pix.height = h;
return ret;
}
static int ivtv_try_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct ivtv *itv = fh2id(fh)->itv;
struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
u32 chromakey = fmt->fmt.win.chromakey;
u8 global_alpha = fmt->fmt.win.global_alpha;
if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
return -EINVAL;
if (!itv->osd_video_pbase)
return -EINVAL;
ivtv_g_fmt_vid_out_overlay(file, fh, fmt);
fmt->fmt.win.chromakey = chromakey;
fmt->fmt.win.global_alpha = global_alpha;
return 0;
}
static int ivtv_s_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
return ivtv_g_fmt_sliced_vbi_out(file, fh, fmt);
}
static int ivtv_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
int ret = ivtv_try_fmt_vid_cap(file, fh, fmt);
int w = fmt->fmt.pix.width;
int h = fmt->fmt.pix.height;
if (ret)
return ret;
if (itv->cxhdl.width == w && itv->cxhdl.height == h)
return 0;
if (atomic_read(&itv->capturing) > 0)
return -EBUSY;
itv->cxhdl.width = w;
itv->cxhdl.height = h;
if (v4l2_ctrl_g_ctrl(itv->cxhdl.video_encoding) == V4L2_MPEG_VIDEO_ENCODING_MPEG_1)
fmt->fmt.pix.width /= 2;
format.format.width = fmt->fmt.pix.width;
format.format.height = h;
format.format.code = MEDIA_BUS_FMT_FIXED;
v4l2_subdev_call(itv->sd_video, pad, set_fmt, NULL, &format);
return ivtv_g_fmt_vid_cap(file, fh, fmt);
}
static int ivtv_s_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct ivtv *itv = fh2id(fh)->itv;
if (!ivtv_raw_vbi(itv) && atomic_read(&itv->capturing) > 0)
return -EBUSY;
itv->vbi.sliced_in->service_set = 0;
itv->vbi.in.type = V4L2_BUF_TYPE_VBI_CAPTURE;
v4l2_subdev_call(itv->sd_video, vbi, s_raw_fmt, &fmt->fmt.vbi);
return ivtv_g_fmt_vbi_cap(file, fh, fmt);
}
static int ivtv_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
int ret = ivtv_try_fmt_sliced_vbi_cap(file, fh, fmt);
if (ret || id->type == IVTV_DEC_STREAM_TYPE_VBI)
return ret;
check_service_set(vbifmt, itv->is_50hz);
if (ivtv_raw_vbi(itv) && atomic_read(&itv->capturing) > 0)
return -EBUSY;
itv->vbi.in.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
v4l2_subdev_call(itv->sd_video, vbi, s_sliced_fmt, vbifmt);
memcpy(itv->vbi.sliced_in, vbifmt, sizeof(*itv->vbi.sliced_in));
return 0;
}
static int ivtv_s_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
struct yuv_playback_info *yi = &itv->yuv_info;
int ret = ivtv_try_fmt_vid_out(file, fh, fmt);
if (ret)
return ret;
if (id->type != IVTV_DEC_STREAM_TYPE_YUV)
return 0;
/* Return now if we already have some frame data */
if (yi->stream_size)
return -EBUSY;
yi->v4l2_src_w = fmt->fmt.pix.width;
yi->v4l2_src_h = fmt->fmt.pix.height;
switch (fmt->fmt.pix.field) {
case V4L2_FIELD_NONE:
yi->lace_mode = IVTV_YUV_MODE_PROGRESSIVE;
break;
case V4L2_FIELD_ANY:
yi->lace_mode = IVTV_YUV_MODE_AUTO;
break;
case V4L2_FIELD_INTERLACED_BT:
yi->lace_mode =
IVTV_YUV_MODE_INTERLACED|IVTV_YUV_SYNC_ODD;
break;
case V4L2_FIELD_INTERLACED_TB:
default:
yi->lace_mode = IVTV_YUV_MODE_INTERLACED;
break;
}
yi->lace_sync_field = (yi->lace_mode & IVTV_YUV_SYNC_MASK) == IVTV_YUV_SYNC_EVEN ? 0 : 1;
if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags))
itv->dma_data_req_size =
1080 * ((yi->v4l2_src_h + 31) & ~31);
return 0;
}
static int ivtv_s_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct ivtv *itv = fh2id(fh)->itv;
int ret = ivtv_try_fmt_vid_out_overlay(file, fh, fmt);
if (ret == 0) {
itv->osd_chroma_key = fmt->fmt.win.chromakey;
itv->osd_global_alpha = fmt->fmt.win.global_alpha;
ivtv_set_osd_alpha(itv);
}
return ret;
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
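/*
* Read or write a 32-bit chip register, translating the debug register
* offset into the register, decoder or encoder aperture.
*/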
static int ivtv_itvc(struct ivtv *itv, bool get, u64 reg, u64 *val)
{
volatile u8 __iomem *reg_start;
if (reg & 0x3)
return -EINVAL;
if (reg >= IVTV_REG_OFFSET && reg < IVTV_REG_OFFSET + IVTV_REG_SIZE)
reg_start = itv->reg_mem - IVTV_REG_OFFSET;
else if (itv->has_cx23415 && reg >= IVTV_DECODER_OFFSET &&
reg < IVTV_DECODER_OFFSET + IVTV_DECODER_SIZE)
reg_start = itv->dec_mem - IVTV_DECODER_OFFSET;
else if (reg < IVTV_ENCODER_SIZE)
reg_start = itv->enc_mem;
else
return -EINVAL;
if (get)
*val = readl(reg + reg_start);
else
writel(*val, reg + reg_start);
return 0;
}
static int ivtv_g_register(struct file *file, void *fh, struct v4l2_dbg_register *reg)
{
struct ivtv *itv = fh2id(fh)->itv;
reg->size = 4;
return ivtv_itvc(itv, true, reg->reg, &reg->val);
}
static int ivtv_s_register(struct file *file, void *fh, const struct v4l2_dbg_register *reg)
{
struct ivtv *itv = fh2id(fh)->itv;
u64 val = reg->val;
return ivtv_itvc(itv, false, reg->reg, &val);
}
#endif
static int ivtv_querycap(struct file *file, void *fh, struct v4l2_capability *vcap)
{
struct ivtv_open_id *id = fh2id(file->private_data);
struct ivtv *itv = id->itv;
strscpy(vcap->driver, IVTV_DRIVER_NAME, sizeof(vcap->driver));
strscpy(vcap->card, itv->card_name, sizeof(vcap->card));
vcap->capabilities = itv->v4l2_cap | V4L2_CAP_DEVICE_CAPS;
return 0;
}
static int ivtv_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
{
struct ivtv *itv = fh2id(fh)->itv;
return ivtv_get_audio_input(itv, vin->index, vin);
}
static int ivtv_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
{
struct ivtv *itv = fh2id(fh)->itv;
vin->index = itv->audio_input;
return ivtv_get_audio_input(itv, vin->index, vin);
}
static int ivtv_s_audio(struct file *file, void *fh, const struct v4l2_audio *vout)
{
struct ivtv *itv = fh2id(fh)->itv;
if (vout->index >= itv->nof_audio_inputs)
return -EINVAL;
itv->audio_input = vout->index;
ivtv_audio_set_io(itv);
return 0;
}
static int ivtv_enumaudout(struct file *file, void *fh, struct v4l2_audioout *vin)
{
struct ivtv *itv = fh2id(fh)->itv;
/* set it to defaults from our table */
return ivtv_get_audio_output(itv, vin->index, vin);
}
static int ivtv_g_audout(struct file *file, void *fh, struct v4l2_audioout *vin)
{
struct ivtv *itv = fh2id(fh)->itv;
vin->index = 0;
return ivtv_get_audio_output(itv, vin->index, vin);
}
static int ivtv_s_audout(struct file *file, void *fh, const struct v4l2_audioout *vout)
{
struct ivtv *itv = fh2id(fh)->itv;
if (itv->card->video_outputs == NULL || vout->index != 0)
return -EINVAL;
return 0;
}
static int ivtv_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
{
struct ivtv *itv = fh2id(fh)->itv;
/* set it to defaults from our table */
return ivtv_get_input(itv, vin->index, vin);
}
static int ivtv_enum_output(struct file *file, void *fh, struct v4l2_output *vout)
{
struct ivtv *itv = fh2id(fh)->itv;
return ivtv_get_output(itv, vout->index, vout);
}
static int ivtv_g_pixelaspect(struct file *file, void *fh,
int type, struct v4l2_fract *f)
{
struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
f->numerator = itv->is_50hz ? 54 : 11;
f->denominator = itv->is_50hz ? 59 : 10;
} else if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
f->numerator = itv->is_out_50hz ? 54 : 11;
f->denominator = itv->is_out_50hz ? 59 : 10;
} else {
return -EINVAL;
}
return 0;
}
static int ivtv_s_selection(struct file *file, void *fh,
struct v4l2_selection *sel)
{
struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
struct yuv_playback_info *yi = &itv->yuv_info;
struct v4l2_rect r = { 0, 0, 720, 0 };
int streamtype = id->type;
if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return -EINVAL;
if (sel->target != V4L2_SEL_TGT_COMPOSE)
return -EINVAL;
r.height = itv->is_out_50hz ? 576 : 480;
if (streamtype == IVTV_DEC_STREAM_TYPE_YUV && yi->track_osd) {
r.width = yi->osd_full_w;
r.height = yi->osd_full_h;
}
sel->r.width = clamp(sel->r.width, 16U, r.width);
sel->r.height = clamp(sel->r.height, 16U, r.height);
sel->r.left = clamp_t(unsigned, sel->r.left, 0, r.width - sel->r.width);
sel->r.top = clamp_t(unsigned, sel->r.top, 0, r.height - sel->r.height);
if (streamtype == IVTV_DEC_STREAM_TYPE_YUV) {
yi->main_rect = sel->r;
return 0;
}
if (!ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4,
sel->r.width, sel->r.height, sel->r.left, sel->r.top)) {
itv->main_rect = sel->r;
return 0;
}
return -EINVAL;
}
static int ivtv_g_selection(struct file *file, void *fh,
struct v4l2_selection *sel)
{
struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
struct yuv_playback_info *yi = &itv->yuv_info;
struct v4l2_rect r = { 0, 0, 720, 0 };
int streamtype = id->type;
if (sel->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
switch (sel->target) {
case V4L2_SEL_TGT_CROP_DEFAULT:
case V4L2_SEL_TGT_CROP_BOUNDS:
sel->r.top = sel->r.left = 0;
sel->r.width = 720;
sel->r.height = itv->is_50hz ? 576 : 480;
return 0;
default:
return -EINVAL;
}
}
if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return -EINVAL;
switch (sel->target) {
case V4L2_SEL_TGT_COMPOSE:
if (streamtype == IVTV_DEC_STREAM_TYPE_YUV)
sel->r = yi->main_rect;
else
sel->r = itv->main_rect;
return 0;
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
r.height = itv->is_out_50hz ? 576 : 480;
if (streamtype == IVTV_DEC_STREAM_TYPE_YUV && yi->track_osd) {
r.width = yi->osd_full_w;
r.height = yi->osd_full_h;
}
sel->r = r;
return 0;
}
return -EINVAL;
}
static int ivtv_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt)
{
static const struct v4l2_fmtdesc hm12 = {
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
.description = "HM12 (YUV 4:2:0)",
.pixelformat = V4L2_PIX_FMT_NV12_16L16,
};
static const struct v4l2_fmtdesc mpeg = {
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
.flags = V4L2_FMT_FLAG_COMPRESSED,
.description = "MPEG",
.pixelformat = V4L2_PIX_FMT_MPEG,
};
struct ivtv *itv = fh2id(fh)->itv;
struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
if (fmt->index)
return -EINVAL;
if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
*fmt = mpeg;
else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
*fmt = hm12;
else
return -EINVAL;
return 0;
}
static int ivtv_enum_fmt_vid_out(struct file *file, void *fh, struct v4l2_fmtdesc *fmt)
{
static const struct v4l2_fmtdesc hm12 = {
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
.description = "HM12 (YUV 4:2:0)",
.pixelformat = V4L2_PIX_FMT_NV12_16L16,
};
static const struct v4l2_fmtdesc mpeg = {
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
.flags = V4L2_FMT_FLAG_COMPRESSED,
.description = "MPEG",
.pixelformat = V4L2_PIX_FMT_MPEG,
};
struct ivtv *itv = fh2id(fh)->itv;
struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
if (fmt->index)
return -EINVAL;
if (s->type == IVTV_DEC_STREAM_TYPE_MPG)
*fmt = mpeg;
else if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
*fmt = hm12;
else
return -EINVAL;
return 0;
}
static int ivtv_g_input(struct file *file, void *fh, unsigned int *i)
{
struct ivtv *itv = fh2id(fh)->itv;
*i = itv->active_input;
return 0;
}
int ivtv_s_input(struct file *file, void *fh, unsigned int inp)
{
struct ivtv *itv = fh2id(fh)->itv;
v4l2_std_id std;
int i;
if (inp >= itv->nof_inputs)
return -EINVAL;
if (inp == itv->active_input) {
IVTV_DEBUG_INFO("Input unchanged\n");
return 0;
}
if (atomic_read(&itv->capturing) > 0) {
return -EBUSY;
}
IVTV_DEBUG_INFO("Changing input from %d to %d\n",
itv->active_input, inp);
itv->active_input = inp;
/* Set the audio input to whatever is appropriate for the
input type. */
itv->audio_input = itv->card->video_inputs[inp].audio_index;
if (itv->card->video_inputs[inp].video_type == IVTV_CARD_INPUT_VID_TUNER)
std = itv->tuner_std;
else
std = V4L2_STD_ALL;
for (i = 0; i <= IVTV_ENC_STREAM_TYPE_VBI; i++)
itv->streams[i].vdev.tvnorms = std;
/* prevent others from messing with the streams until
we're finished changing inputs. */
ivtv_mute(itv);
ivtv_video_set_io(itv);
ivtv_audio_set_io(itv);
ivtv_unmute(itv);
return 0;
}
static int ivtv_g_output(struct file *file, void *fh, unsigned int *i)
{
struct ivtv *itv = fh2id(fh)->itv;
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return -EINVAL;
*i = itv->active_output;
return 0;
}
static int ivtv_s_output(struct file *file, void *fh, unsigned int outp)
{
struct ivtv *itv = fh2id(fh)->itv;
if (outp >= itv->card->nof_outputs)
return -EINVAL;
if (outp == itv->active_output) {
IVTV_DEBUG_INFO("Output unchanged\n");
return 0;
}
IVTV_DEBUG_INFO("Changing output from %d to %d\n",
itv->active_output, outp);
itv->active_output = outp;
ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_routing,
SAA7127_INPUT_TYPE_NORMAL,
itv->card->video_outputs[outp].video_output, 0);
return 0;
}
static int ivtv_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
{
struct ivtv *itv = fh2id(fh)->itv;
struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
if (s->vdev.vfl_dir)
return -ENOTTY;
if (vf->tuner != 0)
return -EINVAL;
ivtv_call_all(itv, tuner, g_frequency, vf);
return 0;
}
int ivtv_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
{
struct ivtv *itv = fh2id(fh)->itv;
struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
if (s->vdev.vfl_dir)
return -ENOTTY;
if (vf->tuner != 0)
return -EINVAL;
ivtv_mute(itv);
IVTV_DEBUG_INFO("v4l2 ioctl: set frequency %d\n", vf->frequency);
ivtv_call_all(itv, tuner, s_frequency, vf);
ivtv_unmute(itv);
return 0;
}
static int ivtv_g_std(struct file *file, void *fh, v4l2_std_id *std)
{
struct ivtv *itv = fh2id(fh)->itv;
*std = itv->std;
return 0;
}
void ivtv_s_std_enc(struct ivtv *itv, v4l2_std_id std)
{
itv->std = std;
itv->is_60hz = (std & V4L2_STD_525_60) ? 1 : 0;
itv->is_50hz = !itv->is_60hz;
cx2341x_handler_set_50hz(&itv->cxhdl, itv->is_50hz);
itv->cxhdl.width = 720;
itv->cxhdl.height = itv->is_50hz ? 576 : 480;
itv->vbi.count = itv->is_50hz ? 18 : 12;
itv->vbi.start[0] = itv->is_50hz ? 6 : 10;
itv->vbi.start[1] = itv->is_50hz ? 318 : 273;
if (itv->hw_flags & IVTV_HW_CX25840)
itv->vbi.sliced_decoder_line_size = itv->is_60hz ? 272 : 284;
/* Tuner */
ivtv_call_all(itv, video, s_std, itv->std);
}
void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std)
{
struct yuv_playback_info *yi = &itv->yuv_info;
DEFINE_WAIT(wait);
int f;
/* set display standard */
itv->std_out = std;
itv->is_out_60hz = (std & V4L2_STD_525_60) ? 1 : 0;
itv->is_out_50hz = !itv->is_out_60hz;
ivtv_call_all(itv, video, s_std_output, itv->std_out);
/*
* The next firmware call is time sensitive. Time it to
* avoid risk of a hard lock, by trying to ensure the call
* happens within the first 100 lines of the top field.
* Make 4 attempts to sync to the decoder before giving up.
*/
mutex_unlock(&itv->serialize_lock);
for (f = 0; f < 4; f++) {
prepare_to_wait(&itv->vsync_waitq, &wait,
TASK_UNINTERRUPTIBLE);
if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
break;
schedule_timeout(msecs_to_jiffies(25));
}
finish_wait(&itv->vsync_waitq, &wait);
mutex_lock(&itv->serialize_lock);
if (f == 4)
IVTV_WARN("Mode change failed to sync to decoder\n");
ivtv_vapi(itv, CX2341X_DEC_SET_STANDARD, 1, itv->is_out_50hz);
itv->main_rect.left = 0;
itv->main_rect.top = 0;
itv->main_rect.width = 720;
itv->main_rect.height = itv->is_out_50hz ? 576 : 480;
ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4,
720, itv->main_rect.height, 0, 0);
yi->main_rect = itv->main_rect;
if (!itv->osd_info) {
yi->osd_full_w = 720;
yi->osd_full_h = itv->is_out_50hz ? 576 : 480;
}
}
static int ivtv_s_std(struct file *file, void *fh, v4l2_std_id std)
{
struct ivtv *itv = fh2id(fh)->itv;
if ((std & V4L2_STD_ALL) == 0)
return -EINVAL;
if (std == itv->std)
return 0;
if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ||
atomic_read(&itv->capturing) > 0 ||
atomic_read(&itv->decoding) > 0) {
/* Switching standard would mess with already running
streams, prevent that by returning EBUSY. */
return -EBUSY;
}
IVTV_DEBUG_INFO("Switching standard to %llx.\n",
(unsigned long long)itv->std);
ivtv_s_std_enc(itv, std);
if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)
ivtv_s_std_dec(itv, std);
return 0;
}
static int ivtv_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
{
struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
if (vt->index != 0)
return -EINVAL;
ivtv_call_all(itv, tuner, s_tuner, vt);
return 0;
}
static int ivtv_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
{
struct ivtv *itv = fh2id(fh)->itv;
if (vt->index != 0)
return -EINVAL;
ivtv_call_all(itv, tuner, g_tuner, vt);
if (vt->type == V4L2_TUNER_RADIO)
strscpy(vt->name, "ivtv Radio Tuner", sizeof(vt->name));
else
strscpy(vt->name, "ivtv TV Tuner", sizeof(vt->name));
return 0;
}
static int ivtv_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced_vbi_cap *cap)
{
struct ivtv *itv = fh2id(fh)->itv;
int set = itv->is_50hz ? V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525;
int f, l;
if (cap->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE) {
for (f = 0; f < 2; f++) {
for (l = 0; l < 24; l++) {
if (valid_service_line(f, l, itv->is_50hz))
cap->service_lines[f][l] = set;
}
}
} else if (cap->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
if (!(itv->v4l2_cap & V4L2_CAP_SLICED_VBI_OUTPUT))
return -EINVAL;
if (itv->is_60hz) {
cap->service_lines[0][21] = V4L2_SLICED_CAPTION_525;
cap->service_lines[1][21] = V4L2_SLICED_CAPTION_525;
} else {
cap->service_lines[0][23] = V4L2_SLICED_WSS_625;
cap->service_lines[0][16] = V4L2_SLICED_VPS;
}
} else {
return -EINVAL;
}
set = 0;
for (f = 0; f < 2; f++)
for (l = 0; l < 24; l++)
set |= cap->service_lines[f][l];
cap->service_set = set;
return 0;
}
static int ivtv_g_enc_index(struct file *file, void *fh, struct v4l2_enc_idx *idx)
{
struct ivtv *itv = fh2id(fh)->itv;
struct v4l2_enc_idx_entry *e = idx->entry;
int entries;
int i;
entries = (itv->pgm_info_write_idx + IVTV_MAX_PGM_INDEX - itv->pgm_info_read_idx) %
IVTV_MAX_PGM_INDEX;
if (entries > V4L2_ENC_IDX_ENTRIES)
entries = V4L2_ENC_IDX_ENTRIES;
idx->entries = 0;
idx->entries_cap = IVTV_MAX_PGM_INDEX;
if (!atomic_read(&itv->capturing))
return 0;
for (i = 0; i < entries; i++) {
*e = itv->pgm_info[(itv->pgm_info_read_idx + i) % IVTV_MAX_PGM_INDEX];
if ((e->flags & V4L2_ENC_IDX_FRAME_MASK) <= V4L2_ENC_IDX_FRAME_B) {
idx->entries++;
e++;
}
}
itv->pgm_info_read_idx = (itv->pgm_info_read_idx + idx->entries) % IVTV_MAX_PGM_INDEX;
return 0;
}
static int ivtv_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *enc)
{
struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
switch (enc->cmd) {
case V4L2_ENC_CMD_START:
IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_START\n");
enc->flags = 0;
return ivtv_start_capture(id);
case V4L2_ENC_CMD_STOP:
IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_STOP\n");
enc->flags &= V4L2_ENC_CMD_STOP_AT_GOP_END;
ivtv_stop_capture(id, enc->flags & V4L2_ENC_CMD_STOP_AT_GOP_END);
return 0;
case V4L2_ENC_CMD_PAUSE:
IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_PAUSE\n");
enc->flags = 0;
if (!atomic_read(&itv->capturing))
return -EPERM;
if (test_and_set_bit(IVTV_F_I_ENC_PAUSED, &itv->i_flags))
return 0;
ivtv_mute(itv);
ivtv_vapi(itv, CX2341X_ENC_PAUSE_ENCODER, 1, 0);
break;
case V4L2_ENC_CMD_RESUME:
IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_RESUME\n");
enc->flags = 0;
if (!atomic_read(&itv->capturing))
return -EPERM;
if (!test_and_clear_bit(IVTV_F_I_ENC_PAUSED, &itv->i_flags))
return 0;
ivtv_vapi(itv, CX2341X_ENC_PAUSE_ENCODER, 1, 1);
ivtv_unmute(itv);
break;
default:
IVTV_DEBUG_IOCTL("Unknown cmd %d\n", enc->cmd);
return -EINVAL;
}
return 0;
}
static int ivtv_try_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *enc)
{
struct ivtv *itv = fh2id(fh)->itv;
switch (enc->cmd) {
case V4L2_ENC_CMD_START:
IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_START\n");
enc->flags = 0;
return 0;
case V4L2_ENC_CMD_STOP:
IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_STOP\n");
enc->flags &= V4L2_ENC_CMD_STOP_AT_GOP_END;
return 0;
case V4L2_ENC_CMD_PAUSE:
IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_PAUSE\n");
enc->flags = 0;
return 0;
case V4L2_ENC_CMD_RESUME:
IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_RESUME\n");
enc->flags = 0;
return 0;
default:
IVTV_DEBUG_IOCTL("Unknown cmd %d\n", enc->cmd);
return -EINVAL;
}
}
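/*
* Report the OSD framebuffer: query the OSD state from the firmware to
* determine the active pixel format and derive the alpha and chroma-key
* capabilities and flags from it.
*/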
static int ivtv_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *fb)
{
struct ivtv *itv = fh2id(fh)->itv;
struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
u32 data[CX2341X_MBOX_MAX_DATA];
struct yuv_playback_info *yi = &itv->yuv_info;
int pixfmt;
static u32 pixel_format[16] = {
V4L2_PIX_FMT_PAL8, /* Uses a 256-entry RGB colormap */
V4L2_PIX_FMT_RGB565,
V4L2_PIX_FMT_RGB555,
V4L2_PIX_FMT_RGB444,
V4L2_PIX_FMT_RGB32,
0,
0,
0,
V4L2_PIX_FMT_PAL8, /* Uses a 256-entry YUV colormap */
V4L2_PIX_FMT_YUV565,
V4L2_PIX_FMT_YUV555,
V4L2_PIX_FMT_YUV444,
V4L2_PIX_FMT_YUV32,
0,
0,
0,
};
if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
return -ENOTTY;
if (!itv->osd_video_pbase)
return -ENOTTY;
fb->capability = V4L2_FBUF_CAP_EXTERNOVERLAY | V4L2_FBUF_CAP_CHROMAKEY |
V4L2_FBUF_CAP_GLOBAL_ALPHA;
ivtv_vapi_result(itv, data, CX2341X_OSD_GET_STATE, 0);
data[0] |= (read_reg(0x2a00) >> 7) & 0x40;
pixfmt = (data[0] >> 3) & 0xf;
fb->fmt.pixelformat = pixel_format[pixfmt];
fb->fmt.width = itv->osd_rect.width;
fb->fmt.height = itv->osd_rect.height;
fb->fmt.field = V4L2_FIELD_INTERLACED;
fb->fmt.bytesperline = fb->fmt.width;
fb->fmt.colorspace = V4L2_COLORSPACE_SMPTE170M;
if (fb->fmt.pixelformat != V4L2_PIX_FMT_PAL8)
fb->fmt.bytesperline *= 2;
if (fb->fmt.pixelformat == V4L2_PIX_FMT_RGB32 ||
fb->fmt.pixelformat == V4L2_PIX_FMT_YUV32)
fb->fmt.bytesperline *= 2;
fb->fmt.sizeimage = fb->fmt.bytesperline * fb->fmt.height;
fb->base = (void *)itv->osd_video_pbase;
fb->flags = 0;
if (itv->osd_chroma_key_state)
fb->flags |= V4L2_FBUF_FLAG_CHROMAKEY;
if (itv->osd_global_alpha_state)
fb->flags |= V4L2_FBUF_FLAG_GLOBAL_ALPHA;
if (yi->track_osd)
fb->flags |= V4L2_FBUF_FLAG_OVERLAY;
pixfmt &= 7;
/* no local alpha for RGB565 or unknown formats */
if (pixfmt == 1 || pixfmt > 4)
return 0;
/* 16-bit formats have inverted local alpha */
if (pixfmt == 2 || pixfmt == 3)
fb->capability |= V4L2_FBUF_CAP_LOCAL_INV_ALPHA;
else
fb->capability |= V4L2_FBUF_CAP_LOCAL_ALPHA;
if (itv->osd_local_alpha_state) {
/* 16-bit formats have inverted local alpha */
if (pixfmt == 2 || pixfmt == 3)
fb->flags |= V4L2_FBUF_FLAG_LOCAL_INV_ALPHA;
else
fb->flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
}
return 0;
}
static int ivtv_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *fb)
{
struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
struct yuv_playback_info *yi = &itv->yuv_info;
if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
return -ENOTTY;
if (!itv->osd_video_pbase)
return -ENOTTY;
itv->osd_global_alpha_state = (fb->flags & V4L2_FBUF_FLAG_GLOBAL_ALPHA) != 0;
itv->osd_local_alpha_state =
(fb->flags & (V4L2_FBUF_FLAG_LOCAL_ALPHA|V4L2_FBUF_FLAG_LOCAL_INV_ALPHA)) != 0;
itv->osd_chroma_key_state = (fb->flags & V4L2_FBUF_FLAG_CHROMAKEY) != 0;
ivtv_set_osd_alpha(itv);
yi->track_osd = (fb->flags & V4L2_FBUF_FLAG_OVERLAY) != 0;
return 0;
}
static int ivtv_overlay(struct file *file, void *fh, unsigned int on)
{
struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
return -ENOTTY;
if (!itv->osd_video_pbase)
return -ENOTTY;
ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, on != 0);
return 0;
}
static int ivtv_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub)
{
switch (sub->type) {
case V4L2_EVENT_VSYNC:
case V4L2_EVENT_EOS:
return v4l2_event_subscribe(fh, sub, 0, NULL);
default:
return v4l2_ctrl_subscribe_event(fh, sub);
}
}
static int ivtv_log_status(struct file *file, void *fh)
{
struct ivtv *itv = fh2id(fh)->itv;
u32 data[CX2341X_MBOX_MAX_DATA];
int has_output = itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT;
struct v4l2_input vidin;
struct v4l2_audio audin;
int i;
IVTV_INFO("Version: %s Card: %s\n", IVTV_VERSION, itv->card_name);
if (itv->hw_flags & IVTV_HW_TVEEPROM) {
struct tveeprom tv;
ivtv_read_eeprom(itv, &tv);
}
ivtv_call_all(itv, core, log_status);
ivtv_get_input(itv, itv->active_input, &vidin);
ivtv_get_audio_input(itv, itv->audio_input, &audin);
IVTV_INFO("Video Input: %s\n", vidin.name);
IVTV_INFO("Audio Input: %s%s\n", audin.name,
itv->dualwatch_stereo_mode == V4L2_MPEG_AUDIO_MODE_DUAL ?
" (Bilingual)" : "");
if (has_output) {
struct v4l2_output vidout;
struct v4l2_audioout audout;
int mode = itv->output_mode;
static const char * const output_modes[5] = {
"None",
"MPEG Streaming",
"YUV Streaming",
"YUV Frames",
"Passthrough",
};
static const char * const alpha_mode[4] = {
"None",
"Global",
"Local",
"Global and Local"
};
static const char * const pixel_format[16] = {
"ARGB Indexed",
"RGB 5:6:5",
"ARGB 1:5:5:5",
"ARGB 1:4:4:4",
"ARGB 8:8:8:8",
"5",
"6",
"7",
"AYUV Indexed",
"YUV 5:6:5",
"AYUV 1:5:5:5",
"AYUV 1:4:4:4",
"AYUV 8:8:8:8",
"13",
"14",
"15",
};
ivtv_get_output(itv, itv->active_output, &vidout);
ivtv_get_audio_output(itv, 0, &audout);
IVTV_INFO("Video Output: %s\n", vidout.name);
if (mode < 0 || mode > OUT_PASSTHROUGH)
mode = OUT_NONE;
IVTV_INFO("Output Mode: %s\n", output_modes[mode]);
ivtv_vapi_result(itv, data, CX2341X_OSD_GET_STATE, 0);
data[0] |= (read_reg(0x2a00) >> 7) & 0x40;
IVTV_INFO("Overlay: %s, Alpha: %s, Pixel Format: %s\n",
data[0] & 1 ? "On" : "Off",
alpha_mode[(data[0] >> 1) & 0x3],
pixel_format[(data[0] >> 3) & 0xf]);
}
IVTV_INFO("Tuner: %s\n",
test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ? "Radio" : "TV");
v4l2_ctrl_handler_log_status(&itv->cxhdl.hdl, itv->v4l2_dev.name);
IVTV_INFO("Status flags: 0x%08lx\n", itv->i_flags);
for (i = 0; i < IVTV_MAX_STREAMS; i++) {
struct ivtv_stream *s = &itv->streams[i];
if (s->vdev.v4l2_dev == NULL || s->buffers == 0)
continue;
IVTV_INFO("Stream %s: status 0x%04lx, %d%% of %d KiB (%d buffers) in use\n", s->name, s->s_flags,
(s->buffers - s->q_free.buffers) * 100 / s->buffers,
(s->buffers * s->buf_size) / 1024, s->buffers);
}
IVTV_INFO("Read MPG/VBI: %lld/%lld bytes\n",
(long long)itv->mpg_data_received,
(long long)itv->vbi_data_inserted);
return 0;
}
static int ivtv_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *dec)
{
struct ivtv_open_id *id = fh2id(file->private_data);
struct ivtv *itv = id->itv;
IVTV_DEBUG_IOCTL("VIDIOC_DECODER_CMD %d\n", dec->cmd);
return ivtv_video_command(itv, id, dec, false);
}
static int ivtv_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *dec)
{
struct ivtv_open_id *id = fh2id(file->private_data);
struct ivtv *itv = id->itv;
IVTV_DEBUG_IOCTL("VIDIOC_TRY_DECODER_CMD %d\n", dec->cmd);
return ivtv_video_command(itv, id, dec, true);
}
static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
{
struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
switch (cmd) {
case IVTV_IOC_DMA_FRAME: {
struct ivtv_dma_frame *args = arg;
IVTV_DEBUG_IOCTL("IVTV_IOC_DMA_FRAME\n");
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return -EINVAL;
if (args->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
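		/* Already in UDMA YUV mode with no frame supplied: there is
		   nothing to transfer, so just report success. */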
if (itv->output_mode == OUT_UDMA_YUV && args->y_source == NULL)
return 0;
if (ivtv_start_decoding(id, id->type)) {
return -EBUSY;
}
if (ivtv_set_output_mode(itv, OUT_UDMA_YUV) != OUT_UDMA_YUV) {
ivtv_release_stream(s);
return -EBUSY;
}
/* Mark that this file handle started the UDMA_YUV mode */
id->yuv_frames = 1;
if (args->y_source == NULL)
return 0;
return ivtv_yuv_prep_frame(itv, args);
}
case IVTV_IOC_PASSTHROUGH_MODE:
IVTV_DEBUG_IOCTL("IVTV_IOC_PASSTHROUGH_MODE\n");
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return -EINVAL;
return ivtv_passthrough_mode(itv, *(int *)arg != 0);
default:
return -EINVAL;
}
return 0;
}
static long ivtv_default(struct file *file, void *fh, bool valid_prio,
unsigned int cmd, void *arg)
{
struct ivtv *itv = fh2id(fh)->itv;
if (!valid_prio) {
switch (cmd) {
case IVTV_IOC_PASSTHROUGH_MODE:
return -EBUSY;
}
}
switch (cmd) {
case VIDIOC_INT_RESET: {
u32 val = *(u32 *)arg;
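		/* Bit 0 (or a zero value on cards using new-style I2C) resets
		   the IR GPIO; bit 1 resets the video digitizer subdevice. */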
if ((val == 0 && itv->options.newi2c) || (val & 0x01))
ivtv_reset_ir_gpio(itv);
if (val & 0x02)
v4l2_subdev_call(itv->sd_video, core, reset, 0);
break;
}
case IVTV_IOC_DMA_FRAME:
case IVTV_IOC_PASSTHROUGH_MODE:
return ivtv_decoder_ioctls(file, cmd, (void *)arg);
default:
return -ENOTTY;
}
return 0;
}
static const struct v4l2_ioctl_ops ivtv_ioctl_ops = {
.vidioc_querycap = ivtv_querycap,
.vidioc_s_audio = ivtv_s_audio,
.vidioc_g_audio = ivtv_g_audio,
.vidioc_enumaudio = ivtv_enumaudio,
.vidioc_s_audout = ivtv_s_audout,
.vidioc_g_audout = ivtv_g_audout,
.vidioc_enum_input = ivtv_enum_input,
.vidioc_enum_output = ivtv_enum_output,
.vidioc_enumaudout = ivtv_enumaudout,
.vidioc_g_pixelaspect = ivtv_g_pixelaspect,
.vidioc_s_selection = ivtv_s_selection,
.vidioc_g_selection = ivtv_g_selection,
.vidioc_g_input = ivtv_g_input,
.vidioc_s_input = ivtv_s_input,
.vidioc_g_output = ivtv_g_output,
.vidioc_s_output = ivtv_s_output,
.vidioc_g_frequency = ivtv_g_frequency,
.vidioc_s_frequency = ivtv_s_frequency,
.vidioc_s_tuner = ivtv_s_tuner,
.vidioc_g_tuner = ivtv_g_tuner,
.vidioc_g_enc_index = ivtv_g_enc_index,
.vidioc_g_fbuf = ivtv_g_fbuf,
.vidioc_s_fbuf = ivtv_s_fbuf,
.vidioc_g_std = ivtv_g_std,
.vidioc_s_std = ivtv_s_std,
.vidioc_overlay = ivtv_overlay,
.vidioc_log_status = ivtv_log_status,
.vidioc_enum_fmt_vid_cap = ivtv_enum_fmt_vid_cap,
.vidioc_encoder_cmd = ivtv_encoder_cmd,
.vidioc_try_encoder_cmd = ivtv_try_encoder_cmd,
.vidioc_decoder_cmd = ivtv_decoder_cmd,
.vidioc_try_decoder_cmd = ivtv_try_decoder_cmd,
.vidioc_enum_fmt_vid_out = ivtv_enum_fmt_vid_out,
.vidioc_g_fmt_vid_cap = ivtv_g_fmt_vid_cap,
.vidioc_g_fmt_vbi_cap = ivtv_g_fmt_vbi_cap,
.vidioc_g_fmt_sliced_vbi_cap = ivtv_g_fmt_sliced_vbi_cap,
.vidioc_g_fmt_vid_out = ivtv_g_fmt_vid_out,
.vidioc_g_fmt_vid_out_overlay = ivtv_g_fmt_vid_out_overlay,
.vidioc_g_fmt_sliced_vbi_out = ivtv_g_fmt_sliced_vbi_out,
.vidioc_s_fmt_vid_cap = ivtv_s_fmt_vid_cap,
.vidioc_s_fmt_vbi_cap = ivtv_s_fmt_vbi_cap,
.vidioc_s_fmt_sliced_vbi_cap = ivtv_s_fmt_sliced_vbi_cap,
.vidioc_s_fmt_vid_out = ivtv_s_fmt_vid_out,
.vidioc_s_fmt_vid_out_overlay = ivtv_s_fmt_vid_out_overlay,
.vidioc_s_fmt_sliced_vbi_out = ivtv_s_fmt_sliced_vbi_out,
.vidioc_try_fmt_vid_cap = ivtv_try_fmt_vid_cap,
.vidioc_try_fmt_vbi_cap = ivtv_try_fmt_vbi_cap,
.vidioc_try_fmt_sliced_vbi_cap = ivtv_try_fmt_sliced_vbi_cap,
.vidioc_try_fmt_vid_out = ivtv_try_fmt_vid_out,
.vidioc_try_fmt_vid_out_overlay = ivtv_try_fmt_vid_out_overlay,
.vidioc_try_fmt_sliced_vbi_out = ivtv_try_fmt_sliced_vbi_out,
.vidioc_g_sliced_vbi_cap = ivtv_g_sliced_vbi_cap,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_register = ivtv_g_register,
.vidioc_s_register = ivtv_s_register,
#endif
.vidioc_default = ivtv_default,
.vidioc_subscribe_event = ivtv_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
void ivtv_set_funcs(struct video_device *vdev)
{
vdev->ioctl_ops = &ivtv_ioctl_ops;
}
| linux-master | drivers/media/pci/ivtv/ivtv-ioctl.c |
/*
init/start/stop/exit stream functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <[email protected]>
Copyright (C) 2005-2007 Hans Verkuil <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* License: GPL
* Author: Kevin Thayer <nufan_wfk at yahoo dot com>
*
* This file will hold API related functions, both internal (firmware api)
* and external (v4l2, etc)
*
* -----
* MPG600/MPG160 support by T.Adachi <[email protected]>
* and Takeru KOMORIYA<[email protected]>
*
* AVerMedia M179 GPIO info by Chris Pinkham <[email protected]>
* using information provided by Jiun-Kuei Jung @ AVerMedia.
*/
#include "ivtv-driver.h"
#include "ivtv-fileops.h"
#include "ivtv-queue.h"
#include "ivtv-mailbox.h"
#include "ivtv-ioctl.h"
#include "ivtv-irq.h"
#include "ivtv-yuv.h"
#include "ivtv-cards.h"
#include "ivtv-streams.h"
#include "ivtv-firmware.h"
#include <media/v4l2-event.h>
static const struct v4l2_file_operations ivtv_v4l2_enc_fops = {
.owner = THIS_MODULE,
.read = ivtv_v4l2_read,
.write = ivtv_v4l2_write,
.open = ivtv_v4l2_open,
.unlocked_ioctl = video_ioctl2,
#ifdef CONFIG_COMPAT
.compat_ioctl32 = video_ioctl2, /* for ivtv_default() */
#endif
.release = ivtv_v4l2_close,
.poll = ivtv_v4l2_enc_poll,
};
static const struct v4l2_file_operations ivtv_v4l2_dec_fops = {
.owner = THIS_MODULE,
.read = ivtv_v4l2_read,
.write = ivtv_v4l2_write,
.open = ivtv_v4l2_open,
.unlocked_ioctl = video_ioctl2,
#ifdef CONFIG_COMPAT
.compat_ioctl32 = video_ioctl2, /* for ivtv_default() */
#endif
.release = ivtv_v4l2_close,
.poll = ivtv_v4l2_dec_poll,
};
static const struct v4l2_file_operations ivtv_v4l2_radio_fops = {
.owner = THIS_MODULE,
.open = ivtv_v4l2_open,
.unlocked_ioctl = video_ioctl2,
#ifdef CONFIG_COMPAT
.compat_ioctl32 = video_ioctl2, /* for ivtv_default() */
#endif
.release = ivtv_v4l2_close,
.poll = ivtv_v4l2_enc_poll,
};
#define IVTV_V4L2_DEC_MPG_OFFSET 16 /* offset from 0 to register decoder mpg v4l2 minors on */
#define IVTV_V4L2_ENC_PCM_OFFSET 24 /* offset from 0 to register pcm v4l2 minors on */
#define IVTV_V4L2_ENC_YUV_OFFSET 32 /* offset from 0 to register yuv v4l2 minors on */
#define IVTV_V4L2_DEC_YUV_OFFSET 48 /* offset from 0 to register decoder yuv v4l2 minors on */
#define IVTV_V4L2_DEC_VBI_OFFSET 8 /* offset from 0 to register decoder vbi input v4l2 minors on */
#define IVTV_V4L2_DEC_VOUT_OFFSET 16 /* offset from 0 to register vbi output v4l2 minors on */
static struct {
const char *name;
int vfl_type;
int num_offset;
int dma, pio;
u32 v4l2_caps;
const struct v4l2_file_operations *fops;
} ivtv_stream_info[] = {
{ /* IVTV_ENC_STREAM_TYPE_MPG */
"encoder MPG",
VFL_TYPE_VIDEO, 0,
DMA_FROM_DEVICE, 0,
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_enc_fops
},
{ /* IVTV_ENC_STREAM_TYPE_YUV */
"encoder YUV",
VFL_TYPE_VIDEO, IVTV_V4L2_ENC_YUV_OFFSET,
DMA_FROM_DEVICE, 0,
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_enc_fops
},
{ /* IVTV_ENC_STREAM_TYPE_VBI */
"encoder VBI",
VFL_TYPE_VBI, 0,
DMA_FROM_DEVICE, 0,
V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_CAPTURE | V4L2_CAP_TUNER |
V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_enc_fops
},
{ /* IVTV_ENC_STREAM_TYPE_PCM */
"encoder PCM",
VFL_TYPE_VIDEO, IVTV_V4L2_ENC_PCM_OFFSET,
DMA_FROM_DEVICE, 0,
V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_enc_fops
},
{ /* IVTV_ENC_STREAM_TYPE_RAD */
"encoder radio",
VFL_TYPE_RADIO, 0,
DMA_NONE, 1,
V4L2_CAP_RADIO | V4L2_CAP_TUNER,
&ivtv_v4l2_radio_fops
},
{ /* IVTV_DEC_STREAM_TYPE_MPG */
"decoder MPG",
VFL_TYPE_VIDEO, IVTV_V4L2_DEC_MPG_OFFSET,
DMA_TO_DEVICE, 0,
V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_dec_fops
},
{ /* IVTV_DEC_STREAM_TYPE_VBI */
"decoder VBI",
VFL_TYPE_VBI, IVTV_V4L2_DEC_VBI_OFFSET,
DMA_NONE, 1,
V4L2_CAP_SLICED_VBI_CAPTURE | V4L2_CAP_READWRITE,
&ivtv_v4l2_enc_fops
},
{ /* IVTV_DEC_STREAM_TYPE_VOUT */
"decoder VOUT",
VFL_TYPE_VBI, IVTV_V4L2_DEC_VOUT_OFFSET,
DMA_NONE, 1,
V4L2_CAP_SLICED_VBI_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_dec_fops
},
{ /* IVTV_DEC_STREAM_TYPE_YUV */
"decoder YUV",
VFL_TYPE_VIDEO, IVTV_V4L2_DEC_YUV_OFFSET,
DMA_TO_DEVICE, 0,
V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_dec_fops
}
};
static void ivtv_stream_init(struct ivtv *itv, int type)
{
struct ivtv_stream *s = &itv->streams[type];
/* we need to keep vdev, so restore it afterwards */
memset(s, 0, sizeof(*s));
/* initialize ivtv_stream fields */
s->itv = itv;
s->type = type;
s->name = ivtv_stream_info[type].name;
s->vdev.device_caps = ivtv_stream_info[type].v4l2_caps;
if (ivtv_stream_info[type].pio)
s->dma = DMA_NONE;
else
s->dma = ivtv_stream_info[type].dma;
s->buf_size = itv->stream_buf_size[type];
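	/* Convert the configured kilobytes for this stream into buffers of
	   buf_size bytes, rounding up so the final partial buffer is still
	   allocated. */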
if (s->buf_size)
s->buffers = (itv->options.kilobytes[type] * 1024 + s->buf_size - 1) / s->buf_size;
spin_lock_init(&s->qlock);
init_waitqueue_head(&s->waitq);
s->sg_handle = IVTV_DMA_UNMAPPED;
ivtv_queue_init(&s->q_free);
ivtv_queue_init(&s->q_full);
ivtv_queue_init(&s->q_dma);
ivtv_queue_init(&s->q_predma);
ivtv_queue_init(&s->q_io);
}
static int ivtv_prep_dev(struct ivtv *itv, int type)
{
struct ivtv_stream *s = &itv->streams[type];
int num_offset = ivtv_stream_info[type].num_offset;
int num = itv->instance + ivtv_first_minor + num_offset;
/* These four fields are always initialized. If vdev.v4l2_dev == NULL, then
this stream is not in use. In that case no other fields but these
four can be used. */
s->vdev.v4l2_dev = NULL;
s->itv = itv;
s->type = type;
s->name = ivtv_stream_info[type].name;
/* Check whether the radio is supported */
if (type == IVTV_ENC_STREAM_TYPE_RAD && !(itv->v4l2_cap & V4L2_CAP_RADIO))
return 0;
if (type >= IVTV_DEC_STREAM_TYPE_MPG && !(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return 0;
/* User explicitly selected 0 buffers for these streams, so don't
create them. */
if (ivtv_stream_info[type].dma != DMA_NONE &&
itv->options.kilobytes[type] == 0) {
IVTV_INFO("Disabled %s device\n", ivtv_stream_info[type].name);
return 0;
}
ivtv_stream_init(itv, type);
snprintf(s->vdev.name, sizeof(s->vdev.name), "%s %s",
itv->v4l2_dev.name, s->name);
s->vdev.num = num;
s->vdev.v4l2_dev = &itv->v4l2_dev;
if (ivtv_stream_info[type].v4l2_caps &
(V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_SLICED_VBI_OUTPUT))
s->vdev.vfl_dir = VFL_DIR_TX;
s->vdev.fops = ivtv_stream_info[type].fops;
s->vdev.ctrl_handler = itv->v4l2_dev.ctrl_handler;
s->vdev.release = video_device_release_empty;
s->vdev.tvnorms = V4L2_STD_ALL;
s->vdev.lock = &itv->serialize_lock;
if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
v4l2_disable_ioctl(&s->vdev, VIDIOC_S_AUDIO);
v4l2_disable_ioctl(&s->vdev, VIDIOC_G_AUDIO);
v4l2_disable_ioctl(&s->vdev, VIDIOC_ENUMAUDIO);
v4l2_disable_ioctl(&s->vdev, VIDIOC_ENUMINPUT);
v4l2_disable_ioctl(&s->vdev, VIDIOC_S_INPUT);
v4l2_disable_ioctl(&s->vdev, VIDIOC_G_INPUT);
v4l2_disable_ioctl(&s->vdev, VIDIOC_S_FREQUENCY);
v4l2_disable_ioctl(&s->vdev, VIDIOC_G_FREQUENCY);
v4l2_disable_ioctl(&s->vdev, VIDIOC_S_TUNER);
v4l2_disable_ioctl(&s->vdev, VIDIOC_G_TUNER);
v4l2_disable_ioctl(&s->vdev, VIDIOC_S_STD);
}
ivtv_set_funcs(&s->vdev);
return 0;
}
/* Initialize v4l2 variables and prepare v4l2 devices */
int ivtv_streams_setup(struct ivtv *itv)
{
int type;
/* Setup V4L2 Devices */
for (type = 0; type < IVTV_MAX_STREAMS; type++) {
/* Prepare device */
if (ivtv_prep_dev(itv, type))
break;
if (itv->streams[type].vdev.v4l2_dev == NULL)
continue;
/* Allocate Stream */
if (ivtv_stream_alloc(&itv->streams[type]))
break;
}
if (type == IVTV_MAX_STREAMS)
return 0;
/* One or more streams could not be initialized. Clean 'em all up. */
ivtv_streams_cleanup(itv);
return -ENOMEM;
}
static int ivtv_reg_dev(struct ivtv *itv, int type)
{
struct ivtv_stream *s = &itv->streams[type];
int vfl_type = ivtv_stream_info[type].vfl_type;
const char *name;
int num;
if (s->vdev.v4l2_dev == NULL)
return 0;
num = s->vdev.num;
/* card number + user defined offset + device offset */
if (type != IVTV_ENC_STREAM_TYPE_MPG) {
struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
if (s_mpg->vdev.v4l2_dev)
num = s_mpg->vdev.num + ivtv_stream_info[type].num_offset;
}
if (itv->osd_video_pbase && (type == IVTV_DEC_STREAM_TYPE_YUV ||
type == IVTV_DEC_STREAM_TYPE_MPG)) {
s->vdev.device_caps |= V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
itv->v4l2_cap |= V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
}
video_set_drvdata(&s->vdev, s);
/* Register device. First try the desired minor, then any free one. */
if (video_register_device_no_warn(&s->vdev, vfl_type, num)) {
IVTV_ERR("Couldn't register v4l2 device for %s (device node number %d)\n",
s->name, num);
return -ENOMEM;
}
name = video_device_node_name(&s->vdev);
switch (vfl_type) {
case VFL_TYPE_VIDEO:
IVTV_INFO("Registered device %s for %s (%d kB)\n",
name, s->name, itv->options.kilobytes[type]);
break;
case VFL_TYPE_RADIO:
IVTV_INFO("Registered device %s for %s\n",
name, s->name);
break;
case VFL_TYPE_VBI:
if (itv->options.kilobytes[type])
IVTV_INFO("Registered device %s for %s (%d kB)\n",
name, s->name, itv->options.kilobytes[type]);
else
IVTV_INFO("Registered device %s for %s\n",
name, s->name);
break;
}
return 0;
}
/* Register v4l2 devices */
int ivtv_streams_register(struct ivtv *itv)
{
int type;
int err = 0;
/* Register V4L2 devices */
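	/* Registration results are OR-ed together so every stream still gets
	   an attempt; any failure triggers the full cleanup below. */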
for (type = 0; type < IVTV_MAX_STREAMS; type++)
err |= ivtv_reg_dev(itv, type);
if (err == 0)
return 0;
/* One or more streams could not be initialized. Clean 'em all up. */
ivtv_streams_cleanup(itv);
return -ENOMEM;
}
/* Unregister v4l2 devices */
void ivtv_streams_cleanup(struct ivtv *itv)
{
int type;
/* Teardown all streams */
for (type = 0; type < IVTV_MAX_STREAMS; type++) {
struct video_device *vdev = &itv->streams[type].vdev;
if (vdev->v4l2_dev == NULL)
continue;
video_unregister_device(vdev);
ivtv_stream_free(&itv->streams[type]);
itv->streams[type].vdev.v4l2_dev = NULL;
}
}
static void ivtv_vbi_setup(struct ivtv *itv)
{
int raw = ivtv_raw_vbi(itv);
u32 data[CX2341X_MBOX_MAX_DATA];
int lines;
int i;
/* Reset VBI */
	ivtv_vapi(itv, CX2341X_ENC_SET_VBI_LINE, 5, 0xffff, 0, 0, 0, 0);
/* setup VBI registers */
if (raw)
v4l2_subdev_call(itv->sd_video, vbi, s_raw_fmt, &itv->vbi.in.fmt.vbi);
else
v4l2_subdev_call(itv->sd_video, vbi, s_sliced_fmt, &itv->vbi.in.fmt.sliced);
/* determine number of lines and total number of VBI bytes.
A raw line takes 1443 bytes: 2 * 720 + 4 byte frame header - 1
The '- 1' byte is probably an unused U or V byte. Or something...
A sliced line takes 51 bytes: 4 byte frame header, 4 byte internal
header, 42 data bytes + checksum (to be confirmed) */
if (raw) {
lines = itv->vbi.count * 2;
} else {
lines = itv->is_60hz ? 24 : 38;
if (itv->is_60hz && (itv->hw_flags & IVTV_HW_CX25840))
lines += 2;
}
itv->vbi.enc_size = lines * (raw ? itv->vbi.raw_size : itv->vbi.sliced_size);
	/* Note: sliced vs raw flag doesn't seem to have any effect.
	   TODO: check mode (0x02) value with older ivtv versions. */
data[0] = raw | 0x02 | (0xbd << 8);
/* Every X number of frames a VBI interrupt arrives (frames as in 25 or 30 fps) */
data[1] = 1;
/* The VBI frames are stored in a ringbuffer with this size (with a VBI frame as unit) */
data[2] = raw ? 4 : 4 * (itv->vbi.raw_size / itv->vbi.enc_size);
/* The start/stop codes determine which VBI lines end up in the raw VBI data area.
The codes are from table 24 in the saa7115 datasheet. Each raw/sliced/video line
is framed with codes FF0000XX where XX is the SAV/EAV (Start/End of Active Video)
code. These values for raw VBI are obtained from a driver disassembly. The sliced
	   start/stop codes were deduced from this, but they do not appear in the driver.
Other code pairs that I found are: 0x250E6249/0x13545454 and 0x25256262/0x38137F54.
However, I have no idea what these values are for. */
if (itv->hw_flags & IVTV_HW_CX25840) {
/* Setup VBI for the cx25840 digitizer */
if (raw) {
data[3] = 0x20602060;
data[4] = 0x30703070;
} else {
data[3] = 0xB0F0B0F0;
data[4] = 0xA0E0A0E0;
}
/* Lines per frame */
data[5] = lines;
/* bytes per line */
data[6] = (raw ? itv->vbi.raw_size : itv->vbi.sliced_size);
} else {
/* Setup VBI for the saa7115 digitizer */
if (raw) {
data[3] = 0x25256262;
data[4] = 0x387F7F7F;
} else {
data[3] = 0xABABECEC;
data[4] = 0xB6F1F1F1;
}
/* Lines per frame */
data[5] = lines;
/* bytes per line */
data[6] = itv->vbi.enc_size / lines;
}
IVTV_DEBUG_INFO(
"Setup VBI API header 0x%08x pkts %d buffs %d ln %d sz %d\n",
data[0], data[1], data[2], data[5], data[6]);
ivtv_api(itv, CX2341X_ENC_SET_VBI_CONFIG, 7, data);
/* returns the VBI encoder memory area. */
itv->vbi.enc_start = data[2];
itv->vbi.fpi = data[0];
if (!itv->vbi.fpi)
itv->vbi.fpi = 1;
IVTV_DEBUG_INFO("Setup VBI start 0x%08x frames %d fpi %d\n",
itv->vbi.enc_start, data[1], itv->vbi.fpi);
/* select VBI lines.
Note that the sliced argument seems to have no effect. */
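	/* Only a middle band of lines is enabled: i = 10-21 for 60 Hz and
	   6-23 for 50 Hz (passed to the firmware as i - 1). Each line is
	   programmed twice; the second call sets bit 31, presumably to cover
	   the second field. */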
for (i = 2; i <= 24; i++) {
int valid;
if (itv->is_60hz) {
valid = i >= 10 && i < 22;
} else {
valid = i >= 6 && i < 24;
}
ivtv_vapi(itv, CX2341X_ENC_SET_VBI_LINE, 5, i - 1,
		  valid, 0, 0, 0);
ivtv_vapi(itv, CX2341X_ENC_SET_VBI_LINE, 5, (i - 1) | 0x80000000,
valid, 0, 0, 0);
}
/* Remaining VBI questions:
- Is it possible to select particular VBI lines only for inclusion in the MPEG
stream? Currently you can only get the first X lines.
- Is mixed raw and sliced VBI possible?
- What's the meaning of the raw/sliced flag?
- What's the meaning of params 2, 3 & 4 of the Select VBI command? */
}
int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
{
u32 data[CX2341X_MBOX_MAX_DATA];
struct ivtv *itv = s->itv;
int captype = 0, subtype = 0;
int enable_passthrough = 0;
if (s->vdev.v4l2_dev == NULL)
return -EINVAL;
IVTV_DEBUG_INFO("Start encoder stream %s\n", s->name);
switch (s->type) {
case IVTV_ENC_STREAM_TYPE_MPG:
captype = 0;
subtype = 3;
/* Stop Passthrough */
if (itv->output_mode == OUT_PASSTHROUGH) {
ivtv_passthrough_mode(itv, 0);
enable_passthrough = 1;
}
itv->mpg_data_received = itv->vbi_data_inserted = 0;
itv->dualwatch_jiffies = jiffies;
itv->dualwatch_stereo_mode = v4l2_ctrl_g_ctrl(itv->cxhdl.audio_mode);
itv->search_pack_header = 0;
break;
case IVTV_ENC_STREAM_TYPE_YUV:
if (itv->output_mode == OUT_PASSTHROUGH) {
captype = 2;
subtype = 11; /* video+audio+decoder */
break;
}
captype = 1;
subtype = 1;
break;
case IVTV_ENC_STREAM_TYPE_PCM:
captype = 1;
subtype = 2;
break;
case IVTV_ENC_STREAM_TYPE_VBI:
captype = 1;
subtype = 4;
itv->vbi.frame = 0;
itv->vbi.inserted_frame = 0;
memset(itv->vbi.sliced_mpeg_size,
0, sizeof(itv->vbi.sliced_mpeg_size));
break;
default:
return -EINVAL;
}
s->subtype = subtype;
s->buffers_stolen = 0;
/* Clear Streamoff flags in case left from last capture */
clear_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
if (atomic_read(&itv->capturing) == 0) {
int digitizer;
/* Always use frame based mode. Experiments have demonstrated that byte
stream based mode results in dropped frames and corruption. Not often,
but occasionally. Many thanks go to Leonard Orb who spent a lot of
effort and time trying to trace the cause of the drop outs. */
/* 1 frame per DMA */
/*ivtv_vapi(itv, CX2341X_ENC_SET_DMA_BLOCK_SIZE, 2, 128, 0); */
ivtv_vapi(itv, CX2341X_ENC_SET_DMA_BLOCK_SIZE, 2, 1, 1);
/* Stuff from Windows, we don't know what it is */
ivtv_vapi(itv, CX2341X_ENC_SET_VERT_CROP_LINE, 1, 0);
/* According to the docs, this should be correct. However, this is
untested. I don't dare enable this without having tested it.
Only very few old cards actually have this hardware combination.
ivtv_vapi(itv, CX2341X_ENC_SET_VERT_CROP_LINE, 1,
((itv->hw_flags & IVTV_HW_SAA7114) && itv->is_60hz) ? 10001 : 0);
*/
ivtv_vapi(itv, CX2341X_ENC_MISC, 2, 3, !itv->has_cx23415);
ivtv_vapi(itv, CX2341X_ENC_MISC, 2, 8, 0);
ivtv_vapi(itv, CX2341X_ENC_MISC, 2, 4, 1);
ivtv_vapi(itv, CX2341X_ENC_MISC, 1, 12);
/* assign placeholder */
ivtv_vapi(itv, CX2341X_ENC_SET_PLACEHOLDER, 12,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
if (itv->card->hw_all & (IVTV_HW_SAA7115 | IVTV_HW_SAA717X))
digitizer = 0xF1;
else if (itv->card->hw_all & IVTV_HW_SAA7114)
digitizer = 0xEF;
else /* cx25840 */
digitizer = 0x140;
ivtv_vapi(itv, CX2341X_ENC_SET_NUM_VSYNC_LINES, 2, digitizer, digitizer);
/* Setup VBI */
if (itv->v4l2_cap & V4L2_CAP_VBI_CAPTURE) {
ivtv_vbi_setup(itv);
}
/* assign program index info. Mask 7: select I/P/B, Num_req: 400 max */
ivtv_vapi_result(itv, data, CX2341X_ENC_SET_PGM_INDEX_INFO, 2, 7, 400);
itv->pgm_info_offset = data[0];
itv->pgm_info_num = data[1];
itv->pgm_info_write_idx = 0;
itv->pgm_info_read_idx = 0;
IVTV_DEBUG_INFO("PGM Index at 0x%08x with %d elements\n",
itv->pgm_info_offset, itv->pgm_info_num);
/* Setup API for Stream */
cx2341x_handler_setup(&itv->cxhdl);
/* mute if capturing radio */
if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags))
ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1,
1 | (v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8));
}
/* Vsync Setup */
if (itv->has_cx23415 && !test_and_set_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) {
/* event notification (on) */
ivtv_vapi(itv, CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, 0, 1, IVTV_IRQ_ENC_VIM_RST, -1);
ivtv_clear_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST);
}
if (atomic_read(&itv->capturing) == 0) {
/* Clear all Pending Interrupts */
ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE);
clear_bit(IVTV_F_I_EOS, &itv->i_flags);
cx2341x_handler_set_busy(&itv->cxhdl, 1);
/* Initialize Digitizer for Capture */
/* Avoid tinny audio problem - ensure audio clocks are going */
v4l2_subdev_call(itv->sd_audio, audio, s_stream, 1);
/* Avoid unpredictable PCI bus hang - disable video clocks */
v4l2_subdev_call(itv->sd_video, video, s_stream, 0);
ivtv_msleep_timeout(300, 0);
ivtv_vapi(itv, CX2341X_ENC_INITIALIZE_INPUT, 0);
v4l2_subdev_call(itv->sd_video, video, s_stream, 1);
}
/* begin_capture */
	if (ivtv_vapi(itv, CX2341X_ENC_START_CAPTURE, 2, captype, subtype)) {
		IVTV_DEBUG_WARN("Error starting capture!\n");
		return -EINVAL;
	}
/* Start Passthrough */
if (enable_passthrough) {
ivtv_passthrough_mode(itv, 1);
}
if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
ivtv_clear_irq_mask(itv, IVTV_IRQ_ENC_VBI_CAP);
else
ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE);
/* you're live! sit back and await interrupts :) */
atomic_inc(&itv->capturing);
return 0;
}
EXPORT_SYMBOL(ivtv_start_v4l2_encode_stream);
static int ivtv_setup_v4l2_decode_stream(struct ivtv_stream *s)
{
u32 data[CX2341X_MBOX_MAX_DATA];
struct ivtv *itv = s->itv;
int datatype;
u16 width;
u16 height;
if (s->vdev.v4l2_dev == NULL)
return -EINVAL;
IVTV_DEBUG_INFO("Setting some initial decoder settings\n");
width = itv->cxhdl.width;
height = itv->cxhdl.height;
/* set audio mode to left/stereo for dual/stereo mode. */
ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode);
/* set number of internal decoder buffers */
ivtv_vapi(itv, CX2341X_DEC_SET_DISPLAY_BUFFERS, 1, 0);
/* prebuffering */
ivtv_vapi(itv, CX2341X_DEC_SET_PREBUFFERING, 1, 1);
/* extract from user packets */
ivtv_vapi_result(itv, data, CX2341X_DEC_EXTRACT_VBI, 1, 1);
itv->vbi.dec_start = data[0];
IVTV_DEBUG_INFO("Decoder VBI RE-Insert start 0x%08x size 0x%08x\n",
itv->vbi.dec_start, data[1]);
/* set decoder source settings */
/* Data type: 0 = mpeg from host,
1 = yuv from encoder,
2 = yuv_from_host */
switch (s->type) {
case IVTV_DEC_STREAM_TYPE_YUV:
if (itv->output_mode == OUT_PASSTHROUGH) {
datatype = 1;
} else {
/* Fake size to avoid switching video standard */
datatype = 2;
width = 720;
height = itv->is_out_50hz ? 576 : 480;
}
IVTV_DEBUG_INFO("Setup DEC YUV Stream data[0] = %d\n", datatype);
break;
case IVTV_DEC_STREAM_TYPE_MPG:
default:
datatype = 0;
break;
}
if (ivtv_vapi(itv, CX2341X_DEC_SET_DECODER_SOURCE, 4, datatype,
width, height, itv->cxhdl.audio_properties)) {
IVTV_DEBUG_WARN("Couldn't initialize decoder source\n");
}
/* Decoder sometimes dies here, so wait a moment */
ivtv_msleep_timeout(10, 0);
/* Known failure point for firmware, so check */
return ivtv_firmware_check(itv, "ivtv_setup_v4l2_decode_stream");
}
int ivtv_start_v4l2_decode_stream(struct ivtv_stream *s, int gop_offset)
{
struct ivtv *itv = s->itv;
int rc;
if (s->vdev.v4l2_dev == NULL)
return -EINVAL;
if (test_and_set_bit(IVTV_F_S_STREAMING, &s->s_flags))
return 0; /* already started */
IVTV_DEBUG_INFO("Starting decode stream %s (gop_offset %d)\n", s->name, gop_offset);
rc = ivtv_setup_v4l2_decode_stream(s);
if (rc < 0) {
clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
return rc;
}
/* set dma size to 65536 bytes */
ivtv_vapi(itv, CX2341X_DEC_SET_DMA_BLOCK_SIZE, 1, 65536);
/* Clear Streamoff */
clear_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
/* Zero out decoder counters */
writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA_END].data[0]);
writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA_END].data[1]);
writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA_END].data[2]);
writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA_END].data[3]);
writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA].data[0]);
writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA].data[1]);
writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA].data[2]);
writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA].data[3]);
/* turn on notification of dual/stereo mode change */
ivtv_vapi(itv, CX2341X_DEC_SET_EVENT_NOTIFICATION, 4, 0, 1, IVTV_IRQ_DEC_AUD_MODE_CHG, -1);
/* start playback */
ivtv_vapi(itv, CX2341X_DEC_START_PLAYBACK, 2, gop_offset, 0);
/* Let things settle before we actually start */
ivtv_msleep_timeout(10, 0);
/* Clear the following Interrupt mask bits for decoding */
ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_DECODE);
IVTV_DEBUG_IRQ("IRQ Mask is now: 0x%08x\n", itv->irqmask);
/* you're live! sit back and await interrupts :) */
atomic_inc(&itv->decoding);
return 0;
}
void ivtv_stop_all_captures(struct ivtv *itv)
{
int i;
for (i = IVTV_MAX_STREAMS - 1; i >= 0; i--) {
struct ivtv_stream *s = &itv->streams[i];
if (s->vdev.v4l2_dev == NULL)
continue;
if (test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
ivtv_stop_v4l2_encode_stream(s, 0);
}
}
}
int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
{
struct ivtv *itv = s->itv;
DECLARE_WAITQUEUE(wait, current);
int cap_type;
int stopmode;
if (s->vdev.v4l2_dev == NULL)
return -EINVAL;
/* This function assumes that you are allowed to stop the capture
and that we are actually capturing */
IVTV_DEBUG_INFO("Stop Capture\n");
if (s->type == IVTV_DEC_STREAM_TYPE_VOUT)
return 0;
if (atomic_read(&itv->capturing) == 0)
return 0;
switch (s->type) {
case IVTV_ENC_STREAM_TYPE_YUV:
cap_type = 1;
break;
case IVTV_ENC_STREAM_TYPE_PCM:
cap_type = 1;
break;
case IVTV_ENC_STREAM_TYPE_VBI:
cap_type = 1;
break;
case IVTV_ENC_STREAM_TYPE_MPG:
default:
cap_type = 0;
break;
}
/* Stop Capture Mode */
if (s->type == IVTV_ENC_STREAM_TYPE_MPG && gop_end) {
stopmode = 0;
} else {
stopmode = 1;
}
/* end_capture */
/* when: 0 = end of GOP 1 = NOW!, type: 0 = mpeg, subtype: 3 = video+audio */
ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, stopmode, cap_type, s->subtype);
if (!test_bit(IVTV_F_S_PASSTHROUGH, &s->s_flags)) {
if (s->type == IVTV_ENC_STREAM_TYPE_MPG && gop_end) {
/* only run these if we're shutting down the last cap */
unsigned long duration;
unsigned long then = jiffies;
add_wait_queue(&itv->eos_waitq, &wait);
set_current_state(TASK_INTERRUPTIBLE);
/* wait 2s for EOS interrupt */
while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) &&
time_before(jiffies,
then + msecs_to_jiffies(2000))) {
schedule_timeout(msecs_to_jiffies(10));
}
/* To convert jiffies to ms, we must multiply by 1000
* and divide by HZ. To avoid runtime division, we
* convert this to multiplication by 1000/HZ.
* Since integer division truncates, we get the best
* accuracy if we do a rounding calculation of the constant.
* Think of the case where HZ is 1024.
*/
duration = ((1000 + HZ / 2) / HZ) * (jiffies - then);
if (!test_bit(IVTV_F_I_EOS, &itv->i_flags)) {
IVTV_DEBUG_WARN("%s: EOS interrupt not received! stopping anyway.\n", s->name);
IVTV_DEBUG_WARN("%s: waited %lu ms.\n", s->name, duration);
} else {
IVTV_DEBUG_INFO("%s: EOS took %lu ms to occur.\n", s->name, duration);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&itv->eos_waitq, &wait);
set_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
}
/* Handle any pending interrupts */
ivtv_msleep_timeout(100, 0);
}
atomic_dec(&itv->capturing);
/* Clear capture and no-read bits */
clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VBI_CAP);
if (atomic_read(&itv->capturing) > 0) {
return 0;
}
cx2341x_handler_set_busy(&itv->cxhdl, 0);
/* Set the following Interrupt mask bits for capture */
ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE);
del_timer(&itv->dma_timer);
/* event notification (off) */
if (test_and_clear_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) {
/* type: 0 = refresh */
/* on/off: 0 = off, intr: 0x10000000, mbox_id: -1: none */
ivtv_vapi(itv, CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, 0, 0, IVTV_IRQ_ENC_VIM_RST, -1);
ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST);
}
/* Raw-passthrough is implied on start. Make sure it's stopped so
the encoder will re-initialize when next started */
ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, 1, 2, 7);
wake_up(&s->waitq);
return 0;
}
EXPORT_SYMBOL(ivtv_stop_v4l2_encode_stream);
int ivtv_stop_v4l2_decode_stream(struct ivtv_stream *s, int flags, u64 pts)
{
static const struct v4l2_event ev = {
.type = V4L2_EVENT_EOS,
};
struct ivtv *itv = s->itv;
if (s->vdev.v4l2_dev == NULL)
return -EINVAL;
if (s->type != IVTV_DEC_STREAM_TYPE_YUV && s->type != IVTV_DEC_STREAM_TYPE_MPG)
return -EINVAL;
if (!test_bit(IVTV_F_S_STREAMING, &s->s_flags))
return 0;
IVTV_DEBUG_INFO("Stop Decode at %llu, flags: %x\n", (unsigned long long)pts, flags);
/* Stop Decoder */
if (!(flags & V4L2_DEC_CMD_STOP_IMMEDIATELY) || pts) {
u32 tmp = 0;
/* Wait until the decoder is no longer running */
if (pts) {
ivtv_vapi(itv, CX2341X_DEC_STOP_PLAYBACK, 3,
0, (u32)(pts & 0xffffffff), (u32)(pts >> 32));
}
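		/* Poll the decoder: once no buffers are queued and what
		   appears to be a transfer counter (data[3]) stops changing
		   between polls, the decoder is assumed to have drained. */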
while (1) {
u32 data[CX2341X_MBOX_MAX_DATA];
ivtv_vapi_result(itv, data, CX2341X_DEC_GET_XFER_INFO, 0);
if (s->q_full.buffers + s->q_dma.buffers == 0) {
if (tmp == data[3])
break;
tmp = data[3];
}
if (ivtv_msleep_timeout(100, 1))
break;
}
}
ivtv_vapi(itv, CX2341X_DEC_STOP_PLAYBACK, 3, flags & V4L2_DEC_CMD_STOP_TO_BLACK, 0, 0);
/* turn off notification of dual/stereo mode change */
ivtv_vapi(itv, CX2341X_DEC_SET_EVENT_NOTIFICATION, 4, 0, 0, IVTV_IRQ_DEC_AUD_MODE_CHG, -1);
ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_DECODE);
del_timer(&itv->dma_timer);
clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
ivtv_flush_queues(s);
/* decoder needs time to settle */
ivtv_msleep_timeout(40, 0);
/* decrement decoding */
atomic_dec(&itv->decoding);
set_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags);
wake_up(&itv->event_waitq);
v4l2_event_queue(&s->vdev, &ev);
/* wake up wait queues */
wake_up(&s->waitq);
return 0;
}
int ivtv_passthrough_mode(struct ivtv *itv, int enable)
{
struct ivtv_stream *yuv_stream = &itv->streams[IVTV_ENC_STREAM_TYPE_YUV];
struct ivtv_stream *dec_stream = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
if (yuv_stream->vdev.v4l2_dev == NULL || dec_stream->vdev.v4l2_dev == NULL)
return -EINVAL;
IVTV_DEBUG_INFO("ivtv ioctl: Select passthrough mode\n");
/* Prevent others from starting/stopping streams while we
initiate/terminate passthrough mode */
if (enable) {
if (itv->output_mode == OUT_PASSTHROUGH) {
return 0;
}
if (ivtv_set_output_mode(itv, OUT_PASSTHROUGH) != OUT_PASSTHROUGH)
return -EBUSY;
/* Fully initialize stream, and then unflag init */
set_bit(IVTV_F_S_PASSTHROUGH, &dec_stream->s_flags);
set_bit(IVTV_F_S_STREAMING, &dec_stream->s_flags);
/* Setup YUV Decoder */
ivtv_setup_v4l2_decode_stream(dec_stream);
/* Start Decoder */
ivtv_vapi(itv, CX2341X_DEC_START_PLAYBACK, 2, 0, 1);
atomic_inc(&itv->decoding);
/* Setup capture if not already done */
if (atomic_read(&itv->capturing) == 0) {
cx2341x_handler_setup(&itv->cxhdl);
cx2341x_handler_set_busy(&itv->cxhdl, 1);
}
/* Start Passthrough Mode */
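		/* captype 2 / subtype 11 mirror the YUV-in-passthrough case of
		   ivtv_start_v4l2_encode_stream() (video+audio+decoder). */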
ivtv_vapi(itv, CX2341X_ENC_START_CAPTURE, 2, 2, 11);
atomic_inc(&itv->capturing);
return 0;
}
if (itv->output_mode != OUT_PASSTHROUGH)
return 0;
/* Stop Passthrough Mode */
ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, 1, 2, 11);
ivtv_vapi(itv, CX2341X_DEC_STOP_PLAYBACK, 3, 1, 0, 0);
atomic_dec(&itv->capturing);
atomic_dec(&itv->decoding);
clear_bit(IVTV_F_S_PASSTHROUGH, &dec_stream->s_flags);
clear_bit(IVTV_F_S_STREAMING, &dec_stream->s_flags);
itv->output_mode = OUT_NONE;
if (atomic_read(&itv->capturing) == 0)
cx2341x_handler_set_busy(&itv->cxhdl, 0);
return 0;
}
| linux-master | drivers/media/pci/ivtv/ivtv-streams.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
yuv support
Copyright (C) 2007 Ian Armstrong <[email protected]>
*/
#include "ivtv-driver.h"
#include "ivtv-udma.h"
#include "ivtv-yuv.h"
/* YUV buffer offsets */
const u32 yuv_offset[IVTV_YUV_BUFFERS] = {
0x001a8600,
0x00240400,
0x002d8200,
0x00370000,
0x00029000,
0x000C0E00,
0x006B0400,
0x00748200
};
static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
struct ivtv_dma_frame *args)
{
struct ivtv_dma_page_info y_dma;
struct ivtv_dma_page_info uv_dma;
struct yuv_playback_info *yi = &itv->yuv_info;
u8 frame = yi->draw_frame;
struct yuv_frame_info *f = &yi->new_frame_info[frame];
int y_pages, uv_pages;
unsigned long y_buffer_offset, uv_buffer_offset;
int y_decode_height, uv_decode_height, y_size;
y_buffer_offset = IVTV_DECODER_OFFSET + yuv_offset[frame];
uv_buffer_offset = y_buffer_offset + IVTV_YUV_BUFFER_UV_OFFSET;
y_decode_height = uv_decode_height = f->src_h + f->src_y;
if (f->offset_y)
y_buffer_offset += 720 * 16;
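	/* Round the luma height up to a multiple of 16 and the chroma height
	   up to a multiple of 32, presumably to match the hardware's
	   macroblock-sized transfers. */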
if (y_decode_height & 15)
y_decode_height = (y_decode_height + 16) & ~15;
if (uv_decode_height & 31)
uv_decode_height = (uv_decode_height + 32) & ~31;
y_size = 720 * y_decode_height;
/* Still in USE */
if (dma->SG_length || dma->page_count) {
IVTV_DEBUG_WARN
("prep_user_dma: SG_length %d page_count %d still full?\n",
dma->SG_length, dma->page_count);
return -EBUSY;
}
ivtv_udma_get_page_info (&y_dma, (unsigned long)args->y_source, 720 * y_decode_height);
ivtv_udma_get_page_info (&uv_dma, (unsigned long)args->uv_source, 360 * uv_decode_height);
/* Pin user pages for DMA Xfer */
y_pages = pin_user_pages_unlocked(y_dma.uaddr,
y_dma.page_count, &dma->map[0], 0);
uv_pages = 0; /* silence gcc. value is set and consumed only if: */
if (y_pages == y_dma.page_count) {
uv_pages = pin_user_pages_unlocked(uv_dma.uaddr,
uv_dma.page_count, &dma->map[y_pages], 0);
}
if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
int rc = -EFAULT;
if (y_pages == y_dma.page_count) {
IVTV_DEBUG_WARN
("failed to map uv user pages, returned %d expecting %d\n",
uv_pages, uv_dma.page_count);
if (uv_pages >= 0) {
unpin_user_pages(&dma->map[y_pages], uv_pages);
rc = -EFAULT;
} else {
rc = uv_pages;
}
} else {
IVTV_DEBUG_WARN
("failed to map y user pages, returned %d expecting %d\n",
y_pages, y_dma.page_count);
}
if (y_pages >= 0) {
unpin_user_pages(dma->map, y_pages);
/*
* Inherit the -EFAULT from rc's
* initialization, but allow it to be
* overridden by uv_pages above if it was an
* actual errno.
*/
} else {
rc = y_pages;
}
return rc;
}
dma->page_count = y_pages + uv_pages;
/* Fill & map SG List */
if (ivtv_udma_fill_sg_list (dma, &uv_dma, ivtv_udma_fill_sg_list (dma, &y_dma, 0)) < 0) {
IVTV_DEBUG_WARN("could not allocate bounce buffers for highmem userspace buffers\n");
unpin_user_pages(dma->map, dma->page_count);
dma->page_count = 0;
return -ENOMEM;
}
dma->SG_length = dma_map_sg(&itv->pdev->dev, dma->SGlist,
dma->page_count, DMA_TO_DEVICE);
/* Fill SG Array with new values */
ivtv_udma_fill_sg_array(dma, y_buffer_offset, uv_buffer_offset, y_size);
/* If we've offset the y plane, ensure top area is blanked */
if (f->offset_y && yi->blanking_dmaptr) {
dma->SGarray[dma->SG_length].size = cpu_to_le32(720*16);
dma->SGarray[dma->SG_length].src = cpu_to_le32(yi->blanking_dmaptr);
dma->SGarray[dma->SG_length].dst = cpu_to_le32(IVTV_DECODER_OFFSET + yuv_offset[frame]);
dma->SG_length++;
}
/* Tag SG Array with Interrupt Bit */
dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);
ivtv_udma_sync_for_device(itv);
return 0;
}
/* We rely on a table held in the firmware - Quick check. */
int ivtv_yuv_filter_check(struct ivtv *itv)
{
int i, y, uv;
for (i = 0, y = 16, uv = 4; i < 16; i++, y += 24, uv += 12) {
if ((read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + y) != i << 16) ||
(read_dec(IVTV_YUV_VERTICAL_FILTER_OFFSET + uv) != i << 16)) {
IVTV_WARN ("YUV filter table not found in firmware.\n");
return -1;
}
}
return 0;
}
static void ivtv_yuv_filter(struct ivtv *itv, int h_filter, int v_filter_1, int v_filter_2)
{
u32 i, line;
/* If any filter is -1, then don't update it */
if (h_filter > -1) {
if (h_filter > 4)
h_filter = 4;
i = IVTV_YUV_HORIZONTAL_FILTER_OFFSET + (h_filter * 384);
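		/* Each horizontal filter table occupies 384 bytes in decoder
		   memory: 16 lines of six 32-bit words, of which the first
		   five are coefficients; the sixth word is skipped and the
		   matching register pair is simply zeroed. */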
for (line = 0; line < 16; line++) {
write_reg(read_dec(i), 0x02804);
write_reg(read_dec(i), 0x0281c);
i += 4;
write_reg(read_dec(i), 0x02808);
write_reg(read_dec(i), 0x02820);
i += 4;
write_reg(read_dec(i), 0x0280c);
write_reg(read_dec(i), 0x02824);
i += 4;
write_reg(read_dec(i), 0x02810);
write_reg(read_dec(i), 0x02828);
i += 4;
write_reg(read_dec(i), 0x02814);
write_reg(read_dec(i), 0x0282c);
i += 8;
write_reg(0, 0x02818);
write_reg(0, 0x02830);
}
IVTV_DEBUG_YUV("h_filter -> %d\n", h_filter);
}
if (v_filter_1 > -1) {
if (v_filter_1 > 4)
v_filter_1 = 4;
i = IVTV_YUV_VERTICAL_FILTER_OFFSET + (v_filter_1 * 192);
for (line = 0; line < 16; line++) {
write_reg(read_dec(i), 0x02900);
i += 4;
write_reg(read_dec(i), 0x02904);
i += 8;
write_reg(0, 0x02908);
}
IVTV_DEBUG_YUV("v_filter_1 -> %d\n", v_filter_1);
}
if (v_filter_2 > -1) {
if (v_filter_2 > 4)
v_filter_2 = 4;
i = IVTV_YUV_VERTICAL_FILTER_OFFSET + (v_filter_2 * 192);
for (line = 0; line < 16; line++) {
write_reg(read_dec(i), 0x0290c);
i += 4;
write_reg(read_dec(i), 0x02910);
i += 8;
write_reg(0, 0x02914);
}
IVTV_DEBUG_YUV("v_filter_2 -> %d\n", v_filter_2);
}
}
static void ivtv_yuv_handle_horizontal(struct ivtv *itv, struct yuv_frame_info *f)
{
struct yuv_playback_info *yi = &itv->yuv_info;
u32 reg_2834, reg_2838, reg_283c;
u32 reg_2844, reg_2854, reg_285c;
u32 reg_2864, reg_2874, reg_2890;
u32 reg_2870, reg_2870_base, reg_2870_offset;
int x_cutoff;
int h_filter;
u32 master_width;
IVTV_DEBUG_WARN
("Adjust to width %d src_w %d dst_w %d src_x %d dst_x %d\n",
f->tru_w, f->src_w, f->dst_w, f->src_x, f->dst_x);
/* How wide is the src image */
x_cutoff = f->src_w + f->src_x;
/* Set the display width */
reg_2834 = f->dst_w;
reg_2838 = reg_2834;
/* Set the display position */
reg_2890 = f->dst_x;
/* Index into the image horizontally */
reg_2870 = 0;
/* 2870 is normally fudged to align video coords with osd coords.
	   If running full screen, it causes an unwanted left shift.
Remove the fudge if we almost fill the screen.
Gradually adjust the offset to avoid the video 'snapping'
left/right if it gets dragged through this region.
Only do this if osd is full width. */
if (f->vis_w == 720) {
if ((f->tru_x - f->pan_x > -1) && (f->tru_x - f->pan_x <= 40) && (f->dst_w >= 680))
reg_2870 = 10 - (f->tru_x - f->pan_x) / 4;
else if ((f->tru_x - f->pan_x < 0) && (f->tru_x - f->pan_x >= -20) && (f->dst_w >= 660))
reg_2870 = (10 + (f->tru_x - f->pan_x) / 2);
if (f->dst_w >= f->src_w)
reg_2870 = reg_2870 << 16 | reg_2870;
else
reg_2870 = ((reg_2870 & ~1) << 15) | (reg_2870 & ~1);
}
if (f->dst_w < f->src_w)
reg_2870 = 0x000d000e - reg_2870;
else
reg_2870 = 0x0012000e - reg_2870;
/* We're also using 2870 to shift the image left (src_x & negative dst_x) */
reg_2870_offset = (f->src_x * ((f->dst_w << 21) / f->src_w)) >> 19;
if (f->dst_w >= f->src_w) {
x_cutoff &= ~1;
master_width = (f->src_w * 0x00200000) / (f->dst_w);
if (master_width * f->dst_w != f->src_w * 0x00200000)
master_width++;
reg_2834 = (reg_2834 << 16) | x_cutoff;
reg_2838 = (reg_2838 << 16) | x_cutoff;
reg_283c = master_width >> 2;
reg_2844 = master_width >> 2;
reg_2854 = master_width;
reg_285c = master_width >> 1;
reg_2864 = master_width >> 1;
/* We also need to factor in the scaling
(src_w - dst_w) / (src_w / 4) */
if (f->dst_w > f->src_w)
reg_2870_base = ((f->dst_w - f->src_w)<<16) / (f->src_w <<14);
else
reg_2870_base = 0;
reg_2870 += (((reg_2870_offset << 14) & 0xFFFF0000) | reg_2870_offset >> 2) + (reg_2870_base << 17 | reg_2870_base);
reg_2874 = 0;
} else if (f->dst_w < f->src_w / 2) {
master_width = (f->src_w * 0x00080000) / f->dst_w;
if (master_width * f->dst_w != f->src_w * 0x00080000)
master_width++;
reg_2834 = (reg_2834 << 16) | x_cutoff;
reg_2838 = (reg_2838 << 16) | x_cutoff;
reg_283c = master_width >> 2;
reg_2844 = master_width >> 1;
reg_2854 = master_width;
reg_285c = master_width >> 1;
reg_2864 = master_width >> 1;
reg_2870 += ((reg_2870_offset << 15) & 0xFFFF0000) | reg_2870_offset;
reg_2870 += (5 - (((f->src_w + f->src_w / 2) - 1) / f->dst_w)) << 16;
reg_2874 = 0x00000012;
} else {
master_width = (f->src_w * 0x00100000) / f->dst_w;
if (master_width * f->dst_w != f->src_w * 0x00100000)
master_width++;
reg_2834 = (reg_2834 << 16) | x_cutoff;
reg_2838 = (reg_2838 << 16) | x_cutoff;
reg_283c = master_width >> 2;
reg_2844 = master_width >> 1;
reg_2854 = master_width;
reg_285c = master_width >> 1;
reg_2864 = master_width >> 1;
reg_2870 += ((reg_2870_offset << 14) & 0xFFFF0000) | reg_2870_offset >> 1;
reg_2870 += (5 - (((f->src_w * 3) - 1) / f->dst_w)) << 16;
reg_2874 = 0x00000001;
}
/* Select the horizontal filter */
if (f->src_w == f->dst_w) {
/* An exact size match uses filter 0 */
h_filter = 0;
} else {
/* Figure out which filter to use */
h_filter = ((f->src_w << 16) / f->dst_w) >> 15;
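		/* h_filter now holds the src/dst width ratio in half steps;
		   the next line rounds it to the nearest whole step. */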
h_filter = (h_filter >> 1) + (h_filter & 1);
/* Only an exact size match can use filter 0 */
h_filter += !h_filter;
}
write_reg(reg_2834, 0x02834);
write_reg(reg_2838, 0x02838);
IVTV_DEBUG_YUV("Update reg 0x2834 %08x->%08x 0x2838 %08x->%08x\n",
yi->reg_2834, reg_2834, yi->reg_2838, reg_2838);
write_reg(reg_283c, 0x0283c);
write_reg(reg_2844, 0x02844);
IVTV_DEBUG_YUV("Update reg 0x283c %08x->%08x 0x2844 %08x->%08x\n",
yi->reg_283c, reg_283c, yi->reg_2844, reg_2844);
write_reg(0x00080514, 0x02840);
write_reg(0x00100514, 0x02848);
IVTV_DEBUG_YUV("Update reg 0x2840 %08x->%08x 0x2848 %08x->%08x\n",
yi->reg_2840, 0x00080514, yi->reg_2848, 0x00100514);
write_reg(reg_2854, 0x02854);
IVTV_DEBUG_YUV("Update reg 0x2854 %08x->%08x \n",
yi->reg_2854, reg_2854);
write_reg(reg_285c, 0x0285c);
write_reg(reg_2864, 0x02864);
IVTV_DEBUG_YUV("Update reg 0x285c %08x->%08x 0x2864 %08x->%08x\n",
yi->reg_285c, reg_285c, yi->reg_2864, reg_2864);
write_reg(reg_2874, 0x02874);
IVTV_DEBUG_YUV("Update reg 0x2874 %08x->%08x\n",
yi->reg_2874, reg_2874);
write_reg(reg_2870, 0x02870);
IVTV_DEBUG_YUV("Update reg 0x2870 %08x->%08x\n",
yi->reg_2870, reg_2870);
write_reg(reg_2890, 0x02890);
IVTV_DEBUG_YUV("Update reg 0x2890 %08x->%08x\n",
yi->reg_2890, reg_2890);
/* Only update the filter if we really need to */
if (h_filter != yi->h_filter) {
ivtv_yuv_filter(itv, h_filter, -1, -1);
yi->h_filter = h_filter;
}
}
static void ivtv_yuv_handle_vertical(struct ivtv *itv, struct yuv_frame_info *f)
{
struct yuv_playback_info *yi = &itv->yuv_info;
u32 master_height;
u32 reg_2918, reg_291c, reg_2920, reg_2928;
u32 reg_2930, reg_2934, reg_293c;
u32 reg_2940, reg_2944, reg_294c;
u32 reg_2950, reg_2954, reg_2958, reg_295c;
u32 reg_2960, reg_2964, reg_2968, reg_296c;
u32 reg_289c;
u32 src_major_y, src_minor_y;
u32 src_major_uv, src_minor_uv;
u32 reg_2964_base, reg_2968_base;
int v_filter_1, v_filter_2;
IVTV_DEBUG_WARN
("Adjust to height %d src_h %d dst_h %d src_y %d dst_y %d\n",
f->tru_h, f->src_h, f->dst_h, f->src_y, f->dst_y);
/* What scaling mode is being used... */
IVTV_DEBUG_YUV("Scaling mode Y: %s\n",
f->interlaced_y ? "Interlaced" : "Progressive");
IVTV_DEBUG_YUV("Scaling mode UV: %s\n",
f->interlaced_uv ? "Interlaced" : "Progressive");
/* What is the source video being treated as... */
IVTV_DEBUG_WARN("Source video: %s\n",
f->interlaced ? "Interlaced" : "Progressive");
/* We offset into the image using two different index methods, so split
the y source coord into two parts. */
if (f->src_y < 8) {
src_minor_uv = f->src_y;
src_major_uv = 0;
} else {
src_minor_uv = 8;
src_major_uv = f->src_y - 8;
}
src_minor_y = src_minor_uv;
src_major_y = src_major_uv;
if (f->offset_y)
src_minor_y += 16;
if (f->interlaced_y)
reg_2918 = (f->dst_h << 16) | (f->src_h + src_minor_y);
else
reg_2918 = (f->dst_h << 16) | ((f->src_h + src_minor_y) << 1);
if (f->interlaced_uv)
reg_291c = (f->dst_h << 16) | ((f->src_h + src_minor_uv) >> 1);
else
reg_291c = (f->dst_h << 16) | (f->src_h + src_minor_uv);
reg_2964_base = (src_minor_y * ((f->dst_h << 16) / f->src_h)) >> 14;
reg_2968_base = (src_minor_uv * ((f->dst_h << 16) / f->src_h)) >> 14;
if (f->dst_h / 2 >= f->src_h && !f->interlaced_y) {
master_height = (f->src_h * 0x00400000) / f->dst_h;
if ((f->src_h * 0x00400000) - (master_height * f->dst_h) >= f->dst_h / 2)
master_height++;
reg_2920 = master_height >> 2;
reg_2928 = master_height >> 3;
reg_2930 = master_height;
reg_2940 = master_height >> 1;
reg_2964_base >>= 3;
reg_2968_base >>= 3;
reg_296c = 0x00000000;
} else if (f->dst_h >= f->src_h) {
master_height = (f->src_h * 0x00400000) / f->dst_h;
master_height = (master_height >> 1) + (master_height & 1);
reg_2920 = master_height >> 2;
reg_2928 = master_height >> 2;
reg_2930 = master_height;
reg_2940 = master_height >> 1;
reg_296c = 0x00000000;
if (f->interlaced_y) {
reg_2964_base >>= 3;
} else {
reg_296c++;
reg_2964_base >>= 2;
}
if (f->interlaced_uv)
reg_2928 >>= 1;
reg_2968_base >>= 3;
} else if (f->dst_h >= f->src_h / 2) {
master_height = (f->src_h * 0x00200000) / f->dst_h;
master_height = (master_height >> 1) + (master_height & 1);
reg_2920 = master_height >> 2;
reg_2928 = master_height >> 2;
reg_2930 = master_height;
reg_2940 = master_height;
reg_296c = 0x00000101;
if (f->interlaced_y) {
reg_2964_base >>= 2;
} else {
reg_296c++;
reg_2964_base >>= 1;
}
if (f->interlaced_uv)
reg_2928 >>= 1;
reg_2968_base >>= 2;
} else {
master_height = (f->src_h * 0x00100000) / f->dst_h;
master_height = (master_height >> 1) + (master_height & 1);
reg_2920 = master_height >> 2;
reg_2928 = master_height >> 2;
reg_2930 = master_height;
reg_2940 = master_height;
reg_2964_base >>= 1;
reg_2968_base >>= 2;
reg_296c = 0x00000102;
}
	/* FIXME: These registers change depending on scaled / unscaled output.
	   We really need to work out what they should be. */
if (f->src_h == f->dst_h) {
reg_2934 = 0x00020000;
reg_293c = 0x00100000;
reg_2944 = 0x00040000;
reg_294c = 0x000b0000;
} else {
reg_2934 = 0x00000FF0;
reg_293c = 0x00000FF0;
reg_2944 = 0x00000FF0;
reg_294c = 0x00000FF0;
}
/* The first line to be displayed */
reg_2950 = 0x00010000 + src_major_y;
if (f->interlaced_y)
reg_2950 += 0x00010000;
reg_2954 = reg_2950 + 1;
reg_2958 = 0x00010000 + (src_major_y >> 1);
if (f->interlaced_uv)
reg_2958 += 0x00010000;
reg_295c = reg_2958 + 1;
if (yi->decode_height == 480)
reg_289c = 0x011e0017;
else
reg_289c = 0x01500017;
if (f->dst_y < 0)
reg_289c = (reg_289c - ((f->dst_y & ~1)<<15))-(f->dst_y >>1);
else
reg_289c = (reg_289c + ((f->dst_y & ~1)<<15))+(f->dst_y >>1);
/* How much of the source to decode.
Take into account the source offset */
reg_2960 = ((src_minor_y + f->src_h + src_major_y) - 1) |
(((src_minor_uv + f->src_h + src_major_uv - 1) & ~1) << 15);
/* Calculate correct value for register 2964 */
if (f->src_h == f->dst_h) {
reg_2964 = 1;
} else {
reg_2964 = 2 + ((f->dst_h << 1) / f->src_h);
reg_2964 = (reg_2964 >> 1) + (reg_2964 & 1);
}
reg_2968 = (reg_2964 << 16) + reg_2964 + (reg_2964 >> 1);
reg_2964 = (reg_2964 << 16) + reg_2964 + (reg_2964 * 46 / 94);
/* Okay, we've wasted time working out the correct value,
but if we use it, it fouls the window alignment.
Fudge it to what we want... */
reg_2964 = 0x00010001 + ((reg_2964 & 0x0000FFFF) - (reg_2964 >> 16));
reg_2968 = 0x00010001 + ((reg_2968 & 0x0000FFFF) - (reg_2968 >> 16));
	/* Deviate further from what it should be. I find the flicker
	   headache-inducing, so try to reduce it slightly. Leave 2968 as-is,
	   otherwise colours foul. */
if ((reg_2964 != 0x00010001) && (f->dst_h / 2 <= f->src_h))
reg_2964 = (reg_2964 & 0xFFFF0000) + ((reg_2964 & 0x0000FFFF) / 2);
if (!f->interlaced_y)
reg_2964 -= 0x00010001;
if (!f->interlaced_uv)
reg_2968 -= 0x00010001;
reg_2964 += ((reg_2964_base << 16) | reg_2964_base);
reg_2968 += ((reg_2968_base << 16) | reg_2968_base);
/* Select the vertical filter */
if (f->src_h == f->dst_h) {
/* An exact size match uses filter 0/1 */
v_filter_1 = 0;
v_filter_2 = 1;
} else {
/* Figure out which filter to use */
v_filter_1 = ((f->src_h << 16) / f->dst_h) >> 15;
v_filter_1 = (v_filter_1 >> 1) + (v_filter_1 & 1);
/* Only an exact size match can use filter 0 */
v_filter_1 += !v_filter_1;
v_filter_2 = v_filter_1;
}
write_reg(reg_2934, 0x02934);
write_reg(reg_293c, 0x0293c);
IVTV_DEBUG_YUV("Update reg 0x2934 %08x->%08x 0x293c %08x->%08x\n",
yi->reg_2934, reg_2934, yi->reg_293c, reg_293c);
write_reg(reg_2944, 0x02944);
write_reg(reg_294c, 0x0294c);
IVTV_DEBUG_YUV("Update reg 0x2944 %08x->%08x 0x294c %08x->%08x\n",
yi->reg_2944, reg_2944, yi->reg_294c, reg_294c);
/* Ensure 2970 is 0 (does it ever change ?) */
/* write_reg(0,0x02970); */
/* IVTV_DEBUG_YUV("Update reg 0x2970 %08x->%08x\n", yi->reg_2970, 0); */
write_reg(reg_2930, 0x02938);
write_reg(reg_2930, 0x02930);
IVTV_DEBUG_YUV("Update reg 0x2930 %08x->%08x 0x2938 %08x->%08x\n",
yi->reg_2930, reg_2930, yi->reg_2938, reg_2930);
write_reg(reg_2928, 0x02928);
write_reg(reg_2928 + 0x514, 0x0292C);
IVTV_DEBUG_YUV("Update reg 0x2928 %08x->%08x 0x292c %08x->%08x\n",
yi->reg_2928, reg_2928, yi->reg_292c, reg_2928 + 0x514);
write_reg(reg_2920, 0x02920);
write_reg(reg_2920 + 0x514, 0x02924);
IVTV_DEBUG_YUV("Update reg 0x2920 %08x->%08x 0x2924 %08x->%08x\n",
yi->reg_2920, reg_2920, yi->reg_2924, reg_2920 + 0x514);
write_reg(reg_2918, 0x02918);
write_reg(reg_291c, 0x0291C);
IVTV_DEBUG_YUV("Update reg 0x2918 %08x->%08x 0x291C %08x->%08x\n",
yi->reg_2918, reg_2918, yi->reg_291c, reg_291c);
write_reg(reg_296c, 0x0296c);
IVTV_DEBUG_YUV("Update reg 0x296c %08x->%08x\n",
yi->reg_296c, reg_296c);
write_reg(reg_2940, 0x02948);
write_reg(reg_2940, 0x02940);
IVTV_DEBUG_YUV("Update reg 0x2940 %08x->%08x 0x2948 %08x->%08x\n",
yi->reg_2940, reg_2940, yi->reg_2948, reg_2940);
write_reg(reg_2950, 0x02950);
write_reg(reg_2954, 0x02954);
IVTV_DEBUG_YUV("Update reg 0x2950 %08x->%08x 0x2954 %08x->%08x\n",
yi->reg_2950, reg_2950, yi->reg_2954, reg_2954);
write_reg(reg_2958, 0x02958);
write_reg(reg_295c, 0x0295C);
IVTV_DEBUG_YUV("Update reg 0x2958 %08x->%08x 0x295C %08x->%08x\n",
yi->reg_2958, reg_2958, yi->reg_295c, reg_295c);
write_reg(reg_2960, 0x02960);
IVTV_DEBUG_YUV("Update reg 0x2960 %08x->%08x \n",
yi->reg_2960, reg_2960);
write_reg(reg_2964, 0x02964);
write_reg(reg_2968, 0x02968);
IVTV_DEBUG_YUV("Update reg 0x2964 %08x->%08x 0x2968 %08x->%08x\n",
yi->reg_2964, reg_2964, yi->reg_2968, reg_2968);
write_reg(reg_289c, 0x0289c);
IVTV_DEBUG_YUV("Update reg 0x289c %08x->%08x\n",
yi->reg_289c, reg_289c);
/* Only update filter 1 if we really need to */
if (v_filter_1 != yi->v_filter_1) {
ivtv_yuv_filter(itv, -1, v_filter_1, -1);
yi->v_filter_1 = v_filter_1;
}
/* Only update filter 2 if we really need to */
if (v_filter_2 != yi->v_filter_2) {
ivtv_yuv_filter(itv, -1, -1, v_filter_2);
yi->v_filter_2 = v_filter_2;
}
}
/* Modify the supplied coordinate information to fit the visible osd area */
static u32 ivtv_yuv_window_setup(struct ivtv *itv, struct yuv_frame_info *f)
{
struct yuv_frame_info *of = &itv->yuv_info.old_frame_info;
int osd_crop;
u32 osd_scale;
u32 yuv_update = 0;
/* Sorry, but no negative coords for src */
if (f->src_x < 0)
f->src_x = 0;
if (f->src_y < 0)
f->src_y = 0;
/* Can only reduce width down to 1/4 original size */
if ((osd_crop = f->src_w - 4 * f->dst_w) > 0) {
f->src_x += osd_crop / 2;
f->src_w = (f->src_w - osd_crop) & ~3;
f->dst_w = f->src_w / 4;
f->dst_w += f->dst_w & 1;
}
/* Can only reduce height down to 1/4 original size */
if (f->src_h / f->dst_h >= 2) {
/* Overflow may be because we're running progressive,
so force mode switch */
f->interlaced_y = 1;
/* Make sure we're still within limits for interlace */
if ((osd_crop = f->src_h - 4 * f->dst_h) > 0) {
/* If we reach here we'll have to force the height. */
f->src_y += osd_crop / 2;
f->src_h = (f->src_h - osd_crop) & ~3;
f->dst_h = f->src_h / 4;
f->dst_h += f->dst_h & 1;
}
}
	/* If there's nothing safe to display, we may as well stop now */
if ((int)f->dst_w <= 2 || (int)f->dst_h <= 2 ||
(int)f->src_w <= 2 || (int)f->src_h <= 2) {
return IVTV_YUV_UPDATE_INVALID;
}
/* Ensure video remains inside OSD area */
osd_scale = (f->src_h << 16) / f->dst_h;
if ((osd_crop = f->pan_y - f->dst_y) > 0) {
/* Falls off the upper edge - crop */
f->src_y += (osd_scale * osd_crop) >> 16;
f->src_h -= (osd_scale * osd_crop) >> 16;
f->dst_h -= osd_crop;
f->dst_y = 0;
} else {
f->dst_y -= f->pan_y;
}
if ((osd_crop = f->dst_h + f->dst_y - f->vis_h) > 0) {
/* Falls off the lower edge - crop */
f->dst_h -= osd_crop;
f->src_h -= (osd_scale * osd_crop) >> 16;
}
osd_scale = (f->src_w << 16) / f->dst_w;
if ((osd_crop = f->pan_x - f->dst_x) > 0) {
		/* Falls off the left edge - crop */
f->src_x += (osd_scale * osd_crop) >> 16;
f->src_w -= (osd_scale * osd_crop) >> 16;
f->dst_w -= osd_crop;
f->dst_x = 0;
} else {
f->dst_x -= f->pan_x;
}
if ((osd_crop = f->dst_w + f->dst_x - f->vis_w) > 0) {
/* Falls off the right edge - crop */
f->dst_w -= osd_crop;
f->src_w -= (osd_scale * osd_crop) >> 16;
}
if (itv->yuv_info.track_osd) {
/* The OSD can be moved. Track to it */
f->dst_x += itv->yuv_info.osd_x_offset;
f->dst_y += itv->yuv_info.osd_y_offset;
}
/* Width & height for both src & dst must be even.
Same for coordinates. */
f->dst_w &= ~1;
f->dst_x &= ~1;
f->src_w += f->src_x & 1;
f->src_x &= ~1;
f->src_w &= ~1;
f->dst_w &= ~1;
f->dst_h &= ~1;
f->dst_y &= ~1;
f->src_h += f->src_y & 1;
f->src_y &= ~1;
f->src_h &= ~1;
f->dst_h &= ~1;
/* Due to rounding, we may have reduced the output size to <1/4 of
the source. Check again, but this time just resize. Don't change
source coordinates */
if (f->dst_w < f->src_w / 4) {
f->src_w &= ~3;
f->dst_w = f->src_w / 4;
f->dst_w += f->dst_w & 1;
}
if (f->dst_h < f->src_h / 4) {
f->src_h &= ~3;
f->dst_h = f->src_h / 4;
f->dst_h += f->dst_h & 1;
}
	/* Check again. If there's nothing safe to display, stop now */
if ((int)f->dst_w <= 2 || (int)f->dst_h <= 2 ||
(int)f->src_w <= 2 || (int)f->src_h <= 2) {
return IVTV_YUV_UPDATE_INVALID;
}
/* Both x offset & width are linked, so they have to be done together */
if ((of->dst_w != f->dst_w) || (of->src_w != f->src_w) ||
(of->dst_x != f->dst_x) || (of->src_x != f->src_x) ||
(of->pan_x != f->pan_x) || (of->vis_w != f->vis_w)) {
yuv_update |= IVTV_YUV_UPDATE_HORIZONTAL;
}
if ((of->src_h != f->src_h) || (of->dst_h != f->dst_h) ||
(of->dst_y != f->dst_y) || (of->src_y != f->src_y) ||
(of->pan_y != f->pan_y) || (of->vis_h != f->vis_h) ||
(of->lace_mode != f->lace_mode) ||
(of->interlaced_y != f->interlaced_y) ||
(of->interlaced_uv != f->interlaced_uv)) {
yuv_update |= IVTV_YUV_UPDATE_VERTICAL;
}
return yuv_update;
}
/* Update the scaling register to the requested value */
void ivtv_yuv_work_handler(struct ivtv *itv)
{
struct yuv_playback_info *yi = &itv->yuv_info;
struct yuv_frame_info f;
int frame = yi->update_frame;
u32 yuv_update;
IVTV_DEBUG_YUV("Update yuv registers for frame %d\n", frame);
f = yi->new_frame_info[frame];
if (yi->track_osd) {
/* Snapshot the osd pan info */
f.pan_x = yi->osd_x_pan;
f.pan_y = yi->osd_y_pan;
f.vis_w = yi->osd_vis_w;
f.vis_h = yi->osd_vis_h;
} else {
/* Not tracking the osd, so assume full screen */
f.pan_x = 0;
f.pan_y = 0;
f.vis_w = 720;
f.vis_h = yi->decode_height;
}
/* Calculate the display window coordinates. Exit if nothing left */
if (!(yuv_update = ivtv_yuv_window_setup(itv, &f)))
return;
if (yuv_update & IVTV_YUV_UPDATE_INVALID) {
write_reg(0x01008080, 0x2898);
} else if (yuv_update) {
write_reg(0x00108080, 0x2898);
if (yuv_update & IVTV_YUV_UPDATE_HORIZONTAL)
ivtv_yuv_handle_horizontal(itv, &f);
if (yuv_update & IVTV_YUV_UPDATE_VERTICAL)
ivtv_yuv_handle_vertical(itv, &f);
}
yi->old_frame_info = f;
}
static void ivtv_yuv_init(struct ivtv *itv)
{
struct yuv_playback_info *yi = &itv->yuv_info;
IVTV_DEBUG_YUV("ivtv_yuv_init\n");
/* Take a snapshot of the current register settings */
yi->reg_2834 = read_reg(0x02834);
yi->reg_2838 = read_reg(0x02838);
yi->reg_283c = read_reg(0x0283c);
yi->reg_2840 = read_reg(0x02840);
yi->reg_2844 = read_reg(0x02844);
yi->reg_2848 = read_reg(0x02848);
yi->reg_2854 = read_reg(0x02854);
yi->reg_285c = read_reg(0x0285c);
yi->reg_2864 = read_reg(0x02864);
yi->reg_2870 = read_reg(0x02870);
yi->reg_2874 = read_reg(0x02874);
yi->reg_2898 = read_reg(0x02898);
yi->reg_2890 = read_reg(0x02890);
yi->reg_289c = read_reg(0x0289c);
yi->reg_2918 = read_reg(0x02918);
yi->reg_291c = read_reg(0x0291c);
yi->reg_2920 = read_reg(0x02920);
yi->reg_2924 = read_reg(0x02924);
yi->reg_2928 = read_reg(0x02928);
yi->reg_292c = read_reg(0x0292c);
yi->reg_2930 = read_reg(0x02930);
yi->reg_2934 = read_reg(0x02934);
yi->reg_2938 = read_reg(0x02938);
yi->reg_293c = read_reg(0x0293c);
yi->reg_2940 = read_reg(0x02940);
yi->reg_2944 = read_reg(0x02944);
yi->reg_2948 = read_reg(0x02948);
yi->reg_294c = read_reg(0x0294c);
yi->reg_2950 = read_reg(0x02950);
yi->reg_2954 = read_reg(0x02954);
yi->reg_2958 = read_reg(0x02958);
yi->reg_295c = read_reg(0x0295c);
yi->reg_2960 = read_reg(0x02960);
yi->reg_2964 = read_reg(0x02964);
yi->reg_2968 = read_reg(0x02968);
yi->reg_296c = read_reg(0x0296c);
yi->reg_2970 = read_reg(0x02970);
yi->v_filter_1 = -1;
yi->v_filter_2 = -1;
yi->h_filter = -1;
/* Set some valid size info */
yi->osd_x_offset = read_reg(0x02a04) & 0x00000FFF;
yi->osd_y_offset = (read_reg(0x02a04) >> 16) & 0x00000FFF;
/* Bit 2 of reg 2878 indicates the current decoder output format:
   0 = NTSC, 1 = PAL */
if (read_reg(0x2878) & 4)
yi->decode_height = 576;
else
yi->decode_height = 480;
if (!itv->osd_info) {
yi->osd_vis_w = 720 - yi->osd_x_offset;
yi->osd_vis_h = yi->decode_height - yi->osd_y_offset;
} else {
/* If no visible size set, assume full size */
if (!yi->osd_vis_w)
yi->osd_vis_w = 720 - yi->osd_x_offset;
if (!yi->osd_vis_h) {
yi->osd_vis_h = yi->decode_height - yi->osd_y_offset;
} else if (yi->osd_vis_h + yi->osd_y_offset > yi->decode_height) {
/* If output video standard has changed, requested height may
not be legal */
IVTV_DEBUG_WARN("Clipping yuv output - fb size (%d) exceeds video standard limit (%d)\n",
yi->osd_vis_h + yi->osd_y_offset,
yi->decode_height);
yi->osd_vis_h = yi->decode_height - yi->osd_y_offset;
}
}
/* We need a buffer for blanking when Y plane is offset - non-fatal if we can't get one */
yi->blanking_ptr = kzalloc(720 * 16, GFP_ATOMIC|__GFP_NOWARN);
if (yi->blanking_ptr) {
yi->blanking_dmaptr = dma_map_single(&itv->pdev->dev,
yi->blanking_ptr,
720 * 16, DMA_TO_DEVICE);
} else {
yi->blanking_dmaptr = 0;
IVTV_DEBUG_WARN("Failed to allocate yuv blanking buffer\n");
}
/* Enable YUV decoder output */
write_reg_sync(0x01, IVTV_REG_VDM);
set_bit(IVTV_F_I_DECODING_YUV, &itv->i_flags);
atomic_set(&yi->next_dma_frame, 0);
}
/* Get next available yuv buffer on PVR350 */
static void ivtv_yuv_next_free(struct ivtv *itv)
{
int draw, display;
struct yuv_playback_info *yi = &itv->yuv_info;
if (atomic_read(&yi->next_dma_frame) == -1)
ivtv_yuv_init(itv);
draw = atomic_read(&yi->next_fill_frame);
display = atomic_read(&yi->next_dma_frame);
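/* The indices wrap around IVTV_YUV_BUFFERS; bias the display index down
   when it is numerically ahead of the draw index so that draw - display
   gives the number of frames queued but not yet displayed. If that count
   has reached max_frames_buffered, reuse the most recently queued slot
   instead of advancing. */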
if (display > draw)
display -= IVTV_YUV_BUFFERS;
if (draw - display >= yi->max_frames_buffered)
draw = (u8)(draw - 1) % IVTV_YUV_BUFFERS;
else
yi->new_frame_info[draw].update = 0;
yi->draw_frame = draw;
}
/* Set up frame according to ivtv_dma_frame parameters */
static void ivtv_yuv_setup_frame(struct ivtv *itv, struct ivtv_dma_frame *args)
{
struct yuv_playback_info *yi = &itv->yuv_info;
u8 frame = yi->draw_frame;
u8 last_frame = (u8)(frame - 1) % IVTV_YUV_BUFFERS;
struct yuv_frame_info *nf = &yi->new_frame_info[frame];
struct yuv_frame_info *of = &yi->new_frame_info[last_frame];
int lace_threshold = yi->lace_threshold;
/* Preserve old update flag in case we're overwriting a queued frame */
int update = nf->update;
/* Take a snapshot of the yuv coordinate information */
nf->src_x = args->src.left;
nf->src_y = args->src.top;
nf->src_w = args->src.width;
nf->src_h = args->src.height;
nf->dst_x = args->dst.left;
nf->dst_y = args->dst.top;
nf->dst_w = args->dst.width;
nf->dst_h = args->dst.height;
nf->tru_x = args->dst.left;
nf->tru_w = args->src_width;
nf->tru_h = args->src_height;
/* Are we going to offset the Y plane */
nf->offset_y = (nf->tru_h + nf->src_x < 512 - 16) ? 1 : 0;
nf->update = 0;
nf->interlaced_y = 0;
nf->interlaced_uv = 0;
nf->delay = 0;
nf->sync_field = 0;
nf->lace_mode = yi->lace_mode & IVTV_YUV_MODE_MASK;
if (lace_threshold < 0)
lace_threshold = yi->decode_height - 1;
/* Work out the lace settings */
switch (nf->lace_mode) {
case IVTV_YUV_MODE_PROGRESSIVE: /* Progressive mode */
nf->interlaced = 0;
if (nf->tru_h < 512 || (nf->tru_h > 576 && nf->tru_h < 1021))
nf->interlaced_y = 0;
else
nf->interlaced_y = 1;
if (nf->tru_h < 1021 && (nf->dst_h >= nf->src_h / 2))
nf->interlaced_uv = 0;
else
nf->interlaced_uv = 1;
break;
case IVTV_YUV_MODE_AUTO:
if (nf->tru_h <= lace_threshold || nf->tru_h > 576 || nf->tru_w > 720) {
nf->interlaced = 0;
if ((nf->tru_h < 512) ||
(nf->tru_h > 576 && nf->tru_h < 1021) ||
(nf->tru_w > 720 && nf->tru_h < 1021))
nf->interlaced_y = 0;
else
nf->interlaced_y = 1;
if (nf->tru_h < 1021 && (nf->dst_h >= nf->src_h / 2))
nf->interlaced_uv = 0;
else
nf->interlaced_uv = 1;
} else {
nf->interlaced = 1;
nf->interlaced_y = 1;
nf->interlaced_uv = 1;
}
break;
case IVTV_YUV_MODE_INTERLACED: /* Interlace mode */
default:
nf->interlaced = 1;
nf->interlaced_y = 1;
nf->interlaced_uv = 1;
break;
}
if (memcmp(&yi->old_frame_info_args, nf, sizeof(*nf))) {
yi->old_frame_info_args = *nf;
nf->update = 1;
IVTV_DEBUG_YUV("Requesting reg update for frame %d\n", frame);
}
nf->update |= update;
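/* Remember the requested sync field and flag a delay when it differs
   from that of the previously queued frame. */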
nf->sync_field = yi->lace_sync_field;
nf->delay = nf->sync_field != of->sync_field;
}
/* Frame is complete & ready for display */
void ivtv_yuv_frame_complete(struct ivtv *itv)
{
atomic_set(&itv->yuv_info.next_fill_frame,
(itv->yuv_info.draw_frame + 1) % IVTV_YUV_BUFFERS);
}
static int ivtv_yuv_udma_frame(struct ivtv *itv, struct ivtv_dma_frame *args)
{
DEFINE_WAIT(wait);
int rc = 0;
int got_sig = 0;
/* DMA the frame */
mutex_lock(&itv->udma.lock);
if ((rc = ivtv_yuv_prep_user_dma(itv, &itv->udma, args)) != 0) {
mutex_unlock(&itv->udma.lock);
return rc;
}
ivtv_udma_prepare(itv);
prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
/* if no UDMA is pending and no UDMA is in progress, then the DMA
is finished */
while (test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags) ||
test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
/* don't interrupt if the DMA is in progress but break off
a still pending DMA. */
got_sig = signal_pending(current);
if (got_sig && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags))
break;
got_sig = 0;
schedule();
}
finish_wait(&itv->dma_waitq, &wait);
/* Unmap Last DMA Xfer */
ivtv_udma_unmap(itv);
if (got_sig) {
IVTV_DEBUG_INFO("User stopped YUV UDMA\n");
mutex_unlock(&itv->udma.lock);
return -EINTR;
}
ivtv_yuv_frame_complete(itv);
mutex_unlock(&itv->udma.lock);
return rc;
}
/* Setup frame according to V4L2 parameters */
void ivtv_yuv_setup_stream_frame(struct ivtv *itv)
{
struct yuv_playback_info *yi = &itv->yuv_info;
struct ivtv_dma_frame dma_args;
ivtv_yuv_next_free(itv);
/* Copy V4L2 parameters to an ivtv_dma_frame struct... */
dma_args.y_source = NULL;
dma_args.uv_source = NULL;
dma_args.src.left = 0;
dma_args.src.top = 0;
dma_args.src.width = yi->v4l2_src_w;
dma_args.src.height = yi->v4l2_src_h;
dma_args.dst = yi->main_rect;
dma_args.src_width = yi->v4l2_src_w;
dma_args.src_height = yi->v4l2_src_h;
/* ... and use the same setup routine as ivtv_yuv_prep_frame */
ivtv_yuv_setup_frame(itv, &dma_args);
if (!itv->dma_data_req_offset)
itv->dma_data_req_offset = yuv_offset[yi->draw_frame];
}
/* Attempt to dma a frame from a user buffer */
int ivtv_yuv_udma_stream_frame(struct ivtv *itv, void __user *src)
{
struct yuv_playback_info *yi = &itv->yuv_info;
struct ivtv_dma_frame dma_args;
int res;
ivtv_yuv_setup_stream_frame(itv);
/* We only need to supply source addresses for this */
dma_args.y_source = src;
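/* The UV plane follows the Y plane in the source buffer; the Y plane is
   720 bytes per line with its height rounded up to a multiple of 32. */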
dma_args.uv_source = src + 720 * ((yi->v4l2_src_h + 31) & ~31);
/* Wait for frame DMA. Note that serialize_lock is held; unlock it
   while we wait so other processes can access the driver, then
   re-lock it afterwards. */
mutex_unlock(&itv->serialize_lock);
res = ivtv_yuv_udma_frame(itv, &dma_args);
mutex_lock(&itv->serialize_lock);
return res;
}
/* IVTV_IOC_DMA_FRAME ioctl handler */
int ivtv_yuv_prep_frame(struct ivtv *itv, struct ivtv_dma_frame *args)
{
int res;
/* IVTV_DEBUG_INFO("yuv_prep_frame\n"); */
ivtv_yuv_next_free(itv);
ivtv_yuv_setup_frame(itv, args);
/* Wait for frame DMA. Note that serialize_lock is held; unlock it
   while we wait so other processes can access the driver, then
   re-lock it afterwards. */
mutex_unlock(&itv->serialize_lock);
res = ivtv_yuv_udma_frame(itv, args);
mutex_lock(&itv->serialize_lock);
return res;
}
void ivtv_yuv_close(struct ivtv *itv)
{
struct yuv_playback_info *yi = &itv->yuv_info;
int h_filter, v_filter_1, v_filter_2;
IVTV_DEBUG_YUV("ivtv_yuv_close\n");
mutex_unlock(&itv->serialize_lock);
ivtv_waitq(&itv->vsync_waitq);
mutex_lock(&itv->serialize_lock);
yi->running = 0;
atomic_set(&yi->next_dma_frame, -1);
atomic_set(&yi->next_fill_frame, 0);
/* Reset registers we have changed so mpeg playback works */
/* If we fully restore this register, the display may remain active.
Restore, but set one bit to blank the video. Firmware will always
clear this bit when needed, so not a problem. */
write_reg(yi->reg_2898 | 0x01000000, 0x2898);
write_reg(yi->reg_2834, 0x02834);
write_reg(yi->reg_2838, 0x02838);
write_reg(yi->reg_283c, 0x0283c);
write_reg(yi->reg_2840, 0x02840);
write_reg(yi->reg_2844, 0x02844);
write_reg(yi->reg_2848, 0x02848);
write_reg(yi->reg_2854, 0x02854);
write_reg(yi->reg_285c, 0x0285c);
write_reg(yi->reg_2864, 0x02864);
write_reg(yi->reg_2870, 0x02870);
write_reg(yi->reg_2874, 0x02874);
write_reg(yi->reg_2890, 0x02890);
write_reg(yi->reg_289c, 0x0289c);
write_reg(yi->reg_2918, 0x02918);
write_reg(yi->reg_291c, 0x0291c);
write_reg(yi->reg_2920, 0x02920);
write_reg(yi->reg_2924, 0x02924);
write_reg(yi->reg_2928, 0x02928);
write_reg(yi->reg_292c, 0x0292c);
write_reg(yi->reg_2930, 0x02930);
write_reg(yi->reg_2934, 0x02934);
write_reg(yi->reg_2938, 0x02938);
write_reg(yi->reg_293c, 0x0293c);
write_reg(yi->reg_2940, 0x02940);
write_reg(yi->reg_2944, 0x02944);
write_reg(yi->reg_2948, 0x02948);
write_reg(yi->reg_294c, 0x0294c);
write_reg(yi->reg_2950, 0x02950);
write_reg(yi->reg_2954, 0x02954);
write_reg(yi->reg_2958, 0x02958);
write_reg(yi->reg_295c, 0x0295c);
write_reg(yi->reg_2960, 0x02960);
write_reg(yi->reg_2964, 0x02964);
write_reg(yi->reg_2968, 0x02968);
write_reg(yi->reg_296c, 0x0296c);
write_reg(yi->reg_2970, 0x02970);
/* Prepare to restore filters */
/* First the horizontal filter */
if ((yi->reg_2834 & 0x0000FFFF) == (yi->reg_2834 >> 16)) {
/* An exact size match uses filter 0 */
h_filter = 0;
} else {
/* Figure out which filter to use */
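/* The register packs two 16-bit sizes; 2 * (low half / high half) in
   fixed point, rounded to the nearest integer below, gives the scaling
   ratio used as the filter index. */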
h_filter = ((yi->reg_2834 << 16) / (yi->reg_2834 >> 16)) >> 15;
h_filter = (h_filter >> 1) + (h_filter & 1);
/* Only an exact size match can use filter 0. */
h_filter += !h_filter;
}
/* Now the vertical filter */
if ((yi->reg_2918 & 0x0000FFFF) == (yi->reg_2918 >> 16)) {
/* An exact size match uses filter 0/1 */
v_filter_1 = 0;
v_filter_2 = 1;
} else {
/* Figure out which filter to use */
v_filter_1 = ((yi->reg_2918 << 16) / (yi->reg_2918 >> 16)) >> 15;
v_filter_1 = (v_filter_1 >> 1) + (v_filter_1 & 1);
/* Only an exact size match can use filter 0 */
v_filter_1 += !v_filter_1;
v_filter_2 = v_filter_1;
}
/* Now restore the filters */
ivtv_yuv_filter(itv, h_filter, v_filter_1, v_filter_2);
/* and clear a few registers */
write_reg(0, 0x02814);
write_reg(0, 0x0282c);
write_reg(0, 0x02904);
write_reg(0, 0x02910);
/* Release the blanking buffer */
if (yi->blanking_ptr) {
kfree(yi->blanking_ptr);
yi->blanking_ptr = NULL;
dma_unmap_single(&itv->pdev->dev, yi->blanking_dmaptr,
720 * 16, DMA_TO_DEVICE);
}
/* Invalidate the old dimension information */
yi->old_frame_info.src_w = 0;
yi->old_frame_info.src_h = 0;
yi->old_frame_info_args.src_w = 0;
yi->old_frame_info_args.src_h = 0;
/* All done. */
clear_bit(IVTV_F_I_DECODING_YUV, &itv->i_flags);
}
| linux-master | drivers/media/pci/ivtv/ivtv-yuv.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ALSA interface to ivtv PCM capture streams
*
* Copyright (C) 2009,2012 Andy Walls <[email protected]>
* Copyright (C) 2009 Devin Heitmueller <[email protected]>
*
* Portions of this work were sponsored by ONELAN Limited for the cx18 driver
*/
#include "ivtv-driver.h"
#include "ivtv-version.h"
#include "ivtv-alsa.h"
#include "ivtv-alsa-pcm.h"
#include <sound/core.h>
#include <sound/initval.h>
int ivtv_alsa_debug;
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
#define IVTV_DEBUG_ALSA_INFO(__fmt, __arg...) \
do { \
if (ivtv_alsa_debug & 2) \
printk(KERN_INFO pr_fmt("%s: alsa:" __fmt), \
__func__, ##__arg); \
} while (0)
module_param_named(debug, ivtv_alsa_debug, int, 0644);
MODULE_PARM_DESC(debug,
"Debug level (bitmask). Default: 0\n"
"\t\t\t 1/0x0001: warning\n"
"\t\t\t 2/0x0002: info\n");
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index,
"Index value for IVTV ALSA capture interface(s).\n");
MODULE_AUTHOR("Andy Walls");
MODULE_DESCRIPTION("CX23415/CX23416 ALSA Interface");
MODULE_LICENSE("GPL");
MODULE_VERSION(IVTV_VERSION);
static inline
struct snd_ivtv_card *to_snd_ivtv_card(struct v4l2_device *v4l2_dev)
{
return to_ivtv(v4l2_dev)->alsa;
}
static void snd_ivtv_card_free(struct snd_ivtv_card *itvsc)
{
if (itvsc == NULL)
return;
if (itvsc->v4l2_dev != NULL)
to_ivtv(itvsc->v4l2_dev)->alsa = NULL;
/* FIXME - take any other stopping actions needed */
kfree(itvsc);
}
static void snd_ivtv_card_private_free(struct snd_card *sc)
{
if (sc == NULL)
return;
snd_ivtv_card_free(sc->private_data);
sc->private_data = NULL;
sc->private_free = NULL;
}
static int snd_ivtv_card_create(struct v4l2_device *v4l2_dev,
struct snd_card *sc,
struct snd_ivtv_card **itvsc)
{
*itvsc = kzalloc(sizeof(struct snd_ivtv_card), GFP_KERNEL);
if (*itvsc == NULL)
return -ENOMEM;
(*itvsc)->v4l2_dev = v4l2_dev;
(*itvsc)->sc = sc;
sc->private_data = *itvsc;
sc->private_free = snd_ivtv_card_private_free;
return 0;
}
static int snd_ivtv_card_set_names(struct snd_ivtv_card *itvsc)
{
struct ivtv *itv = to_ivtv(itvsc->v4l2_dev);
struct snd_card *sc = itvsc->sc;
/* sc->driver is used by alsa-lib's configurator: simple, unique */
strscpy(sc->driver, "CX2341[56]", sizeof(sc->driver));
/* sc->shortname is a symlink in /proc/asound: IVTV-M -> cardN */
snprintf(sc->shortname, sizeof(sc->shortname), "IVTV-%d",
itv->instance);
/* sc->longname is read from /proc/asound/cards */
snprintf(sc->longname, sizeof(sc->longname),
"CX2341[56] #%d %s TV/FM Radio/Line-In Capture",
itv->instance, itv->card_name);
return 0;
}
static int snd_ivtv_init(struct v4l2_device *v4l2_dev)
{
struct ivtv *itv = to_ivtv(v4l2_dev);
struct snd_card *sc = NULL;
struct snd_ivtv_card *itvsc;
int ret, idx;
/* Numbered steps from "Writing an ALSA Driver" by Takashi Iwai */
/* (1) Check and increment the device index */
/* This is a no-op for us. We'll use the itv->instance */
/* (2) Create a card instance */
/* use first available id if not specified otherwise */
idx = index[itv->instance] == -1 ? SNDRV_DEFAULT_IDX1 : index[itv->instance];
ret = snd_card_new(&itv->pdev->dev,
idx,
SNDRV_DEFAULT_STR1, /* xid from end of shortname */
THIS_MODULE, 0, &sc);
if (ret) {
IVTV_ALSA_ERR("%s: snd_card_new() failed with err %d\n",
__func__, ret);
goto err_exit;
}
/* (3) Create a main component */
ret = snd_ivtv_card_create(v4l2_dev, sc, &itvsc);
if (ret) {
IVTV_ALSA_ERR("%s: snd_ivtv_card_create() failed with err %d\n",
__func__, ret);
goto err_exit_free;
}
/* (4) Set the driver ID and name strings */
snd_ivtv_card_set_names(itvsc);
/* (5) Create other components: PCM, & proc files */
ret = snd_ivtv_pcm_create(itvsc);
if (ret) {
IVTV_ALSA_ERR("%s: snd_ivtv_pcm_create() failed with err %d\n",
__func__, ret);
goto err_exit_free;
}
/* FIXME - proc files */
/* (7) Set the driver data and return 0 */
/* We do this out of normal order for PCI drivers to avoid races */
itv->alsa = itvsc;
/* (6) Register the card instance */
ret = snd_card_register(sc);
if (ret) {
itv->alsa = NULL;
IVTV_ALSA_ERR("%s: snd_card_register() failed with err %d\n",
__func__, ret);
goto err_exit_free;
}
IVTV_ALSA_INFO("%s: Instance %d registered as ALSA card %d\n",
__func__, itv->instance, sc->number);
return 0;
err_exit_free:
if (sc != NULL)
snd_card_free(sc);
kfree(itvsc);
err_exit:
return ret;
}
static int ivtv_alsa_load(struct ivtv *itv)
{
struct v4l2_device *v4l2_dev = &itv->v4l2_dev;
struct ivtv_stream *s;
if (v4l2_dev == NULL) {
pr_err("ivtv-alsa: %s: struct v4l2_device * is NULL\n",
__func__);
return 0;
}
itv = to_ivtv(v4l2_dev);
if (itv == NULL) {
pr_err("ivtv-alsa itv is NULL\n");
return 0;
}
s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
if (s->vdev.v4l2_dev == NULL) {
IVTV_DEBUG_ALSA_INFO("PCM stream for card is disabled - skipping\n");
return 0;
}
if (itv->alsa != NULL) {
IVTV_ALSA_ERR("%s: struct snd_ivtv_card * already exists\n",
__func__);
return 0;
}
if (snd_ivtv_init(v4l2_dev)) {
IVTV_ALSA_ERR("%s: failed to create struct snd_ivtv_card\n",
__func__);
} else {
IVTV_DEBUG_ALSA_INFO("created ivtv ALSA interface instance\n");
}
return 0;
}
static int __init ivtv_alsa_init(void)
{
pr_info("ivtv-alsa: module loading...\n");
ivtv_ext_init = &ivtv_alsa_load;
return 0;
}
static void __exit snd_ivtv_exit(struct snd_ivtv_card *itvsc)
{
struct ivtv *itv = to_ivtv(itvsc->v4l2_dev);
/* FIXME - pointer checks & shutdown itvsc */
snd_card_free(itvsc->sc);
itv->alsa = NULL;
}
static int __exit ivtv_alsa_exit_callback(struct device *dev, void *data)
{
struct v4l2_device *v4l2_dev = dev_get_drvdata(dev);
struct snd_ivtv_card *itvsc;
if (v4l2_dev == NULL) {
pr_err("ivtv-alsa: %s: struct v4l2_device * is NULL\n",
__func__);
return 0;
}
itvsc = to_snd_ivtv_card(v4l2_dev);
if (itvsc == NULL) {
IVTV_ALSA_WARN("%s: struct snd_ivtv_card * is NULL\n",
__func__);
return 0;
}
snd_ivtv_exit(itvsc);
return 0;
}
static void __exit ivtv_alsa_exit(void)
{
struct device_driver *drv;
int ret;
pr_info("ivtv-alsa: module unloading...\n");
drv = driver_find("ivtv", &pci_bus_type);
ret = driver_for_each_device(drv, NULL, NULL, ivtv_alsa_exit_callback);
(void)ret; /* suppress compiler warning */
ivtv_ext_init = NULL;
pr_info("ivtv-alsa: module unload complete\n");
}
module_init(ivtv_alsa_init);
module_exit(ivtv_alsa_exit);
| linux-master | drivers/media/pci/ivtv/ivtv-alsa-main.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
ivtv firmware functions.
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <[email protected]>
Copyright (C) 2005-2007 Hans Verkuil <[email protected]>
*/
#include "ivtv-driver.h"
#include "ivtv-mailbox.h"
#include "ivtv-firmware.h"
#include "ivtv-yuv.h"
#include "ivtv-ioctl.h"
#include "ivtv-cards.h"
#include <linux/firmware.h>
#include <media/i2c/saa7127.h>
#define IVTV_MASK_SPU_ENABLE 0xFFFFFFFE
#define IVTV_MASK_VPU_ENABLE15 0xFFFFFFF6
#define IVTV_MASK_VPU_ENABLE16 0xFFFFFFFB
#define IVTV_CMD_VDM_STOP 0x00000000
#define IVTV_CMD_AO_STOP 0x00000005
#define IVTV_CMD_APU_PING 0x00000000
#define IVTV_CMD_VPU_STOP15 0xFFFFFFFE
#define IVTV_CMD_VPU_STOP16 0xFFFFFFEE
#define IVTV_CMD_HW_BLOCKS_RST 0xFFFFFFFF
#define IVTV_CMD_SPU_STOP 0x00000001
#define IVTV_CMD_SDRAM_PRECHARGE_INIT 0x0000001A
#define IVTV_CMD_SDRAM_REFRESH_INIT 0x80000640
#define IVTV_SDRAM_SLEEPTIME 600
#define IVTV_DECODE_INIT_MPEG_FILENAME "v4l-cx2341x-init.mpg"
#define IVTV_DECODE_INIT_MPEG_SIZE (152*1024)
/* Encoder/decoder firmware sizes */
#define IVTV_FW_ENC_SIZE (376836)
#define IVTV_FW_DEC_SIZE (256*1024)
static int load_fw_direct(const char *fn, volatile u8 __iomem *mem, struct ivtv *itv, long size)
{
const struct firmware *fw = NULL;
int retries = 3;
retry:
if (retries && request_firmware(&fw, fn, &itv->pdev->dev) == 0) {
int i;
volatile u32 __iomem *dst = (volatile u32 __iomem *)mem;
const u32 *src = (const u32 *)fw->data;
if (fw->size != size) {
/* Due to race conditions in firmware loading (esp. with udev <0.95)
the wrong file was sometimes loaded. So we check filesizes to
see if at least the right-sized file was loaded. If not, then we
retry. */
IVTV_INFO("Retry: file loaded was not %s (expected size %ld, got %zu)\n", fn, size, fw->size);
release_firmware(fw);
retries--;
goto retry;
}
for (i = 0; i < fw->size; i += 4) {
/* no need for endianness conversion on the ppc */
__raw_writel(*src, dst);
dst++;
src++;
}
IVTV_INFO("Loaded %s firmware (%zu bytes)\n", fn, fw->size);
release_firmware(fw);
return size;
}
IVTV_ERR("Unable to open firmware %s (must be %ld bytes)\n", fn, size);
IVTV_ERR("Did you put the firmware in the hotplug firmware directory?\n");
return -ENOMEM;
}
void ivtv_halt_firmware(struct ivtv *itv)
{
IVTV_DEBUG_INFO("Preparing for firmware halt.\n");
if (itv->has_cx23415 && itv->dec_mbox.mbox)
ivtv_vapi(itv, CX2341X_DEC_HALT_FW, 0);
if (itv->enc_mbox.mbox)
ivtv_vapi(itv, CX2341X_ENC_HALT_FW, 0);
ivtv_msleep_timeout(10, 0);
itv->enc_mbox.mbox = itv->dec_mbox.mbox = NULL;
IVTV_DEBUG_INFO("Stopping VDM\n");
write_reg(IVTV_CMD_VDM_STOP, IVTV_REG_VDM);
IVTV_DEBUG_INFO("Stopping AO\n");
write_reg(IVTV_CMD_AO_STOP, IVTV_REG_AO);
IVTV_DEBUG_INFO("pinging (?) APU\n");
write_reg(IVTV_CMD_APU_PING, IVTV_REG_APU);
IVTV_DEBUG_INFO("Stopping VPU\n");
if (!itv->has_cx23415)
write_reg(IVTV_CMD_VPU_STOP16, IVTV_REG_VPU);
else
write_reg(IVTV_CMD_VPU_STOP15, IVTV_REG_VPU);
IVTV_DEBUG_INFO("Resetting Hw Blocks\n");
write_reg(IVTV_CMD_HW_BLOCKS_RST, IVTV_REG_HW_BLOCKS);
IVTV_DEBUG_INFO("Stopping SPU\n");
write_reg(IVTV_CMD_SPU_STOP, IVTV_REG_SPU);
ivtv_msleep_timeout(10, 0);
IVTV_DEBUG_INFO("init Encoder SDRAM pre-charge\n");
write_reg(IVTV_CMD_SDRAM_PRECHARGE_INIT, IVTV_REG_ENC_SDRAM_PRECHARGE);
IVTV_DEBUG_INFO("init Encoder SDRAM refresh to 1us\n");
write_reg(IVTV_CMD_SDRAM_REFRESH_INIT, IVTV_REG_ENC_SDRAM_REFRESH);
if (itv->has_cx23415) {
IVTV_DEBUG_INFO("init Decoder SDRAM pre-charge\n");
write_reg(IVTV_CMD_SDRAM_PRECHARGE_INIT, IVTV_REG_DEC_SDRAM_PRECHARGE);
IVTV_DEBUG_INFO("init Decoder SDRAM refresh to 1us\n");
write_reg(IVTV_CMD_SDRAM_REFRESH_INIT, IVTV_REG_DEC_SDRAM_REFRESH);
}
IVTV_DEBUG_INFO("Sleeping for %dms\n", IVTV_SDRAM_SLEEPTIME);
ivtv_msleep_timeout(IVTV_SDRAM_SLEEPTIME, 0);
}
void ivtv_firmware_versions(struct ivtv *itv)
{
u32 data[CX2341X_MBOX_MAX_DATA];
/* Encoder */
ivtv_vapi_result(itv, data, CX2341X_ENC_GET_VERSION, 0);
IVTV_INFO("Encoder revision: 0x%08x\n", data[0]);
if (data[0] != 0x02060039)
IVTV_WARN("Recommended firmware version is 0x02060039.\n");
if (itv->has_cx23415) {
/* Decoder */
ivtv_vapi_result(itv, data, CX2341X_DEC_GET_VERSION, 0);
IVTV_INFO("Decoder revision: 0x%08x\n", data[0]);
}
}
static int ivtv_firmware_copy(struct ivtv *itv)
{
IVTV_DEBUG_INFO("Loading encoder image\n");
if (load_fw_direct(CX2341X_FIRM_ENC_FILENAME,
itv->enc_mem, itv, IVTV_FW_ENC_SIZE) != IVTV_FW_ENC_SIZE) {
IVTV_DEBUG_WARN("failed loading encoder firmware\n");
return -3;
}
if (!itv->has_cx23415)
return 0;
IVTV_DEBUG_INFO("Loading decoder image\n");
if (load_fw_direct(CX2341X_FIRM_DEC_FILENAME,
itv->dec_mem, itv, IVTV_FW_DEC_SIZE) != IVTV_FW_DEC_SIZE) {
IVTV_DEBUG_WARN("failed loading decoder firmware\n");
return -1;
}
return 0;
}
static volatile struct ivtv_mailbox __iomem *ivtv_search_mailbox(const volatile u8 __iomem *mem, u32 size)
{
int i;
/* mailbox is preceded by a 16 byte 'magic cookie' starting at a 256-byte
address boundary */
for (i = 0; i < size; i += 0x100) {
if (readl(mem + i) == 0x12345678 &&
readl(mem + i + 4) == 0x34567812 &&
readl(mem + i + 8) == 0x56781234 &&
readl(mem + i + 12) == 0x78123456) {
return (volatile struct ivtv_mailbox __iomem *)(mem + i + 16);
}
}
return NULL;
}
int ivtv_firmware_init(struct ivtv *itv)
{
int err;
ivtv_halt_firmware(itv);
/* load firmware */
err = ivtv_firmware_copy(itv);
if (err) {
IVTV_DEBUG_WARN("Error %d loading firmware\n", err);
return err;
}
/* start firmware */
write_reg(read_reg(IVTV_REG_SPU) & IVTV_MASK_SPU_ENABLE, IVTV_REG_SPU);
ivtv_msleep_timeout(100, 0);
if (itv->has_cx23415)
write_reg(read_reg(IVTV_REG_VPU) & IVTV_MASK_VPU_ENABLE15, IVTV_REG_VPU);
else
write_reg(read_reg(IVTV_REG_VPU) & IVTV_MASK_VPU_ENABLE16, IVTV_REG_VPU);
ivtv_msleep_timeout(100, 0);
/* find mailboxes and ping firmware */
itv->enc_mbox.mbox = ivtv_search_mailbox(itv->enc_mem, IVTV_ENCODER_SIZE);
if (itv->enc_mbox.mbox == NULL)
IVTV_ERR("Encoder mailbox not found\n");
else if (ivtv_vapi(itv, CX2341X_ENC_PING_FW, 0)) {
IVTV_ERR("Encoder firmware dead!\n");
itv->enc_mbox.mbox = NULL;
}
if (itv->enc_mbox.mbox == NULL)
return -ENODEV;
if (!itv->has_cx23415)
return 0;
itv->dec_mbox.mbox = ivtv_search_mailbox(itv->dec_mem, IVTV_DECODER_SIZE);
if (itv->dec_mbox.mbox == NULL) {
IVTV_ERR("Decoder mailbox not found\n");
} else if (itv->has_cx23415 && ivtv_vapi(itv, CX2341X_DEC_PING_FW, 0)) {
IVTV_ERR("Decoder firmware dead!\n");
itv->dec_mbox.mbox = NULL;
} else {
/* Firmware okay, so check yuv output filter table */
ivtv_yuv_filter_check(itv);
}
return itv->dec_mbox.mbox ? 0 : -ENODEV;
}
void ivtv_init_mpeg_decoder(struct ivtv *itv)
{
u32 data[CX2341X_MBOX_MAX_DATA];
long readbytes;
volatile u8 __iomem *mem_offset;
data[0] = 0;
data[1] = itv->cxhdl.width; /* YUV source width */
data[2] = itv->cxhdl.height;
data[3] = itv->cxhdl.audio_properties; /* Audio settings to use,
bitmap. see docs. */
if (ivtv_api(itv, CX2341X_DEC_SET_DECODER_SOURCE, 4, data)) {
IVTV_ERR("ivtv_init_mpeg_decoder failed to set decoder source\n");
return;
}
if (ivtv_vapi(itv, CX2341X_DEC_START_PLAYBACK, 2, 0, 1) != 0) {
IVTV_ERR("ivtv_init_mpeg_decoder failed to start playback\n");
return;
}
ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 2, data);
mem_offset = itv->dec_mem + data[1];
if ((readbytes = load_fw_direct(IVTV_DECODE_INIT_MPEG_FILENAME,
mem_offset, itv, IVTV_DECODE_INIT_MPEG_SIZE)) <= 0) {
IVTV_DEBUG_WARN("failed to read mpeg decoder initialisation file %s\n",
IVTV_DECODE_INIT_MPEG_FILENAME);
} else {
ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, readbytes, 0);
ivtv_msleep_timeout(100, 0);
}
ivtv_vapi(itv, CX2341X_DEC_STOP_PLAYBACK, 4, 0, 0, 0, 1);
}
/* Try to restart the card & restore previous settings */
static int ivtv_firmware_restart(struct ivtv *itv)
{
int rc = 0;
v4l2_std_id std;
if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)
/* Display test image during restart */
ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_routing,
SAA7127_INPUT_TYPE_TEST_IMAGE,
itv->card->video_outputs[itv->active_output].video_output,
0);
mutex_lock(&itv->udma.lock);
rc = ivtv_firmware_init(itv);
if (rc) {
mutex_unlock(&itv->udma.lock);
return rc;
}
/* Allow settings to reload */
ivtv_mailbox_cache_invalidate(itv);
/* Restore encoder video standard */
std = itv->std;
itv->std = 0;
ivtv_s_std_enc(itv, std);
if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
ivtv_init_mpeg_decoder(itv);
/* Restore decoder video standard */
std = itv->std_out;
itv->std_out = 0;
ivtv_s_std_dec(itv, std);
/* Restore framebuffer if active */
if (itv->ivtvfb_restore)
itv->ivtvfb_restore(itv);
/* Restore alpha settings */
ivtv_set_osd_alpha(itv);
/* Restore normal output */
ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_routing,
SAA7127_INPUT_TYPE_NORMAL,
itv->card->video_outputs[itv->active_output].video_output,
0);
}
mutex_unlock(&itv->udma.lock);
return rc;
}
/* Check firmware running state. The checks fall through
allowing multiple failures to be logged. */
int ivtv_firmware_check(struct ivtv *itv, char *where)
{
int res = 0;
/* Check encoder is still running */
if (ivtv_vapi(itv, CX2341X_ENC_PING_FW, 0) < 0) {
IVTV_WARN("Encoder has died : %s\n", where);
res = -1;
}
/* Also check audio. Only check if not in use & encoder is okay */
if (!res && !atomic_read(&itv->capturing) &&
(!atomic_read(&itv->decoding) ||
(atomic_read(&itv->decoding) < 2 && test_bit(IVTV_F_I_DEC_YUV,
&itv->i_flags)))) {
if (ivtv_vapi(itv, CX2341X_ENC_MISC, 1, 12) < 0) {
IVTV_WARN("Audio has died (Encoder OK) : %s\n", where);
res = -2;
}
}
if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
/* Second audio check. Skip if audio already failed */
if (res != -2 && read_dec(0x100) != read_dec(0x104)) {
/* Wait & try again to be certain. */
ivtv_msleep_timeout(14, 0);
if (read_dec(0x100) != read_dec(0x104)) {
IVTV_WARN("Audio has died (Decoder) : %s\n",
where);
res = -1;
}
}
/* Check decoder is still running */
if (ivtv_vapi(itv, CX2341X_DEC_PING_FW, 0) < 0) {
IVTV_WARN("Decoder has died : %s\n", where);
res = -1;
}
}
/* If something failed & currently idle, try to reload */
if (res && !atomic_read(&itv->capturing) &&
!atomic_read(&itv->decoding)) {
IVTV_INFO("Detected in %s that firmware had failed - Reloading\n",
where);
res = ivtv_firmware_restart(itv);
/*
* Even if restarted ok, still signal a problem had occurred.
* The caller can come through this function again to check
* if things are really ok after the restart.
*/
if (!res) {
IVTV_INFO("Firmware restart okay\n");
res = -EAGAIN;
} else {
IVTV_INFO("Firmware restart failed\n");
}
} else if (res) {
res = -EIO;
}
return res;
}
MODULE_FIRMWARE(CX2341X_FIRM_ENC_FILENAME);
MODULE_FIRMWARE(CX2341X_FIRM_DEC_FILENAME);
MODULE_FIRMWARE(IVTV_DECODE_INIT_MPEG_FILENAME);
| linux-master | drivers/media/pci/ivtv/ivtv-firmware.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
mailbox functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <[email protected]>
Copyright (C) 2005-2007 Hans Verkuil <[email protected]>
*/
#include "ivtv-driver.h"
#include "ivtv-mailbox.h"
/* Firmware mailbox flags */
#define IVTV_MBOX_FIRMWARE_DONE 0x00000004
#define IVTV_MBOX_DRIVER_DONE 0x00000002
#define IVTV_MBOX_DRIVER_BUSY 0x00000001
#define IVTV_MBOX_FREE 0x00000000
/* Firmware mailbox standard timeout */
#define IVTV_API_STD_TIMEOUT 0x02000000
#define API_CACHE (1 << 0) /* Allow the command to be stored in the cache */
#define API_RESULT (1 << 1) /* Allow 1 second for this cmd to end */
#define API_FAST_RESULT (3 << 1) /* Allow 0.1 second for this cmd to end */
#define API_DMA (1 << 3) /* DMA mailbox, has special handling */
#define API_HIGH_VOL (1 << 5) /* High volume command (i.e. called during encoding or decoding) */
#define API_NO_WAIT_MB (1 << 4) /* Command may not wait for a free mailbox */
#define API_NO_WAIT_RES (1 << 5) /* Command may not wait for the result */
#define API_NO_POLL (1 << 6) /* Avoid pointless polling */
struct ivtv_api_info {
int flags; /* Flags, see above */
const char *name; /* The name of the command */
};
#define API_ENTRY(x, f) [x] = { (f), #x }
static const struct ivtv_api_info api_info[256] = {
/* MPEG encoder API */
API_ENTRY(CX2341X_ENC_PING_FW, API_FAST_RESULT),
API_ENTRY(CX2341X_ENC_START_CAPTURE, API_RESULT | API_NO_POLL),
API_ENTRY(CX2341X_ENC_STOP_CAPTURE, API_RESULT),
API_ENTRY(CX2341X_ENC_SET_AUDIO_ID, API_CACHE),
API_ENTRY(CX2341X_ENC_SET_VIDEO_ID, API_CACHE),
API_ENTRY(CX2341X_ENC_SET_PCR_ID, API_CACHE),
API_ENTRY(CX2341X_ENC_SET_FRAME_RATE, API_CACHE),
API_ENTRY(CX2341X_ENC_SET_FRAME_SIZE, API_CACHE),
API_ENTRY(CX2341X_ENC_SET_BIT_RATE, API_CACHE),
API_ENTRY(CX2341X_ENC_SET_GOP_PROPERTIES, API_CACHE),
API_ENTRY(CX2341X_ENC_SET_ASPECT_RATIO, API_CACHE),
API_ENTRY(CX2341X_ENC_SET_DNR_FILTER_MODE, API_CACHE),
API_ENTRY(CX2341X_ENC_SET_DNR_FILTER_PROPS, API_CACHE),
API_ENTRY(CX2341X_ENC_SET_CORING_LEVELS, API_CACHE),
API_ENTRY(CX2341X_ENC_SET_SPATIAL_FILTER_TYPE, API_CACHE),
API_ENTRY(CX2341X_ENC_SET_VBI_LINE, API_RESULT),
API_ENTRY(CX2341X_ENC_SET_STREAM_TYPE, API_CACHE),
API_ENTRY(CX2341X_ENC_SET_OUTPUT_PORT, API_CACHE),
API_ENTRY(CX2341X_ENC_SET_AUDIO_PROPERTIES, API_CACHE),
API_ENTRY(CX2341X_ENC_HALT_FW, API_FAST_RESULT),
API_ENTRY(CX2341X_ENC_GET_VERSION, API_FAST_RESULT),
API_ENTRY(CX2341X_ENC_SET_GOP_CLOSURE, API_CACHE),
API_ENTRY(CX2341X_ENC_GET_SEQ_END, API_RESULT),
API_ENTRY(CX2341X_ENC_SET_PGM_INDEX_INFO, API_FAST_RESULT),
API_ENTRY(CX2341X_ENC_SET_VBI_CONFIG, API_RESULT),
API_ENTRY(CX2341X_ENC_SET_DMA_BLOCK_SIZE, API_CACHE),
API_ENTRY(CX2341X_ENC_GET_PREV_DMA_INFO_MB_10, API_FAST_RESULT),
API_ENTRY(CX2341X_ENC_GET_PREV_DMA_INFO_MB_9, API_FAST_RESULT),
API_ENTRY(CX2341X_ENC_SCHED_DMA_TO_HOST, API_DMA | API_HIGH_VOL),
API_ENTRY(CX2341X_ENC_INITIALIZE_INPUT, API_RESULT),
API_ENTRY(CX2341X_ENC_SET_FRAME_DROP_RATE, API_CACHE),
API_ENTRY(CX2341X_ENC_PAUSE_ENCODER, API_RESULT),
API_ENTRY(CX2341X_ENC_REFRESH_INPUT, API_NO_WAIT_MB | API_HIGH_VOL),
API_ENTRY(CX2341X_ENC_SET_COPYRIGHT, API_CACHE),
API_ENTRY(CX2341X_ENC_SET_EVENT_NOTIFICATION, API_RESULT),
API_ENTRY(CX2341X_ENC_SET_NUM_VSYNC_LINES, API_CACHE),
API_ENTRY(CX2341X_ENC_SET_PLACEHOLDER, API_CACHE),
API_ENTRY(CX2341X_ENC_MUTE_VIDEO, API_RESULT),
API_ENTRY(CX2341X_ENC_MUTE_AUDIO, API_RESULT),
API_ENTRY(CX2341X_ENC_SET_VERT_CROP_LINE, API_FAST_RESULT),
API_ENTRY(CX2341X_ENC_MISC, API_FAST_RESULT),
/* Obsolete PULLDOWN API command */
API_ENTRY(0xb1, API_CACHE),
/* MPEG decoder API */
API_ENTRY(CX2341X_DEC_PING_FW, API_FAST_RESULT),
API_ENTRY(CX2341X_DEC_START_PLAYBACK, API_RESULT | API_NO_POLL),
API_ENTRY(CX2341X_DEC_STOP_PLAYBACK, API_RESULT),
API_ENTRY(CX2341X_DEC_SET_PLAYBACK_SPEED, API_RESULT),
API_ENTRY(CX2341X_DEC_STEP_VIDEO, API_RESULT),
API_ENTRY(CX2341X_DEC_SET_DMA_BLOCK_SIZE, API_CACHE),
API_ENTRY(CX2341X_DEC_GET_XFER_INFO, API_FAST_RESULT),
API_ENTRY(CX2341X_DEC_GET_DMA_STATUS, API_FAST_RESULT),
API_ENTRY(CX2341X_DEC_SCHED_DMA_FROM_HOST, API_DMA | API_HIGH_VOL),
API_ENTRY(CX2341X_DEC_PAUSE_PLAYBACK, API_RESULT),
API_ENTRY(CX2341X_DEC_HALT_FW, API_FAST_RESULT),
API_ENTRY(CX2341X_DEC_SET_STANDARD, API_CACHE),
API_ENTRY(CX2341X_DEC_GET_VERSION, API_FAST_RESULT),
API_ENTRY(CX2341X_DEC_SET_STREAM_INPUT, API_CACHE),
API_ENTRY(CX2341X_DEC_GET_TIMING_INFO, API_RESULT /*| API_NO_WAIT_RES*/),
API_ENTRY(CX2341X_DEC_SET_AUDIO_MODE, API_CACHE),
API_ENTRY(CX2341X_DEC_SET_EVENT_NOTIFICATION, API_RESULT),
API_ENTRY(CX2341X_DEC_SET_DISPLAY_BUFFERS, API_CACHE),
API_ENTRY(CX2341X_DEC_EXTRACT_VBI, API_RESULT),
API_ENTRY(CX2341X_DEC_SET_DECODER_SOURCE, API_FAST_RESULT),
API_ENTRY(CX2341X_DEC_SET_PREBUFFERING, API_CACHE),
/* OSD API */
API_ENTRY(CX2341X_OSD_GET_FRAMEBUFFER, API_FAST_RESULT),
API_ENTRY(CX2341X_OSD_GET_PIXEL_FORMAT, API_FAST_RESULT),
API_ENTRY(CX2341X_OSD_SET_PIXEL_FORMAT, API_CACHE),
API_ENTRY(CX2341X_OSD_GET_STATE, API_FAST_RESULT),
API_ENTRY(CX2341X_OSD_SET_STATE, API_CACHE),
API_ENTRY(CX2341X_OSD_GET_OSD_COORDS, API_FAST_RESULT),
API_ENTRY(CX2341X_OSD_SET_OSD_COORDS, API_CACHE),
API_ENTRY(CX2341X_OSD_GET_SCREEN_COORDS, API_FAST_RESULT),
API_ENTRY(CX2341X_OSD_SET_SCREEN_COORDS, API_CACHE),
API_ENTRY(CX2341X_OSD_GET_GLOBAL_ALPHA, API_FAST_RESULT),
API_ENTRY(CX2341X_OSD_SET_GLOBAL_ALPHA, API_CACHE),
API_ENTRY(CX2341X_OSD_SET_BLEND_COORDS, API_CACHE),
API_ENTRY(CX2341X_OSD_GET_FLICKER_STATE, API_FAST_RESULT),
API_ENTRY(CX2341X_OSD_SET_FLICKER_STATE, API_CACHE),
API_ENTRY(CX2341X_OSD_BLT_COPY, API_RESULT),
API_ENTRY(CX2341X_OSD_BLT_FILL, API_RESULT),
API_ENTRY(CX2341X_OSD_BLT_TEXT, API_RESULT),
API_ENTRY(CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, API_CACHE),
API_ENTRY(CX2341X_OSD_SET_CHROMA_KEY, API_CACHE),
API_ENTRY(CX2341X_OSD_GET_ALPHA_CONTENT_INDEX, API_FAST_RESULT),
API_ENTRY(CX2341X_OSD_SET_ALPHA_CONTENT_INDEX, API_CACHE)
};
static int try_mailbox(struct ivtv *itv, struct ivtv_mailbox_data *mbdata, int mb)
{
u32 flags = readl(&mbdata->mbox[mb].flags);
int is_free = flags == IVTV_MBOX_FREE || (flags & IVTV_MBOX_FIRMWARE_DONE);
/* if the mailbox is free, then try to claim it */
if (is_free && !test_and_set_bit(mb, &mbdata->busy)) {
write_sync(IVTV_MBOX_DRIVER_BUSY, &mbdata->mbox[mb].flags);
return 1;
}
return 0;
}
/* Try to find a free mailbox. Note mailbox 0 is reserved for DMA and so is not
attempted here. */
static int get_mailbox(struct ivtv *itv, struct ivtv_mailbox_data *mbdata, int flags)
{
unsigned long then = jiffies;
int i, mb;
int max_mbox = mbdata->max_mbox;
int retries = 100;
/* All slow commands use the same mailbox, serializing them and also
leaving the other mailbox free for simple fast commands. */
if ((flags & API_FAST_RESULT) == API_RESULT)
max_mbox = 1;
/* find free non-DMA mailbox */
for (i = 0; i < retries; i++) {
for (mb = 1; mb <= max_mbox; mb++)
if (try_mailbox(itv, mbdata, mb))
return mb;
/* Sleep before a retry, if not atomic */
if (!(flags & API_NO_WAIT_MB)) {
if (time_after(jiffies,
then + msecs_to_jiffies(10*retries)))
break;
ivtv_msleep_timeout(10, 0);
}
}
return -ENODEV;
}
static void write_mailbox(volatile struct ivtv_mailbox __iomem *mbox, int cmd, int args, u32 data[])
{
int i;
write_sync(cmd, &mbox->cmd);
write_sync(IVTV_API_STD_TIMEOUT, &mbox->timeout);
for (i = 0; i < CX2341X_MBOX_MAX_DATA; i++)
write_sync(data[i], &mbox->data[i]);
write_sync(IVTV_MBOX_DRIVER_DONE | IVTV_MBOX_DRIVER_BUSY, &mbox->flags);
}
static void clear_all_mailboxes(struct ivtv *itv, struct ivtv_mailbox_data *mbdata)
{
int i;
for (i = 0; i <= mbdata->max_mbox; i++) {
IVTV_DEBUG_WARN("Clearing mailbox %d: cmd 0x%08x flags 0x%08x\n",
i, readl(&mbdata->mbox[i].cmd), readl(&mbdata->mbox[i].flags));
write_sync(0, &mbdata->mbox[i].flags);
clear_bit(i, &mbdata->busy);
}
}
static int ivtv_api_call(struct ivtv *itv, int cmd, int args, u32 data[])
{
struct ivtv_mailbox_data *mbdata = (cmd >= 128) ? &itv->enc_mbox : &itv->dec_mbox;
volatile struct ivtv_mailbox __iomem *mbox;
int api_timeout = msecs_to_jiffies(1000);
int flags, mb, i;
unsigned long then;
/* sanity checks */
if (NULL == mbdata) {
IVTV_ERR("No mailbox allocated\n");
return -ENODEV;
}
if (args < 0 || args > CX2341X_MBOX_MAX_DATA ||
cmd < 0 || cmd > 255 || api_info[cmd].name == NULL) {
IVTV_ERR("Invalid MB call: cmd = 0x%02x, args = %d\n", cmd, args);
return -EINVAL;
}
if (api_info[cmd].flags & API_HIGH_VOL) {
IVTV_DEBUG_HI_MB("MB Call: %s\n", api_info[cmd].name);
}
else {
IVTV_DEBUG_MB("MB Call: %s\n", api_info[cmd].name);
}
/* clear possibly uninitialized part of data array */
for (i = args; i < CX2341X_MBOX_MAX_DATA; i++)
data[i] = 0;
/* If this command was issued within the last 30 minutes and with identical
data, then just return 0 as there is no need to issue this command again.
Just an optimization to prevent unnecessary use of mailboxes. */
if (itv->api_cache[cmd].last_jiffies &&
time_before(jiffies,
itv->api_cache[cmd].last_jiffies +
msecs_to_jiffies(1800000)) &&
!memcmp(data, itv->api_cache[cmd].data, sizeof(itv->api_cache[cmd].data))) {
itv->api_cache[cmd].last_jiffies = jiffies;
return 0;
}
flags = api_info[cmd].flags;
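/* DMA commands are fire-and-forget: grab any free mailbox (including
   the DMA mailbox 0), write the command and release the slot without
   waiting for a result. */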
if (flags & API_DMA) {
for (i = 0; i < 100; i++) {
mb = i % (mbdata->max_mbox + 1);
if (try_mailbox(itv, mbdata, mb)) {
write_mailbox(&mbdata->mbox[mb], cmd, args, data);
clear_bit(mb, &mbdata->busy);
return 0;
}
IVTV_DEBUG_WARN("%s: mailbox %d not free %08x\n",
api_info[cmd].name, mb, readl(&mbdata->mbox[mb].flags));
}
IVTV_WARN("Could not find free DMA mailbox for %s\n", api_info[cmd].name);
clear_all_mailboxes(itv, mbdata);
return -EBUSY;
}
if ((flags & API_FAST_RESULT) == API_FAST_RESULT)
api_timeout = msecs_to_jiffies(100);
mb = get_mailbox(itv, mbdata, flags);
if (mb < 0) {
IVTV_DEBUG_WARN("No free mailbox found (%s)\n", api_info[cmd].name);
clear_all_mailboxes(itv, mbdata);
return -EBUSY;
}
mbox = &mbdata->mbox[mb];
write_mailbox(mbox, cmd, args, data);
if (flags & API_CACHE) {
memcpy(itv->api_cache[cmd].data, data, sizeof(itv->api_cache[cmd].data));
itv->api_cache[cmd].last_jiffies = jiffies;
}
if ((flags & API_RESULT) == 0) {
clear_bit(mb, &mbdata->busy);
return 0;
}
/* Get results */
then = jiffies;
if (!(flags & API_NO_POLL)) {
/* First try to poll, then switch to delays */
for (i = 0; i < 100; i++) {
if (readl(&mbox->flags) & IVTV_MBOX_FIRMWARE_DONE)
break;
}
}
while (!(readl(&mbox->flags) & IVTV_MBOX_FIRMWARE_DONE)) {
if (time_after(jiffies, then + api_timeout)) {
IVTV_DEBUG_WARN("Could not get result (%s)\n", api_info[cmd].name);
/* reset the mailbox, but it is likely too late already */
write_sync(0, &mbox->flags);
clear_bit(mb, &mbdata->busy);
return -EIO;
}
if (flags & API_NO_WAIT_RES)
mdelay(1);
else
ivtv_msleep_timeout(1, 0);
}
if (time_after(jiffies, then + msecs_to_jiffies(100)))
IVTV_DEBUG_WARN("%s took %u jiffies\n",
api_info[cmd].name,
jiffies_to_msecs(jiffies - then));
for (i = 0; i < CX2341X_MBOX_MAX_DATA; i++)
data[i] = readl(&mbox->data[i]);
write_sync(0, &mbox->flags);
clear_bit(mb, &mbdata->busy);
return 0;
}
int ivtv_api(struct ivtv *itv, int cmd, int args, u32 data[])
{
int res = ivtv_api_call(itv, cmd, args, data);
/* Allow a single retry, probably already too late though.
If there is no free mailbox then that is usually an indication
of a more serious problem. */
return (res == -EBUSY) ? ivtv_api_call(itv, cmd, args, data) : res;
}
int ivtv_api_func(void *priv, u32 cmd, int in, int out, u32 data[CX2341X_MBOX_MAX_DATA])
{
return ivtv_api(priv, cmd, in, data);
}
int ivtv_vapi_result(struct ivtv *itv, u32 data[CX2341X_MBOX_MAX_DATA], int cmd, int args, ...)
{
va_list ap;
int i;
va_start(ap, args);
for (i = 0; i < args; i++) {
data[i] = va_arg(ap, u32);
}
va_end(ap);
return ivtv_api(itv, cmd, args, data);
}
int ivtv_vapi(struct ivtv *itv, int cmd, int args, ...)
{
u32 data[CX2341X_MBOX_MAX_DATA];
va_list ap;
int i;
va_start(ap, args);
for (i = 0; i < args; i++) {
data[i] = va_arg(ap, u32);
}
va_end(ap);
return ivtv_api(itv, cmd, args, data);
}
/* This one is for stuff that can't sleep.. irq handlers, etc.. */
void ivtv_api_get_data(struct ivtv_mailbox_data *mbdata, int mb,
int argc, u32 data[])
{
volatile u32 __iomem *p = mbdata->mbox[mb].data;
int i;
for (i = 0; i < argc; i++, p++)
data[i] = readl(p);
}
/* Wipe api cache */
void ivtv_mailbox_cache_invalidate(struct ivtv *itv)
{
int i;
for (i = 0; i < 256; i++)
itv->api_cache[i].last_jiffies = 0;
}
| linux-master | drivers/media/pci/ivtv/ivtv-mailbox.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
file operation functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <[email protected]>
Copyright (C) 2005-2007 Hans Verkuil <[email protected]>
*/
#include "ivtv-driver.h"
#include "ivtv-fileops.h"
#include "ivtv-i2c.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-vbi.h"
#include "ivtv-mailbox.h"
#include "ivtv-routing.h"
#include "ivtv-streams.h"
#include "ivtv-yuv.h"
#include "ivtv-ioctl.h"
#include "ivtv-cards.h"
#include "ivtv-firmware.h"
#include <media/v4l2-event.h>
#include <media/i2c/saa7115.h>
/* This function tries to claim the stream for a specific file descriptor.
If no one else is using this stream then the stream is claimed and
associated VBI streams are also automatically claimed.
Returns -EBUSY if someone else has already claimed
   the stream, or 0 on success. */
int ivtv_claim_stream(struct ivtv_open_id *id, int type)
{
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[type];
struct ivtv_stream *s_vbi;
int vbi_type;
if (test_and_set_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
/* someone already claimed this stream */
if (s->fh == &id->fh) {
/* yes, this file descriptor did. So that's OK. */
return 0;
}
if (s->fh == NULL && (type == IVTV_DEC_STREAM_TYPE_VBI ||
type == IVTV_ENC_STREAM_TYPE_VBI)) {
/* VBI is handled already internally, now also assign
the file descriptor to this stream for external
reading of the stream. */
s->fh = &id->fh;
IVTV_DEBUG_INFO("Start Read VBI\n");
return 0;
}
/* someone else is using this stream already */
IVTV_DEBUG_INFO("Stream %d is busy\n", type);
return -EBUSY;
}
s->fh = &id->fh;
if (type == IVTV_DEC_STREAM_TYPE_VBI) {
/* Enable reinsertion interrupt */
ivtv_clear_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT);
}
/* IVTV_DEC_STREAM_TYPE_MPG needs to claim IVTV_DEC_STREAM_TYPE_VBI,
IVTV_ENC_STREAM_TYPE_MPG needs to claim IVTV_ENC_STREAM_TYPE_VBI
(provided VBI insertion is on and sliced VBI is selected), for all
other streams we're done */
if (type == IVTV_DEC_STREAM_TYPE_MPG) {
vbi_type = IVTV_DEC_STREAM_TYPE_VBI;
} else if (type == IVTV_ENC_STREAM_TYPE_MPG &&
itv->vbi.insert_mpeg && !ivtv_raw_vbi(itv)) {
vbi_type = IVTV_ENC_STREAM_TYPE_VBI;
} else {
return 0;
}
s_vbi = &itv->streams[vbi_type];
if (!test_and_set_bit(IVTV_F_S_CLAIMED, &s_vbi->s_flags)) {
/* Enable reinsertion interrupt */
if (vbi_type == IVTV_DEC_STREAM_TYPE_VBI)
ivtv_clear_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT);
}
/* mark that it is used internally */
set_bit(IVTV_F_S_INTERNAL_USE, &s_vbi->s_flags);
return 0;
}
EXPORT_SYMBOL(ivtv_claim_stream);
/* This function releases a previously claimed stream. It will take into
account associated VBI streams. */
void ivtv_release_stream(struct ivtv_stream *s)
{
struct ivtv *itv = s->itv;
struct ivtv_stream *s_vbi;
s->fh = NULL;
if ((s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type == IVTV_ENC_STREAM_TYPE_VBI) &&
test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
/* this stream is still in use internally */
return;
}
if (!test_and_clear_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
IVTV_DEBUG_WARN("Release stream %s not in use!\n", s->name);
return;
}
ivtv_flush_queues(s);
/* disable reinsertion interrupt */
if (s->type == IVTV_DEC_STREAM_TYPE_VBI)
ivtv_set_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT);
/* IVTV_DEC_STREAM_TYPE_MPG needs to release IVTV_DEC_STREAM_TYPE_VBI,
IVTV_ENC_STREAM_TYPE_MPG needs to release IVTV_ENC_STREAM_TYPE_VBI,
for all other streams we're done */
if (s->type == IVTV_DEC_STREAM_TYPE_MPG)
s_vbi = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];
else if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
else
return;
/* clear internal use flag */
if (!test_and_clear_bit(IVTV_F_S_INTERNAL_USE, &s_vbi->s_flags)) {
/* was already cleared */
return;
}
if (s_vbi->fh) {
/* VBI stream still claimed by a file descriptor */
return;
}
/* disable reinsertion interrupt */
if (s_vbi->type == IVTV_DEC_STREAM_TYPE_VBI)
ivtv_set_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT);
clear_bit(IVTV_F_S_CLAIMED, &s_vbi->s_flags);
ivtv_flush_queues(s_vbi);
}
EXPORT_SYMBOL(ivtv_release_stream);
static void ivtv_dualwatch(struct ivtv *itv)
{
struct v4l2_tuner vt;
u32 new_stereo_mode;
const u32 dual = 0x02;
new_stereo_mode = v4l2_ctrl_g_ctrl(itv->cxhdl.audio_mode);
memset(&vt, 0, sizeof(vt));
ivtv_call_all(itv, tuner, g_tuner, &vt);
if (vt.audmode == V4L2_TUNER_MODE_LANG1_LANG2 && (vt.rxsubchans & V4L2_TUNER_SUB_LANG2))
new_stereo_mode = dual;
if (new_stereo_mode == itv->dualwatch_stereo_mode)
return;
IVTV_DEBUG_INFO("dualwatch: change stereo flag from 0x%x to 0x%x.\n",
itv->dualwatch_stereo_mode, new_stereo_mode);
if (v4l2_ctrl_s_ctrl(itv->cxhdl.audio_mode, new_stereo_mode))
IVTV_DEBUG_INFO("dualwatch: changing stereo flag failed\n");
}
static void ivtv_update_pgm_info(struct ivtv *itv)
{
u32 wr_idx = (read_enc(itv->pgm_info_offset) - itv->pgm_info_offset - 4) / 24;
int cnt;
int i = 0;
if (wr_idx >= itv->pgm_info_num) {
IVTV_DEBUG_WARN("Invalid PGM index %d (>= %d)\n", wr_idx, itv->pgm_info_num);
return;
}
cnt = (wr_idx + itv->pgm_info_num - itv->pgm_info_write_idx) % itv->pgm_info_num;
while (i < cnt) {
int idx = (itv->pgm_info_write_idx + i) % itv->pgm_info_num;
struct v4l2_enc_idx_entry *e = itv->pgm_info + idx;
u32 addr = itv->pgm_info_offset + 4 + idx * 24;
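/* Each index entry is 24 bytes: length at +0, a 64-bit stream offset at
   +4, the frame type at +12 and a 33-bit PTS at +16. */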
const int mapping[8] = { -1, V4L2_ENC_IDX_FRAME_I, V4L2_ENC_IDX_FRAME_P, -1,
V4L2_ENC_IDX_FRAME_B, -1, -1, -1 };
// 1=I, 2=P, 4=B
e->offset = read_enc(addr + 4) + ((u64)read_enc(addr + 8) << 32);
if (e->offset > itv->mpg_data_received) {
break;
}
e->offset += itv->vbi_data_inserted;
e->length = read_enc(addr);
e->pts = read_enc(addr + 16) + ((u64)(read_enc(addr + 20) & 1) << 32);
e->flags = mapping[read_enc(addr + 12) & 7];
i++;
}
itv->pgm_info_write_idx = (itv->pgm_info_write_idx + i) % itv->pgm_info_num;
}
static struct ivtv_buffer *ivtv_get_buffer(struct ivtv_stream *s, int non_block, int *err)
{
struct ivtv *itv = s->itv;
struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
struct ivtv_buffer *buf;
DEFINE_WAIT(wait);
*err = 0;
while (1) {
if (s->type == IVTV_ENC_STREAM_TYPE_MPG) {
/* Process pending program info updates and pending VBI data */
ivtv_update_pgm_info(itv);
if (time_after(jiffies,
itv->dualwatch_jiffies +
msecs_to_jiffies(1000))) {
itv->dualwatch_jiffies = jiffies;
ivtv_dualwatch(itv);
}
if (test_bit(IVTV_F_S_INTERNAL_USE, &s_vbi->s_flags) &&
!test_bit(IVTV_F_S_APPL_IO, &s_vbi->s_flags)) {
while ((buf = ivtv_dequeue(s_vbi, &s_vbi->q_full))) {
/* byteswap and process VBI data */
ivtv_process_vbi_data(itv, buf, s_vbi->dma_pts, s_vbi->type);
ivtv_enqueue(s_vbi, buf, &s_vbi->q_free);
}
}
buf = &itv->vbi.sliced_mpeg_buf;
if (buf->readpos != buf->bytesused) {
return buf;
}
}
/* do we have leftover data? */
buf = ivtv_dequeue(s, &s->q_io);
if (buf)
return buf;
/* do we have new data? */
buf = ivtv_dequeue(s, &s->q_full);
if (buf) {
if ((buf->b_flags & IVTV_F_B_NEED_BUF_SWAP) == 0)
return buf;
buf->b_flags &= ~IVTV_F_B_NEED_BUF_SWAP;
if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
/* byteswap MPG data */
ivtv_buf_swap(buf);
else if (s->type != IVTV_DEC_STREAM_TYPE_VBI) {
/* byteswap and process VBI data */
ivtv_process_vbi_data(itv, buf, s->dma_pts, s->type);
}
return buf;
}
/* return if end of stream */
if (s->type != IVTV_DEC_STREAM_TYPE_VBI && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
IVTV_DEBUG_INFO("EOS %s\n", s->name);
return NULL;
}
/* return if file was opened with O_NONBLOCK */
if (non_block) {
*err = -EAGAIN;
return NULL;
}
/* wait for more data to arrive */
mutex_unlock(&itv->serialize_lock);
prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
/* New buffers might have become available before we were added to the waitqueue */
if (!s->q_full.buffers)
schedule();
finish_wait(&s->waitq, &wait);
mutex_lock(&itv->serialize_lock);
if (signal_pending(current)) {
/* return if a signal was received */
IVTV_DEBUG_INFO("User stopped %s\n", s->name);
*err = -EINTR;
return NULL;
}
}
}
static void ivtv_setup_sliced_vbi_buf(struct ivtv *itv)
{
int idx = itv->vbi.inserted_frame % IVTV_VBI_FRAMES;
itv->vbi.sliced_mpeg_buf.buf = itv->vbi.sliced_mpeg_data[idx];
itv->vbi.sliced_mpeg_buf.bytesused = itv->vbi.sliced_mpeg_size[idx];
itv->vbi.sliced_mpeg_buf.readpos = 0;
}
static size_t ivtv_copy_buf_to_user(struct ivtv_stream *s, struct ivtv_buffer *buf,
char __user *ubuf, size_t ucount)
{
struct ivtv *itv = s->itv;
size_t len = buf->bytesused - buf->readpos;
if (len > ucount) len = ucount;
if (itv->vbi.insert_mpeg && s->type == IVTV_ENC_STREAM_TYPE_MPG &&
!ivtv_raw_vbi(itv) && buf != &itv->vbi.sliced_mpeg_buf) {
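/* The driver embeds sliced VBI data in the MPEG stream. Scan the data
   being returned for an MPEG start code (a pack header 0xBA once one has
   been seen, otherwise a video PES header 0xE0) so the copy can stop at
   a pack boundary and the pending sliced VBI buffer be returned next. */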
const char *start = buf->buf + buf->readpos;
const char *p = start + 1;
const u8 *q;
u8 ch = itv->search_pack_header ? 0xba : 0xe0;
int stuffing, i;
while (start + len > p && (q = memchr(p, 0, start + len - p))) {
p = q + 1;
if ((char *)q + 15 >= buf->buf + buf->bytesused ||
q[1] != 0 || q[2] != 1 || q[3] != ch) {
continue;
}
if (!itv->search_pack_header) {
if ((q[6] & 0xc0) != 0x80)
continue;
if (((q[7] & 0xc0) == 0x80 && (q[9] & 0xf0) == 0x20) ||
((q[7] & 0xc0) == 0xc0 && (q[9] & 0xf0) == 0x30)) {
ch = 0xba;
itv->search_pack_header = 1;
p = q + 9;
}
continue;
}
stuffing = q[13] & 7;
/* all stuffing bytes must be 0xff */
for (i = 0; i < stuffing; i++)
if (q[14 + i] != 0xff)
break;
if (i == stuffing && (q[4] & 0xc4) == 0x44 && (q[12] & 3) == 3 &&
q[14 + stuffing] == 0 && q[15 + stuffing] == 0 &&
q[16 + stuffing] == 1) {
itv->search_pack_header = 0;
len = (char *)q - start;
ivtv_setup_sliced_vbi_buf(itv);
break;
}
}
}
if (copy_to_user(ubuf, (u8 *)buf->buf + buf->readpos, len)) {
IVTV_DEBUG_WARN("copy %zd bytes to user failed for %s\n", len, s->name);
return -EFAULT;
}
/*IVTV_INFO("copied %lld %d %d %d %d %d vbi %d\n", itv->mpg_data_received, len, ucount,
buf->readpos, buf->bytesused, buf->bytesused - buf->readpos - len,
buf == &itv->vbi.sliced_mpeg_buf); */
buf->readpos += len;
if (s->type == IVTV_ENC_STREAM_TYPE_MPG && buf != &itv->vbi.sliced_mpeg_buf)
itv->mpg_data_received += len;
return len;
}
static ssize_t ivtv_read(struct ivtv_stream *s, char __user *ubuf, size_t tot_count, int non_block)
{
struct ivtv *itv = s->itv;
size_t tot_written = 0;
int single_frame = 0;
if (atomic_read(&itv->capturing) == 0 && s->fh == NULL) {
/* shouldn't happen */
IVTV_DEBUG_WARN("Stream %s not initialized before read\n", s->name);
return -EIO;
}
/* Each VBI buffer is one frame; the V4L2 API says that VBI frames should
   arrive one-by-one, so make sure we never output more than one VBI frame at a time */
if (s->type == IVTV_DEC_STREAM_TYPE_VBI ||
(s->type == IVTV_ENC_STREAM_TYPE_VBI && !ivtv_raw_vbi(itv)))
single_frame = 1;
for (;;) {
struct ivtv_buffer *buf;
int rc;
buf = ivtv_get_buffer(s, non_block, &rc);
/* if there is no data available... */
if (buf == NULL) {
/* if we got data, then return that regardless */
if (tot_written)
break;
/* EOS condition */
if (rc == 0) {
clear_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
clear_bit(IVTV_F_S_APPL_IO, &s->s_flags);
ivtv_release_stream(s);
}
/* set errno */
return rc;
}
rc = ivtv_copy_buf_to_user(s, buf, ubuf + tot_written, tot_count - tot_written);
if (buf != &itv->vbi.sliced_mpeg_buf) {
ivtv_enqueue(s, buf, (buf->readpos == buf->bytesused) ? &s->q_free : &s->q_io);
}
else if (buf->readpos == buf->bytesused) {
int idx = itv->vbi.inserted_frame % IVTV_VBI_FRAMES;
itv->vbi.sliced_mpeg_size[idx] = 0;
itv->vbi.inserted_frame++;
itv->vbi_data_inserted += buf->bytesused;
}
if (rc < 0)
return rc;
tot_written += rc;
if (tot_written == tot_count || single_frame)
break;
}
return tot_written;
}
static ssize_t ivtv_read_pos(struct ivtv_stream *s, char __user *ubuf, size_t count,
loff_t *pos, int non_block)
{
ssize_t rc = count ? ivtv_read(s, ubuf, count, non_block) : 0;
struct ivtv *itv = s->itv;
IVTV_DEBUG_HI_FILE("read %zd from %s, got %zd\n", count, s->name, rc);
if (rc > 0)
*pos += rc;
return rc;
}
int ivtv_start_capture(struct ivtv_open_id *id)
{
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
struct ivtv_stream *s_vbi;
if (s->type == IVTV_ENC_STREAM_TYPE_RAD ||
s->type == IVTV_DEC_STREAM_TYPE_MPG ||
s->type == IVTV_DEC_STREAM_TYPE_YUV ||
s->type == IVTV_DEC_STREAM_TYPE_VOUT) {
/* you cannot read from these stream types. */
return -EINVAL;
}
/* Try to claim this stream. */
if (ivtv_claim_stream(id, s->type))
return -EBUSY;
/* This stream does not need to start capturing */
if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
return 0;
}
/* If capture is already in progress, then there is nothing
   extra to do. */
if (test_bit(IVTV_F_S_STREAMOFF, &s->s_flags) || test_and_set_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
return 0;
}
/* Start VBI capture if required */
s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
if (s->type == IVTV_ENC_STREAM_TYPE_MPG &&
test_bit(IVTV_F_S_INTERNAL_USE, &s_vbi->s_flags) &&
!test_and_set_bit(IVTV_F_S_STREAMING, &s_vbi->s_flags)) {
/* Note: the IVTV_ENC_STREAM_TYPE_VBI is claimed
automatically when the MPG stream is claimed.
We only need to start the VBI capturing. */
if (ivtv_start_v4l2_encode_stream(s_vbi)) {
IVTV_DEBUG_WARN("VBI capture start failed\n");
/* Failure, clean up and return an error */
clear_bit(IVTV_F_S_STREAMING, &s_vbi->s_flags);
clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
/* also releases the associated VBI stream */
ivtv_release_stream(s);
return -EIO;
}
IVTV_DEBUG_INFO("VBI insertion started\n");
}
/* Tell the card to start capturing */
if (!ivtv_start_v4l2_encode_stream(s)) {
/* We're done */
set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
/* Resume a possibly paused encoder */
if (test_and_clear_bit(IVTV_F_I_ENC_PAUSED, &itv->i_flags))
ivtv_vapi(itv, CX2341X_ENC_PAUSE_ENCODER, 1, 1);
return 0;
}
/* failure, clean up */
IVTV_DEBUG_WARN("Failed to start capturing for stream %s\n", s->name);
/* Note: the IVTV_ENC_STREAM_TYPE_VBI is released
automatically when the MPG stream is released.
We only need to stop the VBI capturing. */
if (s->type == IVTV_ENC_STREAM_TYPE_MPG &&
test_bit(IVTV_F_S_STREAMING, &s_vbi->s_flags)) {
ivtv_stop_v4l2_encode_stream(s_vbi, 0);
clear_bit(IVTV_F_S_STREAMING, &s_vbi->s_flags);
}
clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
ivtv_release_stream(s);
return -EIO;
}
ssize_t ivtv_v4l2_read(struct file * filp, char __user *buf, size_t count, loff_t * pos)
{
struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
ssize_t rc;
IVTV_DEBUG_HI_FILE("read %zd bytes from %s\n", count, s->name);
if (mutex_lock_interruptible(&itv->serialize_lock))
return -ERESTARTSYS;
rc = ivtv_start_capture(id);
if (!rc)
rc = ivtv_read_pos(s, buf, count, pos, filp->f_flags & O_NONBLOCK);
mutex_unlock(&itv->serialize_lock);
return rc;
}
int ivtv_start_decoding(struct ivtv_open_id *id, int speed)
{
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
int rc;
if (atomic_read(&itv->decoding) == 0) {
if (ivtv_claim_stream(id, s->type)) {
/* someone else is using this stream already */
IVTV_DEBUG_WARN("start decode, stream already claimed\n");
return -EBUSY;
}
rc = ivtv_start_v4l2_decode_stream(s, 0);
if (rc < 0) {
if (rc == -EAGAIN)
rc = ivtv_start_v4l2_decode_stream(s, 0);
if (rc < 0)
return rc;
}
}
if (s->type == IVTV_DEC_STREAM_TYPE_MPG)
return ivtv_set_speed(itv, speed);
return 0;
}
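/* Decoder write path, called with serialize_lock held by ivtv_v4l2_write().
   VOUT (sliced VBI) data is passed straight to the VBI handler. For MPG/YUV
   the decoder is started if needed, whole YUV frames are DMA'd directly from
   user space when possible, and remaining data is copied into driver buffers
   that are queued for DMA to the decoder. The lock is dropped while waiting
   for free buffers or for a pending DMA. */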
static ssize_t ivtv_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *pos)
{
struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
struct yuv_playback_info *yi = &itv->yuv_info;
struct ivtv_buffer *buf;
struct ivtv_queue q;
int bytes_written = 0;
int mode;
int rc;
DEFINE_WAIT(wait);
IVTV_DEBUG_HI_FILE("write %zd bytes to %s\n", count, s->name);
if (s->type != IVTV_DEC_STREAM_TYPE_MPG &&
s->type != IVTV_DEC_STREAM_TYPE_YUV &&
s->type != IVTV_DEC_STREAM_TYPE_VOUT)
/* not decoder streams */
return -EINVAL;
/* Try to claim this stream */
if (ivtv_claim_stream(id, s->type))
return -EBUSY;
/* This stream does not need to start any decoding */
if (s->type == IVTV_DEC_STREAM_TYPE_VOUT) {
int elems = count / sizeof(struct v4l2_sliced_vbi_data);
set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
return ivtv_write_vbi_from_user(itv,
(const struct v4l2_sliced_vbi_data __user *)user_buf, elems);
}
mode = s->type == IVTV_DEC_STREAM_TYPE_MPG ? OUT_MPG : OUT_YUV;
if (ivtv_set_output_mode(itv, mode) != mode) {
ivtv_release_stream(s);
return -EBUSY;
}
ivtv_queue_init(&q);
set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
/* Start decoder (returns 0 if already started) */
rc = ivtv_start_decoding(id, itv->speed);
if (rc) {
IVTV_DEBUG_WARN("Failed start decode stream %s\n", s->name);
/* failure, clean up */
clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
clear_bit(IVTV_F_S_APPL_IO, &s->s_flags);
return rc;
}
retry:
/* If possible, just DMA the entire frame - Check the data transfer size
since we may get here before the stream has been fully set up */
if (mode == OUT_YUV && s->q_full.length == 0 && itv->dma_data_req_size) {
while (count >= itv->dma_data_req_size) {
rc = ivtv_yuv_udma_stream_frame(itv, (void __user *)user_buf);
if (rc < 0)
return rc;
bytes_written += itv->dma_data_req_size;
user_buf += itv->dma_data_req_size;
count -= itv->dma_data_req_size;
}
if (count == 0) {
IVTV_DEBUG_HI_FILE("Wrote %d bytes to %s (%d)\n", bytes_written, s->name, s->q_full.bytesused);
return bytes_written;
}
}
for (;;) {
/* Gather buffers */
while (q.length - q.bytesused < count && (buf = ivtv_dequeue(s, &s->q_io)))
ivtv_enqueue(s, buf, &q);
while (q.length - q.bytesused < count && (buf = ivtv_dequeue(s, &s->q_free))) {
ivtv_enqueue(s, buf, &q);
}
if (q.buffers)
break;
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
mutex_unlock(&itv->serialize_lock);
prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
/* New buffers might have become free before we were added to the waitqueue */
if (!s->q_free.buffers)
schedule();
finish_wait(&s->waitq, &wait);
mutex_lock(&itv->serialize_lock);
if (signal_pending(current)) {
IVTV_DEBUG_INFO("User stopped %s\n", s->name);
return -EINTR;
}
}
/* copy user data into buffers */
while ((buf = ivtv_dequeue(s, &q))) {
/* yuv is a pain. Don't copy more data than needed for a single
frame, otherwise we lose sync with the incoming stream */
if (s->type == IVTV_DEC_STREAM_TYPE_YUV &&
yi->stream_size + count > itv->dma_data_req_size)
rc = ivtv_buf_copy_from_user(s, buf, user_buf,
itv->dma_data_req_size - yi->stream_size);
else
rc = ivtv_buf_copy_from_user(s, buf, user_buf, count);
/* Make sure we really got all the user data */
if (rc < 0) {
ivtv_queue_move(s, &q, NULL, &s->q_free, 0);
return rc;
}
user_buf += rc;
count -= rc;
bytes_written += rc;
if (s->type == IVTV_DEC_STREAM_TYPE_YUV) {
yi->stream_size += rc;
/* If we have a complete yuv frame, break loop now */
if (yi->stream_size == itv->dma_data_req_size) {
ivtv_enqueue(s, buf, &s->q_full);
yi->stream_size = 0;
break;
}
}
if (buf->bytesused != s->buf_size) {
/* incomplete, leave in q_io for next time */
ivtv_enqueue(s, buf, &s->q_io);
break;
}
/* Byteswap MPEG buffer */
if (s->type == IVTV_DEC_STREAM_TYPE_MPG)
ivtv_buf_swap(buf);
ivtv_enqueue(s, buf, &s->q_full);
}
if (test_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags)) {
if (s->q_full.length >= itv->dma_data_req_size) {
int got_sig;
if (mode == OUT_YUV)
ivtv_yuv_setup_stream_frame(itv);
mutex_unlock(&itv->serialize_lock);
prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
while (!(got_sig = signal_pending(current)) &&
test_bit(IVTV_F_S_DMA_PENDING, &s->s_flags)) {
schedule();
}
finish_wait(&itv->dma_waitq, &wait);
mutex_lock(&itv->serialize_lock);
if (got_sig) {
IVTV_DEBUG_INFO("User interrupted %s\n", s->name);
return -EINTR;
}
clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 1);
}
}
/* more user data is available, wait until buffers become free
to transfer the rest. */
if (count && !(filp->f_flags & O_NONBLOCK))
goto retry;
IVTV_DEBUG_HI_FILE("Wrote %d bytes to %s (%d)\n", bytes_written, s->name, s->q_full.bytesused);
return bytes_written;
}
ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *pos)
{
struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
ssize_t res;
if (mutex_lock_interruptible(&itv->serialize_lock))
return -ERESTARTSYS;
res = ivtv_write(filp, user_buf, count, pos);
mutex_unlock(&itv->serialize_lock);
return res;
}
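/* Decoder poll: report EPOLLPRI for pending V4L2 events (or the legacy
   vsync/stop flags) and EPOLLOUT | EPOLLWRNORM when free buffers are
   available for writing. */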
__poll_t ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
{
struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
__poll_t res = 0;
/* add stream's waitq to the poll list */
IVTV_DEBUG_HI_FILE("Decoder poll\n");
/* If there are subscribed events, then only use the new event
API instead of the old video.h based API. */
if (!list_empty(&id->fh.subscribed)) {
poll_wait(filp, &id->fh.wait, wait);
/* Turn off the old-style vsync events */
clear_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags);
if (v4l2_event_pending(&id->fh))
res = EPOLLPRI;
} else {
/* This is the old-style API which is here only for backwards
compatibility. */
poll_wait(filp, &s->waitq, wait);
set_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags);
if (test_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags) ||
test_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags))
res = EPOLLPRI;
}
/* Allow write if buffers are available for writing */
if (s->q_free.buffers)
res |= EPOLLOUT | EPOLLWRNORM;
return res;
}
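/* Encoder poll: if the caller polls for readable data and no capture is
   running yet on a non-radio stream, start one first; then report readable
   data, pending events, or EPOLLHUP at end-of-stream. */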
__poll_t ivtv_v4l2_enc_poll(struct file *filp, poll_table *wait)
{
__poll_t req_events = poll_requested_events(wait);
struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
int eof = test_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
__poll_t res = 0;
/* Start a capture if there is none */
if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags) &&
s->type != IVTV_ENC_STREAM_TYPE_RAD &&
(req_events & (EPOLLIN | EPOLLRDNORM))) {
int rc;
mutex_lock(&itv->serialize_lock);
rc = ivtv_start_capture(id);
mutex_unlock(&itv->serialize_lock);
if (rc) {
IVTV_DEBUG_INFO("Could not start capture for %s (%d)\n",
s->name, rc);
return EPOLLERR;
}
IVTV_DEBUG_FILE("Encoder poll started capture\n");
}
/* add stream's waitq to the poll list */
IVTV_DEBUG_HI_FILE("Encoder poll\n");
poll_wait(filp, &s->waitq, wait);
if (v4l2_event_pending(&id->fh))
res |= EPOLLPRI;
else
poll_wait(filp, &id->fh.wait, wait);
if (s->q_full.length || s->q_io.length)
return res | EPOLLIN | EPOLLRDNORM;
if (eof)
return res | EPOLLHUP;
return res;
}
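/* Stop an encoder capture on close: also stops an embedded VBI capture that
   was started for VBI insertion, but leaves a VBI stream running if it is
   still in use internally. Unless a gop_end stop was requested, the stream
   claim is released immediately. */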
void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end)
{
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
IVTV_DEBUG_FILE("close() of %s\n", s->name);
/* 'Unclaim' this stream */
/* Stop capturing */
if (test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
IVTV_DEBUG_INFO("close stopping capture\n");
/* Special case: a running VBI capture for VBI insertion
in the mpeg stream. Need to stop that too. */
if (id->type == IVTV_ENC_STREAM_TYPE_MPG &&
test_bit(IVTV_F_S_STREAMING, &s_vbi->s_flags) &&
!test_bit(IVTV_F_S_APPL_IO, &s_vbi->s_flags)) {
IVTV_DEBUG_INFO("close stopping embedded VBI capture\n");
ivtv_stop_v4l2_encode_stream(s_vbi, 0);
}
if ((id->type == IVTV_DEC_STREAM_TYPE_VBI ||
id->type == IVTV_ENC_STREAM_TYPE_VBI) &&
test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
/* Also used internally, don't stop capturing */
s->fh = NULL;
}
else {
ivtv_stop_v4l2_encode_stream(s, gop_end);
}
}
if (!gop_end) {
clear_bit(IVTV_F_S_APPL_IO, &s->s_flags);
clear_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
ivtv_release_stream(s);
}
}
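/* Stop a decoder stream on close: restore the YUV output registers if
   needed, stop the decoder with the requested stop flags/PTS, reset the
   output mode and playback speed, and release the stream claim. */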
static void ivtv_stop_decoding(struct ivtv_open_id *id, int flags, u64 pts)
{
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
IVTV_DEBUG_FILE("close() of %s\n", s->name);
if (id->type == IVTV_DEC_STREAM_TYPE_YUV &&
test_bit(IVTV_F_I_DECODING_YUV, &itv->i_flags)) {
/* Restore registers we've changed & clean up any mess */
ivtv_yuv_close(itv);
}
/* Stop decoding */
if (test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
IVTV_DEBUG_INFO("close stopping decode\n");
ivtv_stop_v4l2_decode_stream(s, flags, pts);
itv->output_mode = OUT_NONE;
}
clear_bit(IVTV_F_S_APPL_IO, &s->s_flags);
clear_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
if (itv->output_mode == OUT_UDMA_YUV && id->yuv_frames)
itv->output_mode = OUT_NONE;
itv->speed = 0;
clear_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags);
ivtv_release_stream(s);
}
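/* File release handler: switch back to TV mode when the last radio user
   closes, remove the file handle, and stop capturing or decoding if this
   handle had claimed the stream. */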
int ivtv_v4l2_close(struct file *filp)
{
struct v4l2_fh *fh = filp->private_data;
struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
IVTV_DEBUG_FILE("close %s\n", s->name);
mutex_lock(&itv->serialize_lock);
/* Stop radio */
if (id->type == IVTV_ENC_STREAM_TYPE_RAD &&
v4l2_fh_is_singular_file(filp)) {
/* Closing radio device, return to TV mode */
ivtv_mute(itv);
/* Mark that the radio is no longer in use */
clear_bit(IVTV_F_I_RADIO_USER, &itv->i_flags);
/* Switch tuner to TV */
ivtv_call_all(itv, video, s_std, itv->std);
/* Select correct audio input (i.e. TV tuner or Line in) */
ivtv_audio_set_io(itv);
if (itv->hw_flags & IVTV_HW_SAA711X) {
ivtv_call_hw(itv, IVTV_HW_SAA711X, video, s_crystal_freq,
SAA7115_FREQ_32_11_MHZ, 0);
}
if (atomic_read(&itv->capturing) > 0) {
/* Undo video mute */
ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1,
v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute) |
(v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8));
}
/* Done! Unmute and continue. */
ivtv_unmute(itv);
}
v4l2_fh_del(fh);
v4l2_fh_exit(fh);
/* Easy case first: this stream was never claimed by us */
if (s->fh != &id->fh)
goto close_done;
/* 'Unclaim' this stream */
if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
struct ivtv_stream *s_vout = &itv->streams[IVTV_DEC_STREAM_TYPE_VOUT];
ivtv_stop_decoding(id, V4L2_DEC_CMD_STOP_TO_BLACK | V4L2_DEC_CMD_STOP_IMMEDIATELY, 0);
/* If all output streams are closed, and if the user doesn't have
IVTV_DEC_STREAM_TYPE_VOUT open, then disable CC on TV-out. */
if (itv->output_mode == OUT_NONE && !test_bit(IVTV_F_S_APPL_IO, &s_vout->s_flags)) {
/* disable CC on TV-out */
ivtv_disable_cc(itv);
}
} else {
ivtv_stop_capture(id, 0);
}
close_done:
kfree(id);
mutex_unlock(&itv->serialize_lock);
return 0;
}
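/* Serialized open handler, called with the video_device lock held:
   initialize the card on first open, check the firmware, reject conflicting
   MPG/YUV decoder opens, allocate the per-open state and switch to radio
   mode when the radio node is opened. */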
static int ivtv_open(struct file *filp)
{
struct video_device *vdev = video_devdata(filp);
struct ivtv_stream *s = video_get_drvdata(vdev);
struct ivtv *itv = s->itv;
struct ivtv_open_id *item;
int res = 0;
IVTV_DEBUG_FILE("open %s\n", s->name);
if (ivtv_init_on_first_open(itv)) {
IVTV_ERR("Failed to initialize on device %s\n",
video_device_node_name(vdev));
return -ENXIO;
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
/* Unless ivtv_fw_debug is set, error out if firmware dead. */
if (ivtv_fw_debug) {
IVTV_WARN("Opening %s with dead firmware lockout disabled\n",
video_device_node_name(vdev));
IVTV_WARN("Selected firmware errors will be ignored\n");
} else {
#else
if (1) {
#endif
res = ivtv_firmware_check(itv, "ivtv_serialized_open");
if (res == -EAGAIN)
res = ivtv_firmware_check(itv, "ivtv_serialized_open");
if (res < 0)
return -EIO;
}
if (s->type == IVTV_DEC_STREAM_TYPE_MPG &&
test_bit(IVTV_F_S_CLAIMED, &itv->streams[IVTV_DEC_STREAM_TYPE_YUV].s_flags))
return -EBUSY;
if (s->type == IVTV_DEC_STREAM_TYPE_YUV &&
test_bit(IVTV_F_S_CLAIMED, &itv->streams[IVTV_DEC_STREAM_TYPE_MPG].s_flags))
return -EBUSY;
if (s->type == IVTV_DEC_STREAM_TYPE_YUV) {
if (read_reg(0x82c) == 0) {
IVTV_ERR("Tried to open YUV output device but need to send data to mpeg decoder before it can be used\n");
/* return -ENODEV; */
}
ivtv_udma_alloc(itv);
}
/* Allocate memory */
item = kzalloc(sizeof(struct ivtv_open_id), GFP_KERNEL);
if (NULL == item) {
IVTV_DEBUG_WARN("nomem on v4l2 open\n");
return -ENOMEM;
}
v4l2_fh_init(&item->fh, &s->vdev);
item->itv = itv;
item->type = s->type;
filp->private_data = &item->fh;
v4l2_fh_add(&item->fh);
if (item->type == IVTV_ENC_STREAM_TYPE_RAD &&
v4l2_fh_is_singular_file(filp)) {
if (!test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) {
if (atomic_read(&itv->capturing) > 0) {
/* switching to radio while capture is
in progress is not polite */
v4l2_fh_del(&item->fh);
v4l2_fh_exit(&item->fh);
kfree(item);
return -EBUSY;
}
}
/* Mark that the radio is being used. */
set_bit(IVTV_F_I_RADIO_USER, &itv->i_flags);
/* We have the radio */
ivtv_mute(itv);
/* Switch tuner to radio */
ivtv_call_all(itv, tuner, s_radio);
/* Select the correct audio input (i.e. radio tuner) */
ivtv_audio_set_io(itv);
if (itv->hw_flags & IVTV_HW_SAA711X) {
ivtv_call_hw(itv, IVTV_HW_SAA711X, video, s_crystal_freq,
SAA7115_FREQ_32_11_MHZ, SAA7115_FREQ_FL_APLL);
}
/* Done! Unmute and continue. */
ivtv_unmute(itv);
}
/* YUV or MPG Decoding Mode? */
if (s->type == IVTV_DEC_STREAM_TYPE_MPG) {
clear_bit(IVTV_F_I_DEC_YUV, &itv->i_flags);
} else if (s->type == IVTV_DEC_STREAM_TYPE_YUV) {
set_bit(IVTV_F_I_DEC_YUV, &itv->i_flags);
/* For yuv, we need to know the dma size before we start */
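/* 1080 bytes per line presumably corresponds to 720 pixels at 1.5 bytes
   per pixel (4:2:0); the source height is rounded up to a multiple of
   32 lines. */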
itv->dma_data_req_size =
1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
itv->yuv_info.stream_size = 0;
}
return 0;
}
int ivtv_v4l2_open(struct file *filp)
{
struct video_device *vdev = video_devdata(filp);
int res;
if (mutex_lock_interruptible(vdev->lock))
return -ERESTARTSYS;
res = ivtv_open(filp);
mutex_unlock(vdev->lock);
return res;
}
void ivtv_mute(struct ivtv *itv)
{
if (atomic_read(&itv->capturing))
ivtv_vapi(itv, CX2341X_ENC_MUTE_AUDIO, 1, 1);
IVTV_DEBUG_INFO("Mute\n");
}
void ivtv_unmute(struct ivtv *itv)
{
if (atomic_read(&itv->capturing)) {
ivtv_msleep_timeout(100, 0);
ivtv_vapi(itv, CX2341X_ENC_MISC, 1, 12);
ivtv_vapi(itv, CX2341X_ENC_MUTE_AUDIO, 1, 0);
}
IVTV_DEBUG_INFO("Unmute\n");
}
| linux-master | drivers/media/pci/ivtv/ivtv-fileops.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
On Screen Display cx23415 Framebuffer driver
This module presents the cx23415 OSD (onscreen display) framebuffer memory
as a standard Linux /dev/fb style framebuffer device. The framebuffer has
support for 8, 16 & 32 bpp packed pixel formats with alpha channel. In 16bpp
mode, there is a choice of a three color depths (12, 15 or 16 bits), but no
local alpha. The colorspace is selectable between rgb & yuv.
Depending on the TV standard configured in the ivtv module at load time,
the initial resolution is either 640x400 (NTSC) or 640x480 (PAL) at 8bpp.
Video timings are locked to ensure a vertical refresh rate of 50Hz (PAL)
or 59.94Hz (NTSC)
Copyright (c) 2003 Matt T. Yourst <[email protected]>
Derived from drivers/video/vesafb.c
Portions (c) 1998 Gerd Knorr <[email protected]>
2.6 kernel port:
Copyright (C) 2004 Matthias Badaire
Copyright (C) 2004 Chris Kennedy <[email protected]>
Copyright (C) 2006 Ian Armstrong <[email protected]>
*/
#include "ivtv-driver.h"
#include "ivtv-cards.h"
#include "ivtv-i2c.h"
#include "ivtv-udma.h"
#include "ivtv-mailbox.h"
#include "ivtv-firmware.h"
#include <linux/fb.h>
#include <linux/ivtvfb.h>
#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
#include <asm/memtype.h>
#endif
/* card parameters */
static int ivtvfb_card_id = -1;
static int ivtvfb_debug;
static bool ivtvfb_force_pat = IS_ENABLED(CONFIG_VIDEO_FB_IVTV_FORCE_PAT);
static bool osd_laced;
static int osd_depth;
static int osd_upper;
static int osd_left;
static unsigned int osd_yres;
static unsigned int osd_xres;
module_param(ivtvfb_card_id, int, 0444);
module_param_named(debug, ivtvfb_debug, int, 0644);
module_param_named(force_pat, ivtvfb_force_pat, bool, 0644);
module_param(osd_laced, bool, 0444);
module_param(osd_depth, int, 0444);
module_param(osd_upper, int, 0444);
module_param(osd_left, int, 0444);
module_param(osd_yres, uint, 0444);
module_param(osd_xres, uint, 0444);
MODULE_PARM_DESC(ivtvfb_card_id,
"Only use framebuffer of the specified ivtv card (0-31)\n"
"\t\t\tdefault -1: initialize all available framebuffers");
MODULE_PARM_DESC(debug,
"Debug level (bitmask). Default: errors only\n"
"\t\t\t(debug = 3 gives full debugging)");
MODULE_PARM_DESC(force_pat,
"Force initialization on x86 PAT-enabled systems (bool).\n");
/* Why upper, left, xres, yres, depth, laced? To match terminology used
by fbset.
Why start at 1 for left & upper coordinate? Because X doesn't allow 0 */
MODULE_PARM_DESC(osd_laced,
"Interlaced mode\n"
"\t\t\t0=off\n"
"\t\t\t1=on\n"
"\t\t\tdefault off");
MODULE_PARM_DESC(osd_depth,
"Bits per pixel - 8, 16, 32\n"
"\t\t\tdefault 8");
MODULE_PARM_DESC(osd_upper,
"Vertical start position\n"
"\t\t\tdefault 0 (Centered)");
MODULE_PARM_DESC(osd_left,
"Horizontal start position\n"
"\t\t\tdefault 0 (Centered)");
MODULE_PARM_DESC(osd_yres,
"Display height\n"
"\t\t\tdefault 480 (PAL)\n"
"\t\t\t 400 (NTSC)");
MODULE_PARM_DESC(osd_xres,
"Display width\n"
"\t\t\tdefault 640");
MODULE_AUTHOR("Kevin Thayer, Chris Kennedy, Hans Verkuil, John Harvey, Ian Armstrong");
MODULE_LICENSE("GPL");
/* --------------------------------------------------------------------- */
#define IVTVFB_DBGFLG_WARN (1 << 0)
#define IVTVFB_DBGFLG_INFO (1 << 1)
#define IVTVFB_DEBUG(x, type, fmt, args...) \
do { \
if ((x) & ivtvfb_debug) \
printk(KERN_INFO "ivtvfb%d " type ": " fmt, itv->instance , ## args); \
} while (0)
#define IVTVFB_DEBUG_WARN(fmt, args...) IVTVFB_DEBUG(IVTVFB_DBGFLG_WARN, "warning", fmt , ## args)
#define IVTVFB_DEBUG_INFO(fmt, args...) IVTVFB_DEBUG(IVTVFB_DBGFLG_INFO, "info", fmt , ## args)
/* Standard kernel messages */
#define IVTVFB_ERR(fmt, args...) printk(KERN_ERR "ivtvfb%d: " fmt, itv->instance , ## args)
#define IVTVFB_WARN(fmt, args...) printk(KERN_WARNING "ivtvfb%d: " fmt, itv->instance , ## args)
#define IVTVFB_INFO(fmt, args...) printk(KERN_INFO "ivtvfb%d: " fmt, itv->instance , ## args)
/* --------------------------------------------------------------------- */
#define IVTV_OSD_MAX_WIDTH 720
#define IVTV_OSD_MAX_HEIGHT 576
#define IVTV_OSD_BPP_8 0x00
#define IVTV_OSD_BPP_16_444 0x03
#define IVTV_OSD_BPP_16_555 0x02
#define IVTV_OSD_BPP_16_565 0x01
#define IVTV_OSD_BPP_32 0x04
struct osd_info {
/* Physical base address */
unsigned long video_pbase;
/* Relative base address (relative to start of decoder memory) */
u32 video_rbase;
/* Mapped base address */
volatile char __iomem *video_vbase;
/* Buffer size */
u32 video_buffer_size;
/* video_base rounded down as required by hardware MTRRs */
unsigned long fb_start_aligned_physaddr;
/* video_base rounded up as required by hardware MTRRs */
unsigned long fb_end_aligned_physaddr;
int wc_cookie;
/* Store the buffer offset */
int set_osd_coords_x;
int set_osd_coords_y;
/* Current dimensions (NOT VISIBLE SIZE!) */
int display_width;
int display_height;
int display_byte_stride;
/* Current bits per pixel */
int bits_per_pixel;
int bytes_per_pixel;
/* Frame buffer stuff */
struct fb_info ivtvfb_info;
struct fb_var_screeninfo ivtvfb_defined;
struct fb_fix_screeninfo ivtvfb_fix;
/* Used for a warm start */
struct fb_var_screeninfo fbvar_cur;
int blank_cur;
u32 palette_cur[256];
u32 pan_cur;
};
struct ivtv_osd_coords {
unsigned long offset;
unsigned long max_offset;
int pixel_stride;
int lines;
int x;
int y;
};
/* --------------------------------------------------------------------- */
/* ivtv API calls for framebuffer related support */
static int ivtvfb_get_framebuffer(struct ivtv *itv, u32 *fbbase,
u32 *fblength)
{
u32 data[CX2341X_MBOX_MAX_DATA];
int rc;
ivtv_firmware_check(itv, "ivtvfb_get_framebuffer");
rc = ivtv_vapi_result(itv, data, CX2341X_OSD_GET_FRAMEBUFFER, 0);
*fbbase = data[0];
*fblength = data[1];
return rc;
}
static int ivtvfb_get_osd_coords(struct ivtv *itv,
struct ivtv_osd_coords *osd)
{
struct osd_info *oi = itv->osd_info;
u32 data[CX2341X_MBOX_MAX_DATA];
ivtv_vapi_result(itv, data, CX2341X_OSD_GET_OSD_COORDS, 0);
osd->offset = data[0] - oi->video_rbase;
osd->max_offset = oi->display_width * oi->display_height * 4;
osd->pixel_stride = data[1];
osd->lines = data[2];
osd->x = data[3];
osd->y = data[4];
return 0;
}
static int ivtvfb_set_osd_coords(struct ivtv *itv, const struct ivtv_osd_coords *osd)
{
struct osd_info *oi = itv->osd_info;
oi->display_width = osd->pixel_stride;
oi->display_byte_stride = osd->pixel_stride * oi->bytes_per_pixel;
oi->set_osd_coords_x += osd->x;
oi->set_osd_coords_y = osd->y;
return ivtv_vapi(itv, CX2341X_OSD_SET_OSD_COORDS, 5,
osd->offset + oi->video_rbase,
osd->pixel_stride,
osd->lines, osd->x, osd->y);
}
static int ivtvfb_set_display_window(struct ivtv *itv, struct v4l2_rect *ivtv_window)
{
int osd_height_limit = itv->is_out_50hz ? 576 : 480;
/* Only fail if resolution too high, otherwise fudge the start coords. */
if ((ivtv_window->height > osd_height_limit) || (ivtv_window->width > IVTV_OSD_MAX_WIDTH))
return -EINVAL;
/* Ensure we don't exceed display limits */
if (ivtv_window->top + ivtv_window->height > osd_height_limit) {
IVTVFB_DEBUG_WARN("ivtv_ioctl_fb_set_display_window - Invalid height setting (%d, %d)\n",
ivtv_window->top, ivtv_window->height);
ivtv_window->top = osd_height_limit - ivtv_window->height;
}
if (ivtv_window->left + ivtv_window->width > IVTV_OSD_MAX_WIDTH) {
IVTVFB_DEBUG_WARN("ivtv_ioctl_fb_set_display_window - Invalid width setting (%d, %d)\n",
ivtv_window->left, ivtv_window->width);
ivtv_window->left = IVTV_OSD_MAX_WIDTH - ivtv_window->width;
}
/* Set the OSD origin */
write_reg((ivtv_window->top << 16) | ivtv_window->left, 0x02a04);
/* How much to display */
write_reg(((ivtv_window->top+ivtv_window->height) << 16) | (ivtv_window->left+ivtv_window->width), 0x02a08);
/* Pass this info back to the yuv handler */
itv->yuv_info.osd_vis_w = ivtv_window->width;
itv->yuv_info.osd_vis_h = ivtv_window->height;
itv->yuv_info.osd_x_offset = ivtv_window->left;
itv->yuv_info.osd_y_offset = ivtv_window->top;
return 0;
}
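/* Pin the user buffer, start a UDMA transfer to decoder memory at
   ivtv_dest_addr and wait (interruptibly) for it to complete before
   unmapping the pages again. */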
static int ivtvfb_prep_dec_dma_to_device(struct ivtv *itv,
unsigned long ivtv_dest_addr, void __user *userbuf,
int size_in_bytes)
{
DEFINE_WAIT(wait);
int got_sig = 0;
mutex_lock(&itv->udma.lock);
/* Map User DMA */
if (ivtv_udma_setup(itv, ivtv_dest_addr, userbuf, size_in_bytes) <= 0) {
mutex_unlock(&itv->udma.lock);
IVTVFB_WARN("ivtvfb_prep_dec_dma_to_device, Error with pin_user_pages: %d bytes, %d pages returned\n",
size_in_bytes, itv->udma.page_count);
/* pin_user_pages must have failed completely */
return -EIO;
}
IVTVFB_DEBUG_INFO("ivtvfb_prep_dec_dma_to_device, %d bytes, %d pages\n",
size_in_bytes, itv->udma.page_count);
ivtv_udma_prepare(itv);
prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
/* if no UDMA is pending and no UDMA is in progress, then the DMA
is finished */
while (test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags) ||
test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
/* don't interrupt if the DMA is in progress but break off
a still pending DMA. */
got_sig = signal_pending(current);
if (got_sig && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags))
break;
got_sig = 0;
schedule();
}
finish_wait(&itv->dma_waitq, &wait);
/* Unmap Last DMA Xfer */
ivtv_udma_unmap(itv);
mutex_unlock(&itv->udma.lock);
if (got_sig) {
IVTV_DEBUG_INFO("User stopped OSD\n");
return -EINTR;
}
return 0;
}
static int ivtvfb_prep_frame(struct ivtv *itv, int cmd, void __user *source,
unsigned long dest_offset, int count)
{
DEFINE_WAIT(wait);
struct osd_info *oi = itv->osd_info;
/* Nothing to do */
if (count == 0) {
IVTVFB_DEBUG_WARN("ivtvfb_prep_frame: Nothing to do. count = 0\n");
return -EINVAL;
}
/* Check Total FB Size */
if ((dest_offset + count) > oi->video_buffer_size) {
IVTVFB_WARN("ivtvfb_prep_frame: Overflowing the framebuffer %ld, only %d available\n",
dest_offset + count, oi->video_buffer_size);
return -E2BIG;
}
/* Not fatal, but will have undesirable results */
if ((unsigned long)source & 3)
IVTVFB_WARN("ivtvfb_prep_frame: Source address not 32 bit aligned (%p)\n",
source);
if (dest_offset & 3)
IVTVFB_WARN("ivtvfb_prep_frame: Dest offset not 32 bit aligned (%ld)\n", dest_offset);
if (count & 3)
IVTVFB_WARN("ivtvfb_prep_frame: Count not a multiple of 4 (%d)\n", count);
/* Check Source */
if (!access_ok(source + dest_offset, count)) {
IVTVFB_WARN("Invalid userspace pointer %p\n", source);
IVTVFB_DEBUG_WARN("access_ok() failed for offset 0x%08lx source %p count %d\n",
dest_offset, source, count);
return -EINVAL;
}
/* OSD Address to send DMA to */
dest_offset += IVTV_DECODER_OFFSET + oi->video_rbase;
/* Fill Buffers */
return ivtvfb_prep_dec_dma_to_device(itv, dest_offset, source, count);
}
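/* Framebuffer write handler: small or unaligned writes are copied with
   copy_from_user(); writes of 4096 bytes or more with matching source and
   destination alignment are split into an aligning lead copy, a 32-bit
   aligned DMA transfer and a tail copy. */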
static ssize_t ivtvfb_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos)
{
unsigned long p = *ppos;
void *dst;
int err = 0;
int dma_err;
unsigned long total_size;
struct ivtv *itv = (struct ivtv *) info->par;
unsigned long dma_offset =
IVTV_DECODER_OFFSET + itv->osd_info->video_rbase;
unsigned long dma_size;
u16 lead = 0, tail = 0;
if (!info->screen_base)
return -ENODEV;
total_size = info->screen_size;
if (total_size == 0)
total_size = info->fix.smem_len;
if (p > total_size)
return -EFBIG;
if (count > total_size) {
err = -EFBIG;
count = total_size;
}
if (count + p > total_size) {
if (!err)
err = -ENOSPC;
count = total_size - p;
}
dst = (void __force *) (info->screen_base + p);
if (info->fbops->fb_sync)
info->fbops->fb_sync(info);
/* If transfer size > threshold and both src/dst
addresses are aligned, use DMA */
if (count >= 4096 &&
((unsigned long)buf & 3) == ((unsigned long)dst & 3)) {
/* Odd address = can't DMA. Align */
if ((unsigned long)dst & 3) {
lead = 4 - ((unsigned long)dst & 3);
if (copy_from_user(dst, buf, lead))
return -EFAULT;
buf += lead;
dst += lead;
}
/* DMA resolution is 32 bits */
if ((count - lead) & 3)
tail = (count - lead) & 3;
/* DMA the data */
dma_size = count - lead - tail;
dma_err = ivtvfb_prep_dec_dma_to_device(itv,
p + lead + dma_offset, (void __user *)buf, dma_size);
if (dma_err)
return dma_err;
dst += dma_size;
buf += dma_size;
/* Copy any leftover data */
if (tail && copy_from_user(dst, buf, tail))
return -EFAULT;
} else if (copy_from_user(dst, buf, count)) {
return -EFAULT;
}
if (!err)
*ppos += count;
return (err) ? err : count;
}
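/* Framebuffer ioctl handler: FBIOGET_VBLANK reports the current field/line
   counters, FBIO_WAITFORVSYNC waits up to 50 ms for the next vsync, and
   IVTVFB_IOC_DMA_FRAME DMAs a user-supplied frame into the OSD buffer. */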
static int ivtvfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
{
DEFINE_WAIT(wait);
struct ivtv *itv = (struct ivtv *)info->par;
int rc = 0;
switch (cmd) {
case FBIOGET_VBLANK: {
struct fb_vblank vblank;
u32 trace;
memset(&vblank, 0, sizeof(struct fb_vblank));
vblank.flags = FB_VBLANK_HAVE_COUNT | FB_VBLANK_HAVE_VCOUNT |
FB_VBLANK_HAVE_VSYNC;
trace = read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16;
if (itv->is_out_50hz && trace > 312)
trace -= 312;
else if (itv->is_out_60hz && trace > 262)
trace -= 262;
if (trace == 1)
vblank.flags |= FB_VBLANK_VSYNCING;
vblank.count = itv->last_vsync_field;
vblank.vcount = trace;
vblank.hcount = 0;
if (copy_to_user((void __user *)arg, &vblank, sizeof(vblank)))
return -EFAULT;
return 0;
}
case FBIO_WAITFORVSYNC:
prepare_to_wait(&itv->vsync_waitq, &wait, TASK_INTERRUPTIBLE);
if (!schedule_timeout(msecs_to_jiffies(50)))
rc = -ETIMEDOUT;
finish_wait(&itv->vsync_waitq, &wait);
return rc;
case IVTVFB_IOC_DMA_FRAME: {
struct ivtvfb_dma_frame args;
IVTVFB_DEBUG_INFO("IVTVFB_IOC_DMA_FRAME\n");
if (copy_from_user(&args, (void __user *)arg, sizeof(args)))
return -EFAULT;
return ivtvfb_prep_frame(itv, cmd, args.source, args.dest_offset, args.count);
}
default:
IVTVFB_DEBUG_INFO("Unknown ioctl %08x\n", cmd);
return -EINVAL;
}
return 0;
}
/* Framebuffer device handling */
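/* Apply an already validated fb_var_screeninfo: select the RGB/YUV
   colorspace and pixel format, set the flicker filter, program the OSD
   dimensions and display window, and keep a copy of the settings for a
   warm restart. */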
static int ivtvfb_set_var(struct ivtv *itv, struct fb_var_screeninfo *var)
{
struct osd_info *oi = itv->osd_info;
struct ivtv_osd_coords ivtv_osd;
struct v4l2_rect ivtv_window;
int osd_mode = -1;
IVTVFB_DEBUG_INFO("ivtvfb_set_var\n");
/* Select color space */
if (var->nonstd) /* YUV */
write_reg(read_reg(0x02a00) | 0x0002000, 0x02a00);
else /* RGB */
write_reg(read_reg(0x02a00) & ~0x0002000, 0x02a00);
/* Set the color mode */
switch (var->bits_per_pixel) {
case 8:
osd_mode = IVTV_OSD_BPP_8;
break;
case 32:
osd_mode = IVTV_OSD_BPP_32;
break;
case 16:
switch (var->green.length) {
case 4:
osd_mode = IVTV_OSD_BPP_16_444;
break;
case 5:
osd_mode = IVTV_OSD_BPP_16_555;
break;
case 6:
osd_mode = IVTV_OSD_BPP_16_565;
break;
default:
IVTVFB_DEBUG_WARN("ivtvfb_set_var - Invalid bpp\n");
}
break;
default:
IVTVFB_DEBUG_WARN("ivtvfb_set_var - Invalid bpp\n");
}
/* Set video mode. Although rare, the display can become scrambled even
if we don't change mode. Always 'bounce' to osd_mode via mode 0 */
if (osd_mode != -1) {
ivtv_vapi(itv, CX2341X_OSD_SET_PIXEL_FORMAT, 1, 0);
ivtv_vapi(itv, CX2341X_OSD_SET_PIXEL_FORMAT, 1, osd_mode);
}
oi->bits_per_pixel = var->bits_per_pixel;
oi->bytes_per_pixel = var->bits_per_pixel / 8;
/* Set the flicker filter */
switch (var->vmode & FB_VMODE_MASK) {
case FB_VMODE_NONINTERLACED: /* Filter on */
ivtv_vapi(itv, CX2341X_OSD_SET_FLICKER_STATE, 1, 1);
break;
case FB_VMODE_INTERLACED: /* Filter off */
ivtv_vapi(itv, CX2341X_OSD_SET_FLICKER_STATE, 1, 0);
break;
default:
IVTVFB_DEBUG_WARN("ivtvfb_set_var - Invalid video mode\n");
}
/* Read the current osd info */
ivtvfb_get_osd_coords(itv, &ivtv_osd);
/* Now set the OSD to the size we want */
ivtv_osd.pixel_stride = var->xres_virtual;
ivtv_osd.lines = var->yres_virtual;
ivtv_osd.x = 0;
ivtv_osd.y = 0;
ivtvfb_set_osd_coords(itv, &ivtv_osd);
/* Can't seem to find the right API combo for this.
Use another function which does what we need through direct register access. */
ivtv_window.width = var->xres;
ivtv_window.height = var->yres;
/* Minimum margin cannot be 0, as X won't allow such a mode */
if (!var->upper_margin)
var->upper_margin++;
if (!var->left_margin)
var->left_margin++;
ivtv_window.top = var->upper_margin - 1;
ivtv_window.left = var->left_margin - 1;
ivtvfb_set_display_window(itv, &ivtv_window);
/* Pass screen size back to yuv handler */
itv->yuv_info.osd_full_w = ivtv_osd.pixel_stride;
itv->yuv_info.osd_full_h = ivtv_osd.lines;
/* Force update of yuv registers */
itv->yuv_info.yuv_forced_update = 1;
/* Keep a copy of these settings */
memcpy(&oi->fbvar_cur, var, sizeof(oi->fbvar_cur));
IVTVFB_DEBUG_INFO("Display size: %dx%d (virtual %dx%d) @ %dbpp\n",
var->xres, var->yres,
var->xres_virtual, var->yres_virtual,
var->bits_per_pixel);
IVTVFB_DEBUG_INFO("Display position: %d, %d\n",
var->left_margin, var->upper_margin);
IVTVFB_DEBUG_INFO("Display filter: %s\n",
(var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED ? "on" : "off");
IVTVFB_DEBUG_INFO("Color space: %s\n", var->nonstd ? "YUV" : "RGB");
return 0;
}
static int ivtvfb_get_fix(struct ivtv *itv, struct fb_fix_screeninfo *fix)
{
struct osd_info *oi = itv->osd_info;
IVTVFB_DEBUG_INFO("ivtvfb_get_fix\n");
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
strscpy(fix->id, "cx23415 TV out", sizeof(fix->id));
fix->smem_start = oi->video_pbase;
fix->smem_len = oi->video_buffer_size;
fix->type = FB_TYPE_PACKED_PIXELS;
fix->visual = (oi->bits_per_pixel == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
fix->xpanstep = 1;
fix->ypanstep = 1;
fix->ywrapstep = 0;
fix->line_length = oi->display_byte_stride;
fix->accel = FB_ACCEL_NONE;
return 0;
}
/* Check the requested display mode, returning -EINVAL if we can't
handle it. */
static int _ivtvfb_check_var(struct fb_var_screeninfo *var, struct ivtv *itv)
{
struct osd_info *oi = itv->osd_info;
int osd_height_limit;
u32 pixclock, hlimit, vlimit;
IVTVFB_DEBUG_INFO("ivtvfb_check_var\n");
/* Set base references for mode calcs. */
if (itv->is_out_50hz) {
pixclock = 84316;
hlimit = 776;
vlimit = 591;
osd_height_limit = 576;
}
else {
pixclock = 83926;
hlimit = 776;
vlimit = 495;
osd_height_limit = 480;
}
if (var->bits_per_pixel == 8 || var->bits_per_pixel == 32) {
var->transp.offset = 24;
var->transp.length = 8;
var->red.offset = 16;
var->red.length = 8;
var->green.offset = 8;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
}
else if (var->bits_per_pixel == 16) {
/* To find out the true mode, check green length */
switch (var->green.length) {
case 4:
var->red.offset = 8;
var->red.length = 4;
var->green.offset = 4;
var->green.length = 4;
var->blue.offset = 0;
var->blue.length = 4;
var->transp.offset = 12;
var->transp.length = 1;
break;
case 5:
var->red.offset = 10;
var->red.length = 5;
var->green.offset = 5;
var->green.length = 5;
var->blue.offset = 0;
var->blue.length = 5;
var->transp.offset = 15;
var->transp.length = 1;
break;
default:
var->red.offset = 11;
var->red.length = 5;
var->green.offset = 5;
var->green.length = 6;
var->blue.offset = 0;
var->blue.length = 5;
var->transp.offset = 0;
var->transp.length = 0;
break;
}
}
else {
IVTVFB_DEBUG_WARN("Invalid colour mode: %d\n", var->bits_per_pixel);
return -EINVAL;
}
/* Check the resolution */
if (var->xres > IVTV_OSD_MAX_WIDTH || var->yres > osd_height_limit) {
IVTVFB_DEBUG_WARN("Invalid resolution: %dx%d\n",
var->xres, var->yres);
return -EINVAL;
}
/* Max horizontal size is 1023 @ 32bpp, 2046 @ 16bpp, 4092 @ 8bpp */
if (var->xres_virtual > 4095 / (var->bits_per_pixel / 8) ||
var->xres_virtual * var->yres_virtual * (var->bits_per_pixel / 8) > oi->video_buffer_size ||
var->xres_virtual < var->xres ||
var->yres_virtual < var->yres) {
IVTVFB_DEBUG_WARN("Invalid virtual resolution: %dx%d\n",
var->xres_virtual, var->yres_virtual);
return -EINVAL;
}
/* Some extra checks if in 8 bit mode */
if (var->bits_per_pixel == 8) {
/* Width must be a multiple of 4 */
if (var->xres & 3) {
IVTVFB_DEBUG_WARN("Invalid resolution for 8bpp: %d\n", var->xres);
return -EINVAL;
}
if (var->xres_virtual & 3) {
IVTVFB_DEBUG_WARN("Invalid virtual resolution for 8bpp: %d)\n", var->xres_virtual);
return -EINVAL;
}
}
else if (var->bits_per_pixel == 16) {
/* Width must be a multiple of 2 */
if (var->xres & 1) {
IVTVFB_DEBUG_WARN("Invalid resolution for 16bpp: %d\n", var->xres);
return -EINVAL;
}
if (var->xres_virtual & 1) {
IVTVFB_DEBUG_WARN("Invalid virtual resolution for 16bpp: %d)\n", var->xres_virtual);
return -EINVAL;
}
}
/* Now check the offsets */
if (var->xoffset >= var->xres_virtual || var->yoffset >= var->yres_virtual) {
IVTVFB_DEBUG_WARN("Invalid offset: %d (%d) %d (%d)\n",
var->xoffset, var->xres_virtual, var->yoffset, var->yres_virtual);
return -EINVAL;
}
/* Check pixel format */
if (var->nonstd > 1) {
IVTVFB_DEBUG_WARN("Invalid nonstd % d\n", var->nonstd);
return -EINVAL;
}
/* Check video mode */
if (((var->vmode & FB_VMODE_MASK) != FB_VMODE_NONINTERLACED) &&
((var->vmode & FB_VMODE_MASK) != FB_VMODE_INTERLACED)) {
IVTVFB_DEBUG_WARN("Invalid video mode: %d\n", var->vmode & FB_VMODE_MASK);
return -EINVAL;
}
/* Check the left & upper margins.
If the margins are too large, just center the screen
(enforcing margins causes too many problems) */
if (var->left_margin + var->xres > IVTV_OSD_MAX_WIDTH + 1)
var->left_margin = 1 + ((IVTV_OSD_MAX_WIDTH - var->xres) / 2);
if (var->upper_margin + var->yres > (itv->is_out_50hz ? 577 : 481))
var->upper_margin = 1 + (((itv->is_out_50hz ? 576 : 480) -
var->yres) / 2);
/* Maintain overall 'size' for a constant refresh rate */
var->right_margin = hlimit - var->left_margin - var->xres;
var->lower_margin = vlimit - var->upper_margin - var->yres;
/* Fixed sync times */
var->hsync_len = 24;
var->vsync_len = 2;
/* Non-interlaced / interlaced mode is used to switch the OSD filter
on or off. Adjust the clock timings to maintain a constant
vertical refresh rate. */
if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED)
var->pixclock = pixclock / 2;
else
var->pixclock = pixclock;
itv->osd_rect.width = var->xres;
itv->osd_rect.height = var->yres;
IVTVFB_DEBUG_INFO("Display size: %dx%d (virtual %dx%d) @ %dbpp\n",
var->xres, var->yres,
var->xres_virtual, var->yres_virtual,
var->bits_per_pixel);
IVTVFB_DEBUG_INFO("Display position: %d, %d\n",
var->left_margin, var->upper_margin);
IVTVFB_DEBUG_INFO("Display filter: %s\n",
(var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED ? "on" : "off");
IVTVFB_DEBUG_INFO("Color space: %s\n", var->nonstd ? "YUV" : "RGB");
return 0;
}
static int ivtvfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct ivtv *itv = (struct ivtv *) info->par;
IVTVFB_DEBUG_INFO("ivtvfb_check_var\n");
return _ivtvfb_check_var(var, itv);
}
static int ivtvfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
u32 osd_pan_index;
struct ivtv *itv = (struct ivtv *) info->par;
if (var->yoffset + info->var.yres > info->var.yres_virtual ||
var->xoffset + info->var.xres > info->var.xres_virtual)
return -EINVAL;
osd_pan_index = var->yoffset * info->fix.line_length
+ var->xoffset * info->var.bits_per_pixel / 8;
write_reg(osd_pan_index, 0x02A0C);
/* Pass this info back to the yuv handler */
itv->yuv_info.osd_x_pan = var->xoffset;
itv->yuv_info.osd_y_pan = var->yoffset;
/* Force update of yuv registers */
itv->yuv_info.yuv_forced_update = 1;
/* Remember this value */
itv->osd_info->pan_cur = osd_pan_index;
return 0;
}
static int ivtvfb_set_par(struct fb_info *info)
{
int rc = 0;
struct ivtv *itv = (struct ivtv *) info->par;
IVTVFB_DEBUG_INFO("ivtvfb_set_par\n");
rc = ivtvfb_set_var(itv, &info->var);
ivtvfb_pan_display(&info->var, info);
ivtvfb_get_fix(itv, &info->fix);
ivtv_firmware_check(itv, "ivtvfb_set_par");
return rc;
}
static int ivtvfb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
{
u32 color, *palette;
struct ivtv *itv = (struct ivtv *)info->par;
if (regno >= info->cmap.len)
return -EINVAL;
color = ((transp & 0xFF00) << 16) | ((red & 0xFF00) << 8) | (green & 0xFF00) | ((blue & 0xFF00) >> 8);
if (info->var.bits_per_pixel <= 8) {
write_reg(regno, 0x02a30);
write_reg(color, 0x02a34);
itv->osd_info->palette_cur[regno] = color;
return 0;
}
if (regno >= 16)
return -EINVAL;
palette = info->pseudo_palette;
if (info->var.bits_per_pixel == 16) {
switch (info->var.green.length) {
case 4:
color = ((red & 0xf000) >> 4) |
((green & 0xf000) >> 8) |
((blue & 0xf000) >> 12);
break;
case 5:
color = ((red & 0xf800) >> 1) |
((green & 0xf800) >> 6) |
((blue & 0xf800) >> 11);
break;
case 6:
color = (red & 0xf800 ) |
((green & 0xfc00) >> 5) |
((blue & 0xf800) >> 11);
break;
}
}
palette[regno] = color;
return 0;
}
/* We don't really support blanking. All this does is enable or
disable the OSD. */
static int ivtvfb_blank(int blank_mode, struct fb_info *info)
{
struct ivtv *itv = (struct ivtv *)info->par;
IVTVFB_DEBUG_INFO("Set blanking mode : %d\n", blank_mode);
switch (blank_mode) {
case FB_BLANK_UNBLANK:
ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 1);
ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 1);
break;
case FB_BLANK_NORMAL:
case FB_BLANK_HSYNC_SUSPEND:
case FB_BLANK_VSYNC_SUSPEND:
ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 0);
ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 1);
break;
case FB_BLANK_POWERDOWN:
ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 0);
ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 0);
break;
}
itv->osd_info->blank_cur = blank_mode;
return 0;
}
static const struct fb_ops ivtvfb_ops = {
.owner = THIS_MODULE,
.fb_write = ivtvfb_write,
.fb_check_var = ivtvfb_check_var,
.fb_set_par = ivtvfb_set_par,
.fb_setcolreg = ivtvfb_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_cursor = NULL,
.fb_ioctl = ivtvfb_ioctl,
.fb_pan_display = ivtvfb_pan_display,
.fb_blank = ivtvfb_blank,
};
/* Restore hardware after firmware restart */
static void ivtvfb_restore(struct ivtv *itv)
{
struct osd_info *oi = itv->osd_info;
int i;
ivtvfb_set_var(itv, &oi->fbvar_cur);
ivtvfb_blank(oi->blank_cur, &oi->ivtvfb_info);
for (i = 0; i < 256; i++) {
write_reg(i, 0x02a30);
write_reg(oi->palette_cur[i], 0x02a34);
}
write_reg(oi->pan_cur, 0x02a0c);
}
/* Initialization */
/* Setup our initial video mode */
static int ivtvfb_init_vidmode(struct ivtv *itv)
{
struct osd_info *oi = itv->osd_info;
struct v4l2_rect start_window;
int max_height;
/* Color mode */
if (osd_depth != 8 && osd_depth != 16 && osd_depth != 32)
osd_depth = 8;
oi->bits_per_pixel = osd_depth;
oi->bytes_per_pixel = oi->bits_per_pixel / 8;
/* Horizontal size & position */
if (osd_xres > 720)
osd_xres = 720;
/* Must be a multiple of 4 for 8bpp & 2 for 16bpp */
if (osd_depth == 8)
osd_xres &= ~3;
else if (osd_depth == 16)
osd_xres &= ~1;
start_window.width = osd_xres ? osd_xres : 640;
/* Check horizontal start (osd_left). */
if (osd_left && osd_left + start_window.width > 721) {
IVTVFB_ERR("Invalid osd_left - assuming default\n");
osd_left = 0;
}
/* Hardware coords start at 0, user coords start at 1. */
osd_left--;
start_window.left = osd_left >= 0 ?
osd_left : ((IVTV_OSD_MAX_WIDTH - start_window.width) / 2);
oi->display_byte_stride =
start_window.width * oi->bytes_per_pixel;
/* Vertical size & position */
max_height = itv->is_out_50hz ? 576 : 480;
if (osd_yres > max_height)
osd_yres = max_height;
start_window.height = osd_yres ?
osd_yres : itv->is_out_50hz ? 480 : 400;
/* Check vertical start (osd_upper). */
if (osd_upper + start_window.height > max_height + 1) {
IVTVFB_ERR("Invalid osd_upper - assuming default\n");
osd_upper = 0;
}
/* Hardware coords start at 0, user coords start at 1. */
osd_upper--;
start_window.top = osd_upper >= 0 ? osd_upper : ((max_height - start_window.height) / 2);
oi->display_width = start_window.width;
oi->display_height = start_window.height;
/* Generate a valid fb_var_screeninfo */
oi->ivtvfb_defined.xres = oi->display_width;
oi->ivtvfb_defined.yres = oi->display_height;
oi->ivtvfb_defined.xres_virtual = oi->display_width;
oi->ivtvfb_defined.yres_virtual = oi->display_height;
oi->ivtvfb_defined.bits_per_pixel = oi->bits_per_pixel;
oi->ivtvfb_defined.vmode = (osd_laced ? FB_VMODE_INTERLACED : FB_VMODE_NONINTERLACED);
oi->ivtvfb_defined.left_margin = start_window.left + 1;
oi->ivtvfb_defined.upper_margin = start_window.top + 1;
oi->ivtvfb_defined.accel_flags = FB_ACCEL_NONE;
oi->ivtvfb_defined.nonstd = 0;
/* We've filled in most of the data, let the usual mode check
routine fill in the rest. */
_ivtvfb_check_var(&oi->ivtvfb_defined, itv);
/* Generate valid fb_fix_screeninfo */
ivtvfb_get_fix(itv, &oi->ivtvfb_fix);
/* Generate valid fb_info */
oi->ivtvfb_info.node = -1;
oi->ivtvfb_info.par = itv;
oi->ivtvfb_info.var = oi->ivtvfb_defined;
oi->ivtvfb_info.fix = oi->ivtvfb_fix;
oi->ivtvfb_info.screen_base = (u8 __iomem *)oi->video_vbase;
oi->ivtvfb_info.fbops = &ivtvfb_ops;
/* Supply some monitor specs. Bogus values will do for now */
oi->ivtvfb_info.monspecs.hfmin = 8000;
oi->ivtvfb_info.monspecs.hfmax = 70000;
oi->ivtvfb_info.monspecs.vfmin = 10;
oi->ivtvfb_info.monspecs.vfmax = 100;
/* Allocate color map */
if (fb_alloc_cmap(&oi->ivtvfb_info.cmap, 256, 1)) {
IVTVFB_ERR("abort, unable to alloc cmap\n");
return -ENOMEM;
}
/* Allocate the pseudo palette */
oi->ivtvfb_info.pseudo_palette =
kmalloc_array(16, sizeof(u32), GFP_KERNEL|__GFP_NOWARN);
if (!oi->ivtvfb_info.pseudo_palette) {
IVTVFB_ERR("abort, unable to alloc pseudo palette\n");
return -ENOMEM;
}
return 0;
}
/* Find OSD buffer base & size. Add to mtrr. Zero osd buffer. */
static int ivtvfb_init_io(struct ivtv *itv)
{
struct osd_info *oi = itv->osd_info;
/* Find the largest power of two that maps the whole buffer */
int size_shift = 31;
mutex_lock(&itv->serialize_lock);
if (ivtv_init_on_first_open(itv)) {
mutex_unlock(&itv->serialize_lock);
IVTVFB_ERR("Failed to initialize ivtv\n");
return -ENXIO;
}
mutex_unlock(&itv->serialize_lock);
if (ivtvfb_get_framebuffer(itv, &oi->video_rbase,
&oi->video_buffer_size) < 0) {
IVTVFB_ERR("Firmware failed to respond\n");
return -EIO;
}
/* The osd buffer size depends on the number of video buffers allocated
on the PVR350 itself. For now we'll hardcode the smallest osd buffer
size to prevent any overlap. */
oi->video_buffer_size = 1704960;
oi->video_pbase = itv->base_addr + IVTV_DECODER_OFFSET + oi->video_rbase;
oi->video_vbase = itv->dec_mem + oi->video_rbase;
if (!oi->video_vbase) {
IVTVFB_ERR("abort, video memory 0x%x @ 0x%lx isn't mapped!\n",
oi->video_buffer_size, oi->video_pbase);
return -EIO;
}
IVTVFB_INFO("Framebuffer at 0x%lx, mapped to 0x%p, size %dk\n",
oi->video_pbase, oi->video_vbase,
oi->video_buffer_size / 1024);
while (!(oi->video_buffer_size & (1 << size_shift)))
size_shift--;
size_shift++;
oi->fb_start_aligned_physaddr = oi->video_pbase & ~((1 << size_shift) - 1);
oi->fb_end_aligned_physaddr = oi->video_pbase + oi->video_buffer_size;
oi->fb_end_aligned_physaddr += (1 << size_shift) - 1;
oi->fb_end_aligned_physaddr &= ~((1 << size_shift) - 1);
oi->wc_cookie = arch_phys_wc_add(oi->fb_start_aligned_physaddr,
oi->fb_end_aligned_physaddr -
oi->fb_start_aligned_physaddr);
/* Blank the entire osd. */
memset_io(oi->video_vbase, 0, oi->video_buffer_size);
return 0;
}
/* Release any memory we've grabbed & remove mtrr entry */
static void ivtvfb_release_buffers (struct ivtv *itv)
{
struct osd_info *oi = itv->osd_info;
/* Release cmap */
if (oi->ivtvfb_info.cmap.len)
fb_dealloc_cmap(&oi->ivtvfb_info.cmap);
/* Release pseudo palette */
kfree(oi->ivtvfb_info.pseudo_palette);
arch_phys_wc_del(oi->wc_cookie);
kfree(oi);
itv->osd_info = NULL;
}
/* Initialize the specified card */
static int ivtvfb_init_card(struct ivtv *itv)
{
int rc;
#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
if (pat_enabled()) {
if (ivtvfb_force_pat) {
pr_info("PAT is enabled. Write-combined framebuffer caching will be disabled.\n");
pr_info("To enable caching, boot with nopat kernel parameter\n");
} else {
pr_warn("ivtvfb needs PAT disabled for write-combined framebuffer caching.\n");
pr_warn("Boot with nopat kernel parameter to use caching, or use the\n");
pr_warn("force_pat module parameter to run with caching disabled\n");
return -ENODEV;
}
}
#endif
if (itv->osd_info) {
IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id);
return -EBUSY;
}
itv->osd_info = kzalloc(sizeof(struct osd_info),
GFP_KERNEL|__GFP_NOWARN);
if (itv->osd_info == NULL) {
IVTVFB_ERR("Failed to allocate memory for osd_info\n");
return -ENOMEM;
}
/* Find & setup the OSD buffer */
rc = ivtvfb_init_io(itv);
if (rc) {
ivtvfb_release_buffers(itv);
return rc;
}
/* Set the startup video mode information */
if ((rc = ivtvfb_init_vidmode(itv))) {
ivtvfb_release_buffers(itv);
return rc;
}
/* Register the framebuffer */
if (register_framebuffer(&itv->osd_info->ivtvfb_info) < 0) {
ivtvfb_release_buffers(itv);
return -EINVAL;
}
itv->osd_video_pbase = itv->osd_info->video_pbase;
/* Set the card to the requested mode */
ivtvfb_set_par(&itv->osd_info->ivtvfb_info);
/* Set color 0 to black */
write_reg(0, 0x02a30);
write_reg(0, 0x02a34);
/* Enable the osd */
ivtvfb_blank(FB_BLANK_UNBLANK, &itv->osd_info->ivtvfb_info);
/* Enable restart */
itv->ivtvfb_restore = ivtvfb_restore;
/* Allocate DMA */
ivtv_udma_alloc(itv);
itv->streams[IVTV_DEC_STREAM_TYPE_YUV].vdev.device_caps |=
V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
itv->streams[IVTV_DEC_STREAM_TYPE_MPG].vdev.device_caps |=
V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
itv->v4l2_cap |= V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
return 0;
}
static int __init ivtvfb_callback_init(struct device *dev, void *p)
{
struct v4l2_device *v4l2_dev = dev_get_drvdata(dev);
struct ivtv *itv = container_of(v4l2_dev, struct ivtv, v4l2_dev);
if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
if (ivtvfb_init_card(itv) == 0) {
IVTVFB_INFO("Framebuffer registered on %s\n",
itv->v4l2_dev.name);
(*(int *)p)++;
}
}
return 0;
}
static int ivtvfb_callback_cleanup(struct device *dev, void *p)
{
struct v4l2_device *v4l2_dev = dev_get_drvdata(dev);
struct ivtv *itv = container_of(v4l2_dev, struct ivtv, v4l2_dev);
struct osd_info *oi = itv->osd_info;
if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
itv->streams[IVTV_DEC_STREAM_TYPE_YUV].vdev.device_caps &=
~V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
itv->streams[IVTV_DEC_STREAM_TYPE_MPG].vdev.device_caps &=
~V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
itv->v4l2_cap &= ~V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
unregister_framebuffer(&itv->osd_info->ivtvfb_info);
IVTVFB_INFO("Unregister framebuffer %d\n", itv->instance);
itv->ivtvfb_restore = NULL;
ivtvfb_blank(FB_BLANK_VSYNC_SUSPEND, &oi->ivtvfb_info);
ivtvfb_release_buffers(itv);
itv->osd_video_pbase = 0;
}
return 0;
}
static int __init ivtvfb_init(void)
{
struct device_driver *drv;
int registered = 0;
int err;
if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) {
pr_err("ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n",
IVTV_MAX_CARDS - 1);
return -EINVAL;
}
drv = driver_find("ivtv", &pci_bus_type);
err = driver_for_each_device(drv, NULL, &registered, ivtvfb_callback_init);
(void)err; /* suppress compiler warning */
if (!registered) {
pr_err("no cards found\n");
return -ENODEV;
}
return 0;
}
static void ivtvfb_cleanup(void)
{
struct device_driver *drv;
int err;
pr_info("Unloading framebuffer module\n");
drv = driver_find("ivtv", &pci_bus_type);
err = driver_for_each_device(drv, NULL, NULL, ivtvfb_callback_cleanup);
(void)err; /* suppress compiler warning */
}
module_init(ivtvfb_init);
module_exit(ivtvfb_cleanup);
| linux-master | drivers/media/pci/ivtv/ivtvfb.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ALSA PCM device for the
* ALSA interface to ivtv PCM capture streams
*
* Copyright (C) 2009,2012 Andy Walls <[email protected]>
* Copyright (C) 2009 Devin Heitmueller <[email protected]>
*
* Portions of this work were sponsored by ONELAN Limited for the cx18 driver
*/
#include "ivtv-driver.h"
#include "ivtv-queue.h"
#include "ivtv-streams.h"
#include "ivtv-fileops.h"
#include "ivtv-alsa.h"
#include "ivtv-alsa-pcm.h"
#include <sound/core.h>
#include <sound/pcm.h>
static unsigned int pcm_debug;
module_param(pcm_debug, int, 0644);
MODULE_PARM_DESC(pcm_debug, "enable debug messages for pcm");
#define dprintk(fmt, arg...) \
do { \
if (pcm_debug) \
pr_info("ivtv-alsa-pcm %s: " fmt, __func__, ##arg); \
} while (0)
static const struct snd_pcm_hardware snd_ivtv_hw_capture = {
.info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_MMAP_VALID,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_48000,
.rate_min = 48000,
.rate_max = 48000,
.channels_min = 2,
.channels_max = 2,
.buffer_bytes_max = 62720 * 8, /* just about the value in usbaudio.c */
.period_bytes_min = 64, /* 12544/2, */
.period_bytes_max = 12544,
.periods_min = 2,
.periods_max = 98, /* 12544, */
};
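/* Copy a chunk of captured PCM data into the ALSA ring buffer, wrapping at
   the end of the buffer, and call snd_pcm_period_elapsed() once a full
   period has been transferred. */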
static void ivtv_alsa_announce_pcm_data(struct snd_ivtv_card *itvsc,
u8 *pcm_data,
size_t num_bytes)
{
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
unsigned int oldptr;
unsigned int stride;
int period_elapsed = 0;
int length;
dprintk("ivtv alsa announce ptr=%p data=%p num_bytes=%zu\n", itvsc,
pcm_data, num_bytes);
substream = itvsc->capture_pcm_substream;
if (substream == NULL) {
dprintk("substream was NULL\n");
return;
}
runtime = substream->runtime;
if (runtime == NULL) {
dprintk("runtime was NULL\n");
return;
}
stride = runtime->frame_bits >> 3;
if (stride == 0) {
dprintk("stride is zero\n");
return;
}
length = num_bytes / stride;
if (length == 0) {
dprintk("%s: length was zero\n", __func__);
return;
}
if (runtime->dma_area == NULL) {
dprintk("dma area was NULL - ignoring\n");
return;
}
oldptr = itvsc->hwptr_done_capture;
if (oldptr + length >= runtime->buffer_size) {
unsigned int cnt =
runtime->buffer_size - oldptr;
memcpy(runtime->dma_area + oldptr * stride, pcm_data,
cnt * stride);
memcpy(runtime->dma_area, pcm_data + cnt * stride,
length * stride - cnt * stride);
} else {
memcpy(runtime->dma_area + oldptr * stride, pcm_data,
length * stride);
}
snd_pcm_stream_lock(substream);
itvsc->hwptr_done_capture += length;
if (itvsc->hwptr_done_capture >=
runtime->buffer_size)
itvsc->hwptr_done_capture -=
runtime->buffer_size;
itvsc->capture_transfer_done += length;
if (itvsc->capture_transfer_done >=
runtime->period_size) {
itvsc->capture_transfer_done -=
runtime->period_size;
period_elapsed = 1;
}
snd_pcm_stream_unlock(substream);
if (period_elapsed)
snd_pcm_period_elapsed(substream);
}
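/* ALSA capture open: claim the PCM encoder stream, register the announce
   callback and start the encoder unless it is already streaming. */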
static int snd_ivtv_pcm_capture_open(struct snd_pcm_substream *substream)
{
struct snd_ivtv_card *itvsc = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct v4l2_device *v4l2_dev = itvsc->v4l2_dev;
struct ivtv *itv = to_ivtv(v4l2_dev);
struct ivtv_stream *s;
struct ivtv_open_id item;
int ret;
/* Instruct the CX2341[56] to start sending packets */
snd_ivtv_lock(itvsc);
if (ivtv_init_on_first_open(itv)) {
snd_ivtv_unlock(itvsc);
return -ENXIO;
}
s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
v4l2_fh_init(&item.fh, &s->vdev);
item.itv = itv;
item.type = s->type;
/* See if the stream is available */
if (ivtv_claim_stream(&item, item.type)) {
/* No, it's already in use */
v4l2_fh_exit(&item.fh);
snd_ivtv_unlock(itvsc);
return -EBUSY;
}
if (test_bit(IVTV_F_S_STREAMOFF, &s->s_flags) ||
test_and_set_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
/* We're already streaming. No additional action required */
snd_ivtv_unlock(itvsc);
return 0;
}
runtime->hw = snd_ivtv_hw_capture;
snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
itvsc->capture_pcm_substream = substream;
runtime->private_data = itv;
itv->pcm_announce_callback = ivtv_alsa_announce_pcm_data;
/* Not currently streaming, so start it up */
set_bit(IVTV_F_S_STREAMING, &s->s_flags);
ret = ivtv_start_v4l2_encode_stream(s);
snd_ivtv_unlock(itvsc);
return ret;
}
static int snd_ivtv_pcm_capture_close(struct snd_pcm_substream *substream)
{
struct snd_ivtv_card *itvsc = snd_pcm_substream_chip(substream);
struct v4l2_device *v4l2_dev = itvsc->v4l2_dev;
struct ivtv *itv = to_ivtv(v4l2_dev);
struct ivtv_stream *s;
/* Instruct the ivtv to stop sending packets */
snd_ivtv_lock(itvsc);
s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
ivtv_stop_v4l2_encode_stream(s, 0);
clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
ivtv_release_stream(s);
itv->pcm_announce_callback = NULL;
snd_ivtv_unlock(itvsc);
return 0;
}
static int snd_ivtv_pcm_prepare(struct snd_pcm_substream *substream)
{
struct snd_ivtv_card *itvsc = snd_pcm_substream_chip(substream);
itvsc->hwptr_done_capture = 0;
itvsc->capture_transfer_done = 0;
return 0;
}
static int snd_ivtv_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
return 0;
}
static
snd_pcm_uframes_t snd_ivtv_pcm_pointer(struct snd_pcm_substream *substream)
{
unsigned long flags;
snd_pcm_uframes_t hwptr_done;
struct snd_ivtv_card *itvsc = snd_pcm_substream_chip(substream);
spin_lock_irqsave(&itvsc->slock, flags);
hwptr_done = itvsc->hwptr_done_capture;
spin_unlock_irqrestore(&itvsc->slock, flags);
return hwptr_done;
}
static const struct snd_pcm_ops snd_ivtv_pcm_capture_ops = {
.open = snd_ivtv_pcm_capture_open,
.close = snd_ivtv_pcm_capture_close,
.prepare = snd_ivtv_pcm_prepare,
.trigger = snd_ivtv_pcm_trigger,
.pointer = snd_ivtv_pcm_pointer,
};
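/* Create the ALSA PCM device: a single capture substream backed by a
   vmalloc'ed buffer, using the capture ops above. */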
int snd_ivtv_pcm_create(struct snd_ivtv_card *itvsc)
{
struct snd_pcm *sp;
struct snd_card *sc = itvsc->sc;
struct v4l2_device *v4l2_dev = itvsc->v4l2_dev;
struct ivtv *itv = to_ivtv(v4l2_dev);
int ret;
ret = snd_pcm_new(sc, "CX2341[56] PCM",
0, /* PCM device 0, the only one for this card */
0, /* 0 playback substreams */
1, /* 1 capture substream */
&sp);
if (ret) {
IVTV_ALSA_ERR("%s: snd_ivtv_pcm_create() failed with err %d\n",
__func__, ret);
goto err_exit;
}
spin_lock_init(&itvsc->slock);
snd_pcm_set_ops(sp, SNDRV_PCM_STREAM_CAPTURE,
&snd_ivtv_pcm_capture_ops);
snd_pcm_set_managed_buffer_all(sp, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0);
sp->info_flags = 0;
sp->private_data = itvsc;
strscpy(sp->name, itv->card_name, sizeof(sp->name));
return 0;
err_exit:
return ret;
}
| linux-master | drivers/media/pci/ivtv/ivtv-alsa-pcm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* interrupt handling
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <[email protected]>
Copyright (C) 2005-2007 Hans Verkuil <[email protected]>
*/
#include "ivtv-driver.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"
#include <media/v4l2-event.h>
#define DMA_MAGIC_COOKIE 0x000001fe
static void ivtv_dma_dec_start(struct ivtv_stream *s);
static const int ivtv_stream_map[] = {
IVTV_ENC_STREAM_TYPE_MPG,
IVTV_ENC_STREAM_TYPE_YUV,
IVTV_ENC_STREAM_TYPE_PCM,
IVTV_ENC_STREAM_TYPE_VBI,
};
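/*
 * Maps the stream index reported by the encoder firmware (mailbox
 * data[0], see ivtv_irq_enc_start_cap()) to the driver's stream type.
 */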
static void ivtv_pcm_work_handler(struct ivtv *itv)
{
struct ivtv_stream *s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
struct ivtv_buffer *buf;
/* Pass the PCM data to ivtv-alsa */
while (1) {
/*
* Users should not be using both the ALSA and V4L2 PCM audio
* capture interfaces at the same time. If the user is doing
this, there may be a buffer in q_io to grab, use, and put
* back in rotation.
*/
buf = ivtv_dequeue(s, &s->q_io);
if (buf == NULL)
buf = ivtv_dequeue(s, &s->q_full);
if (buf == NULL)
break;
if (buf->readpos < buf->bytesused)
itv->pcm_announce_callback(itv->alsa,
(u8 *)(buf->buf + buf->readpos),
(size_t)(buf->bytesused - buf->readpos));
ivtv_enqueue(s, buf, &s->q_free);
}
}
static void ivtv_pio_work_handler(struct ivtv *itv)
{
struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
struct ivtv_buffer *buf;
int i = 0;
IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
s->vdev.v4l2_dev == NULL || !ivtv_use_pio(s)) {
itv->cur_pio_stream = -1;
/* trigger PIO complete user interrupt */
write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
return;
}
IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
list_for_each_entry(buf, &s->q_dma.list, list) {
u32 size = s->sg_processing[i].size & 0x3ffff;
/* Copy the data from the card to the buffer */
if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
}
else {
memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
}
i++;
if (i == s->sg_processing_size)
break;
}
write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}
void ivtv_irq_work_handler(struct kthread_work *work)
{
struct ivtv *itv = container_of(work, struct ivtv, irq_work);
if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
ivtv_pio_work_handler(itv);
if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
ivtv_vbi_work_handler(itv);
if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
ivtv_yuv_work_handler(itv);
if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PCM, &itv->i_flags))
ivtv_pcm_work_handler(itv);
}
/* Determine the required DMA size, setup enough buffers in the predma queue and
actually copy the data from the card to the buffers in case a PIO transfer is
required for this stream.
*/
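/* Returns 0 if the data was added to the stream's pending SG list (the
   caller then marks the stream DMA/PIO pending), or -1 if the stream is
   not ready or not enough buffers could be obtained. */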
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
struct ivtv *itv = s->itv;
struct ivtv_buffer *buf;
u32 bytes_needed = 0;
u32 offset, size;
u32 UVoffset = 0, UVsize = 0;
int skip_bufs = s->q_predma.buffers;
int idx = s->sg_pending_size;
int rc;
/* sanity checks */
if (s->vdev.v4l2_dev == NULL) {
IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
return -1;
}
if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
return -1;
}
/* determine offset, size and PTS for the various streams */
switch (s->type) {
case IVTV_ENC_STREAM_TYPE_MPG:
offset = data[1];
size = data[2];
s->pending_pts = 0;
break;
case IVTV_ENC_STREAM_TYPE_YUV:
offset = data[1];
size = data[2];
UVoffset = data[3];
UVsize = data[4];
s->pending_pts = ((u64) data[5] << 32) | data[6];
break;
case IVTV_ENC_STREAM_TYPE_PCM:
offset = data[1] + 12;
size = data[2] - 12;
s->pending_pts = read_dec(offset - 8) |
((u64)(read_dec(offset - 12)) << 32);
if (itv->has_cx23415)
offset += IVTV_DECODER_OFFSET;
break;
case IVTV_ENC_STREAM_TYPE_VBI:
size = itv->vbi.enc_size * itv->vbi.fpi;
offset = read_enc(itv->vbi.enc_start - 4) + 12;
if (offset == 12) {
IVTV_DEBUG_INFO("VBI offset == 0\n");
return -1;
}
s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
break;
case IVTV_DEC_STREAM_TYPE_VBI:
size = read_dec(itv->vbi.dec_start + 4) + 8;
offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
s->pending_pts = 0;
offset += IVTV_DECODER_OFFSET;
break;
default:
/* shouldn't happen */
return -1;
}
/* if this is the start of the DMA then fill in the magic cookie */
if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
write_dec_sync(DMA_MAGIC_COOKIE, offset - IVTV_DECODER_OFFSET);
}
else {
s->pending_backup = read_enc(offset);
write_enc_sync(DMA_MAGIC_COOKIE, offset);
}
s->pending_offset = offset;
}
bytes_needed = size;
if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
/* The size for the Y samples needs to be rounded upwards to a
multiple of the buf_size. The UV samples then start in the
next buffer. */
bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
bytes_needed += UVsize;
}
IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);
rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
if (rc < 0) { /* Insufficient buffers */
IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
bytes_needed, s->name);
return -1;
}
if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) {
IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
IVTV_WARN("Cause: the application is not reading fast enough.\n");
}
s->buffers_stolen = rc;
/* got the buffers, now fill in sg_pending */
buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
memset(buf->buf, 0, 128);
list_for_each_entry(buf, &s->q_predma.list, list) {
if (skip_bufs-- > 0)
continue;
s->sg_pending[idx].dst = buf->dma_handle;
s->sg_pending[idx].src = offset;
s->sg_pending[idx].size = s->buf_size;
buf->bytesused = min(size, s->buf_size);
buf->dma_xfer_cnt = s->dma_xfer_cnt;
s->q_predma.bytesused += buf->bytesused;
size -= buf->bytesused;
offset += s->buf_size;
/* Sync SG buffers */
ivtv_buf_sync_for_device(s, buf);
if (size == 0) { /* YUV */
/* process the UV section */
offset = UVoffset;
size = UVsize;
}
idx++;
}
s->sg_pending_size = idx;
return 0;
}
static void dma_post(struct ivtv_stream *s)
{
struct ivtv *itv = s->itv;
struct ivtv_buffer *buf = NULL;
struct list_head *p;
u32 offset;
__le32 *u32buf;
int x = 0;
IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
s->name, s->dma_offset);
list_for_each(p, &s->q_dma.list) {
buf = list_entry(p, struct ivtv_buffer, list);
u32buf = (__le32 *)buf->buf;
/* Sync Buffer */
ivtv_buf_sync_for_cpu(s, buf);
if (x == 0 && ivtv_use_dma(s)) {
offset = s->dma_last_offset;
if (le32_to_cpu(u32buf[offset / 4]) != DMA_MAGIC_COOKIE)
{
for (offset = 0; offset < 64; offset++)
if (le32_to_cpu(u32buf[offset]) == DMA_MAGIC_COOKIE)
break;
offset *= 4;
if (offset == 256) {
IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
offset = s->dma_last_offset;
}
if (s->dma_last_offset != offset)
IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
s->dma_last_offset = offset;
}
if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
}
else {
write_enc_sync(0, s->dma_offset);
}
if (offset) {
buf->bytesused -= offset;
memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
}
*u32buf = cpu_to_le32(s->dma_backup);
}
x++;
/* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
s->type == IVTV_ENC_STREAM_TYPE_VBI)
buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
}
if (buf)
buf->bytesused += s->dma_last_offset;
if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
list_for_each_entry(buf, &s->q_dma.list, list) {
/* Parse and Groom VBI Data */
s->q_dma.bytesused -= buf->bytesused;
ivtv_process_vbi_data(itv, buf, 0, s->type);
s->q_dma.bytesused += buf->bytesused;
}
if (s->fh == NULL) {
ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
return;
}
}
ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
if (s->type == IVTV_ENC_STREAM_TYPE_PCM &&
itv->pcm_announce_callback != NULL) {
/*
* Set up the work handler to pass the data to ivtv-alsa.
*
* We just use q_full and let the work handler race with users
* making ivtv-fileops.c calls on the PCM device node.
*
* Users should not be using both the ALSA and V4L2 PCM audio
* capture interfaces at the same time. If the user does this,
* fragments of data will just go out each interface as they
* race for PCM data.
*/
set_bit(IVTV_F_I_WORK_HANDLER_PCM, &itv->i_flags);
set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
}
if (s->fh)
wake_up(&s->waitq);
}
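/*
 * Builds the SG list for a transfer to decoder memory.  For YUV output
 * the destination consists of a Y plane starting at 'offset' and a UV
 * plane at offset + IVTV_YUV_BUFFER_UV_OFFSET; a source buffer that
 * straddles the Y/UV boundary is split into two SG entries.  If the
 * frame has a vertical offset, the first 720*16 destination bytes are
 * reserved for blanking (filled from yi->blanking_dmaptr when that
 * buffer exists).
 */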
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
struct ivtv *itv = s->itv;
struct yuv_playback_info *yi = &itv->yuv_info;
u8 frame = yi->draw_frame;
struct yuv_frame_info *f = &yi->new_frame_info[frame];
struct ivtv_buffer *buf;
u32 y_size = 720 * ((f->src_h + 31) & ~31);
u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
int y_done = 0;
int bytes_written = 0;
int idx = 0;
IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
/* Insert buffer block for YUV if needed */
if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) {
if (yi->blanking_dmaptr) {
s->sg_pending[idx].src = yi->blanking_dmaptr;
s->sg_pending[idx].dst = offset;
s->sg_pending[idx].size = 720 * 16;
}
offset += 720 * 16;
idx++;
}
list_for_each_entry(buf, &s->q_predma.list, list) {
/* YUV UV Offset from Y Buffer */
if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done &&
(bytes_written + buf->bytesused) >= y_size) {
s->sg_pending[idx].src = buf->dma_handle;
s->sg_pending[idx].dst = offset;
s->sg_pending[idx].size = y_size - bytes_written;
offset = uv_offset;
if (s->sg_pending[idx].size != buf->bytesused) {
idx++;
s->sg_pending[idx].src =
buf->dma_handle + s->sg_pending[idx - 1].size;
s->sg_pending[idx].dst = offset;
s->sg_pending[idx].size =
buf->bytesused - s->sg_pending[idx - 1].size;
offset += s->sg_pending[idx].size;
}
y_done = 1;
} else {
s->sg_pending[idx].src = buf->dma_handle;
s->sg_pending[idx].dst = offset;
s->sg_pending[idx].size = buf->bytesused;
offset += buf->bytesused;
}
bytes_written += buf->bytesused;
/* Sync SG buffers */
ivtv_buf_sync_for_device(s, buf);
idx++;
}
s->sg_pending_size = idx;
/* Sync Hardware SG List of buffers */
ivtv_stream_sync_for_device(s);
if (lock) {
unsigned long flags = 0;
spin_lock_irqsave(&itv->dma_reg_lock, flags);
if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
ivtv_dma_dec_start(s);
else
set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
} else {
if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
ivtv_dma_dec_start(s);
else
set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
}
}
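/*
 * Kicks off the next element of the encoder SG list.  Elements are
 * transferred one at a time; the top bit (0x80000000) set in the size
 * field appears to request a completion interrupt from the card, as in
 * ivtv_udma_setup() ("Tag SG Array with Interrupt Bit").
 */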
static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
struct ivtv *itv = s->itv;
s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
s->sg_processed++;
/* Sync Hardware SG List of buffers */
ivtv_stream_sync_for_device(s);
write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
add_timer(&itv->dma_timer);
}
static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
struct ivtv *itv = s->itv;
s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
s->sg_processed++;
/* Sync Hardware SG List of buffers */
ivtv_stream_sync_for_device(s);
write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
add_timer(&itv->dma_timer);
}
/* start the encoder DMA */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
struct ivtv *itv = s->itv;
struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
int i;
IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);
if (s->q_predma.bytesused)
ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
if (ivtv_use_dma(s))
s->sg_pending[s->sg_pending_size - 1].size += 256;
/* If this is an MPEG stream, and VBI data is also pending, then append the
VBI DMA to the MPEG DMA and transfer both sets of data at once.
VBI DMA is a second class citizen compared to MPEG and mixing them together
will confuse the firmware (the end of a VBI DMA is seen as the end of an
MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
sure we only use the MPEG DMA to transfer the VBI DMA if both are in
use. This way no conflicts occur. */
clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
if (ivtv_use_dma(s_vbi))
s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
for (i = 0; i < s_vbi->sg_pending_size; i++) {
s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
}
s_vbi->dma_offset = s_vbi->pending_offset;
s_vbi->sg_pending_size = 0;
s_vbi->dma_xfer_cnt++;
set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name);
}
s->dma_xfer_cnt++;
memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
s->sg_processing_size = s->sg_pending_size;
s->sg_pending_size = 0;
s->sg_processed = 0;
s->dma_offset = s->pending_offset;
s->dma_backup = s->pending_backup;
s->dma_pts = s->pending_pts;
if (ivtv_use_pio(s)) {
set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
set_bit(IVTV_F_I_PIO, &itv->i_flags);
itv->cur_pio_stream = s->type;
}
else {
itv->dma_retries = 0;
ivtv_dma_enc_start_xfer(s);
set_bit(IVTV_F_I_DMA, &itv->i_flags);
itv->cur_dma_stream = s->type;
}
}
static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
struct ivtv *itv = s->itv;
if (s->q_predma.bytesused)
ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
s->dma_xfer_cnt++;
memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
s->sg_processing_size = s->sg_pending_size;
s->sg_pending_size = 0;
s->sg_processed = 0;
IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
itv->dma_retries = 0;
ivtv_dma_dec_start_xfer(s);
set_bit(IVTV_F_I_DMA, &itv->i_flags);
itv->cur_dma_stream = s->type;
}
static void ivtv_irq_dma_read(struct ivtv *itv)
{
struct ivtv_stream *s = NULL;
struct ivtv_buffer *buf;
int hw_stream_type = 0;
IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
del_timer(&itv->dma_timer);
if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0)
return;
if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
s = &itv->streams[itv->cur_dma_stream];
ivtv_stream_sync_for_cpu(s);
if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
read_reg(IVTV_REG_DMASTATUS),
s->sg_processed, s->sg_processing_size, itv->dma_retries);
write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
if (itv->dma_retries == 3) {
/* Too many retries, give up on this frame */
itv->dma_retries = 0;
s->sg_processed = s->sg_processing_size;
}
else {
/* Retry, starting with the first xfer segment.
Just retrying the current segment is not sufficient. */
s->sg_processed = 0;
itv->dma_retries++;
}
}
if (s->sg_processed < s->sg_processing_size) {
/* DMA next buffer */
ivtv_dma_dec_start_xfer(s);
return;
}
if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
hw_stream_type = 2;
IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);
/* For some reason we must kick the firmware, as in PIO mode.
   This presumably tells the firmware that we are done and how big the
   transfer was, so it can calculate what we need next.
   We could probably do this part ourselves, but then we would have to
   fully calculate the transfer info ourselves and not use interrupts.
 */
ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
hw_stream_type);
/* Free last DMA call */
while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
ivtv_buf_sync_for_cpu(s, buf);
ivtv_enqueue(s, buf, &s->q_free);
}
wake_up(&s->waitq);
}
clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
clear_bit(IVTV_F_I_DMA, &itv->i_flags);
itv->cur_dma_stream = -1;
wake_up(&itv->dma_waitq);
}
static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
u32 data[CX2341X_MBOX_MAX_DATA];
struct ivtv_stream *s;
ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);
del_timer(&itv->dma_timer);
if (itv->cur_dma_stream < 0)
return;
s = &itv->streams[itv->cur_dma_stream];
ivtv_stream_sync_for_cpu(s);
if (data[0] & 0x18) {
IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
if (itv->dma_retries == 3) {
/* Too many retries, give up on this frame */
itv->dma_retries = 0;
s->sg_processed = s->sg_processing_size;
}
else {
/* Retry, starting with the first xfer segment.
Just retrying the current segment is not sufficient. */
s->sg_processed = 0;
itv->dma_retries++;
}
}
if (s->sg_processed < s->sg_processing_size) {
/* DMA next buffer */
ivtv_dma_enc_start_xfer(s);
return;
}
clear_bit(IVTV_F_I_DMA, &itv->i_flags);
itv->cur_dma_stream = -1;
dma_post(s);
if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
dma_post(s);
}
s->sg_processing_size = 0;
s->sg_processed = 0;
wake_up(&itv->dma_waitq);
}
static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
{
struct ivtv_stream *s;
if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
itv->cur_pio_stream = -1;
return;
}
s = &itv->streams[itv->cur_pio_stream];
IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
clear_bit(IVTV_F_I_PIO, &itv->i_flags);
itv->cur_pio_stream = -1;
dma_post(s);
if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
clear_bit(IVTV_F_I_PIO, &itv->i_flags);
if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
dma_post(s);
}
wake_up(&itv->dma_waitq);
}
static void ivtv_irq_dma_err(struct ivtv *itv)
{
u32 data[CX2341X_MBOX_MAX_DATA];
u32 status;
del_timer(&itv->dma_timer);
ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
status = read_reg(IVTV_REG_DMASTATUS);
IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
status, itv->cur_dma_stream);
/*
* We do *not* write back to the IVTV_REG_DMASTATUS register to
* clear the error status, if either the encoder write (0x02) or
* decoder read (0x01) bus master DMA operation does not indicate
* completion. We can race with the DMA engine, which may have
* transitioned to completed status *after* we read the register.
* Setting a IVTV_REG_DMASTATUS flag back to "busy" status, after the
* DMA engine has completed, will cause the DMA engine to stop working.
*/
status &= 0x3;
if (status == 0x3)
write_reg(status, IVTV_REG_DMASTATUS);
if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];
if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
/* retry */
/*
* FIXME - handle cases of DMA error similar to
* encoder below, except conditioned on status & 0x1
*/
ivtv_dma_dec_start(s);
return;
} else {
if ((status & 0x2) == 0) {
/*
* CX2341x Bus Master DMA write is ongoing.
* Reset the timer and let it complete.
*/
itv->dma_timer.expires =
jiffies + msecs_to_jiffies(600);
add_timer(&itv->dma_timer);
return;
}
if (itv->dma_retries < 3) {
/*
* CX2341x Bus Master DMA write has ended.
* Retry the write, starting with the first
* xfer segment. Just retrying the current
* segment is not sufficient.
*/
s->sg_processed = 0;
itv->dma_retries++;
ivtv_dma_enc_start_xfer(s);
return;
}
/* Too many retries, give up on this one */
}
}
if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
ivtv_udma_start(itv);
return;
}
clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
clear_bit(IVTV_F_I_DMA, &itv->i_flags);
itv->cur_dma_stream = -1;
wake_up(&itv->dma_waitq);
}
static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
u32 data[CX2341X_MBOX_MAX_DATA];
struct ivtv_stream *s;
/* Get DMA destination and size arguments from card */
ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, 7, data);
IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);
if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
data[0], data[1], data[2]);
return;
}
s = &itv->streams[ivtv_stream_map[data[0]]];
if (!stream_enc_dma_append(s, data)) {
set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
}
}
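/*
 * Note that data[] is not filled in here (nor in
 * ivtv_irq_dec_vbi_reinsert() below): for the VBI stream types
 * stream_enc_dma_append() derives offset and size from itv->vbi rather
 * than from the mailbox data.
 */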
static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
u32 data[CX2341X_MBOX_MAX_DATA];
struct ivtv_stream *s;
IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
if (!stream_enc_dma_append(s, data))
set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
}
static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
u32 data[CX2341X_MBOX_MAX_DATA];
struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];
IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
!stream_enc_dma_append(s, data)) {
set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
}
}
static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
u32 data[CX2341X_MBOX_MAX_DATA];
struct ivtv_stream *s;
/* YUV or MPG */
if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 2, data);
itv->dma_data_req_size =
1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
itv->dma_data_req_offset = data[1];
if (atomic_read(&itv->yuv_info.next_dma_frame) >= 0)
ivtv_yuv_frame_complete(itv);
s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
}
else {
ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 3, data);
itv->dma_data_req_size = min_t(u32, data[2], 0x10000);
itv->dma_data_req_offset = data[1];
s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
}
IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
itv->dma_data_req_offset, itv->dma_data_req_size);
if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
}
else {
if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags))
ivtv_yuv_setup_stream_frame(itv);
clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
}
}
static void ivtv_irq_vsync(struct ivtv *itv)
{
/* The vsync interrupt is unusual in that it won't clear until
* the end of the first line for the current field, at which
* point it clears itself. This can result in repeated vsync
* interrupts, or a missed vsync. Read some of the registers
* to determine the line being displayed and ensure we handle
* one vsync per frame.
*/
unsigned int frame = read_reg(IVTV_REG_DEC_LINE_FIELD) & 1;
struct yuv_playback_info *yi = &itv->yuv_info;
int last_dma_frame = atomic_read(&yi->next_dma_frame);
struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame];
if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");
if (((frame ^ f->sync_field) == 0 &&
((itv->last_vsync_field & 1) ^ f->sync_field)) ||
(frame != (itv->last_vsync_field & 1) && !f->interlaced)) {
int next_dma_frame = last_dma_frame;
if (!(f->interlaced && f->delay && yi->fields_lapsed < 1)) {
if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&yi->next_fill_frame)) {
write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
next_dma_frame = (next_dma_frame + 1) % IVTV_YUV_BUFFERS;
atomic_set(&yi->next_dma_frame, next_dma_frame);
yi->fields_lapsed = -1;
yi->running = 1;
}
}
}
if (frame != (itv->last_vsync_field & 1)) {
static const struct v4l2_event evtop = {
.type = V4L2_EVENT_VSYNC,
.u.vsync.field = V4L2_FIELD_TOP,
};
static const struct v4l2_event evbottom = {
.type = V4L2_EVENT_VSYNC,
.u.vsync.field = V4L2_FIELD_BOTTOM,
};
struct ivtv_stream *s = ivtv_get_output_stream(itv);
itv->last_vsync_field += 1;
if (frame == 0) {
clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
}
else {
set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
}
if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
wake_up(&itv->event_waitq);
if (s)
wake_up(&s->waitq);
}
if (s && s->vdev.v4l2_dev)
v4l2_event_queue(&s->vdev, frame ? &evtop : &evbottom);
wake_up(&itv->vsync_waitq);
/* Send VBI to saa7127 */
if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) ||
test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) ||
test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) {
set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
}
/* Check if we need to update the yuv registers */
if (yi->running && (yi->yuv_forced_update || f->update)) {
if (!f->update) {
last_dma_frame =
(u8)(atomic_read(&yi->next_dma_frame) -
1) % IVTV_YUV_BUFFERS;
f = &yi->new_frame_info[last_dma_frame];
}
if (f->src_w) {
yi->update_frame = last_dma_frame;
f->update = 0;
yi->yuv_forced_update = 0;
set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
}
}
yi->fields_lapsed++;
}
}
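/*
 * Interrupt sources after which a new per-stream DMA or PIO transfer may
 * be scheduled by the round-robin dispatch near the end of
 * ivtv_irq_handler().
 */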
#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT)
irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
struct ivtv *itv = (struct ivtv *)dev_id;
u32 combo;
u32 stat;
int i;
u8 vsync_force = 0;
spin_lock(&itv->dma_reg_lock);
/* get contents of irq status register */
stat = read_reg(IVTV_REG_IRQSTATUS);
combo = ~itv->irqmask & stat;
/* Clear out IRQ */
if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);
if (0 == combo) {
/* The vsync interrupt is unusual and clears itself. If we
* took too long, we may have missed it. Do some checks
*/
if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
/* vsync is enabled, see if we're in a new field */
if ((itv->last_vsync_field & 1) !=
(read_reg(IVTV_REG_DEC_LINE_FIELD) & 1)) {
/* New field, looks like we missed it */
IVTV_DEBUG_YUV("VSync interrupt missed %d\n",
read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16);
vsync_force = 1;
}
}
if (!vsync_force) {
/* No Vsync expected, wasn't for us */
spin_unlock(&itv->dma_reg_lock);
return IRQ_NONE;
}
}
/* Exclude interrupts noted below from the output, otherwise the log is flooded with
these messages */
if (combo & ~0xff6d0400)
IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);
if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
}
if (combo & IVTV_IRQ_DMA_READ) {
ivtv_irq_dma_read(itv);
}
if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
ivtv_irq_enc_dma_complete(itv);
}
if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
ivtv_irq_enc_pio_complete(itv);
}
if (combo & IVTV_IRQ_DMA_ERR) {
ivtv_irq_dma_err(itv);
}
if (combo & IVTV_IRQ_ENC_START_CAP) {
ivtv_irq_enc_start_cap(itv);
}
if (combo & IVTV_IRQ_ENC_VBI_CAP) {
ivtv_irq_enc_vbi_cap(itv);
}
if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
ivtv_irq_dec_vbi_reinsert(itv);
}
if (combo & IVTV_IRQ_ENC_EOS) {
IVTV_DEBUG_IRQ("ENC EOS\n");
set_bit(IVTV_F_I_EOS, &itv->i_flags);
wake_up(&itv->eos_waitq);
}
if (combo & IVTV_IRQ_DEC_DATA_REQ) {
ivtv_irq_dec_data_req(itv);
}
/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
ivtv_irq_vsync(itv);
}
if (combo & IVTV_IRQ_ENC_VIM_RST) {
IVTV_DEBUG_IRQ("VIM RST\n");
/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
}
if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
IVTV_DEBUG_INFO("Stereo mode changed\n");
}
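/*
 * Dispatch pending per-stream transfers in round-robin order; irq_rr_idx
 * advances the starting stream on every pass so that one busy stream
 * cannot permanently starve the others.
 */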
if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
itv->irq_rr_idx++;
for (i = 0; i < IVTV_MAX_STREAMS; i++) {
int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
struct ivtv_stream *s = &itv->streams[idx];
if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
continue;
if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
ivtv_dma_dec_start(s);
else
ivtv_dma_enc_start(s);
break;
}
if (i == IVTV_MAX_STREAMS &&
test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags))
ivtv_udma_start(itv);
}
if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
itv->irq_rr_idx++;
for (i = 0; i < IVTV_MAX_STREAMS; i++) {
int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
struct ivtv_stream *s = &itv->streams[idx];
if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
continue;
if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
ivtv_dma_enc_start(s);
break;
}
}
if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
kthread_queue_work(&itv->irq_worker, &itv->irq_work);
}
spin_unlock(&itv->dma_reg_lock);
/* If we've just handled a 'forced' vsync, it's safest to say it
* wasn't ours. Another device may have triggered it at just
* the right time.
*/
return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}
void ivtv_unfinished_dma(struct timer_list *t)
{
struct ivtv *itv = from_timer(itv, t, dma_timer);
if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
return;
IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
clear_bit(IVTV_F_I_DMA, &itv->i_flags);
itv->cur_dma_stream = -1;
wake_up(&itv->dma_waitq);
}
| linux-master | drivers/media/pci/ivtv/ivtv-irq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
User DMA
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <[email protected]>
Copyright (C) 2005-2007 Hans Verkuil <[email protected]>
*/
#include "ivtv-driver.h"
#include "ivtv-udma.h"
void ivtv_udma_get_page_info(struct ivtv_dma_page_info *dma_page, unsigned long first, unsigned long size)
{
dma_page->uaddr = first & PAGE_MASK;
dma_page->offset = first & ~PAGE_MASK;
dma_page->tail = 1 + ((first+size-1) & ~PAGE_MASK);
dma_page->first = (first & PAGE_MASK) >> PAGE_SHIFT;
dma_page->last = ((first+size-1) & PAGE_MASK) >> PAGE_SHIFT;
dma_page->page_count = dma_page->last - dma_page->first + 1;
if (dma_page->page_count == 1) dma_page->tail -= dma_page->offset;
}
int ivtv_udma_fill_sg_list (struct ivtv_user_dma *dma, struct ivtv_dma_page_info *dma_page, int map_offset)
{
int i, offset;
unsigned long flags;
if (map_offset < 0)
return map_offset;
offset = dma_page->offset;
/* Fill SG Array with new values */
for (i = 0; i < dma_page->page_count; i++) {
unsigned int len = (i == dma_page->page_count - 1) ?
dma_page->tail : PAGE_SIZE - offset;
if (PageHighMem(dma->map[map_offset])) {
void *src;
if (dma->bouncemap[map_offset] == NULL)
dma->bouncemap[map_offset] = alloc_page(GFP_KERNEL);
if (dma->bouncemap[map_offset] == NULL)
return -1;
local_irq_save(flags);
src = kmap_atomic(dma->map[map_offset]) + offset;
memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
kunmap_atomic(src);
local_irq_restore(flags);
sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset], len, offset);
}
else {
sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset], len, offset);
}
offset = 0;
map_offset++;
}
return map_offset;
}
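/*
 * Builds the card's SG array from the mapped scatterlist.  Destination
 * addresses are laid out linearly from buffer_offset; once 'split' bytes
 * have been consumed, the destination jumps to buffer_offset_2.  A split
 * of -1 (as passed by ivtv_udma_setup()) effectively disables the jump.
 */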
void ivtv_udma_fill_sg_array (struct ivtv_user_dma *dma, u32 buffer_offset, u32 buffer_offset_2, u32 split) {
int i;
struct scatterlist *sg;
for_each_sg(dma->SGlist, sg, dma->SG_length, i) {
dma->SGarray[i].size = cpu_to_le32(sg_dma_len(sg));
dma->SGarray[i].src = cpu_to_le32(sg_dma_address(sg));
dma->SGarray[i].dst = cpu_to_le32(buffer_offset);
buffer_offset += sg_dma_len(sg);
split -= sg_dma_len(sg);
if (split == 0)
buffer_offset = buffer_offset_2;
}
}
/* User DMA Buffers */
void ivtv_udma_alloc(struct ivtv *itv)
{
if (itv->udma.SG_handle == 0) {
/* Map DMA Page Array Buffer */
itv->udma.SG_handle = dma_map_single(&itv->pdev->dev,
itv->udma.SGarray,
sizeof(itv->udma.SGarray),
DMA_TO_DEVICE);
ivtv_udma_sync_for_cpu(itv);
}
}
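/*
 * Sets up a user-space to card DMA transfer: pin the user pages, copy any
 * highmem pages into bounce pages, build and map the scatterlist, fill
 * the hardware SG array with ivtv_dest_addr as the destination, and tag
 * the last element so the card raises an interrupt when the transfer
 * completes.  Returns the number of pinned pages or a negative error.
 */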
int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
void __user *userbuf, int size_in_bytes)
{
struct ivtv_dma_page_info user_dma;
struct ivtv_user_dma *dma = &itv->udma;
int err;
IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr);
/* Still in USE */
if (dma->SG_length || dma->page_count) {
IVTV_DEBUG_WARN("ivtv_udma_setup: SG_length %d page_count %d still full?\n",
dma->SG_length, dma->page_count);
return -EBUSY;
}
ivtv_udma_get_page_info(&user_dma, (unsigned long)userbuf, size_in_bytes);
if (user_dma.page_count <= 0) {
IVTV_DEBUG_WARN("ivtv_udma_setup: Error %d page_count from %d bytes %d offset\n",
user_dma.page_count, size_in_bytes, user_dma.offset);
return -EINVAL;
}
/* Pin user pages for DMA Xfer */
err = pin_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
dma->map, 0);
if (user_dma.page_count != err) {
IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
err, user_dma.page_count);
if (err >= 0) {
unpin_user_pages(dma->map, err);
return -EINVAL;
}
return err;
}
dma->page_count = user_dma.page_count;
/* Fill SG List with new values */
if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
unpin_user_pages(dma->map, dma->page_count);
dma->page_count = 0;
return -ENOMEM;
}
/* Map SG List */
dma->SG_length = dma_map_sg(&itv->pdev->dev, dma->SGlist,
dma->page_count, DMA_TO_DEVICE);
/* Fill SG Array with new values */
ivtv_udma_fill_sg_array (dma, ivtv_dest_addr, 0, -1);
/* Tag SG Array with Interrupt Bit */
dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);
ivtv_udma_sync_for_device(itv);
return dma->page_count;
}
void ivtv_udma_unmap(struct ivtv *itv)
{
struct ivtv_user_dma *dma = &itv->udma;
IVTV_DEBUG_INFO("ivtv_unmap_user_dma\n");
/* Nothing to free */
if (dma->page_count == 0)
return;
/* Unmap Scatterlist */
if (dma->SG_length) {
dma_unmap_sg(&itv->pdev->dev, dma->SGlist, dma->page_count,
DMA_TO_DEVICE);
dma->SG_length = 0;
}
/* sync DMA */
ivtv_udma_sync_for_cpu(itv);
unpin_user_pages(dma->map, dma->page_count);
dma->page_count = 0;
}
void ivtv_udma_free(struct ivtv *itv)
{
int i;
/* Unmap SG Array */
if (itv->udma.SG_handle) {
dma_unmap_single(&itv->pdev->dev, itv->udma.SG_handle,
sizeof(itv->udma.SGarray), DMA_TO_DEVICE);
}
/* Unmap Scatterlist */
if (itv->udma.SG_length) {
dma_unmap_sg(&itv->pdev->dev, itv->udma.SGlist,
itv->udma.page_count, DMA_TO_DEVICE);
}
for (i = 0; i < IVTV_DMA_SG_OSD_ENT; i++) {
if (itv->udma.bouncemap[i])
__free_page(itv->udma.bouncemap[i]);
}
}
void ivtv_udma_start(struct ivtv *itv)
{
IVTV_DEBUG_DMA("start UDMA\n");
write_reg(itv->udma.SG_handle, IVTV_REG_DECDMAADDR);
write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
set_bit(IVTV_F_I_DMA, &itv->i_flags);
set_bit(IVTV_F_I_UDMA, &itv->i_flags);
clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
}
void ivtv_udma_prepare(struct ivtv *itv)
{
unsigned long flags;
spin_lock_irqsave(&itv->dma_reg_lock, flags);
if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
ivtv_udma_start(itv);
else
set_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}
| linux-master | drivers/media/pci/ivtv/ivtv-udma.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
Audio/video-routing-related ivtv functions.
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2005-2007 Hans Verkuil <[email protected]>
*/
#include "ivtv-driver.h"
#include "ivtv-i2c.h"
#include "ivtv-cards.h"
#include "ivtv-gpio.h"
#include "ivtv-routing.h"
#include <media/drv-intf/msp3400.h>
#include <media/i2c/m52790.h>
#include <media/i2c/upd64031a.h>
#include <media/i2c/upd64083.h>
/* Selects the audio input and output according to the current
settings. */
void ivtv_audio_set_io(struct ivtv *itv)
{
const struct ivtv_card_audio_input *in;
u32 input, output = 0;
/* Determine which input to use */
if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags))
in = &itv->card->radio_input;
else
in = &itv->card->audio_inputs[itv->audio_input];
/* handle muxer chips */
input = in->muxer_input;
if (itv->card->hw_muxer & IVTV_HW_M52790)
output = M52790_OUT_STEREO;
v4l2_subdev_call(itv->sd_muxer, audio, s_routing,
input, output, 0);
input = in->audio_input;
output = 0;
if (itv->card->hw_audio & IVTV_HW_MSP34XX)
output = MSP_OUTPUT(MSP_SC_IN_DSP_SCART1);
ivtv_call_hw(itv, itv->card->hw_audio, audio, s_routing,
input, output, 0);
}
/* Selects the video input and output according to the current
settings. */
void ivtv_video_set_io(struct ivtv *itv)
{
int inp = itv->active_input;
u32 input;
u32 type;
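/*
 * The comparisons below rely on the IVTV_CARD_INPUT_VID_* values being
 * ordered tuner < S-Video inputs < composite inputs, so any type between
 * the tuner and the first composite input is treated as S-Video.
 */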
v4l2_subdev_call(itv->sd_video, video, s_routing,
itv->card->video_inputs[inp].video_input, 0, 0);
type = itv->card->video_inputs[inp].video_type;
if (type == IVTV_CARD_INPUT_VID_TUNER) {
input = 0; /* Tuner */
} else if (type < IVTV_CARD_INPUT_COMPOSITE1) {
input = 2; /* S-Video */
} else {
input = 1; /* Composite */
}
if (itv->card->hw_video & IVTV_HW_GPIO)
ivtv_call_hw(itv, IVTV_HW_GPIO, video, s_routing,
input, 0, 0);
if (itv->card->hw_video & IVTV_HW_UPD64031A) {
if (type == IVTV_CARD_INPUT_VID_TUNER ||
type >= IVTV_CARD_INPUT_COMPOSITE1) {
/* Composite: GR on, connect to 3DYCS */
input = UPD64031A_GR_ON | UPD64031A_3DYCS_COMPOSITE;
} else {
/* S-Video: GR bypassed, turn it off */
input = UPD64031A_GR_OFF | UPD64031A_3DYCS_DISABLE;
}
input |= itv->card->gr_config;
ivtv_call_hw(itv, IVTV_HW_UPD64031A, video, s_routing,
input, 0, 0);
}
if (itv->card->hw_video & IVTV_HW_UPD6408X) {
input = UPD64083_YCS_MODE;
if (type > IVTV_CARD_INPUT_VID_TUNER &&
type < IVTV_CARD_INPUT_COMPOSITE1) {
/* S-Video uses YCNR mode and internal Y-ADC, the
upd64031a is not used. */
input |= UPD64083_YCNR_MODE;
}
else if (itv->card->hw_video & IVTV_HW_UPD64031A) {
/* Use upd64031a output for tuner and
composite (CX23416GYC only) inputs */
if (type == IVTV_CARD_INPUT_VID_TUNER ||
itv->card->type == IVTV_CARD_CX23416GYC) {
input |= UPD64083_EXT_Y_ADC;
}
}
ivtv_call_hw(itv, IVTV_HW_UPD6408X, video, s_routing,
input, 0, 0);
}
}
| linux-master | drivers/media/pci/ivtv/ivtv-routing.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
Functions to query card hardware
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2005-2007 Hans Verkuil <[email protected]>
*/
#include "ivtv-driver.h"
#include "ivtv-cards.h"
#include "ivtv-i2c.h"
#include <media/drv-intf/msp3400.h>
#include <media/i2c/m52790.h>
#include <media/i2c/wm8775.h>
#include <media/i2c/cs53l32a.h>
#include <media/drv-intf/cx25840.h>
#include <media/i2c/upd64031a.h>
#define MSP_TUNER MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER1, \
MSP_DSP_IN_TUNER, MSP_DSP_IN_TUNER)
#define MSP_SCART1 MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER1, \
MSP_DSP_IN_SCART, MSP_DSP_IN_SCART)
#define MSP_SCART2 MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1, \
MSP_DSP_IN_SCART, MSP_DSP_IN_SCART)
#define MSP_SCART3 MSP_INPUT(MSP_IN_SCART3, MSP_IN_TUNER1, \
MSP_DSP_IN_SCART, MSP_DSP_IN_SCART)
#define MSP_MONO MSP_INPUT(MSP_IN_MONO, MSP_IN_TUNER1, \
MSP_DSP_IN_SCART, MSP_DSP_IN_SCART)
#define V4L2_STD_PAL_SECAM (V4L2_STD_PAL|V4L2_STD_SECAM)
/* usual i2c tuner addresses to probe */
static struct ivtv_card_tuner_i2c ivtv_i2c_std = {
.radio = { I2C_CLIENT_END },
.demod = { 0x43, I2C_CLIENT_END },
.tv = { 0x61, 0x60, I2C_CLIENT_END },
};
/* as above, but with possible radio tuner */
static struct ivtv_card_tuner_i2c ivtv_i2c_radio = {
.radio = { 0x60, I2C_CLIENT_END },
.demod = { 0x43, I2C_CLIENT_END },
.tv = { 0x61, I2C_CLIENT_END },
};
/* using the tda8290+75a combo */
static struct ivtv_card_tuner_i2c ivtv_i2c_tda8290 = {
.radio = { I2C_CLIENT_END },
.demod = { I2C_CLIENT_END },
.tv = { 0x4b, I2C_CLIENT_END },
};
/********************** card configuration *******************************/
/* Please add new PCI IDs to: https://pci-ids.ucw.cz/
This keeps the PCI ID database up to date. Note that the entries
must be added under vendor 0x4444 (Conexant) as subsystem IDs.
New vendor IDs should still be added to the vendor ID list. */
/* Hauppauge PVR-250 cards */
/* Note: for Hauppauge cards the tveeprom information is used instead of PCI IDs */
static const struct ivtv_card ivtv_card_pvr250 = {
.type = IVTV_CARD_PVR_250,
.name = "Hauppauge WinTV PVR-250",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_SAA7115,
.hw_audio = IVTV_HW_MSP34XX,
.hw_audio_ctrl = IVTV_HW_MSP34XX,
.hw_all = IVTV_HW_MSP34XX | IVTV_HW_SAA7115 |
IVTV_HW_TVEEPROM | IVTV_HW_TUNER,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 },
{ IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE0 },
{ IVTV_CARD_INPUT_SVIDEO2, 2, IVTV_SAA71XX_SVIDEO1 },
{ IVTV_CARD_INPUT_COMPOSITE2, 2, IVTV_SAA71XX_COMPOSITE1 },
{ IVTV_CARD_INPUT_COMPOSITE3, 1, IVTV_SAA71XX_COMPOSITE5 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, MSP_TUNER },
{ IVTV_CARD_INPUT_LINE_IN1, MSP_SCART1 },
{ IVTV_CARD_INPUT_LINE_IN2, MSP_SCART3 },
},
.radio_input = { IVTV_CARD_INPUT_AUD_TUNER, MSP_SCART2 },
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* Hauppauge PVR-350 cards */
/* Outputs for Hauppauge PVR350 cards */
static struct ivtv_card_output ivtv_pvr350_outputs[] = {
{
.name = "S-Video + Composite",
.video_output = 0,
}, {
.name = "Composite",
.video_output = 1,
}, {
.name = "S-Video",
.video_output = 2,
}, {
.name = "RGB",
.video_output = 3,
}, {
.name = "YUV C",
.video_output = 4,
}, {
.name = "YUV V",
.video_output = 5,
}
};
static const struct ivtv_card ivtv_card_pvr350 = {
.type = IVTV_CARD_PVR_350,
.name = "Hauppauge WinTV PVR-350",
.v4l2_capabilities = IVTV_CAP_ENCODER | IVTV_CAP_DECODER,
.video_outputs = ivtv_pvr350_outputs,
.nof_outputs = ARRAY_SIZE(ivtv_pvr350_outputs),
.hw_video = IVTV_HW_SAA7115,
.hw_audio = IVTV_HW_MSP34XX,
.hw_audio_ctrl = IVTV_HW_MSP34XX,
.hw_all = IVTV_HW_MSP34XX | IVTV_HW_SAA7115 |
IVTV_HW_SAA7127 | IVTV_HW_TVEEPROM | IVTV_HW_TUNER |
IVTV_HW_I2C_IR_RX_HAUP_EXT | IVTV_HW_I2C_IR_RX_HAUP_INT,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 },
{ IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE0 },
{ IVTV_CARD_INPUT_SVIDEO2, 2, IVTV_SAA71XX_SVIDEO1 },
{ IVTV_CARD_INPUT_COMPOSITE2, 2, IVTV_SAA71XX_COMPOSITE1 },
{ IVTV_CARD_INPUT_COMPOSITE3, 1, IVTV_SAA71XX_COMPOSITE5 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, MSP_TUNER },
{ IVTV_CARD_INPUT_LINE_IN1, MSP_SCART1 },
{ IVTV_CARD_INPUT_LINE_IN2, MSP_SCART3 },
},
.radio_input = { IVTV_CARD_INPUT_AUD_TUNER, MSP_SCART2 },
.i2c = &ivtv_i2c_std,
};
/* PVR-350 V1 boards have a different audio tuner input and use a
saa7114 instead of a saa7115.
Note that the info below comes from a pre-production model so it may
not be correct. In particular, the audio behaves strangely (it seems to be mono only). */
static const struct ivtv_card ivtv_card_pvr350_v1 = {
.type = IVTV_CARD_PVR_350_V1,
.name = "Hauppauge WinTV PVR-350 (V1)",
.v4l2_capabilities = IVTV_CAP_ENCODER | IVTV_CAP_DECODER,
.video_outputs = ivtv_pvr350_outputs,
.nof_outputs = ARRAY_SIZE(ivtv_pvr350_outputs),
.hw_video = IVTV_HW_SAA7114,
.hw_audio = IVTV_HW_MSP34XX,
.hw_audio_ctrl = IVTV_HW_MSP34XX,
.hw_all = IVTV_HW_MSP34XX | IVTV_HW_SAA7114 |
IVTV_HW_SAA7127 | IVTV_HW_TVEEPROM | IVTV_HW_TUNER,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 },
{ IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE0 },
{ IVTV_CARD_INPUT_SVIDEO2, 2, IVTV_SAA71XX_SVIDEO1 },
{ IVTV_CARD_INPUT_COMPOSITE2, 2, IVTV_SAA71XX_COMPOSITE1 },
{ IVTV_CARD_INPUT_COMPOSITE3, 1, IVTV_SAA71XX_COMPOSITE5 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, MSP_MONO },
{ IVTV_CARD_INPUT_LINE_IN1, MSP_SCART1 },
{ IVTV_CARD_INPUT_LINE_IN2, MSP_SCART3 },
},
.radio_input = { IVTV_CARD_INPUT_AUD_TUNER, MSP_SCART2 },
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* Hauppauge PVR-150/PVR-500 cards */
static const struct ivtv_card ivtv_card_pvr150 = {
.type = IVTV_CARD_PVR_150,
.name = "Hauppauge WinTV PVR-150",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_CX25840,
.hw_audio = IVTV_HW_CX25840,
.hw_audio_ctrl = IVTV_HW_CX25840,
.hw_muxer = IVTV_HW_WM8775,
.hw_all = IVTV_HW_WM8775 | IVTV_HW_CX25840 |
IVTV_HW_TVEEPROM | IVTV_HW_TUNER |
IVTV_HW_I2C_IR_RX_HAUP_EXT | IVTV_HW_I2C_IR_RX_HAUP_INT |
IVTV_HW_Z8F0811_IR_HAUP,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE7 },
{ IVTV_CARD_INPUT_SVIDEO1, 1, CX25840_SVIDEO1 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, CX25840_COMPOSITE3 },
{ IVTV_CARD_INPUT_SVIDEO2, 2, CX25840_SVIDEO2 },
{ IVTV_CARD_INPUT_COMPOSITE2, 2, CX25840_COMPOSITE4 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER,
CX25840_AUDIO8, WM8775_AIN2 },
{ IVTV_CARD_INPUT_LINE_IN1,
CX25840_AUDIO_SERIAL, WM8775_AIN2 },
{ IVTV_CARD_INPUT_LINE_IN2,
CX25840_AUDIO_SERIAL, WM8775_AIN3 },
},
.radio_input = { IVTV_CARD_INPUT_AUD_TUNER,
CX25840_AUDIO_SERIAL, WM8775_AIN4 },
/* apparently needed for the IR blaster */
.gpio_init = { .direction = 0x1f01, .initial_value = 0x26f3 },
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* AVerMedia M179 cards */
static const struct ivtv_card_pci_info ivtv_pci_m179[] = {
{ PCI_DEVICE_ID_IVTV15, IVTV_PCI_ID_AVERMEDIA, 0xa3cf },
{ PCI_DEVICE_ID_IVTV15, IVTV_PCI_ID_AVERMEDIA, 0xa3ce },
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_m179 = {
.type = IVTV_CARD_M179,
.name = "AVerMedia M179",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_SAA7114,
.hw_audio = IVTV_HW_GPIO,
.hw_audio_ctrl = IVTV_HW_GPIO,
.hw_all = IVTV_HW_GPIO | IVTV_HW_SAA7114 | IVTV_HW_TUNER,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 },
{ IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE3 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, IVTV_GPIO_TUNER },
{ IVTV_CARD_INPUT_LINE_IN1, IVTV_GPIO_LINE_IN },
},
.gpio_init = { .direction = 0xe380, .initial_value = 0x8290 },
.gpio_audio_input = { .mask = 0x8040, .tuner = 0x8000, .linein = 0x0000 },
.gpio_audio_mute = { .mask = 0x2000, .mute = 0x2000 },
.gpio_audio_mode = { .mask = 0x4300, .mono = 0x4000, .stereo = 0x0200,
.lang1 = 0x0200, .lang2 = 0x0100, .both = 0x0000 },
.gpio_audio_freq = { .mask = 0x0018, .f32000 = 0x0000,
.f44100 = 0x0008, .f48000 = 0x0010 },
.gpio_audio_detect = { .mask = 0x4000, .stereo = 0x0000 },
.tuners = {
/* As far as we know all M179 cards use this tuner */
{ .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_NTSC },
},
.pci_list = ivtv_pci_m179,
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* Yuan MPG600/Kuroutoshikou ITVC16-STVLP cards */
static const struct ivtv_card_pci_info ivtv_pci_mpg600[] = {
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN1, 0xfff3 },
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN1, 0xffff },
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_mpg600 = {
.type = IVTV_CARD_MPG600,
.name = "Yuan MPG600, Kuroutoshikou ITVC16-STVLP",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_SAA7115,
.hw_audio = IVTV_HW_GPIO,
.hw_audio_ctrl = IVTV_HW_GPIO,
.hw_all = IVTV_HW_GPIO | IVTV_HW_SAA7115 | IVTV_HW_TUNER,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 },
{ IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE3 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, IVTV_GPIO_TUNER },
{ IVTV_CARD_INPUT_LINE_IN1, IVTV_GPIO_LINE_IN },
},
.gpio_init = { .direction = 0x3080, .initial_value = 0x0004 },
.gpio_audio_input = { .mask = 0x3000, .tuner = 0x0000, .linein = 0x2000 },
.gpio_audio_mute = { .mask = 0x0001, .mute = 0x0001 },
.gpio_audio_mode = { .mask = 0x000e, .mono = 0x0006, .stereo = 0x0004,
.lang1 = 0x0004, .lang2 = 0x0000, .both = 0x0008 },
.gpio_audio_detect = { .mask = 0x0900, .stereo = 0x0100 },
.tuners = {
/* The PAL tuner is confirmed */
{ .std = V4L2_STD_PAL_SECAM, .tuner = TUNER_PHILIPS_FQ1216ME },
{ .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FQ1286 },
},
.pci_list = ivtv_pci_mpg600,
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* Yuan MPG160/Kuroutoshikou ITVC15-STVLP cards */
static const struct ivtv_card_pci_info ivtv_pci_mpg160[] = {
{ PCI_DEVICE_ID_IVTV15, IVTV_PCI_ID_YUAN1, 0 },
{ PCI_DEVICE_ID_IVTV15, IVTV_PCI_ID_IODATA, 0x40a0 },
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_mpg160 = {
.type = IVTV_CARD_MPG160,
.name = "YUAN MPG160, Kuroutoshikou ITVC15-STVLP, I/O Data GV-M2TV/PCI",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_SAA7114,
.hw_audio = IVTV_HW_GPIO,
.hw_audio_ctrl = IVTV_HW_GPIO,
.hw_all = IVTV_HW_GPIO | IVTV_HW_SAA7114 | IVTV_HW_TUNER,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 },
{ IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE3 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, IVTV_GPIO_TUNER },
{ IVTV_CARD_INPUT_LINE_IN1, IVTV_GPIO_LINE_IN },
},
.gpio_init = { .direction = 0x7080, .initial_value = 0x400c },
.gpio_audio_input = { .mask = 0x3000, .tuner = 0x0000, .linein = 0x2000 },
.gpio_audio_mute = { .mask = 0x0001, .mute = 0x0001 },
.gpio_audio_mode = { .mask = 0x000e, .mono = 0x0006, .stereo = 0x0004,
.lang1 = 0x0004, .lang2 = 0x0000, .both = 0x0008 },
.gpio_audio_detect = { .mask = 0x0900, .stereo = 0x0100 },
.tuners = {
{ .std = V4L2_STD_PAL_SECAM, .tuner = TUNER_PHILIPS_FQ1216ME },
{ .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FQ1286 },
},
.pci_list = ivtv_pci_mpg160,
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* Yuan PG600/Diamond PVR-550 cards */
static const struct ivtv_card_pci_info ivtv_pci_pg600[] = {
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_DIAMONDMM, 0x0070 },
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN3, 0x0600 },
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_pg600 = {
.type = IVTV_CARD_PG600,
.name = "Yuan PG600, Diamond PVR-550",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_CX25840,
.hw_audio = IVTV_HW_CX25840,
.hw_audio_ctrl = IVTV_HW_CX25840,
.hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE2 },
{ IVTV_CARD_INPUT_SVIDEO1, 1,
CX25840_SVIDEO_LUMA3 | CX25840_SVIDEO_CHROMA4 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, CX25840_COMPOSITE1 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 },
{ IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL },
},
.tuners = {
{ .std = V4L2_STD_PAL_SECAM, .tuner = TUNER_PHILIPS_FQ1216ME },
{ .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FQ1286 },
},
.pci_list = ivtv_pci_pg600,
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* Adaptec VideOh! AVC-2410 card */
static const struct ivtv_card_pci_info ivtv_pci_avc2410[] = {
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_ADAPTEC, 0x0093 },
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_avc2410 = {
.type = IVTV_CARD_AVC2410,
.name = "Adaptec VideOh! AVC-2410",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_SAA7115,
.hw_audio = IVTV_HW_MSP34XX,
.hw_audio_ctrl = IVTV_HW_MSP34XX,
.hw_muxer = IVTV_HW_CS53L32A,
.hw_all = IVTV_HW_MSP34XX | IVTV_HW_CS53L32A |
IVTV_HW_SAA7115 | IVTV_HW_TUNER |
IVTV_HW_I2C_IR_RX_ADAPTEC,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 },
{ IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE3 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER,
MSP_TUNER, CS53L32A_IN0 },
{ IVTV_CARD_INPUT_LINE_IN1,
MSP_SCART1, CS53L32A_IN2 },
},
/* This card has no eeprom and in fact the Windows driver relies
on the country/region setting of the user to decide which tuner
is available. */
.tuners = {
{ .std = V4L2_STD_PAL_SECAM, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
{ .std = V4L2_STD_ALL - V4L2_STD_NTSC_M_JP,
.tuner = TUNER_PHILIPS_FM1236_MK3 },
{ .std = V4L2_STD_NTSC_M_JP, .tuner = TUNER_PHILIPS_FQ1286 },
},
.pci_list = ivtv_pci_avc2410,
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* Adaptec VideOh! AVC-2010 card */
static const struct ivtv_card_pci_info ivtv_pci_avc2010[] = {
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_ADAPTEC, 0x0092 },
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_avc2010 = {
.type = IVTV_CARD_AVC2010,
.name = "Adaptec VideOh! AVC-2010",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_SAA7115,
.hw_audio = IVTV_HW_CS53L32A,
.hw_audio_ctrl = IVTV_HW_CS53L32A,
.hw_all = IVTV_HW_CS53L32A | IVTV_HW_SAA7115,
.video_inputs = {
{ IVTV_CARD_INPUT_SVIDEO1, 0, IVTV_SAA71XX_SVIDEO0 },
{ IVTV_CARD_INPUT_COMPOSITE1, 0, IVTV_SAA71XX_COMPOSITE3 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_LINE_IN1, CS53L32A_IN2 },
},
/* Does not have a tuner */
.pci_list = ivtv_pci_avc2010,
};
/* ------------------------------------------------------------------------- */
/* Nagase Transgear 5000TV card */
static const struct ivtv_card_pci_info ivtv_pci_tg5000tv[] = {
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_AVERMEDIA, 0xbfff },
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_tg5000tv = {
.type = IVTV_CARD_TG5000TV,
.name = "Nagase Transgear 5000TV",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_SAA7114 | IVTV_HW_UPD64031A | IVTV_HW_UPD6408X |
IVTV_HW_GPIO,
.hw_audio = IVTV_HW_GPIO,
.hw_audio_ctrl = IVTV_HW_GPIO,
.hw_all = IVTV_HW_GPIO | IVTV_HW_SAA7114 | IVTV_HW_TUNER |
IVTV_HW_UPD64031A | IVTV_HW_UPD6408X,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_SVIDEO0 },
{ IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO2 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_SVIDEO2 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, IVTV_GPIO_TUNER },
{ IVTV_CARD_INPUT_LINE_IN1, IVTV_GPIO_LINE_IN },
},
.gr_config = UPD64031A_VERTICAL_EXTERNAL,
.gpio_init = { .direction = 0xe080, .initial_value = 0x8000 },
.gpio_audio_input = { .mask = 0x8080, .tuner = 0x8000, .linein = 0x0080 },
.gpio_audio_mute = { .mask = 0x6000, .mute = 0x6000 },
.gpio_audio_mode = { .mask = 0x4300, .mono = 0x4000, .stereo = 0x0200,
.lang1 = 0x0300, .lang2 = 0x0000, .both = 0x0200 },
.gpio_video_input = { .mask = 0x0030, .tuner = 0x0000,
.composite = 0x0010, .svideo = 0x0020 },
.tuners = {
{ .std = V4L2_STD_MN, .tuner = TUNER_PHILIPS_FQ1286 },
},
.pci_list = ivtv_pci_tg5000tv,
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* AOpen VA2000MAX-SNT6 card */
static const struct ivtv_card_pci_info ivtv_pci_va2000[] = {
{ PCI_DEVICE_ID_IVTV16, 0, 0xff5f },
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_va2000 = {
.type = IVTV_CARD_VA2000MAX_SNT6,
.name = "AOpen VA2000MAX-SNT6",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_SAA7115 | IVTV_HW_UPD6408X,
.hw_audio = IVTV_HW_MSP34XX,
.hw_audio_ctrl = IVTV_HW_MSP34XX,
.hw_all = IVTV_HW_MSP34XX | IVTV_HW_SAA7115 |
IVTV_HW_UPD6408X | IVTV_HW_TUNER,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_SVIDEO0 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, MSP_TUNER },
},
.tuners = {
{ .std = V4L2_STD_MN, .tuner = TUNER_PHILIPS_FQ1286 },
},
.pci_list = ivtv_pci_va2000,
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* Yuan MPG600GR/Kuroutoshikou CX23416GYC-STVLP cards */
static const struct ivtv_card_pci_info ivtv_pci_cx23416gyc[] = {
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN1, 0x0600 },
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN4, 0x0600 },
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_MELCO, 0x0523 },
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_cx23416gyc = {
.type = IVTV_CARD_CX23416GYC,
.name = "Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_SAA717X | IVTV_HW_GPIO |
IVTV_HW_UPD64031A | IVTV_HW_UPD6408X,
.hw_audio = IVTV_HW_SAA717X,
.hw_audio_ctrl = IVTV_HW_SAA717X,
.hw_all = IVTV_HW_GPIO | IVTV_HW_SAA717X | IVTV_HW_TUNER |
IVTV_HW_UPD64031A | IVTV_HW_UPD6408X,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_SVIDEO3 |
IVTV_SAA717X_TUNER_FLAG },
{ IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_SVIDEO3 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, IVTV_SAA717X_IN2 },
{ IVTV_CARD_INPUT_LINE_IN1, IVTV_SAA717X_IN0 },
},
.gr_config = UPD64031A_VERTICAL_EXTERNAL,
.gpio_init = { .direction = 0xf880, .initial_value = 0x8800 },
.gpio_video_input = { .mask = 0x0020, .tuner = 0x0000,
.composite = 0x0020, .svideo = 0x0020 },
.gpio_audio_freq = { .mask = 0xc000, .f32000 = 0x0000,
.f44100 = 0x4000, .f48000 = 0x8000 },
.tuners = {
{ .std = V4L2_STD_PAL_SECAM, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
{ .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FM1236_MK3 },
},
.pci_list = ivtv_pci_cx23416gyc,
.i2c = &ivtv_i2c_std,
};
static const struct ivtv_card ivtv_card_cx23416gyc_nogr = {
.type = IVTV_CARD_CX23416GYC_NOGR,
.name = "Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP (no GR)",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_SAA717X | IVTV_HW_GPIO | IVTV_HW_UPD6408X,
.hw_audio = IVTV_HW_SAA717X,
.hw_audio_ctrl = IVTV_HW_SAA717X,
.hw_all = IVTV_HW_GPIO | IVTV_HW_SAA717X | IVTV_HW_TUNER |
IVTV_HW_UPD6408X,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 |
IVTV_SAA717X_TUNER_FLAG },
{ IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE0 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, IVTV_SAA717X_IN2 },
{ IVTV_CARD_INPUT_LINE_IN1, IVTV_SAA717X_IN0 },
},
.gpio_init = { .direction = 0xf880, .initial_value = 0x8800 },
.gpio_video_input = { .mask = 0x0020, .tuner = 0x0000,
.composite = 0x0020, .svideo = 0x0020 },
.gpio_audio_freq = { .mask = 0xc000, .f32000 = 0x0000,
.f44100 = 0x4000, .f48000 = 0x8000 },
.tuners = {
{ .std = V4L2_STD_PAL_SECAM, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
{ .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FM1236_MK3 },
},
.i2c = &ivtv_i2c_std,
};
static const struct ivtv_card ivtv_card_cx23416gyc_nogrycs = {
.type = IVTV_CARD_CX23416GYC_NOGRYCS,
.name = "Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP (no GR/YCS)",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_SAA717X | IVTV_HW_GPIO,
.hw_audio = IVTV_HW_SAA717X,
.hw_audio_ctrl = IVTV_HW_SAA717X,
.hw_all = IVTV_HW_GPIO | IVTV_HW_SAA717X | IVTV_HW_TUNER,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 |
IVTV_SAA717X_TUNER_FLAG },
{ IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE0 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, IVTV_SAA717X_IN2 },
{ IVTV_CARD_INPUT_LINE_IN1, IVTV_SAA717X_IN0 },
},
.gpio_init = { .direction = 0xf880, .initial_value = 0x8800 },
.gpio_video_input = { .mask = 0x0020, .tuner = 0x0000,
.composite = 0x0020, .svideo = 0x0020 },
.gpio_audio_freq = { .mask = 0xc000, .f32000 = 0x0000,
.f44100 = 0x4000, .f48000 = 0x8000 },
.tuners = {
{ .std = V4L2_STD_PAL_SECAM, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
{ .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FM1236_MK3 },
},
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* I/O Data GV-MVP/RX & GV-MVP/RX2W (dual tuner) cards */
static const struct ivtv_card_pci_info ivtv_pci_gv_mvprx[] = {
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_IODATA, 0xd01e },
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_IODATA, 0xd038 }, /* 2W unit #1 */
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_IODATA, 0xd039 }, /* 2W unit #2 */
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_gv_mvprx = {
.type = IVTV_CARD_GV_MVPRX,
.name = "I/O Data GV-MVP/RX, GV-MVP/RX2W (dual tuner)",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_SAA7115 | IVTV_HW_UPD64031A | IVTV_HW_UPD6408X,
.hw_audio = IVTV_HW_GPIO,
.hw_audio_ctrl = IVTV_HW_WM8739,
.hw_all = IVTV_HW_GPIO | IVTV_HW_SAA7115 | IVTV_HW_VP27SMPX |
IVTV_HW_TUNER | IVTV_HW_WM8739 |
IVTV_HW_UPD64031A | IVTV_HW_UPD6408X,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_SVIDEO0 },
{ IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO1 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_SVIDEO2 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, IVTV_GPIO_TUNER },
{ IVTV_CARD_INPUT_LINE_IN1, IVTV_GPIO_LINE_IN },
},
.gpio_init = { .direction = 0xc301, .initial_value = 0x0200 },
.gpio_audio_input = { .mask = 0xffff, .tuner = 0x0200, .linein = 0x0300 },
.tuners = {
/* This card has the Panasonic VP27 tuner */
{ .std = V4L2_STD_MN, .tuner = TUNER_PANASONIC_VP27 },
},
.pci_list = ivtv_pci_gv_mvprx,
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* I/O Data GV-MVP/RX2E card */
static const struct ivtv_card_pci_info ivtv_pci_gv_mvprx2e[] = {
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_IODATA, 0xd025 },
{0, 0, 0}
};
static const struct ivtv_card ivtv_card_gv_mvprx2e = {
.type = IVTV_CARD_GV_MVPRX2E,
.name = "I/O Data GV-MVP/RX2E",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_SAA7115,
.hw_audio = IVTV_HW_GPIO,
.hw_audio_ctrl = IVTV_HW_WM8739,
.hw_all = IVTV_HW_GPIO | IVTV_HW_SAA7115 | IVTV_HW_TUNER |
IVTV_HW_VP27SMPX | IVTV_HW_WM8739,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 },
{ IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE3 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, IVTV_GPIO_TUNER },
{ IVTV_CARD_INPUT_LINE_IN1, IVTV_GPIO_LINE_IN },
},
.gpio_init = { .direction = 0xc301, .initial_value = 0x0200 },
.gpio_audio_input = { .mask = 0xffff, .tuner = 0x0200, .linein = 0x0300 },
.tuners = {
/* This card has the Panasonic VP27 tuner */
{ .std = V4L2_STD_MN, .tuner = TUNER_PANASONIC_VP27 },
},
.pci_list = ivtv_pci_gv_mvprx2e,
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* GotVIEW PCI DVD card */
static const struct ivtv_card_pci_info ivtv_pci_gotview_pci_dvd[] = {
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN1, 0x0600 },
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_gotview_pci_dvd = {
.type = IVTV_CARD_GOTVIEW_PCI_DVD,
.name = "GotView PCI DVD",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_SAA717X,
.hw_audio = IVTV_HW_SAA717X,
.hw_audio_ctrl = IVTV_HW_SAA717X,
.hw_all = IVTV_HW_SAA717X | IVTV_HW_TUNER,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE1 }, /* pin 116 */
{ IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 }, /* pin 114/109 */
{ IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE3 }, /* pin 118 */
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, IVTV_SAA717X_IN0 },
{ IVTV_CARD_INPUT_LINE_IN1, IVTV_SAA717X_IN2 },
},
.gpio_init = { .direction = 0xf000, .initial_value = 0xA000 },
.tuners = {
/* This card has a Philips FQ1216ME MK3 tuner */
{ .std = V4L2_STD_PAL_SECAM, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
},
.pci_list = ivtv_pci_gotview_pci_dvd,
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* GotVIEW PCI DVD2 Deluxe card */
static const struct ivtv_card_pci_info ivtv_pci_gotview_pci_dvd2[] = {
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_GOTVIEW1, 0x0600 },
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_gotview_pci_dvd2 = {
.type = IVTV_CARD_GOTVIEW_PCI_DVD2,
.name = "GotView PCI DVD2 Deluxe",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_CX25840,
.hw_audio = IVTV_HW_CX25840,
.hw_audio_ctrl = IVTV_HW_CX25840,
.hw_muxer = IVTV_HW_GPIO,
.hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE2 },
{ IVTV_CARD_INPUT_SVIDEO1, 1,
CX25840_SVIDEO_LUMA3 | CX25840_SVIDEO_CHROMA4 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, CX25840_COMPOSITE1 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5, 0 },
{ IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL, 1 },
},
.radio_input = { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO_SERIAL, 2 },
.gpio_init = { .direction = 0x0800, .initial_value = 0 },
.gpio_audio_input = { .mask = 0x0800, .tuner = 0, .linein = 0, .radio = 0x0800 },
.tuners = {
/* This card has a Philips FQ1216ME MK5 tuner */
{ .std = V4L2_STD_PAL_SECAM, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
},
.pci_list = ivtv_pci_gotview_pci_dvd2,
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* Yuan MPC622 miniPCI card */
static const struct ivtv_card_pci_info ivtv_pci_yuan_mpc622[] = {
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN2, 0xd998 },
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_yuan_mpc622 = {
.type = IVTV_CARD_YUAN_MPC622,
.name = "Yuan MPC622",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_CX25840,
.hw_audio = IVTV_HW_CX25840,
.hw_audio_ctrl = IVTV_HW_CX25840,
.hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE2 },
{ IVTV_CARD_INPUT_SVIDEO1, 1,
CX25840_SVIDEO_LUMA3 | CX25840_SVIDEO_CHROMA4 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, CX25840_COMPOSITE1 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 },
{ IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL },
},
.gpio_init = { .direction = 0x00ff, .initial_value = 0x0002 },
.tuners = {
/* This card has the TDA8290/TDA8275 tuner chips */
{ .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_TDA8290 },
},
.pci_list = ivtv_pci_yuan_mpc622,
.i2c = &ivtv_i2c_tda8290,
};
/* ------------------------------------------------------------------------- */
/* DIGITAL COWBOY DCT-MTVP1 card */
static const struct ivtv_card_pci_info ivtv_pci_dctmvtvp1[] = {
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_AVERMEDIA, 0xbfff },
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_dctmvtvp1 = {
.type = IVTV_CARD_DCTMTVP1,
.name = "Digital Cowboy DCT-MTVP1",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_SAA7115 | IVTV_HW_UPD64031A | IVTV_HW_UPD6408X |
IVTV_HW_GPIO,
.hw_audio = IVTV_HW_GPIO,
.hw_audio_ctrl = IVTV_HW_GPIO,
.hw_all = IVTV_HW_GPIO | IVTV_HW_SAA7115 | IVTV_HW_TUNER |
IVTV_HW_UPD64031A | IVTV_HW_UPD6408X,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_SVIDEO0 },
{ IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO2 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_SVIDEO2 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, IVTV_GPIO_TUNER },
{ IVTV_CARD_INPUT_LINE_IN1, IVTV_GPIO_LINE_IN },
},
.gpio_init = { .direction = 0xe080, .initial_value = 0x8000 },
.gpio_audio_input = { .mask = 0x8080, .tuner = 0x8000, .linein = 0x0080 },
.gpio_audio_mute = { .mask = 0x6000, .mute = 0x6000 },
.gpio_audio_mode = { .mask = 0x4300, .mono = 0x4000, .stereo = 0x0200,
.lang1 = 0x0300, .lang2 = 0x0000, .both = 0x0200 },
.gpio_video_input = { .mask = 0x0030, .tuner = 0x0000,
.composite = 0x0010, .svideo = 0x0020},
.tuners = {
{ .std = V4L2_STD_MN, .tuner = TUNER_PHILIPS_FQ1286 },
},
.pci_list = ivtv_pci_dctmvtvp1,
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* Yuan PG600-2/GotView PCI DVD Lite cards */
static const struct ivtv_card_pci_info ivtv_pci_pg600v2[] = {
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN3, 0x0600 },
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_GOTVIEW2, 0x0600 },
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_pg600v2 = {
.type = IVTV_CARD_PG600V2,
.name = "Yuan PG600-2, GotView PCI DVD Lite",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_CX25840,
.hw_audio = IVTV_HW_CX25840,
.hw_audio_ctrl = IVTV_HW_CX25840,
.hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER,
/* XC2028 support apparently works for the Yuan, it's still
uncertain whether it also works with the GotView. */
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE2 },
{ IVTV_CARD_INPUT_SVIDEO1, 1,
CX25840_SVIDEO_LUMA3 | CX25840_SVIDEO_CHROMA4 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, CX25840_COMPOSITE1 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 },
{ IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL },
},
.radio_input = { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 },
.xceive_pin = 12,
.tuners = {
{ .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
},
.pci_list = ivtv_pci_pg600v2,
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* Club3D ZAP-TV1x01 cards */
static const struct ivtv_card_pci_info ivtv_pci_club3d[] = {
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN3, 0x0600 },
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_club3d = {
.type = IVTV_CARD_CLUB3D,
.name = "Club3D ZAP-TV1x01",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_CX25840,
.hw_audio = IVTV_HW_CX25840,
.hw_audio_ctrl = IVTV_HW_CX25840,
.hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE2 },
{ IVTV_CARD_INPUT_SVIDEO1, 1,
CX25840_SVIDEO_LUMA3 | CX25840_SVIDEO_CHROMA4 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, CX25840_COMPOSITE3 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 },
{ IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL },
},
.radio_input = { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 },
.xceive_pin = 12,
.tuners = {
{ .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
},
.pci_list = ivtv_pci_club3d,
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* AVerTV MCE 116 Plus (M116) card */
static const struct ivtv_card_pci_info ivtv_pci_avertv_mce116[] = {
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_AVERMEDIA, 0xc439 },
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_avertv_mce116 = {
.type = IVTV_CARD_AVERTV_MCE116,
.name = "AVerTV MCE 116 Plus",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_CX25840,
.hw_audio = IVTV_HW_CX25840,
.hw_audio_ctrl = IVTV_HW_CX25840,
.hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER | IVTV_HW_WM8739 |
IVTV_HW_I2C_IR_RX_AVER,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE2 },
{ IVTV_CARD_INPUT_SVIDEO1, 1, CX25840_SVIDEO3 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, CX25840_COMPOSITE1 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 },
{ IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL, 1 },
},
.radio_input = { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 },
/* enable line-in */
.gpio_init = { .direction = 0xe000, .initial_value = 0x4000 },
.xceive_pin = 10,
.tuners = {
{ .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
},
.pci_list = ivtv_pci_avertv_mce116,
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* AVerMedia PVR-150 Plus / AVerTV M113 cards with a Daewoo/Partsnic Tuner */
static const struct ivtv_card_pci_info ivtv_pci_aver_pvr150[] = {
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_AVERMEDIA, 0xc034 }, /* NTSC */
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_AVERMEDIA, 0xc035 }, /* NTSC FM */
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_aver_pvr150 = {
.type = IVTV_CARD_AVER_PVR150PLUS,
.name = "AVerMedia PVR-150 Plus / AVerTV M113 Partsnic (Daewoo) Tuner",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_CX25840,
.hw_audio = IVTV_HW_CX25840,
.hw_audio_ctrl = IVTV_HW_CX25840,
.hw_muxer = IVTV_HW_GPIO,
.hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER |
IVTV_HW_WM8739 | IVTV_HW_GPIO,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE2 },
{ IVTV_CARD_INPUT_SVIDEO1, 1, CX25840_SVIDEO3 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, CX25840_COMPOSITE1 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5, 0 },
{ IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL, 1 },
},
.radio_input = { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO_SERIAL, 2 },
/* The 74HC4052 Dual 4:1 multiplexer is controlled by 2 GPIO lines */
.gpio_init = { .direction = 0xc000, .initial_value = 0 },
.gpio_audio_input = { .mask = 0xc000,
.tuner = 0x0000,
.linein = 0x4000,
.radio = 0x8000 },
.tuners = {
/* Subsystem IDs 0xc03[45] have a Partsnic PTI-5NF05 tuner */
{ .std = V4L2_STD_MN, .tuner = TUNER_PARTSNIC_PTI_5NF05 },
},
.pci_list = ivtv_pci_aver_pvr150,
/* Subsystem ID 0xc035 has a TEA5767(?) FM tuner, 0xc034 does not */
.i2c = &ivtv_i2c_radio,
};
/* ------------------------------------------------------------------------- */
/* AVerMedia UltraTV 1500 MCE (newer non-cx88 version, M113 variant) card */
static const struct ivtv_card_pci_info ivtv_pci_aver_ultra1500mce[] = {
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_AVERMEDIA, 0xc019 }, /* NTSC */
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_AVERMEDIA, 0xc01b }, /* PAL/SECAM */
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_aver_ultra1500mce = {
.type = IVTV_CARD_AVER_ULTRA1500MCE,
.name = "AVerMedia UltraTV 1500 MCE / AVerTV M113 Philips Tuner",
.comment = "For non-NTSC tuners, use the pal= or secam= module options",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_CX25840,
.hw_audio = IVTV_HW_CX25840,
.hw_audio_ctrl = IVTV_HW_CX25840,
.hw_muxer = IVTV_HW_GPIO,
.hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER |
IVTV_HW_WM8739 | IVTV_HW_GPIO,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE2 },
{ IVTV_CARD_INPUT_SVIDEO1, 1, CX25840_SVIDEO3 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, CX25840_COMPOSITE1 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5, 0 },
{ IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL, 1 },
},
.radio_input = { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO_SERIAL, 2 },
/* The 74HC4052 Dual 4:1 multiplexer is controlled by 2 GPIO lines */
.gpio_init = { .direction = 0xc000, .initial_value = 0 },
.gpio_audio_input = { .mask = 0xc000,
.tuner = 0x0000,
.linein = 0x4000,
.radio = 0x8000 },
.tuners = {
/* The UltraTV 1500 MCE has a Philips FM1236 MK5 TV/FM tuner */
{ .std = V4L2_STD_MN, .tuner = TUNER_PHILIPS_FM1236_MK3 },
{ .std = V4L2_STD_PAL_SECAM, .tuner = TUNER_PHILIPS_FM1216MK5 },
},
.pci_list = ivtv_pci_aver_ultra1500mce,
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* AVerMedia EZMaker PCI Deluxe card */
static const struct ivtv_card_pci_info ivtv_pci_aver_ezmaker[] = {
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_AVERMEDIA, 0xc03f },
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_aver_ezmaker = {
.type = IVTV_CARD_AVER_EZMAKER,
.name = "AVerMedia EZMaker PCI Deluxe",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_CX25840,
.hw_audio = IVTV_HW_CX25840,
.hw_audio_ctrl = IVTV_HW_CX25840,
.hw_all = IVTV_HW_CX25840 | IVTV_HW_WM8739,
.video_inputs = {
{ IVTV_CARD_INPUT_SVIDEO1, 0, CX25840_SVIDEO3 },
{ IVTV_CARD_INPUT_COMPOSITE1, 0, CX25840_COMPOSITE1 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL, 0 },
},
.gpio_init = { .direction = 0x4000, .initial_value = 0x4000 },
/* Does not have a tuner */
.pci_list = ivtv_pci_aver_ezmaker,
};
/* ------------------------------------------------------------------------- */
/* ASUS Falcon2 */
static const struct ivtv_card_pci_info ivtv_pci_asus_falcon2[] = {
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_ASUSTEK, 0x4b66 },
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_ASUSTEK, 0x462e },
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_ASUSTEK, 0x4b2e },
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_asus_falcon2 = {
.type = IVTV_CARD_ASUS_FALCON2,
.name = "ASUS Falcon2",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_CX25840,
.hw_audio = IVTV_HW_CX25840,
.hw_audio_ctrl = IVTV_HW_CX25840,
.hw_muxer = IVTV_HW_M52790,
.hw_all = IVTV_HW_CX25840 | IVTV_HW_M52790 | IVTV_HW_TUNER,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE2 },
{ IVTV_CARD_INPUT_SVIDEO1, 1, CX25840_SVIDEO3 },
{ IVTV_CARD_INPUT_COMPOSITE1, 2, CX25840_COMPOSITE2 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5, M52790_IN_TUNER },
{ IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL,
M52790_IN_V2 | M52790_SW1_YCMIX | M52790_SW2_YCMIX },
{ IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL, M52790_IN_V2 },
},
.radio_input = { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO_SERIAL, M52790_IN_TUNER },
.tuners = {
{ .std = V4L2_STD_MN, .tuner = TUNER_PHILIPS_FM1236_MK3 },
},
.pci_list = ivtv_pci_asus_falcon2,
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* AVerMedia M104 miniPCI card */
static const struct ivtv_card_pci_info ivtv_pci_aver_m104[] = {
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_AVERMEDIA, 0xc136 },
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_aver_m104 = {
.type = IVTV_CARD_AVER_M104,
.name = "AVerMedia M104",
.comment = "Not yet supported!\n",
.v4l2_capabilities = 0, /*IVTV_CAP_ENCODER,*/
.hw_video = IVTV_HW_CX25840,
.hw_audio = IVTV_HW_CX25840,
.hw_audio_ctrl = IVTV_HW_CX25840,
.hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER | IVTV_HW_WM8739,
.video_inputs = {
{ IVTV_CARD_INPUT_SVIDEO1, 0, CX25840_SVIDEO3 },
{ IVTV_CARD_INPUT_COMPOSITE1, 0, CX25840_COMPOSITE1 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL, 1 },
},
.radio_input = { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO_SERIAL, 2 },
/* enable line-in + reset tuner */
.gpio_init = { .direction = 0xe000, .initial_value = 0x4000 },
.xceive_pin = 10,
.tuners = {
{ .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
},
.pci_list = ivtv_pci_aver_m104,
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* Buffalo PC-MV5L/PCI cards */
static const struct ivtv_card_pci_info ivtv_pci_buffalo[] = {
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_MELCO, 0x052b },
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_buffalo = {
.type = IVTV_CARD_BUFFALO_MV5L,
.name = "Buffalo PC-MV5L/PCI",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_CX25840,
.hw_audio = IVTV_HW_CX25840,
.hw_audio_ctrl = IVTV_HW_CX25840,
.hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE2 },
{ IVTV_CARD_INPUT_SVIDEO1, 1,
CX25840_SVIDEO_LUMA3 | CX25840_SVIDEO_CHROMA4 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, CX25840_COMPOSITE1 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 },
{ IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL },
},
.xceive_pin = 12,
.tuners = {
{ .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
},
.pci_list = ivtv_pci_buffalo,
.i2c = &ivtv_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* Sony Kikyou */
static const struct ivtv_card_pci_info ivtv_pci_kikyou[] = {
{ PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_SONY, 0x813d },
{ 0, 0, 0 }
};
static const struct ivtv_card ivtv_card_kikyou = {
.type = IVTV_CARD_KIKYOU,
.name = "Sony VAIO Giga Pocket (ENX Kikyou)",
.v4l2_capabilities = IVTV_CAP_ENCODER,
.hw_video = IVTV_HW_SAA7115,
.hw_audio = IVTV_HW_GPIO,
.hw_audio_ctrl = IVTV_HW_GPIO,
.hw_all = IVTV_HW_GPIO | IVTV_HW_SAA7115 | IVTV_HW_TUNER,
.video_inputs = {
{ IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE1 },
{ IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE1 },
{ IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO1 },
},
.audio_inputs = {
{ IVTV_CARD_INPUT_AUD_TUNER, IVTV_GPIO_TUNER },
{ IVTV_CARD_INPUT_LINE_IN1, IVTV_GPIO_LINE_IN },
{ IVTV_CARD_INPUT_LINE_IN2, IVTV_GPIO_LINE_IN },
},
.gpio_init = { .direction = 0x03e1, .initial_value = 0x0320 },
.gpio_audio_input = { .mask = 0x0060,
.tuner = 0x0020,
.linein = 0x0000,
.radio = 0x0060 },
.gpio_audio_mute = { .mask = 0x0000,
.mute = 0x0000 }, /* 0x200? Disable for now. */
.gpio_audio_mode = { .mask = 0x0080,
.mono = 0x0000,
.stereo = 0x0000, /* SAP */
.lang1 = 0x0080,
.lang2 = 0x0000,
.both = 0x0080 },
.tuners = {
{ .std = V4L2_STD_ALL, .tuner = TUNER_SONY_BTF_PXN01Z },
},
.pci_list = ivtv_pci_kikyou,
.i2c = &ivtv_i2c_std,
};
static const struct ivtv_card *ivtv_card_list[] = {
&ivtv_card_pvr250,
&ivtv_card_pvr350,
&ivtv_card_pvr150,
&ivtv_card_m179,
&ivtv_card_mpg600,
&ivtv_card_mpg160,
&ivtv_card_pg600,
&ivtv_card_avc2410,
&ivtv_card_avc2010,
&ivtv_card_tg5000tv,
&ivtv_card_va2000,
&ivtv_card_cx23416gyc,
&ivtv_card_gv_mvprx,
&ivtv_card_gv_mvprx2e,
&ivtv_card_gotview_pci_dvd,
&ivtv_card_gotview_pci_dvd2,
&ivtv_card_yuan_mpc622,
&ivtv_card_dctmvtvp1,
&ivtv_card_pg600v2,
&ivtv_card_club3d,
&ivtv_card_avertv_mce116,
&ivtv_card_asus_falcon2,
&ivtv_card_aver_pvr150,
&ivtv_card_aver_ezmaker,
&ivtv_card_aver_m104,
&ivtv_card_buffalo,
&ivtv_card_aver_ultra1500mce,
&ivtv_card_kikyou,
/* Variations of standard cards but with the same PCI IDs.
These cards must come last in this list. */
&ivtv_card_pvr350_v1,
&ivtv_card_cx23416gyc_nogr,
&ivtv_card_cx23416gyc_nogrycs,
};
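/*
 * Editor's note: an illustrative sketch, not part of the driver. It shows
 * how a probe routine could match a device's PCI subsystem IDs against a
 * card's pci_list (terminated by the { 0, 0, 0 } entries above). The helper
 * name and the ivtv_card_pci_info field names used below are assumptions;
 * the real lookup lives elsewhere in the driver.
 */
#if 0
static bool ivtv_example_card_matches(const struct ivtv_card *card,
				      u16 device, u16 subvendor, u16 subdevice)
{
	const struct ivtv_card_pci_info *info;

	if (!card->pci_list)
		return false;	/* e.g. the "no GR" variants carry no list */
	for (info = card->pci_list; info->device; info++) {
		if (info->device == device &&
		    info->subsystem_vendor == subvendor &&	/* assumed field name */
		    info->subsystem_device == subdevice)	/* assumed field name */
			return true;
	}
	return false;
}
#endif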
const struct ivtv_card *ivtv_get_card(u16 index)
{
if (index >= ARRAY_SIZE(ivtv_card_list))
return NULL;
return ivtv_card_list[index];
}
int ivtv_get_input(struct ivtv *itv, u16 index, struct v4l2_input *input)
{
const struct ivtv_card_video_input *card_input = itv->card->video_inputs + index;
static const char * const input_strs[] = {
"Tuner 1",
"S-Video 1",
"S-Video 2",
"Composite 1",
"Composite 2",
"Composite 3"
};
if (index >= itv->nof_inputs)
return -EINVAL;
input->index = index;
strscpy(input->name, input_strs[card_input->video_type - 1],
sizeof(input->name));
input->type = (card_input->video_type == IVTV_CARD_INPUT_VID_TUNER ?
V4L2_INPUT_TYPE_TUNER : V4L2_INPUT_TYPE_CAMERA);
input->audioset = (1 << itv->nof_audio_inputs) - 1;
input->std = (input->type == V4L2_INPUT_TYPE_TUNER) ?
itv->tuner_std : V4L2_STD_ALL;
return 0;
}
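/*
 * Editor's note: a minimal usage sketch, not part of the driver. A
 * VIDIOC_ENUMINPUT-style handler would call ivtv_get_input() roughly like
 * this; the surrounding ioctl plumbing is assumed (the real handlers live
 * in ivtv-ioctl.c, which is not shown here).
 */
#if 0
static int ivtv_example_enum_input(struct ivtv *itv, struct v4l2_input *vin)
{
	/* Fills name/type/audioset/std, or returns -EINVAL for a bad index. */
	return ivtv_get_input(itv, vin->index, vin);
}
#endif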
int ivtv_get_output(struct ivtv *itv, u16 index, struct v4l2_output *output)
{
const struct ivtv_card_output *card_output = itv->card->video_outputs + index;
if (index >= itv->card->nof_outputs)
return -EINVAL;
output->index = index;
strscpy(output->name, card_output->name, sizeof(output->name));
output->type = V4L2_OUTPUT_TYPE_ANALOG;
output->audioset = 1;
output->std = V4L2_STD_ALL;
return 0;
}
int ivtv_get_audio_input(struct ivtv *itv, u16 index, struct v4l2_audio *audio)
{
const struct ivtv_card_audio_input *aud_input = itv->card->audio_inputs + index;
static const char * const input_strs[] = {
"Tuner 1",
"Line In 1",
"Line In 2"
};
memset(audio, 0, sizeof(*audio));
if (index >= itv->nof_audio_inputs)
return -EINVAL;
strscpy(audio->name, input_strs[aud_input->audio_type - 1],
sizeof(audio->name));
audio->index = index;
audio->capability = V4L2_AUDCAP_STEREO;
return 0;
}
int ivtv_get_audio_output(struct ivtv *itv, u16 index, struct v4l2_audioout *aud_output)
{
memset(aud_output, 0, sizeof(*aud_output));
if (itv->card->video_outputs == NULL || index != 0)
return -EINVAL;
strscpy(aud_output->name, "A/V Audio Out", sizeof(aud_output->name));
return 0;
}
| linux-master | drivers/media/pci/ivtv/ivtv-cards.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
gpio functions.
Merging GPIO support into driver:
Copyright (C) 2004 Chris Kennedy <[email protected]>
Copyright (C) 2005-2007 Hans Verkuil <[email protected]>
*/
#include "ivtv-driver.h"
#include "ivtv-cards.h"
#include "ivtv-gpio.h"
#include "xc2028.h"
#include <media/tuner.h>
#include <media/v4l2-ctrls.h>
/*
* GPIO assignment of Yuan MPG600/MPG160
*
* bit 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0
* OUTPUT IN1 IN0 AM3 AM2 AM1 AM0
* INPUT DM1 DM0
*
* IN* : Input selection
* IN1 IN0
* 1 1 N/A
* 1 0 Line
* 0 1 N/A
* 0 0 Tuner
*
* AM* : Audio Mode
* AM3 0: Normal 1: Mixed(Sub+Main channel)
* AM2 0: Subchannel 1: Main channel
* AM1 0: Stereo 1: Mono
* AM0 0: Normal 1: Mute
*
* DM* : Detected tuner audio Mode
* DM1 0: Stereo 1: Mono
* DM0 0: Multiplex 1: Normal
*
* GPIO Initial Settings
* MPG600 MPG160
* DIR 0x3080 0x7080
* OUTPUT 0x000C 0x400C
*
* Special thanks to Makoto Iguchi <[email protected]> and Mr. Anonymous
* for analyzing GPIO of MPG160.
*
*****************************************************************************
*
* GPIO assignment of Avermedia M179 (per information direct from AVerMedia)
*
* bit 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0
* OUTPUT IN0 AM0 IN1 AM1 AM2 IN2 BR0 BR1
* INPUT
*
* IN* : Input selection
* IN0 IN1 IN2
* * 1 * Mute
* 0 0 0 Line-In
* 1 0 0 TV Tuner Audio
* 0 0 1 FM Audio
* 1 0 1 Mute
*
* AM* : Audio Mode
* AM0 AM1 AM2
* 0 0 0 TV Tuner Audio: L_OUT=(L+R)/2, R_OUT=SAP
* 0 0 1 TV Tuner Audio: L_OUT=R_OUT=SAP (SAP)
* 0 1 0 TV Tuner Audio: L_OUT=L, R_OUT=R (stereo)
* 0 1 1 TV Tuner Audio: mute
* 1 * * TV Tuner Audio: L_OUT=R_OUT=(L+R)/2 (mono)
*
* BR* : Audio Sample Rate (BR stands for bitrate for some reason)
* BR0 BR1
* 0 0 32 kHz
* 0 1 44.1 kHz
* 1 0 48 kHz
*
* DM* : Detected tuner audio Mode
* Unknown currently
*
* Special thanks to AVerMedia Technologies, Inc. and Jiun-Kuei Jung at
* AVerMedia for providing the GPIO information used to add support
* for the M179 cards.
*/
/********************* GPIO stuffs *********************/
/* GPIO registers */
#define IVTV_REG_GPIO_IN 0x9008
#define IVTV_REG_GPIO_OUT 0x900c
#define IVTV_REG_GPIO_DIR 0x9020
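/*
 * Editor's note: an illustrative sketch, not part of the driver, tying the
 * "GPIO Initial Settings" documented in the comment block above (MPG600:
 * DIR 0x3080, OUTPUT 0x000C) to these registers. write_reg() is the
 * driver's MMIO helper; the data-before-direction ordering mirrors
 * ivtv_gpio_init() further down.
 */
#if 0
static void ivtv_example_mpg600_gpio_defaults(struct ivtv *itv)
{
	write_reg(0x000C, IVTV_REG_GPIO_OUT);	/* initial output data */
	write_reg(0x3080, IVTV_REG_GPIO_DIR);	/* then pin directions */
}
#endif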
void ivtv_reset_ir_gpio(struct ivtv *itv)
{
int curdir, curout;
if (itv->card->type != IVTV_CARD_PVR_150)
return;
IVTV_DEBUG_INFO("Resetting PVR150 IR\n");
curout = read_reg(IVTV_REG_GPIO_OUT);
curdir = read_reg(IVTV_REG_GPIO_DIR);
curdir |= 0x80;
write_reg(curdir, IVTV_REG_GPIO_DIR);
curout = (curout & ~0xF) | 1;
write_reg(curout, IVTV_REG_GPIO_OUT);
/* We could use a shorter delay here */
schedule_timeout_interruptible(msecs_to_jiffies(1));
curout |= 2;
write_reg(curout, IVTV_REG_GPIO_OUT);
curdir &= ~0x80;
write_reg(curdir, IVTV_REG_GPIO_DIR);
}
/* Xceive tuner reset function */
int ivtv_reset_tuner_gpio(void *dev, int component, int cmd, int value)
{
struct i2c_algo_bit_data *algo = dev;
struct ivtv *itv = algo->data;
u32 curout;
if (cmd != XC2028_TUNER_RESET)
return 0;
IVTV_DEBUG_INFO("Resetting tuner\n");
curout = read_reg(IVTV_REG_GPIO_OUT);
curout &= ~(1 << itv->card->xceive_pin);
write_reg(curout, IVTV_REG_GPIO_OUT);
schedule_timeout_interruptible(msecs_to_jiffies(1));
curout |= 1 << itv->card->xceive_pin;
write_reg(curout, IVTV_REG_GPIO_OUT);
schedule_timeout_interruptible(msecs_to_jiffies(1));
return 0;
}
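/*
 * Editor's note: a small worked example of the pin arithmetic above, not
 * part of the driver. Several CX25840-based cards in ivtv-cards.c declare
 * .xceive_pin = 12, so for them this routine pulses bit 12 of
 * IVTV_REG_GPIO_OUT low for roughly a millisecond and then high again.
 */
#if 0
u32 example_xceive_mask = 1 << 12;	/* 0x1000: the bit toggled above */
#endif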
static inline struct ivtv *sd_to_ivtv(struct v4l2_subdev *sd)
{
return container_of(sd, struct ivtv, sd_gpio);
}
static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
{
return &container_of(ctrl->handler, struct ivtv, hdl_gpio)->sd_gpio;
}
static int subdev_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
{
struct ivtv *itv = sd_to_ivtv(sd);
u16 mask, data;
mask = itv->card->gpio_audio_freq.mask;
switch (freq) {
case 32000:
data = itv->card->gpio_audio_freq.f32000;
break;
case 44100:
data = itv->card->gpio_audio_freq.f44100;
break;
case 48000:
default:
data = itv->card->gpio_audio_freq.f48000;
break;
}
if (mask)
write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask), IVTV_REG_GPIO_OUT);
return 0;
}
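/*
 * Editor's note: not part of the driver. The masked read-modify-write used
 * here (and repeated in the routines below) only rewrites the bits covered
 * by the card's mask. Worked example with the CX23416GYC values from
 * ivtv-cards.c (gpio_init 0x8800, gpio_audio_freq mask 0xc000, f44100
 * 0x4000): (0x8800 & ~0xc000) | (0x4000 & 0xc000) = 0x0800 | 0x4000 =
 * 0x4800, i.e. bits 15:14 are rewritten and everything else is preserved.
 */
#if 0
static void ivtv_example_gpio_update(struct ivtv *itv, u16 mask, u16 data)
{
	if (mask)
		write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask),
			  IVTV_REG_GPIO_OUT);
}
#endif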
static int subdev_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
struct ivtv *itv = sd_to_ivtv(sd);
u16 mask;
mask = itv->card->gpio_audio_detect.mask;
if (mask == 0 || (read_reg(IVTV_REG_GPIO_IN) & mask))
vt->rxsubchans = V4L2_TUNER_SUB_STEREO |
V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
else
vt->rxsubchans = V4L2_TUNER_SUB_MONO;
return 0;
}
static int subdev_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *vt)
{
struct ivtv *itv = sd_to_ivtv(sd);
u16 mask, data;
mask = itv->card->gpio_audio_mode.mask;
switch (vt->audmode) {
case V4L2_TUNER_MODE_LANG1:
data = itv->card->gpio_audio_mode.lang1;
break;
case V4L2_TUNER_MODE_LANG2:
data = itv->card->gpio_audio_mode.lang2;
break;
case V4L2_TUNER_MODE_MONO:
data = itv->card->gpio_audio_mode.mono;
break;
case V4L2_TUNER_MODE_STEREO:
case V4L2_TUNER_MODE_LANG1_LANG2:
default:
data = itv->card->gpio_audio_mode.stereo;
break;
}
if (mask)
write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask), IVTV_REG_GPIO_OUT);
return 0;
}
static int subdev_s_radio(struct v4l2_subdev *sd)
{
struct ivtv *itv = sd_to_ivtv(sd);
u16 mask, data;
mask = itv->card->gpio_audio_input.mask;
data = itv->card->gpio_audio_input.radio;
if (mask)
write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask), IVTV_REG_GPIO_OUT);
return 0;
}
static int subdev_s_audio_routing(struct v4l2_subdev *sd,
u32 input, u32 output, u32 config)
{
struct ivtv *itv = sd_to_ivtv(sd);
u16 mask, data;
if (input > 2)
return -EINVAL;
mask = itv->card->gpio_audio_input.mask;
switch (input) {
case 0:
data = itv->card->gpio_audio_input.tuner;
break;
case 1:
data = itv->card->gpio_audio_input.linein;
break;
case 2:
default:
data = itv->card->gpio_audio_input.radio;
break;
}
if (mask)
write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask), IVTV_REG_GPIO_OUT);
return 0;
}
static int subdev_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_sd(ctrl);
struct ivtv *itv = sd_to_ivtv(sd);
u16 mask, data;
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
mask = itv->card->gpio_audio_mute.mask;
data = ctrl->val ? itv->card->gpio_audio_mute.mute : 0;
if (mask)
write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) |
(data & mask), IVTV_REG_GPIO_OUT);
return 0;
}
return -EINVAL;
}
static int subdev_log_status(struct v4l2_subdev *sd)
{
struct ivtv *itv = sd_to_ivtv(sd);
IVTV_INFO("GPIO status: DIR=0x%04x OUT=0x%04x IN=0x%04x\n",
read_reg(IVTV_REG_GPIO_DIR), read_reg(IVTV_REG_GPIO_OUT),
read_reg(IVTV_REG_GPIO_IN));
v4l2_ctrl_handler_log_status(&itv->hdl_gpio, sd->name);
return 0;
}
static int subdev_s_video_routing(struct v4l2_subdev *sd,
u32 input, u32 output, u32 config)
{
struct ivtv *itv = sd_to_ivtv(sd);
u16 mask, data;
if (input > 2) /* 0:Tuner 1:Composite 2:S-Video */
return -EINVAL;
mask = itv->card->gpio_video_input.mask;
if (input == 0)
data = itv->card->gpio_video_input.tuner;
else if (input == 1)
data = itv->card->gpio_video_input.composite;
else
data = itv->card->gpio_video_input.svideo;
if (mask)
write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask), IVTV_REG_GPIO_OUT);
return 0;
}
static const struct v4l2_ctrl_ops gpio_ctrl_ops = {
.s_ctrl = subdev_s_ctrl,
};
static const struct v4l2_subdev_core_ops subdev_core_ops = {
.log_status = subdev_log_status,
};
static const struct v4l2_subdev_tuner_ops subdev_tuner_ops = {
.s_radio = subdev_s_radio,
.g_tuner = subdev_g_tuner,
.s_tuner = subdev_s_tuner,
};
static const struct v4l2_subdev_audio_ops subdev_audio_ops = {
.s_clock_freq = subdev_s_clock_freq,
.s_routing = subdev_s_audio_routing,
};
static const struct v4l2_subdev_video_ops subdev_video_ops = {
.s_routing = subdev_s_video_routing,
};
static const struct v4l2_subdev_ops subdev_ops = {
.core = &subdev_core_ops,
.tuner = &subdev_tuner_ops,
.audio = &subdev_audio_ops,
.video = &subdev_video_ops,
};
int ivtv_gpio_init(struct ivtv *itv)
{
u16 pin = 0;
if (itv->card->xceive_pin)
pin = 1 << itv->card->xceive_pin;
if ((itv->card->gpio_init.direction | pin) == 0)
return 0;
IVTV_DEBUG_INFO("GPIO initial dir: %08x out: %08x\n",
read_reg(IVTV_REG_GPIO_DIR), read_reg(IVTV_REG_GPIO_OUT));
/* init output data then direction */
write_reg(itv->card->gpio_init.initial_value | pin, IVTV_REG_GPIO_OUT);
write_reg(itv->card->gpio_init.direction | pin, IVTV_REG_GPIO_DIR);
v4l2_subdev_init(&itv->sd_gpio, &subdev_ops);
snprintf(itv->sd_gpio.name, sizeof(itv->sd_gpio.name), "%s-gpio", itv->v4l2_dev.name);
itv->sd_gpio.grp_id = IVTV_HW_GPIO;
v4l2_ctrl_handler_init(&itv->hdl_gpio, 1);
v4l2_ctrl_new_std(&itv->hdl_gpio, &gpio_ctrl_ops,
V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
if (itv->hdl_gpio.error)
return itv->hdl_gpio.error;
itv->sd_gpio.ctrl_handler = &itv->hdl_gpio;
v4l2_ctrl_handler_setup(&itv->hdl_gpio);
return v4l2_device_register_subdev(&itv->v4l2_dev, &itv->sd_gpio);
}
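/*
 * Editor's note: a hedged usage sketch, not part of the driver. Once
 * registered, this subdev is normally reached through the driver's
 * ivtv_call_hw() dispatch helper (used in ivtv-vbi.c for IVTV_HW_SAA7127);
 * the real IVTV_HW_GPIO call sites are elsewhere in the driver (e.g. the
 * routing code), so the arguments below are an assumption.
 */
#if 0
/* Select the line-in audio input (index 1); s_routing takes (input, output, config). */
ivtv_call_hw(itv, IVTV_HW_GPIO, audio, s_routing, 1, 0, 0);
#endif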
| linux-master | drivers/media/pci/ivtv/ivtv-gpio.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
Vertical Blank Interval support functions
Copyright (C) 2004-2007 Hans Verkuil <[email protected]>
*/
#include "ivtv-driver.h"
#include "ivtv-i2c.h"
#include "ivtv-ioctl.h"
#include "ivtv-queue.h"
#include "ivtv-cards.h"
#include "ivtv-vbi.h"
static void ivtv_set_vps(struct ivtv *itv, int enabled)
{
struct v4l2_sliced_vbi_data data;
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return;
data.id = V4L2_SLICED_VPS;
data.field = 0;
data.line = enabled ? 16 : 0;
data.data[2] = itv->vbi.vps_payload.data[0];
data.data[8] = itv->vbi.vps_payload.data[1];
data.data[9] = itv->vbi.vps_payload.data[2];
data.data[10] = itv->vbi.vps_payload.data[3];
data.data[11] = itv->vbi.vps_payload.data[4];
ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
}
static void ivtv_set_cc(struct ivtv *itv, int mode, const struct vbi_cc *cc)
{
struct v4l2_sliced_vbi_data data;
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return;
data.id = V4L2_SLICED_CAPTION_525;
data.field = 0;
data.line = (mode & 1) ? 21 : 0;
data.data[0] = cc->odd[0];
data.data[1] = cc->odd[1];
ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
data.field = 1;
data.line = (mode & 2) ? 21 : 0;
data.data[0] = cc->even[0];
data.data[1] = cc->even[1];
ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
}
static void ivtv_set_wss(struct ivtv *itv, int enabled, int mode)
{
struct v4l2_sliced_vbi_data data;
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return;
/* When using a 50 Hz system, always turn on the
wide screen signal with 4x3 ratio as the default.
Turning this signal on and off can confuse certain
TVs. As far as I can tell there is no reason not to
transmit this signal. */
if ((itv->std_out & V4L2_STD_625_50) && !enabled) {
enabled = 1;
mode = 0x08; /* 4x3 full format */
}
data.id = V4L2_SLICED_WSS_625;
data.field = 0;
data.line = enabled ? 23 : 0;
data.data[0] = mode & 0xff;
data.data[1] = (mode >> 8) & 0xff;
ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
}
static int odd_parity(u8 c)
{
c ^= (c >> 4);
c ^= (c >> 2);
c ^= (c >> 1);
return c & 1;
}
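/*
 * Editor's note: a quick worked check of the xor-fold above, not part of
 * the driver. For c = 0x80 the folds give 0x88, 0xaa, 0xff and the result
 * is 1 (one bit set, odd parity); for c = 0xff they give 0xf0, 0xcc, 0xaa
 * and the result is 0 (eight bits set, even parity).
 */
#if 0
WARN_ON(odd_parity(0x80) != 1);
WARN_ON(odd_parity(0xff) != 0);
#endif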
static void ivtv_write_vbi_line(struct ivtv *itv,
const struct v4l2_sliced_vbi_data *d,
struct vbi_cc *cc, int *found_cc)
{
struct vbi_info *vi = &itv->vbi;
if (d->id == V4L2_SLICED_CAPTION_525 && d->line == 21) {
if (d->field) {
cc->even[0] = d->data[0];
cc->even[1] = d->data[1];
} else {
cc->odd[0] = d->data[0];
cc->odd[1] = d->data[1];
}
*found_cc = 1;
} else if (d->id == V4L2_SLICED_VPS && d->line == 16 && d->field == 0) {
struct vbi_vps vps;
vps.data[0] = d->data[2];
vps.data[1] = d->data[8];
vps.data[2] = d->data[9];
vps.data[3] = d->data[10];
vps.data[4] = d->data[11];
if (memcmp(&vps, &vi->vps_payload, sizeof(vps))) {
vi->vps_payload = vps;
set_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags);
}
} else if (d->id == V4L2_SLICED_WSS_625 &&
d->line == 23 && d->field == 0) {
int wss = d->data[0] | d->data[1] << 8;
if (vi->wss_payload != wss) {
vi->wss_payload = wss;
set_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags);
}
}
}
static void ivtv_write_vbi_cc_lines(struct ivtv *itv, const struct vbi_cc *cc)
{
struct vbi_info *vi = &itv->vbi;
if (vi->cc_payload_idx < ARRAY_SIZE(vi->cc_payload)) {
memcpy(&vi->cc_payload[vi->cc_payload_idx], cc,
sizeof(struct vbi_cc));
vi->cc_payload_idx++;
set_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags);
}
}
static void ivtv_write_vbi(struct ivtv *itv,
const struct v4l2_sliced_vbi_data *sliced,
size_t cnt)
{
struct vbi_cc cc = { .odd = { 0x80, 0x80 }, .even = { 0x80, 0x80 } };
int found_cc = 0;
size_t i;
for (i = 0; i < cnt; i++)
ivtv_write_vbi_line(itv, sliced + i, &cc, &found_cc);
if (found_cc)
ivtv_write_vbi_cc_lines(itv, &cc);
}
ssize_t
ivtv_write_vbi_from_user(struct ivtv *itv,
const struct v4l2_sliced_vbi_data __user *sliced,
size_t cnt)
{
struct vbi_cc cc = { .odd = { 0x80, 0x80 }, .even = { 0x80, 0x80 } };
int found_cc = 0;
size_t i;
struct v4l2_sliced_vbi_data d;
ssize_t ret = cnt * sizeof(struct v4l2_sliced_vbi_data);
for (i = 0; i < cnt; i++) {
if (copy_from_user(&d, sliced + i,
sizeof(struct v4l2_sliced_vbi_data))) {
ret = -EFAULT;
break;
}
ivtv_write_vbi_line(itv, &d, &cc, &found_cc);
}
if (found_cc)
ivtv_write_vbi_cc_lines(itv, &cc);
return ret;
}
static void copy_vbi_data(struct ivtv *itv, int lines, u32 pts_stamp)
{
int line = 0;
int i;
u32 linemask[2] = { 0, 0 };
unsigned short size;
static const u8 mpeg_hdr_data[] = {
0x00, 0x00, 0x01, 0xba, 0x44, 0x00, 0x0c, 0x66,
0x24, 0x01, 0x01, 0xd1, 0xd3, 0xfa, 0xff, 0xff,
0x00, 0x00, 0x01, 0xbd, 0x00, 0x1a, 0x84, 0x80,
0x07, 0x21, 0x00, 0x5d, 0x63, 0xa7, 0xff, 0xff
};
const int sd = sizeof(mpeg_hdr_data); /* start of vbi data */
int idx = itv->vbi.frame % IVTV_VBI_FRAMES;
u8 *dst = &itv->vbi.sliced_mpeg_data[idx][0];
for (i = 0; i < lines; i++) {
int f, l;
if (itv->vbi.sliced_data[i].id == 0)
continue;
l = itv->vbi.sliced_data[i].line - 6;
f = itv->vbi.sliced_data[i].field;
if (f)
l += 18;
if (l < 32)
linemask[0] |= (1 << l);
else
linemask[1] |= (1 << (l - 32));
dst[sd + 12 + line * 43] =
ivtv_service2vbi(itv->vbi.sliced_data[i].id);
memcpy(dst + sd + 12 + line * 43 + 1, itv->vbi.sliced_data[i].data, 42);
line++;
}
memcpy(dst, mpeg_hdr_data, sizeof(mpeg_hdr_data));
if (line == 36) {
/* All lines are used, so there is no space for the linemask
(the max size of the VBI data is 36 * 43 + 4 bytes).
So in this case we use the magic number 'ITV0'. */
memcpy(dst + sd, "ITV0", 4);
memmove(dst + sd + 4, dst + sd + 12, line * 43);
size = 4 + ((43 * line + 3) & ~3);
} else {
memcpy(dst + sd, "itv0", 4);
cpu_to_le32s(&linemask[0]);
cpu_to_le32s(&linemask[1]);
memcpy(dst + sd + 4, &linemask[0], 8);
size = 12 + ((43 * line + 3) & ~3);
}
dst[4+16] = (size + 10) >> 8;
dst[5+16] = (size + 10) & 0xff;
dst[9+16] = 0x21 | ((pts_stamp >> 29) & 0x6);
dst[10+16] = (pts_stamp >> 22) & 0xff;
dst[11+16] = 1 | ((pts_stamp >> 14) & 0xff);
dst[12+16] = (pts_stamp >> 7) & 0xff;
dst[13+16] = 1 | ((pts_stamp & 0x7f) << 1);
itv->vbi.sliced_mpeg_size[idx] = sd + size;
}
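/*
 * Editor's note: an illustrative sketch of the private-stream payload that
 * copy_vbi_data() builds, reconstructed from the code above. The struct is
 * purely for illustration and is not a type the driver defines; in the real
 * packet the linemask words are omitted for the "ITV0" (all 36 lines) case
 * and only the lines actually stored are present.
 */
#if 0
struct ivtv_example_sliced_vbi_pkt {
	u8 pes_header[32];	/* mpeg_hdr_data with size and PTS patched in */
	u8 magic[4];		/* "itv0" (with linemask) or "ITV0" (36 lines) */
	__le32 linemask[2];	/* which VBI lines follow; "itv0" only */
	struct {
		u8 id;		/* ivtv_service2vbi() service code */
		u8 data[42];	/* sliced payload for this line */
	} line[36];		/* only the used entries are written */
};
#endif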
static int ivtv_convert_ivtv_vbi(struct ivtv *itv, u8 *p)
{
u32 linemask[2];
int i, l, id2;
int line = 0;
if (!memcmp(p, "itv0", 4)) {
memcpy(linemask, p + 4, 8);
p += 12;
} else if (!memcmp(p, "ITV0", 4)) {
linemask[0] = 0xffffffff;
linemask[1] = 0xf;
p += 4;
} else {
/* unknown VBI data, convert to empty VBI frame */
linemask[0] = linemask[1] = 0;
}
for (i = 0; i < 36; i++) {
int err = 0;
if (i < 32 && !(linemask[0] & (1 << i)))
continue;
if (i >= 32 && !(linemask[1] & (1 << (i - 32))))
continue;
id2 = *p & 0xf;
switch (id2) {
case IVTV_SLICED_TYPE_TELETEXT_B:
id2 = V4L2_SLICED_TELETEXT_B;
break;
case IVTV_SLICED_TYPE_CAPTION_525:
id2 = V4L2_SLICED_CAPTION_525;
err = !odd_parity(p[1]) || !odd_parity(p[2]);
break;
case IVTV_SLICED_TYPE_VPS:
id2 = V4L2_SLICED_VPS;
break;
case IVTV_SLICED_TYPE_WSS_625:
id2 = V4L2_SLICED_WSS_625;
break;
default:
id2 = 0;
break;
}
if (err == 0) {
l = (i < 18) ? i + 6 : i - 18 + 6;
itv->vbi.sliced_dec_data[line].line = l;
itv->vbi.sliced_dec_data[line].field = i >= 18;
itv->vbi.sliced_dec_data[line].id = id2;
memcpy(itv->vbi.sliced_dec_data[line].data, p + 1, 42);
line++;
}
p += 43;
}
while (line < 36) {
itv->vbi.sliced_dec_data[line].id = 0;
itv->vbi.sliced_dec_data[line].line = 0;
itv->vbi.sliced_dec_data[line].field = 0;
line++;
}
return line * sizeof(itv->vbi.sliced_dec_data[0]);
}
/* Compress raw VBI format, removes leading SAV codes and surplus space after the
field.
Returns new compressed size. */
static u32 compress_raw_buf(struct ivtv *itv, u8 *buf, u32 size)
{
u32 line_size = itv->vbi.raw_decoder_line_size;
u32 lines = itv->vbi.count;
u8 sav1 = itv->vbi.raw_decoder_sav_odd_field;
u8 sav2 = itv->vbi.raw_decoder_sav_even_field;
u8 *q = buf;
u8 *p;
int i;
for (i = 0; i < lines; i++) {
p = buf + i * line_size;
/* Look for SAV code */
if (p[0] != 0xff || p[1] || p[2] || (p[3] != sav1 && p[3] != sav2)) {
break;
}
memcpy(q, p + 4, line_size - 4);
q += line_size - 4;
}
return lines * (line_size - 4);
}
/* Compressed VBI format, all found sliced blocks put next to one another
Returns new compressed size */
static u32 compress_sliced_buf(struct ivtv *itv, u32 line, u8 *buf, u32 size, u8 sav)
{
u32 line_size = itv->vbi.sliced_decoder_line_size;
struct v4l2_decode_vbi_line vbi = {};
int i;
unsigned lines = 0;
/* find the first valid line */
for (i = 0; i < size; i++, buf++) {
if (buf[0] == 0xff && !buf[1] && !buf[2] && buf[3] == sav)
break;
}
size -= i;
if (size < line_size) {
return line;
}
for (i = 0; i < size / line_size; i++) {
u8 *p = buf + i * line_size;
/* Look for SAV code */
if (p[0] != 0xff || p[1] || p[2] || p[3] != sav) {
continue;
}
vbi.p = p + 4;
v4l2_subdev_call(itv->sd_video, vbi, decode_vbi_line, &vbi);
if (vbi.type && !(lines & (1 << vbi.line))) {
lines |= 1 << vbi.line;
itv->vbi.sliced_data[line].id = vbi.type;
itv->vbi.sliced_data[line].field = vbi.is_second_field;
itv->vbi.sliced_data[line].line = vbi.line;
memcpy(itv->vbi.sliced_data[line].data, vbi.p, 42);
line++;
}
}
return line;
}
void ivtv_process_vbi_data(struct ivtv *itv, struct ivtv_buffer *buf,
u64 pts_stamp, int streamtype)
{
u8 *p = (u8 *) buf->buf;
u32 size = buf->bytesused;
int y;
/* Raw VBI data */
if (streamtype == IVTV_ENC_STREAM_TYPE_VBI && ivtv_raw_vbi(itv)) {
u8 type;
ivtv_buf_swap(buf);
type = p[3];
size = buf->bytesused = compress_raw_buf(itv, p, size);
/* second field of the frame? */
if (type == itv->vbi.raw_decoder_sav_even_field) {
/* Dirty hack needed for backwards
compatibility of old VBI software. */
p += size - 4;
memcpy(p, &itv->vbi.frame, 4);
itv->vbi.frame++;
}
return;
}
/* Sliced VBI data with data insertion */
if (streamtype == IVTV_ENC_STREAM_TYPE_VBI) {
int lines;
ivtv_buf_swap(buf);
/* first field */
lines = compress_sliced_buf(itv, 0, p, size / 2,
itv->vbi.sliced_decoder_sav_odd_field);
/* second field */
/* experimentation shows that the second half does not always begin
at the exact address. So start a bit earlier (hence 32). */
lines = compress_sliced_buf(itv, lines, p + size / 2 - 32, size / 2 + 32,
itv->vbi.sliced_decoder_sav_even_field);
/* always return at least one empty line */
if (lines == 0) {
itv->vbi.sliced_data[0].id = 0;
itv->vbi.sliced_data[0].line = 0;
itv->vbi.sliced_data[0].field = 0;
lines = 1;
}
buf->bytesused = size = lines * sizeof(itv->vbi.sliced_data[0]);
memcpy(p, &itv->vbi.sliced_data[0], size);
if (itv->vbi.insert_mpeg) {
copy_vbi_data(itv, lines, pts_stamp);
}
itv->vbi.frame++;
return;
}
/* Sliced VBI re-inserted from an MPEG stream */
if (streamtype == IVTV_DEC_STREAM_TYPE_VBI) {
/* If the size is not 4-byte aligned, then the starting address
for the swapping is also shifted. After swapping the data the
real start address of the VBI data is exactly 4 bytes after the
original start. It's a bit fiddly but it works like a charm.
Non-4-byte alignment happens when an lseek is done on the input
mpeg file to a non-4-byte aligned position. So on arrival here
the VBI data is also non-4-byte aligned. */
int offset = size & 3;
int cnt;
if (offset) {
p += 4 - offset;
}
/* Swap Buffer */
for (y = 0; y < size; y += 4) {
swab32s((u32 *)(p + y));
}
cnt = ivtv_convert_ivtv_vbi(itv, p + offset);
memcpy(buf->buf, itv->vbi.sliced_dec_data, cnt);
buf->bytesused = cnt;
ivtv_write_vbi(itv, itv->vbi.sliced_dec_data,
cnt / sizeof(itv->vbi.sliced_dec_data[0]));
return;
}
}
void ivtv_disable_cc(struct ivtv *itv)
{
struct vbi_cc cc = { .odd = { 0x80, 0x80 }, .even = { 0x80, 0x80 } };
clear_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags);
ivtv_set_cc(itv, 0, &cc);
itv->vbi.cc_payload_idx = 0;
}
void ivtv_vbi_work_handler(struct ivtv *itv)
{
struct vbi_info *vi = &itv->vbi;
struct v4l2_sliced_vbi_data data;
struct vbi_cc cc = { .odd = { 0x80, 0x80 }, .even = { 0x80, 0x80 } };
/* Lock */
if (itv->output_mode == OUT_PASSTHROUGH) {
if (itv->is_50hz) {
data.id = V4L2_SLICED_WSS_625;
data.field = 0;
if (v4l2_subdev_call(itv->sd_video, vbi, g_vbi_data, &data) == 0) {
ivtv_set_wss(itv, 1, data.data[0] & 0xf);
vi->wss_missing_cnt = 0;
} else if (vi->wss_missing_cnt == 4) {
ivtv_set_wss(itv, 1, 0x8); /* 4x3 full format */
} else {
vi->wss_missing_cnt++;
}
}
else {
int mode = 0;
data.id = V4L2_SLICED_CAPTION_525;
data.field = 0;
if (v4l2_subdev_call(itv->sd_video, vbi, g_vbi_data, &data) == 0) {
mode |= 1;
cc.odd[0] = data.data[0];
cc.odd[1] = data.data[1];
}
data.field = 1;
if (v4l2_subdev_call(itv->sd_video, vbi, g_vbi_data, &data) == 0) {
mode |= 2;
cc.even[0] = data.data[0];
cc.even[1] = data.data[1];
}
if (mode) {
vi->cc_missing_cnt = 0;
ivtv_set_cc(itv, mode, &cc);
} else if (vi->cc_missing_cnt == 4) {
ivtv_set_cc(itv, 0, &cc);
} else {
vi->cc_missing_cnt++;
}
}
return;
}
if (test_and_clear_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags)) {
ivtv_set_wss(itv, 1, vi->wss_payload & 0xf);
}
if (test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags)) {
if (vi->cc_payload_idx == 0) {
clear_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags);
ivtv_set_cc(itv, 3, &cc);
}
while (vi->cc_payload_idx) {
cc = vi->cc_payload[0];
memmove(vi->cc_payload, vi->cc_payload + 1,
sizeof(vi->cc_payload) - sizeof(vi->cc_payload[0]));
vi->cc_payload_idx--;
if (vi->cc_payload_idx && cc.odd[0] == 0x80 && cc.odd[1] == 0x80)
continue;
ivtv_set_cc(itv, 3, &cc);
break;
}
}
if (test_and_clear_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags)) {
ivtv_set_vps(itv, 1);
}
}
| linux-master | drivers/media/pci/ivtv/ivtv-vbi.c |
/*
ivtv driver initialization and card probing
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <[email protected]>
Copyright (C) 2005-2007 Hans Verkuil <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* Main Driver file for the ivtv project:
* Driver for the Conexant CX23415/CX23416 chip.
* Author: Kevin Thayer (nufan_wfk at yahoo.com)
* License: GPL
*
* -----
* MPG600/MPG160 support by T.Adachi <[email protected]>
* and Takeru KOMORIYA<[email protected]>
*
* AVerMedia M179 GPIO info by Chris Pinkham <[email protected]>
* using information provided by Jiun-Kuei Jung @ AVerMedia.
*
* Kurouto Sikou CX23416GYC-STVLP tested by K.Ohta <[email protected]>
* using information from T.Adachi,Takeru KOMORIYA and others :-)
*
* Nagase TRANSGEAR 5000TV, Aopen VA2000MAX-STN6 and I/O data GV-MVP/RX
* version by T.Adachi. Special thanks Mr.Suzuki
*/
#include "ivtv-driver.h"
#include "ivtv-version.h"
#include "ivtv-fileops.h"
#include "ivtv-i2c.h"
#include "ivtv-firmware.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-mailbox.h"
#include "ivtv-streams.h"
#include "ivtv-ioctl.h"
#include "ivtv-cards.h"
#include "ivtv-vbi.h"
#include "ivtv-routing.h"
#include "ivtv-controls.h"
#include "ivtv-gpio.h"
#include <linux/dma-mapping.h>
#include <media/tveeprom.h>
#include <media/i2c/saa7115.h>
#include "xc2028.h"
#include <uapi/linux/sched/types.h>
/* If you already have X v4l cards, then set this to X. This way
the device numbers stay matched. Example: you have a WinTV card
without radio and a PVR-350 with. Normally this would give a
video1 device together with a radio0 device for the PVR. By
setting this to 1 you ensure that radio0 is now also radio1. */
int ivtv_first_minor;
/* Callback for registering extensions */
int (*ivtv_ext_init)(struct ivtv *);
EXPORT_SYMBOL(ivtv_ext_init);
/* add your revision and whatnot here */
static const struct pci_device_id ivtv_pci_tbl[] = {
{PCI_VENDOR_ID_ICOMP, PCI_DEVICE_ID_IVTV15,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_ICOMP, PCI_DEVICE_ID_IVTV16,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
};
MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
/* ivtv instance counter */
static atomic_t ivtv_instance = ATOMIC_INIT(0);
/* Parameter declarations */
static int cardtype[IVTV_MAX_CARDS];
static int tuner[IVTV_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1 };
static int radio[IVTV_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1 };
static int i2c_clock_period[IVTV_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1 };
static unsigned int cardtype_c = 1;
static unsigned int tuner_c = 1;
static int radio_c = 1;
static unsigned int i2c_clock_period_c = 1;
static char pal[] = "---";
static char secam[] = "--";
static char ntsc[] = "-";
/* Buffers */
/* DMA Buffers, Default size in MB allocated */
#define IVTV_DEFAULT_ENC_MPG_BUFFERS 4
#define IVTV_DEFAULT_ENC_YUV_BUFFERS 2
#define IVTV_DEFAULT_ENC_VBI_BUFFERS 1
/* Exception: size in kB for this stream (MB is overkill) */
#define IVTV_DEFAULT_ENC_PCM_BUFFERS 320
#define IVTV_DEFAULT_DEC_MPG_BUFFERS 1
#define IVTV_DEFAULT_DEC_YUV_BUFFERS 1
/* Exception: size in kB for this stream (MB is way overkill) */
#define IVTV_DEFAULT_DEC_VBI_BUFFERS 64
static int enc_mpg_buffers = IVTV_DEFAULT_ENC_MPG_BUFFERS;
static int enc_yuv_buffers = IVTV_DEFAULT_ENC_YUV_BUFFERS;
static int enc_vbi_buffers = IVTV_DEFAULT_ENC_VBI_BUFFERS;
static int enc_pcm_buffers = IVTV_DEFAULT_ENC_PCM_BUFFERS;
static int dec_mpg_buffers = IVTV_DEFAULT_DEC_MPG_BUFFERS;
static int dec_yuv_buffers = IVTV_DEFAULT_DEC_YUV_BUFFERS;
static int dec_vbi_buffers = IVTV_DEFAULT_DEC_VBI_BUFFERS;
static int ivtv_yuv_mode;
static int ivtv_yuv_threshold = -1;
static int ivtv_pci_latency = 1;
int ivtv_debug;
#ifdef CONFIG_VIDEO_ADV_DEBUG
int ivtv_fw_debug;
#endif
static int tunertype = -1;
static int newi2c = -1;
module_param_array(tuner, int, &tuner_c, 0644);
module_param_array(radio, int, &radio_c, 0644);
module_param_array(cardtype, int, &cardtype_c, 0644);
module_param_string(pal, pal, sizeof(pal), 0644);
module_param_string(secam, secam, sizeof(secam), 0644);
module_param_string(ntsc, ntsc, sizeof(ntsc), 0644);
module_param_named(debug,ivtv_debug, int, 0644);
#ifdef CONFIG_VIDEO_ADV_DEBUG
module_param_named(fw_debug, ivtv_fw_debug, int, 0644);
#endif
module_param(ivtv_pci_latency, int, 0644);
module_param(ivtv_yuv_mode, int, 0644);
module_param(ivtv_yuv_threshold, int, 0644);
module_param(ivtv_first_minor, int, 0644);
module_param(enc_mpg_buffers, int, 0644);
module_param(enc_yuv_buffers, int, 0644);
module_param(enc_vbi_buffers, int, 0644);
module_param(enc_pcm_buffers, int, 0644);
module_param(dec_mpg_buffers, int, 0644);
module_param(dec_yuv_buffers, int, 0644);
module_param(dec_vbi_buffers, int, 0644);
module_param(tunertype, int, 0644);
module_param(newi2c, int, 0644);
module_param_array(i2c_clock_period, int, &i2c_clock_period_c, 0644);
MODULE_PARM_DESC(tuner, "Tuner type selection,\n"
"\t\t\tsee tuner.h for values");
MODULE_PARM_DESC(radio,
"Enable or disable the radio. Use only if autodetection\n"
"\t\t\tfails. 0 = disable, 1 = enable");
MODULE_PARM_DESC(cardtype,
"Only use this option if your card is not detected properly.\n"
"\t\tSpecify card type:\n"
"\t\t\t 1 = WinTV PVR 250\n"
"\t\t\t 2 = WinTV PVR 350\n"
"\t\t\t 3 = WinTV PVR-150 or PVR-500\n"
"\t\t\t 4 = AVerMedia M179\n"
"\t\t\t 5 = YUAN MPG600/Kuroutoshikou iTVC16-STVLP\n"
"\t\t\t 6 = YUAN MPG160/Kuroutoshikou iTVC15-STVLP\n"
"\t\t\t 7 = YUAN PG600/DIAMONDMM PVR-550 (CX Falcon 2)\n"
"\t\t\t 8 = Adaptec AVC-2410\n"
"\t\t\t 9 = Adaptec AVC-2010\n"
"\t\t\t10 = NAGASE TRANSGEAR 5000TV\n"
"\t\t\t11 = AOpen VA2000MAX-STN6\n"
"\t\t\t12 = YUAN MPG600GR/Kuroutoshikou CX23416GYC-STVLP\n"
"\t\t\t13 = I/O Data GV-MVP/RX\n"
"\t\t\t14 = I/O Data GV-MVP/RX2E\n"
"\t\t\t15 = GOTVIEW PCI DVD\n"
"\t\t\t16 = GOTVIEW PCI DVD2 Deluxe\n"
"\t\t\t17 = Yuan MPC622\n"
"\t\t\t18 = Digital Cowboy DCT-MTVP1\n"
"\t\t\t19 = Yuan PG600V2/GotView PCI DVD Lite\n"
"\t\t\t20 = Club3D ZAP-TV1x01\n"
"\t\t\t21 = AverTV MCE 116 Plus\n"
"\t\t\t22 = ASUS Falcon2\n"
"\t\t\t23 = AverMedia PVR-150 Plus\n"
"\t\t\t24 = AverMedia EZMaker PCI Deluxe\n"
"\t\t\t25 = AverMedia M104 (not yet working)\n"
"\t\t\t26 = Buffalo PC-MV5L/PCI\n"
"\t\t\t27 = AVerMedia UltraTV 1500 MCE\n"
"\t\t\t28 = Sony VAIO Giga Pocket (ENX Kikyou)\n"
"\t\t\t 0 = Autodetect (default)\n"
"\t\t\t-1 = Ignore this card\n\t\t");
MODULE_PARM_DESC(pal, "Set PAL standard: BGH, DK, I, M, N, Nc, 60");
MODULE_PARM_DESC(secam, "Set SECAM standard: BGH, DK, L, LC");
MODULE_PARM_DESC(ntsc, "Set NTSC standard: M, J (Japan), K (South Korea)");
MODULE_PARM_DESC(tunertype,
"Specify tuner type:\n"
"\t\t\t 0 = tuner for PAL-B/G/H/D/K/I, SECAM-B/G/H/D/K/L/Lc\n"
"\t\t\t 1 = tuner for NTSC-M/J/K, PAL-M/N/Nc\n"
"\t\t\t-1 = Autodetect (default)\n");
MODULE_PARM_DESC(debug,
"Debug level (bitmask). Default: 0\n"
"\t\t\t 1/0x0001: warning\n"
"\t\t\t 2/0x0002: info\n"
"\t\t\t 4/0x0004: mailbox\n"
"\t\t\t 8/0x0008: ioctl\n"
"\t\t\t 16/0x0010: file\n"
"\t\t\t 32/0x0020: dma\n"
"\t\t\t 64/0x0040: irq\n"
"\t\t\t 128/0x0080: decoder\n"
"\t\t\t 256/0x0100: yuv\n"
"\t\t\t 512/0x0200: i2c\n"
"\t\t\t1024/0x0400: high volume\n");
#ifdef CONFIG_VIDEO_ADV_DEBUG
MODULE_PARM_DESC(fw_debug,
"Enable code for debugging firmware problems. Default: 0\n");
#endif
MODULE_PARM_DESC(ivtv_pci_latency,
"Change the PCI latency to 64 if lower: 0 = No, 1 = Yes,\n"
"\t\t\tDefault: Yes");
MODULE_PARM_DESC(ivtv_yuv_mode,
"Specify the yuv playback mode:\n"
"\t\t\t0 = interlaced\n\t\t\t1 = progressive\n\t\t\t2 = auto\n"
"\t\t\tDefault: 0 (interlaced)");
MODULE_PARM_DESC(ivtv_yuv_threshold,
"If ivtv_yuv_mode is 2 (auto) then playback content as\n\t\tprogressive if src height <= ivtv_yuvthreshold\n"
"\t\t\tDefault: 480");
MODULE_PARM_DESC(enc_mpg_buffers,
"Encoder MPG Buffers (in MB)\n"
"\t\t\tDefault: " __stringify(IVTV_DEFAULT_ENC_MPG_BUFFERS));
MODULE_PARM_DESC(enc_yuv_buffers,
"Encoder YUV Buffers (in MB)\n"
"\t\t\tDefault: " __stringify(IVTV_DEFAULT_ENC_YUV_BUFFERS));
MODULE_PARM_DESC(enc_vbi_buffers,
"Encoder VBI Buffers (in MB)\n"
"\t\t\tDefault: " __stringify(IVTV_DEFAULT_ENC_VBI_BUFFERS));
MODULE_PARM_DESC(enc_pcm_buffers,
"Encoder PCM buffers (in kB)\n"
"\t\t\tDefault: " __stringify(IVTV_DEFAULT_ENC_PCM_BUFFERS));
MODULE_PARM_DESC(dec_mpg_buffers,
"Decoder MPG buffers (in MB)\n"
"\t\t\tDefault: " __stringify(IVTV_DEFAULT_DEC_MPG_BUFFERS));
MODULE_PARM_DESC(dec_yuv_buffers,
"Decoder YUV buffers (in MB)\n"
"\t\t\tDefault: " __stringify(IVTV_DEFAULT_DEC_YUV_BUFFERS));
MODULE_PARM_DESC(dec_vbi_buffers,
"Decoder VBI buffers (in kB)\n"
"\t\t\tDefault: " __stringify(IVTV_DEFAULT_DEC_VBI_BUFFERS));
MODULE_PARM_DESC(newi2c,
"Use new I2C implementation\n"
"\t\t\t-1 is autodetect, 0 is off, 1 is on\n"
"\t\t\tDefault is autodetect");
MODULE_PARM_DESC(i2c_clock_period,
"Period of SCL for the I2C bus controlled by the CX23415/6\n"
"\t\t\tMin: 10 usec (100 kHz), Max: 4500 usec (222 Hz)\n"
"\t\t\tDefault: " __stringify(IVTV_DEFAULT_I2C_CLOCK_PERIOD));
MODULE_PARM_DESC(ivtv_first_minor, "Set device node number assigned to first card");
MODULE_AUTHOR("Kevin Thayer, Chris Kennedy, Hans Verkuil");
MODULE_DESCRIPTION("CX23415/CX23416 driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IVTV_VERSION);
#if defined(CONFIG_MODULES) && defined(MODULE)
static void request_module_async(struct work_struct *work)
{
struct ivtv *dev = container_of(work, struct ivtv, request_module_wk);
/* Make sure ivtv-alsa module is loaded */
request_module("ivtv-alsa");
/* Initialize ivtv-alsa for this instance of the ivtv device */
if (ivtv_ext_init != NULL)
ivtv_ext_init(dev);
}
static void request_modules(struct ivtv *dev)
{
INIT_WORK(&dev->request_module_wk, request_module_async);
schedule_work(&dev->request_module_wk);
}
static void flush_request_modules(struct ivtv *dev)
{
flush_work(&dev->request_module_wk);
}
#else
#define request_modules(dev)
#define flush_request_modules(dev)
#endif /* CONFIG_MODULES */
void ivtv_clear_irq_mask(struct ivtv *itv, u32 mask)
{
itv->irqmask &= ~mask;
write_reg_sync(itv->irqmask, IVTV_REG_IRQMASK);
}
void ivtv_set_irq_mask(struct ivtv *itv, u32 mask)
{
itv->irqmask |= mask;
write_reg_sync(itv->irqmask, IVTV_REG_IRQMASK);
}
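/* Claim the decoder output mode: if no mode is set yet (0), set it to 'mode'.
   The mode in effect after this call is returned, so a caller that gets back
   a value different from 'mode' knows another stream already owns the output. */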
int ivtv_set_output_mode(struct ivtv *itv, int mode)
{
int old_mode;
spin_lock(&itv->lock);
old_mode = itv->output_mode;
if (old_mode == 0)
itv->output_mode = old_mode = mode;
spin_unlock(&itv->lock);
return old_mode;
}
struct ivtv_stream *ivtv_get_output_stream(struct ivtv *itv)
{
switch (itv->output_mode) {
case OUT_MPG:
return &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
case OUT_YUV:
return &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
default:
return NULL;
}
}
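/* Sleep on the given wait queue until woken up; returns -EINTR if the sleep
   was interrupted by a signal, 0 otherwise. */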
int ivtv_waitq(wait_queue_head_t *waitq)
{
DEFINE_WAIT(wait);
prepare_to_wait(waitq, &wait, TASK_INTERRUPTIBLE);
schedule();
finish_wait(waitq, &wait);
return signal_pending(current) ? -EINTR : 0;
}
/* Generic utility functions */
int ivtv_msleep_timeout(unsigned int msecs, int intr)
{
int timeout = msecs_to_jiffies(msecs);
do {
set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
timeout = schedule_timeout(timeout);
if (intr) {
int ret = signal_pending(current);
if (ret)
return ret;
}
} while (timeout);
return 0;
}
/* Release ioremapped memory */
static void ivtv_iounmap(struct ivtv *itv)
{
if (itv == NULL)
return;
/* Release registers memory */
if (itv->reg_mem != NULL) {
IVTV_DEBUG_INFO("releasing reg_mem\n");
iounmap(itv->reg_mem);
itv->reg_mem = NULL;
}
/* Release io memory */
if (itv->has_cx23415 && itv->dec_mem != NULL) {
IVTV_DEBUG_INFO("releasing dec_mem\n");
iounmap(itv->dec_mem);
}
itv->dec_mem = NULL;
/* Release io memory */
if (itv->enc_mem != NULL) {
IVTV_DEBUG_INFO("releasing enc_mem\n");
iounmap(itv->enc_mem);
itv->enc_mem = NULL;
}
}
/* Hauppauge card? get values from tveeprom */
void ivtv_read_eeprom(struct ivtv *itv, struct tveeprom *tv)
{
u8 eedata[256];
itv->i2c_client.addr = 0xA0 >> 1;
tveeprom_read(&itv->i2c_client, eedata, sizeof(eedata));
tveeprom_hauppauge_analog(tv, eedata);
}
static void ivtv_process_eeprom(struct ivtv *itv)
{
struct tveeprom tv;
int pci_slot = PCI_SLOT(itv->pdev->devfn);
ivtv_read_eeprom(itv, &tv);
/* Many thanks to Steven Toth from Hauppauge for providing the
model numbers */
switch (tv.model) {
/* In a few cases the PCI subsystem IDs do not correctly
identify the card. A better method is to check the
model number from the eeprom instead. */
case 30012 ... 30039: /* Low profile PVR250 */
case 32000 ... 32999:
case 48000 ... 48099: /* 48??? range are PVR250s with a cx23415 */
case 48400 ... 48599:
itv->card = ivtv_get_card(IVTV_CARD_PVR_250);
break;
case 48100 ... 48399:
case 48600 ... 48999:
itv->card = ivtv_get_card(IVTV_CARD_PVR_350);
break;
case 23000 ... 23999: /* PVR500 */
case 25000 ... 25999: /* Low profile PVR150 */
case 26000 ... 26999: /* Regular PVR150 */
itv->card = ivtv_get_card(IVTV_CARD_PVR_150);
break;
case 0:
IVTV_ERR("Invalid EEPROM\n");
return;
default:
IVTV_ERR("Unknown model %d, defaulting to PVR-150\n", tv.model);
itv->card = ivtv_get_card(IVTV_CARD_PVR_150);
break;
}
switch (tv.model) {
/* Old style PVR350 (with an saa7114) uses this input for
the tuner. */
case 48254:
itv->card = ivtv_get_card(IVTV_CARD_PVR_350_V1);
break;
default:
break;
}
itv->v4l2_cap = itv->card->v4l2_capabilities;
itv->card_name = itv->card->name;
itv->card_i2c = itv->card->i2c;
/* If this is a PVR500 then it should be possible to detect whether it is the
first or second unit by looking at the subsystem device ID: if bit 4 is
set, then it is the second unit (according to info from Hauppauge).
However, while this works for most cards, I have seen a few PVR500 cards
where both units have the same subsystem ID.
So instead I look at the reported 'PCI slot' (which is the slot on the PVR500
PCI bridge) and if it is 8, then it is assumed to be the first unit, otherwise
it is the second unit. It is possible that it is a different slot when ivtv is
used in Xen, in that case I ignore this card here. The worst that can happen
is that the card presents itself with a non-working radio device.
This detection is needed since the eeprom reports incorrectly that a radio is
present on the second unit. */
if (tv.model / 1000 == 23) {
static const struct ivtv_card_tuner_i2c ivtv_i2c_radio = {
.radio = { 0x60, I2C_CLIENT_END },
.demod = { 0x43, I2C_CLIENT_END },
.tv = { 0x61, I2C_CLIENT_END },
};
itv->card_name = "WinTV PVR 500";
itv->card_i2c = &ivtv_i2c_radio;
if (pci_slot == 8 || pci_slot == 9) {
int is_first = (pci_slot & 1) == 0;
itv->card_name = is_first ? "WinTV PVR 500 (unit #1)" :
"WinTV PVR 500 (unit #2)";
if (!is_first) {
IVTV_INFO("Correcting tveeprom data: no radio present on second unit\n");
tv.has_radio = 0;
}
}
}
IVTV_INFO("Autodetected %s\n", itv->card_name);
switch (tv.tuner_hauppauge_model) {
case 85:
case 99:
case 112:
itv->pvr150_workaround = 1;
break;
default:
break;
}
if (tv.tuner_type == TUNER_ABSENT)
IVTV_ERR("tveeprom cannot autodetect tuner!\n");
if (itv->options.tuner == -1)
itv->options.tuner = tv.tuner_type;
if (itv->options.radio == -1)
itv->options.radio = (tv.has_radio != 0);
/* only enable newi2c if an IR blaster is present */
if (itv->options.newi2c == -1 && tv.has_ir) {
itv->options.newi2c = (tv.has_ir & 4) ? 1 : 0;
if (itv->options.newi2c) {
IVTV_INFO("Reopen i2c bus for IR-blaster support\n");
exit_ivtv_i2c(itv);
init_ivtv_i2c(itv);
}
}
if (itv->std != 0)
/* user specified tuner standard */
return;
/* autodetect tuner standard */
if (tv.tuner_formats & V4L2_STD_PAL) {
IVTV_DEBUG_INFO("PAL tuner detected\n");
itv->std |= V4L2_STD_PAL_BG | V4L2_STD_PAL_H;
} else if (tv.tuner_formats & V4L2_STD_NTSC) {
IVTV_DEBUG_INFO("NTSC tuner detected\n");
itv->std |= V4L2_STD_NTSC_M;
} else if (tv.tuner_formats & V4L2_STD_SECAM) {
IVTV_DEBUG_INFO("SECAM tuner detected\n");
itv->std |= V4L2_STD_SECAM_L;
} else {
IVTV_INFO("No tuner detected, default to NTSC-M\n");
itv->std |= V4L2_STD_NTSC_M;
}
}
static v4l2_std_id ivtv_parse_std(struct ivtv *itv)
{
switch (pal[0]) {
case '6':
tunertype = 0;
return V4L2_STD_PAL_60;
case 'b':
case 'B':
case 'g':
case 'G':
case 'h':
case 'H':
tunertype = 0;
return V4L2_STD_PAL_BG | V4L2_STD_PAL_H;
case 'n':
case 'N':
tunertype = 1;
if (pal[1] == 'c' || pal[1] == 'C')
return V4L2_STD_PAL_Nc;
return V4L2_STD_PAL_N;
case 'i':
case 'I':
tunertype = 0;
return V4L2_STD_PAL_I;
case 'd':
case 'D':
case 'k':
case 'K':
tunertype = 0;
return V4L2_STD_PAL_DK;
case 'M':
case 'm':
tunertype = 1;
return V4L2_STD_PAL_M;
case '-':
break;
default:
IVTV_WARN("pal= argument not recognised\n");
return 0;
}
switch (secam[0]) {
case 'b':
case 'B':
case 'g':
case 'G':
case 'h':
case 'H':
tunertype = 0;
return V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H;
case 'd':
case 'D':
case 'k':
case 'K':
tunertype = 0;
return V4L2_STD_SECAM_DK;
case 'l':
case 'L':
tunertype = 0;
if (secam[1] == 'C' || secam[1] == 'c')
return V4L2_STD_SECAM_LC;
return V4L2_STD_SECAM_L;
case '-':
break;
default:
IVTV_WARN("secam= argument not recognised\n");
return 0;
}
switch (ntsc[0]) {
case 'm':
case 'M':
tunertype = 1;
return V4L2_STD_NTSC_M;
case 'j':
case 'J':
tunertype = 1;
return V4L2_STD_NTSC_M_JP;
case 'k':
case 'K':
tunertype = 1;
return V4L2_STD_NTSC_M_KR;
case '-':
break;
default:
IVTV_WARN("ntsc= argument not recognised\n");
return 0;
}
/* no match found */
return 0;
}
static void ivtv_process_options(struct ivtv *itv)
{
const char *chipname;
int i, j;
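/* The MPG, YUV and encoder VBI buffer options are given in MB (hence the
   multiplication by 1024 below), while the encoder PCM and decoder VBI
   options are already in kB. */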
itv->options.kilobytes[IVTV_ENC_STREAM_TYPE_MPG] = enc_mpg_buffers * 1024;
itv->options.kilobytes[IVTV_ENC_STREAM_TYPE_YUV] = enc_yuv_buffers * 1024;
itv->options.kilobytes[IVTV_ENC_STREAM_TYPE_VBI] = enc_vbi_buffers * 1024;
itv->options.kilobytes[IVTV_ENC_STREAM_TYPE_PCM] = enc_pcm_buffers;
itv->options.kilobytes[IVTV_DEC_STREAM_TYPE_MPG] = dec_mpg_buffers * 1024;
itv->options.kilobytes[IVTV_DEC_STREAM_TYPE_YUV] = dec_yuv_buffers * 1024;
itv->options.kilobytes[IVTV_DEC_STREAM_TYPE_VBI] = dec_vbi_buffers;
itv->options.cardtype = cardtype[itv->instance];
itv->options.tuner = tuner[itv->instance];
itv->options.radio = radio[itv->instance];
itv->options.i2c_clock_period = i2c_clock_period[itv->instance];
if (itv->options.i2c_clock_period == -1)
itv->options.i2c_clock_period = IVTV_DEFAULT_I2C_CLOCK_PERIOD;
else if (itv->options.i2c_clock_period < 10)
itv->options.i2c_clock_period = 10;
else if (itv->options.i2c_clock_period > 4500)
itv->options.i2c_clock_period = 4500;
itv->options.newi2c = newi2c;
if (tunertype < -1 || tunertype > 1) {
IVTV_WARN("Invalid tunertype argument, will autodetect instead\n");
tunertype = -1;
}
itv->std = ivtv_parse_std(itv);
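/* If no standard was given, fall back on the tunertype hint: 1 selects the
   NTSC/PAL-M/N family of standards, 0 excludes it. */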
if (itv->std == 0 && tunertype >= 0)
itv->std = tunertype ? V4L2_STD_MN : (V4L2_STD_ALL & ~V4L2_STD_MN);
itv->has_cx23415 = (itv->pdev->device == PCI_DEVICE_ID_IVTV15);
chipname = itv->has_cx23415 ? "cx23415" : "cx23416";
if (itv->options.cardtype == -1) {
IVTV_INFO("Ignore card (detected %s based chip)\n", chipname);
return;
}
if ((itv->card = ivtv_get_card(itv->options.cardtype - 1))) {
IVTV_INFO("User specified %s card (detected %s based chip)\n",
itv->card->name, chipname);
} else if (itv->options.cardtype != 0) {
IVTV_ERR("Unknown user specified type, trying to autodetect card\n");
}
if (itv->card == NULL) {
if (itv->pdev->subsystem_vendor == IVTV_PCI_ID_HAUPPAUGE ||
itv->pdev->subsystem_vendor == IVTV_PCI_ID_HAUPPAUGE_ALT1 ||
itv->pdev->subsystem_vendor == IVTV_PCI_ID_HAUPPAUGE_ALT2) {
itv->card = ivtv_get_card(itv->has_cx23415 ? IVTV_CARD_PVR_350 : IVTV_CARD_PVR_150);
IVTV_INFO("Autodetected Hauppauge card (%s based)\n",
chipname);
}
}
if (itv->card == NULL) {
for (i = 0; (itv->card = ivtv_get_card(i)); i++) {
if (itv->card->pci_list == NULL)
continue;
for (j = 0; itv->card->pci_list[j].device; j++) {
if (itv->pdev->device !=
itv->card->pci_list[j].device)
continue;
if (itv->pdev->subsystem_vendor !=
itv->card->pci_list[j].subsystem_vendor)
continue;
if (itv->pdev->subsystem_device !=
itv->card->pci_list[j].subsystem_device)
continue;
IVTV_INFO("Autodetected %s card (%s based)\n",
itv->card->name, chipname);
goto done;
}
}
}
done:
if (itv->card == NULL) {
itv->card = ivtv_get_card(IVTV_CARD_PVR_150);
IVTV_ERR("Unknown card: vendor/device: [%04x:%04x]\n",
itv->pdev->vendor, itv->pdev->device);
IVTV_ERR(" subsystem vendor/device: [%04x:%04x]\n",
itv->pdev->subsystem_vendor, itv->pdev->subsystem_device);
IVTV_ERR(" %s based\n", chipname);
IVTV_ERR("Defaulting to %s card\n", itv->card->name);
IVTV_ERR("Please mail the vendor/device and subsystem vendor/device IDs and what kind of\n");
IVTV_ERR("card you have to the linux-media mailinglist (www.linuxtv.org)\n");
IVTV_ERR("Prefix your subject line with [UNKNOWN IVTV CARD].\n");
}
itv->v4l2_cap = itv->card->v4l2_capabilities;
itv->card_name = itv->card->name;
itv->card_i2c = itv->card->i2c;
}
/* Precondition: the ivtv structure has been memset to 0. Only
the dev and num fields have been filled in.
No assumptions on the card type may be made here (see ivtv_init_struct2
for that).
*/
static int ivtv_init_struct1(struct ivtv *itv)
{
itv->base_addr = pci_resource_start(itv->pdev, 0);
itv->enc_mbox.max_mbox = 2; /* the encoder has 3 mailboxes (0-2) */
itv->dec_mbox.max_mbox = 1; /* the decoder has 2 mailboxes (0-1) */
mutex_init(&itv->serialize_lock);
mutex_init(&itv->i2c_bus_lock);
mutex_init(&itv->udma.lock);
spin_lock_init(&itv->lock);
spin_lock_init(&itv->dma_reg_lock);
kthread_init_worker(&itv->irq_worker);
itv->irq_worker_task = kthread_run(kthread_worker_fn, &itv->irq_worker,
"%s", itv->v4l2_dev.name);
if (IS_ERR(itv->irq_worker_task)) {
IVTV_ERR("Could not create ivtv task\n");
return -1;
}
/* must use the FIFO scheduler as it is realtime sensitive */
sched_set_fifo(itv->irq_worker_task);
kthread_init_work(&itv->irq_work, ivtv_irq_work_handler);
/* Initial settings */
itv->cxhdl.port = CX2341X_PORT_MEMORY;
itv->cxhdl.capabilities = CX2341X_CAP_HAS_SLICED_VBI;
init_waitqueue_head(&itv->eos_waitq);
init_waitqueue_head(&itv->event_waitq);
init_waitqueue_head(&itv->vsync_waitq);
init_waitqueue_head(&itv->dma_waitq);
timer_setup(&itv->dma_timer, ivtv_unfinished_dma, 0);
itv->cur_dma_stream = -1;
itv->cur_pio_stream = -1;
/* Ctrls */
itv->speed = 1000;
/* VBI */
itv->vbi.in.type = V4L2_BUF_TYPE_VBI_CAPTURE;
itv->vbi.sliced_in = &itv->vbi.in.fmt.sliced;
/* Init the sg table for osd/yuv output */
sg_init_table(itv->udma.SGlist, IVTV_DMA_SG_OSD_ENT);
/* OSD */
itv->osd_global_alpha_state = 1;
itv->osd_global_alpha = 255;
/* YUV */
atomic_set(&itv->yuv_info.next_dma_frame, -1);
itv->yuv_info.lace_mode = ivtv_yuv_mode;
itv->yuv_info.lace_threshold = ivtv_yuv_threshold;
itv->yuv_info.max_frames_buffered = 3;
itv->yuv_info.track_osd = 1;
return 0;
}
/* Second initialization part. Here the card type has been
autodetected. */
static void ivtv_init_struct2(struct ivtv *itv)
{
int i;
for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS; i++)
if (itv->card->video_inputs[i].video_type == 0)
break;
itv->nof_inputs = i;
for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS; i++)
if (itv->card->audio_inputs[i].audio_type == 0)
break;
itv->nof_audio_inputs = i;
if (itv->card->hw_all & IVTV_HW_CX25840) {
itv->vbi.sliced_size = 288; /* multiple of 16, real size = 284 */
} else {
itv->vbi.sliced_size = 64; /* multiple of 16, real size = 52 */
}
/* Find tuner input */
for (i = 0; i < itv->nof_inputs; i++) {
if (itv->card->video_inputs[i].video_type ==
IVTV_CARD_INPUT_VID_TUNER)
break;
}
if (i >= itv->nof_inputs)
i = 0;
itv->active_input = i;
itv->audio_input = itv->card->video_inputs[i].audio_index;
}
static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
u16 cmd;
unsigned char pci_latency;
IVTV_DEBUG_INFO("Enabling pci device\n");
if (pci_enable_device(pdev)) {
IVTV_ERR("Can't enable device!\n");
return -EIO;
}
if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
IVTV_ERR("No suitable DMA available.\n");
return -EIO;
}
if (!request_mem_region(itv->base_addr, IVTV_ENCODER_SIZE, "ivtv encoder")) {
IVTV_ERR("Cannot request encoder memory region.\n");
return -EIO;
}
if (!request_mem_region(itv->base_addr + IVTV_REG_OFFSET,
IVTV_REG_SIZE, "ivtv registers")) {
IVTV_ERR("Cannot request register memory region.\n");
release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE);
return -EIO;
}
if (itv->has_cx23415 &&
!request_mem_region(itv->base_addr + IVTV_DECODER_OFFSET,
IVTV_DECODER_SIZE, "ivtv decoder")) {
IVTV_ERR("Cannot request decoder memory region.\n");
release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE);
release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
return -EIO;
}
/* Check for bus mastering */
pci_read_config_word(pdev, PCI_COMMAND, &cmd);
if (!(cmd & PCI_COMMAND_MASTER)) {
IVTV_DEBUG_INFO("Attempting to enable Bus Mastering\n");
pci_set_master(pdev);
pci_read_config_word(pdev, PCI_COMMAND, &cmd);
if (!(cmd & PCI_COMMAND_MASTER)) {
IVTV_ERR("Bus Mastering is not enabled\n");
if (itv->has_cx23415)
release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET,
IVTV_DECODER_SIZE);
release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE);
release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
return -ENXIO;
}
}
IVTV_DEBUG_INFO("Bus Mastering Enabled.\n");
pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
if (pci_latency < 64 && ivtv_pci_latency) {
IVTV_INFO("Unreasonably low latency timer, setting to 64 (was %d)\n",
pci_latency);
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
}
/* This config space value relates to DMA latencies. The
default value 0x8080 is too low however and will lead
to DMA errors. 0xffff is the max value which solves
these problems. */
pci_write_config_dword(pdev, 0x40, 0xffff);
IVTV_DEBUG_INFO("%d (rev %d) at %02x:%02x.%x, irq: %d, latency: %d, memory: 0x%llx\n",
pdev->device, pdev->revision, pdev->bus->number,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
pdev->irq, pci_latency, (u64)itv->base_addr);
return 0;
}
static void ivtv_load_and_init_modules(struct ivtv *itv)
{
u32 hw = itv->card->hw_all;
unsigned i;
/* check which i2c devices are actually found */
for (i = 0; i < 32; i++) {
u32 device = BIT(i);
if (!(device & hw))
continue;
if (device == IVTV_HW_GPIO || device == IVTV_HW_TVEEPROM) {
/* GPIO and TVEEPROM do not use i2c probing */
itv->hw_flags |= device;
continue;
}
if (ivtv_i2c_register(itv, i) == 0)
itv->hw_flags |= device;
}
/* probe for legacy IR controllers that aren't in card definitions */
if ((itv->hw_flags & IVTV_HW_IR_ANY) == 0)
ivtv_i2c_new_ir_legacy(itv);
if (itv->card->hw_all & IVTV_HW_CX25840)
itv->sd_video = ivtv_find_hw(itv, IVTV_HW_CX25840);
else if (itv->card->hw_all & IVTV_HW_SAA717X)
itv->sd_video = ivtv_find_hw(itv, IVTV_HW_SAA717X);
else if (itv->card->hw_all & IVTV_HW_SAA7114)
itv->sd_video = ivtv_find_hw(itv, IVTV_HW_SAA7114);
else
itv->sd_video = ivtv_find_hw(itv, IVTV_HW_SAA7115);
itv->sd_audio = ivtv_find_hw(itv, itv->card->hw_audio_ctrl);
itv->sd_muxer = ivtv_find_hw(itv, itv->card->hw_muxer);
hw = itv->hw_flags;
if (itv->card->type == IVTV_CARD_CX23416GYC) {
/* Several variations of this card exist, detect which card
type should be used. */
if ((hw & (IVTV_HW_UPD64031A | IVTV_HW_UPD6408X)) == 0)
itv->card = ivtv_get_card(IVTV_CARD_CX23416GYC_NOGRYCS);
else if ((hw & IVTV_HW_UPD64031A) == 0)
itv->card = ivtv_get_card(IVTV_CARD_CX23416GYC_NOGR);
}
else if (itv->card->type == IVTV_CARD_GV_MVPRX ||
itv->card->type == IVTV_CARD_GV_MVPRX2E) {
/* The crystal frequency of GVMVPRX is 24.576MHz */
v4l2_subdev_call(itv->sd_video, video, s_crystal_freq,
SAA7115_FREQ_24_576_MHZ, SAA7115_FREQ_FL_UCGC);
}
if (hw & IVTV_HW_CX25840) {
itv->vbi.raw_decoder_line_size = 1444;
itv->vbi.raw_decoder_sav_odd_field = 0x20;
itv->vbi.raw_decoder_sav_even_field = 0x60;
itv->vbi.sliced_decoder_line_size = 272;
itv->vbi.sliced_decoder_sav_odd_field = 0xB0;
itv->vbi.sliced_decoder_sav_even_field = 0xF0;
}
if (hw & IVTV_HW_SAA711X) {
/* determine the exact saa711x model */
itv->hw_flags &= ~IVTV_HW_SAA711X;
if (strstr(itv->sd_video->name, "saa7114")) {
itv->hw_flags |= IVTV_HW_SAA7114;
/* VBI is not yet supported by the saa7114 driver. */
itv->v4l2_cap &= ~(V4L2_CAP_SLICED_VBI_CAPTURE|V4L2_CAP_VBI_CAPTURE);
} else {
itv->hw_flags |= IVTV_HW_SAA7115;
}
itv->vbi.raw_decoder_line_size = 1443;
itv->vbi.raw_decoder_sav_odd_field = 0x25;
itv->vbi.raw_decoder_sav_even_field = 0x62;
itv->vbi.sliced_decoder_line_size = 51;
itv->vbi.sliced_decoder_sav_odd_field = 0xAB;
itv->vbi.sliced_decoder_sav_even_field = 0xEC;
}
if (hw & IVTV_HW_SAA717X) {
itv->vbi.raw_decoder_line_size = 1443;
itv->vbi.raw_decoder_sav_odd_field = 0x25;
itv->vbi.raw_decoder_sav_even_field = 0x62;
itv->vbi.sliced_decoder_line_size = 51;
itv->vbi.sliced_decoder_sav_odd_field = 0xAB;
itv->vbi.sliced_decoder_sav_even_field = 0xEC;
}
}
static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
{
int retval = 0;
int vbi_buf_size;
struct ivtv *itv;
itv = kzalloc(sizeof(struct ivtv), GFP_KERNEL);
if (itv == NULL)
return -ENOMEM;
itv->pdev = pdev;
itv->instance = v4l2_device_set_name(&itv->v4l2_dev, "ivtv",
&ivtv_instance);
retval = v4l2_device_register(&pdev->dev, &itv->v4l2_dev);
if (retval) {
kfree(itv);
return retval;
}
IVTV_INFO("Initializing card %d\n", itv->instance);
ivtv_process_options(itv);
if (itv->options.cardtype == -1) {
retval = -ENODEV;
goto err;
}
if (ivtv_init_struct1(itv)) {
retval = -ENOMEM;
goto err;
}
retval = cx2341x_handler_init(&itv->cxhdl, 50);
if (retval)
goto err;
itv->v4l2_dev.ctrl_handler = &itv->cxhdl.hdl;
itv->cxhdl.ops = &ivtv_cxhdl_ops;
itv->cxhdl.priv = itv;
itv->cxhdl.func = ivtv_api_func;
IVTV_DEBUG_INFO("base addr: 0x%llx\n", (u64)itv->base_addr);
/* PCI Device Setup */
retval = ivtv_setup_pci(itv, pdev, pci_id);
if (retval == -EIO)
goto free_worker;
if (retval == -ENXIO)
goto free_mem;
/* map io memory */
IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
(u64)itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE);
itv->enc_mem = ioremap(itv->base_addr + IVTV_ENCODER_OFFSET,
IVTV_ENCODER_SIZE);
if (!itv->enc_mem) {
IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 encoder memory\n");
IVTV_ERR("Each capture card with a CX23415/6 needs 8 MB of vmalloc address space for this window\n");
IVTV_ERR("Check the output of 'grep Vmalloc /proc/meminfo'\n");
IVTV_ERR("Use the vmalloc= kernel command line option to set VmallocTotal to a larger value\n");
retval = -ENOMEM;
goto free_mem;
}
if (itv->has_cx23415) {
IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
(u64)itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
itv->dec_mem = ioremap(itv->base_addr + IVTV_DECODER_OFFSET,
IVTV_DECODER_SIZE);
if (!itv->dec_mem) {
IVTV_ERR("ioremap failed. Can't get a window into CX23415 decoder memory\n");
IVTV_ERR("Each capture card with a CX23415 needs 8 MB of vmalloc address space for this window\n");
IVTV_ERR("Check the output of 'grep Vmalloc /proc/meminfo'\n");
IVTV_ERR("Use the vmalloc= kernel command line option to set VmallocTotal to a larger value\n");
retval = -ENOMEM;
goto free_mem;
}
}
else {
itv->dec_mem = itv->enc_mem;
}
/* map registers memory */
IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
(u64)itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
itv->reg_mem =
ioremap(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
if (!itv->reg_mem) {
IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 register space\n");
IVTV_ERR("Each capture card with a CX23415/6 needs 64 kB of vmalloc address space for this window\n");
IVTV_ERR("Check the output of 'grep Vmalloc /proc/meminfo'\n");
IVTV_ERR("Use the vmalloc= kernel command line option to set VmallocTotal to a larger value\n");
retval = -ENOMEM;
goto free_io;
}
retval = ivtv_gpio_init(itv);
if (retval)
goto free_io;
/* active i2c */
IVTV_DEBUG_INFO("activating i2c...\n");
if (init_ivtv_i2c(itv)) {
IVTV_ERR("Could not initialize i2c\n");
goto free_io;
}
if (itv->card->hw_all & IVTV_HW_TVEEPROM) {
/* Based on the model number the cardtype may be changed.
The PCI IDs are not always reliable. */
ivtv_process_eeprom(itv);
}
if (itv->card->comment)
IVTV_INFO("%s", itv->card->comment);
if (itv->card->v4l2_capabilities == 0) {
/* card was detected but is not supported */
retval = -ENODEV;
goto free_i2c;
}
if (itv->std == 0) {
itv->std = V4L2_STD_NTSC_M;
}
if (itv->options.tuner == -1) {
int i;
for (i = 0; i < IVTV_CARD_MAX_TUNERS; i++) {
if ((itv->std & itv->card->tuners[i].std) == 0)
continue;
itv->options.tuner = itv->card->tuners[i].tuner;
break;
}
}
/* if no tuner was found, then pick the first tuner in the card list */
if (itv->options.tuner == -1 && itv->card->tuners[0].std) {
itv->std = itv->card->tuners[0].std;
if (itv->std & V4L2_STD_PAL)
itv->std = V4L2_STD_PAL_BG | V4L2_STD_PAL_H;
else if (itv->std & V4L2_STD_NTSC)
itv->std = V4L2_STD_NTSC_M;
else if (itv->std & V4L2_STD_SECAM)
itv->std = V4L2_STD_SECAM_L;
itv->options.tuner = itv->card->tuners[0].tuner;
}
if (itv->options.radio == -1)
itv->options.radio = (itv->card->radio_input.audio_type != 0);
/* The card is now fully identified, continue with card-specific
initialization. */
ivtv_init_struct2(itv);
ivtv_load_and_init_modules(itv);
if (itv->std & V4L2_STD_525_60) {
itv->is_60hz = 1;
itv->is_out_60hz = 1;
} else {
itv->is_50hz = 1;
itv->is_out_50hz = 1;
}
itv->yuv_info.osd_full_w = 720;
itv->yuv_info.osd_full_h = itv->is_out_50hz ? 576 : 480;
itv->yuv_info.v4l2_src_w = itv->yuv_info.osd_full_w;
itv->yuv_info.v4l2_src_h = itv->yuv_info.osd_full_h;
cx2341x_handler_set_50hz(&itv->cxhdl, itv->is_50hz);
itv->stream_buf_size[IVTV_ENC_STREAM_TYPE_MPG] = 0x08000;
itv->stream_buf_size[IVTV_ENC_STREAM_TYPE_PCM] = 0x01200;
itv->stream_buf_size[IVTV_DEC_STREAM_TYPE_MPG] = 0x10000;
itv->stream_buf_size[IVTV_DEC_STREAM_TYPE_YUV] = 0x10000;
itv->stream_buf_size[IVTV_ENC_STREAM_TYPE_YUV] = 0x08000;
/* Setup VBI Raw Size. Should be big enough to hold PAL.
It is possible to switch between PAL and NTSC, so we need to
take the largest size here. */
/* 1456 is multiple of 16, real size = 1444 */
itv->vbi.raw_size = 1456;
/* We use a buffer size of 1/2 of the total size needed for a
frame. This is actually very useful, since we now receive
a field at a time and that makes 'compressing' the raw data
down to size by stripping off the SAV codes a lot easier.
Note: having two different buffer sizes prevents standard
switching on the fly. We need to find a better solution... */
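/* For example, with raw_size 1456 this gives 1456 * 36 / 2 = 26208 bytes
   per buffer for 50 Hz and 1456 * 24 / 2 = 17472 bytes for 60 Hz. */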
vbi_buf_size = itv->vbi.raw_size * (itv->is_60hz ? 24 : 36) / 2;
itv->stream_buf_size[IVTV_ENC_STREAM_TYPE_VBI] = vbi_buf_size;
itv->stream_buf_size[IVTV_DEC_STREAM_TYPE_VBI] = sizeof(struct v4l2_sliced_vbi_data) * 36;
if (itv->options.radio > 0)
itv->v4l2_cap |= V4L2_CAP_RADIO;
if (itv->options.tuner > -1) {
struct tuner_setup setup;
setup.addr = ADDR_UNSET;
setup.type = itv->options.tuner;
setup.mode_mask = T_ANALOG_TV; /* matches TV tuners */
if (itv->options.radio > 0)
setup.mode_mask |= T_RADIO;
setup.tuner_callback = (setup.type == TUNER_XC2028) ?
ivtv_reset_tuner_gpio : NULL;
ivtv_call_all(itv, tuner, s_type_addr, &setup);
if (setup.type == TUNER_XC2028) {
static struct xc2028_ctrl ctrl = {
.fname = XC2028_DEFAULT_FIRMWARE,
.max_len = 64,
};
struct v4l2_priv_tun_config cfg = {
.tuner = itv->options.tuner,
.priv = &ctrl,
};
ivtv_call_all(itv, tuner, s_config, &cfg);
}
}
/* The tuner is fixed to the standard. The other inputs (e.g. S-Video)
are not. */
itv->tuner_std = itv->std;
if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
struct v4l2_ctrl_handler *hdl = itv->v4l2_dev.ctrl_handler;
itv->ctrl_pts = v4l2_ctrl_new_std(hdl, &ivtv_hdl_out_ops,
V4L2_CID_MPEG_VIDEO_DEC_PTS, 0, 0, 0, 0);
itv->ctrl_frame = v4l2_ctrl_new_std(hdl, &ivtv_hdl_out_ops,
V4L2_CID_MPEG_VIDEO_DEC_FRAME, 0, 0, 0, 0);
/* Note: V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO is not supported,
mask that menu item. */
itv->ctrl_audio_playback =
v4l2_ctrl_new_std_menu(hdl, &ivtv_hdl_out_ops,
V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK,
V4L2_MPEG_AUDIO_DEC_PLAYBACK_SWAPPED_STEREO,
1 << V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO,
V4L2_MPEG_AUDIO_DEC_PLAYBACK_STEREO);
itv->ctrl_audio_multilingual_playback =
v4l2_ctrl_new_std_menu(hdl, &ivtv_hdl_out_ops,
V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK,
V4L2_MPEG_AUDIO_DEC_PLAYBACK_SWAPPED_STEREO,
1 << V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO,
V4L2_MPEG_AUDIO_DEC_PLAYBACK_LEFT);
if (hdl->error) {
retval = hdl->error;
goto free_i2c;
}
v4l2_ctrl_cluster(2, &itv->ctrl_pts);
v4l2_ctrl_cluster(2, &itv->ctrl_audio_playback);
ivtv_call_all(itv, video, s_std_output, itv->std);
/* Turn off the output signal. The mpeg decoder is not yet
active so without this you would get a green image until the
mpeg decoder becomes active. */
ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 0);
}
/* clear interrupt mask, effectively disabling interrupts */
ivtv_set_irq_mask(itv, 0xffffffff);
/* Register IRQ */
retval = request_irq(itv->pdev->irq, ivtv_irq_handler,
IRQF_SHARED, itv->v4l2_dev.name, (void *)itv);
if (retval) {
IVTV_ERR("Failed to register irq %d\n", retval);
goto free_i2c;
}
retval = ivtv_streams_setup(itv);
if (retval) {
IVTV_ERR("Error %d setting up streams\n", retval);
goto free_irq;
}
retval = ivtv_streams_register(itv);
if (retval) {
IVTV_ERR("Error %d registering devices\n", retval);
goto free_streams;
}
IVTV_INFO("Initialized card: %s\n", itv->card_name);
/* Load ivtv submodules (ivtv-alsa) */
request_modules(itv);
return 0;
free_streams:
ivtv_streams_cleanup(itv);
free_irq:
free_irq(itv->pdev->irq, (void *)itv);
free_i2c:
v4l2_ctrl_handler_free(&itv->cxhdl.hdl);
exit_ivtv_i2c(itv);
free_io:
ivtv_iounmap(itv);
free_mem:
release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE);
release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
if (itv->has_cx23415)
release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
free_worker:
kthread_stop(itv->irq_worker_task);
err:
if (retval == 0)
retval = -ENODEV;
IVTV_ERR("Error %d on initialization\n", retval);
v4l2_device_unregister(&itv->v4l2_dev);
kfree(itv);
return retval;
}
int ivtv_init_on_first_open(struct ivtv *itv)
{
struct v4l2_frequency vf;
/* Needed to call ioctls later */
struct ivtv_open_id fh;
int fw_retry_count = 3;
int video_input;
fh.itv = itv;
fh.type = IVTV_ENC_STREAM_TYPE_MPG;
if (test_bit(IVTV_F_I_FAILED, &itv->i_flags))
return -ENXIO;
if (test_and_set_bit(IVTV_F_I_INITED, &itv->i_flags))
return 0;
while (--fw_retry_count > 0) {
/* load firmware */
if (ivtv_firmware_init(itv) == 0)
break;
if (fw_retry_count > 1)
IVTV_WARN("Retry loading firmware\n");
}
if (fw_retry_count == 0) {
set_bit(IVTV_F_I_FAILED, &itv->i_flags);
return -ENXIO;
}
/* Try and get firmware versions */
IVTV_DEBUG_INFO("Getting firmware version..\n");
ivtv_firmware_versions(itv);
if (itv->card->hw_all & IVTV_HW_CX25840)
v4l2_subdev_call(itv->sd_video, core, load_fw);
vf.tuner = 0;
vf.type = V4L2_TUNER_ANALOG_TV;
vf.frequency = 6400; /* the tuner 'baseline' frequency */
/* Set initial frequency. For PAL/SECAM broadcasts no
'default' channel exists AFAIK. */
if (itv->std == V4L2_STD_NTSC_M_JP) {
vf.frequency = 1460; /* ch. 1 91250*16/1000 */
}
else if (itv->std & V4L2_STD_NTSC_M) {
vf.frequency = 1076; /* ch. 4 67250*16/1000 */
}
video_input = itv->active_input;
itv->active_input++; /* Force update of input */
ivtv_s_input(NULL, &fh, video_input);
/* Let the VIDIOC_S_STD ioctl do all the work, keeps the code
in one place. */
itv->std++; /* Force full standard initialization */
itv->std_out = itv->std;
ivtv_s_frequency(NULL, &fh, &vf);
if (itv->card->v4l2_capabilities & V4L2_CAP_VIDEO_OUTPUT) {
/* Turn on the TV-out: ivtv_init_mpeg_decoder() initializes
the mpeg decoder so now the saa7127 receives a proper
signal. */
ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 1);
ivtv_init_mpeg_decoder(itv);
}
/* On a cx23416 this seems to be able to enable DMA to the chip? */
if (!itv->has_cx23415)
write_reg_sync(0x03, IVTV_REG_DMACONTROL);
ivtv_s_std_enc(itv, itv->tuner_std);
/* Default interrupts enabled. For the PVR350 this includes the
decoder VSYNC interrupt, which is always on. It is not only used
during decoding but also by the OSD.
Some old PVR250 cards had a cx23415, so testing for that is too
general. Instead test if the card has video output capability. */
if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT | IVTV_IRQ_DEC_VSYNC);
ivtv_set_osd_alpha(itv);
ivtv_s_std_dec(itv, itv->tuner_std);
} else {
ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT);
}
/* Setup initial controls */
cx2341x_handler_setup(&itv->cxhdl);
return 0;
}
static void ivtv_remove(struct pci_dev *pdev)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pdev);
struct ivtv *itv = to_ivtv(v4l2_dev);
int i;
IVTV_DEBUG_INFO("Removing card\n");
flush_request_modules(itv);
if (test_bit(IVTV_F_I_INITED, &itv->i_flags)) {
/* Stop all captures */
IVTV_DEBUG_INFO("Stopping all streams\n");
if (atomic_read(&itv->capturing) > 0)
ivtv_stop_all_captures(itv);
/* Stop all decoding */
IVTV_DEBUG_INFO("Stopping decoding\n");
/* Turn off the TV-out */
if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)
ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 0);
if (atomic_read(&itv->decoding) > 0) {
int type;
if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags))
type = IVTV_DEC_STREAM_TYPE_YUV;
else
type = IVTV_DEC_STREAM_TYPE_MPG;
ivtv_stop_v4l2_decode_stream(&itv->streams[type],
V4L2_DEC_CMD_STOP_TO_BLACK | V4L2_DEC_CMD_STOP_IMMEDIATELY, 0);
}
ivtv_halt_firmware(itv);
}
/* Interrupts */
ivtv_set_irq_mask(itv, 0xffffffff);
timer_shutdown_sync(&itv->dma_timer);
/* Kill irq worker */
kthread_flush_worker(&itv->irq_worker);
kthread_stop(itv->irq_worker_task);
ivtv_streams_cleanup(itv);
ivtv_udma_free(itv);
v4l2_ctrl_handler_free(&itv->cxhdl.hdl);
exit_ivtv_i2c(itv);
free_irq(itv->pdev->irq, (void *)itv);
ivtv_iounmap(itv);
release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE);
release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
if (itv->has_cx23415)
release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
pci_disable_device(itv->pdev);
for (i = 0; i < IVTV_VBI_FRAMES; i++)
kfree(itv->vbi.sliced_mpeg_data[i]);
pr_info("Removed %s\n", itv->card_name);
v4l2_device_unregister(&itv->v4l2_dev);
kfree(itv);
}
/* define a pci_driver for card detection */
static struct pci_driver ivtv_pci_driver = {
.name = "ivtv",
.id_table = ivtv_pci_tbl,
.probe = ivtv_probe,
.remove = ivtv_remove,
};
static int __init module_start(void)
{
pr_info("Start initialization, version %s\n", IVTV_VERSION);
/* Validate parameters */
if (ivtv_first_minor < 0 || ivtv_first_minor >= IVTV_MAX_CARDS) {
pr_err("Exiting, ivtv_first_minor must be between 0 and %d\n",
IVTV_MAX_CARDS - 1);
return -1;
}
if (ivtv_debug < 0 || ivtv_debug > 2047) {
ivtv_debug = 0;
pr_info("Debug value must be >= 0 and <= 2047\n");
}
if (pci_register_driver(&ivtv_pci_driver)) {
pr_err("Error detecting PCI card\n");
return -ENODEV;
}
pr_info("End initialization\n");
return 0;
}
static void __exit module_cleanup(void)
{
pci_unregister_driver(&ivtv_pci_driver);
}
/* Note: These symbols are exported because they are used by the ivtvfb
framebuffer module and an infrared module for the IR-blaster. */
EXPORT_SYMBOL(ivtv_set_irq_mask);
EXPORT_SYMBOL(ivtv_api);
EXPORT_SYMBOL(ivtv_vapi);
EXPORT_SYMBOL(ivtv_vapi_result);
EXPORT_SYMBOL(ivtv_clear_irq_mask);
EXPORT_SYMBOL(ivtv_debug);
#ifdef CONFIG_VIDEO_ADV_DEBUG
EXPORT_SYMBOL(ivtv_fw_debug);
#endif
EXPORT_SYMBOL(ivtv_reset_ir_gpio);
EXPORT_SYMBOL(ivtv_udma_setup);
EXPORT_SYMBOL(ivtv_udma_unmap);
EXPORT_SYMBOL(ivtv_udma_alloc);
EXPORT_SYMBOL(ivtv_udma_prepare);
EXPORT_SYMBOL(ivtv_init_on_first_open);
EXPORT_SYMBOL(ivtv_firmware_check);
module_init(module_start);
module_exit(module_cleanup);
| linux-master | drivers/media/pci/ivtv/ivtv-driver.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
ioctl control functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2005-2007 Hans Verkuil <[email protected]>
*/
#include "ivtv-driver.h"
#include "ivtv-ioctl.h"
#include "ivtv-controls.h"
#include "ivtv-mailbox.h"
static int ivtv_s_stream_vbi_fmt(struct cx2341x_handler *cxhdl, u32 fmt)
{
struct ivtv *itv = container_of(cxhdl, struct ivtv, cxhdl);
/* First try to allocate sliced VBI buffers if needed. */
if (fmt && itv->vbi.sliced_mpeg_data[0] == NULL) {
int i;
for (i = 0; i < IVTV_VBI_FRAMES; i++) {
/* Yuck, hardcoded. Needs to be a define */
itv->vbi.sliced_mpeg_data[i] = kmalloc(2049, GFP_KERNEL);
if (itv->vbi.sliced_mpeg_data[i] == NULL) {
while (--i >= 0) {
kfree(itv->vbi.sliced_mpeg_data[i]);
itv->vbi.sliced_mpeg_data[i] = NULL;
}
return -ENOMEM;
}
}
}
itv->vbi.insert_mpeg = fmt;
if (itv->vbi.insert_mpeg == 0) {
return 0;
}
/* Need sliced data for mpeg insertion */
if (ivtv_get_service_set(itv->vbi.sliced_in) == 0) {
if (itv->is_60hz)
itv->vbi.sliced_in->service_set = V4L2_SLICED_CAPTION_525;
else
itv->vbi.sliced_in->service_set = V4L2_SLICED_WSS_625;
ivtv_expand_service_set(itv->vbi.sliced_in, itv->is_50hz);
}
return 0;
}
static int ivtv_s_video_encoding(struct cx2341x_handler *cxhdl, u32 val)
{
struct ivtv *itv = container_of(cxhdl, struct ivtv, cxhdl);
int is_mpeg1 = val == V4L2_MPEG_VIDEO_ENCODING_MPEG_1;
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
/* fix videodecoder resolution */
format.format.width = cxhdl->width / (is_mpeg1 ? 2 : 1);
format.format.height = cxhdl->height;
format.format.code = MEDIA_BUS_FMT_FIXED;
v4l2_subdev_call(itv->sd_video, pad, set_fmt, NULL, &format);
return 0;
}
static int ivtv_s_audio_sampling_freq(struct cx2341x_handler *cxhdl, u32 idx)
{
static const u32 freqs[3] = { 44100, 48000, 32000 };
struct ivtv *itv = container_of(cxhdl, struct ivtv, cxhdl);
/* The audio clock of the digitizer must match the codec sample
rate otherwise you get some very strange effects. */
if (idx < ARRAY_SIZE(freqs))
ivtv_call_all(itv, audio, s_clock_freq, freqs[idx]);
return 0;
}
static int ivtv_s_audio_mode(struct cx2341x_handler *cxhdl, u32 val)
{
struct ivtv *itv = container_of(cxhdl, struct ivtv, cxhdl);
itv->dualwatch_stereo_mode = val;
return 0;
}
const struct cx2341x_handler_ops ivtv_cxhdl_ops = {
.s_audio_mode = ivtv_s_audio_mode,
.s_audio_sampling_freq = ivtv_s_audio_sampling_freq,
.s_video_encoding = ivtv_s_video_encoding,
.s_stream_vbi_fmt = ivtv_s_stream_vbi_fmt,
};
int ivtv_g_pts_frame(struct ivtv *itv, s64 *pts, s64 *frame)
{
u32 data[CX2341X_MBOX_MAX_DATA];
if (test_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags)) {
*pts = (s64)((u64)itv->last_dec_timing[2] << 32) |
(u64)itv->last_dec_timing[1];
*frame = itv->last_dec_timing[0];
return 0;
}
*pts = 0;
*frame = 0;
if (atomic_read(&itv->decoding)) {
if (ivtv_api(itv, CX2341X_DEC_GET_TIMING_INFO, 5, data)) {
IVTV_DEBUG_WARN("GET_TIMING: couldn't read clock\n");
return -EIO;
}
memcpy(itv->last_dec_timing, data, sizeof(itv->last_dec_timing));
set_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
*pts = (s64)((u64) data[2] << 32) | (u64) data[1];
*frame = data[0];
/*timing->scr = (u64) (((u64) data[4] << 32) | (u64) (data[3]));*/
}
return 0;
}
static int ivtv_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
struct ivtv *itv = container_of(ctrl->handler, struct ivtv, cxhdl.hdl);
switch (ctrl->id) {
/* V4L2_CID_MPEG_VIDEO_DEC_PTS and V4L2_CID_MPEG_VIDEO_DEC_FRAME
control cluster */
case V4L2_CID_MPEG_VIDEO_DEC_PTS:
return ivtv_g_pts_frame(itv, itv->ctrl_pts->p_new.p_s64,
itv->ctrl_frame->p_new.p_s64);
}
return 0;
}
static int ivtv_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct ivtv *itv = container_of(ctrl->handler, struct ivtv, cxhdl.hdl);
switch (ctrl->id) {
/* V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK and MULTILINGUAL_PLAYBACK
control cluster */
case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK:
itv->audio_stereo_mode = itv->ctrl_audio_playback->val - 1;
itv->audio_bilingual_mode = itv->ctrl_audio_multilingual_playback->val - 1;
ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode);
break;
}
return 0;
}
const struct v4l2_ctrl_ops ivtv_hdl_out_ops = {
.s_ctrl = ivtv_s_ctrl,
.g_volatile_ctrl = ivtv_g_volatile_ctrl,
};
| linux-master | drivers/media/pci/ivtv/ivtv-controls.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
I2C functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2005-2007 Hans Verkuil <[email protected]>
*/
/*
This file includes an i2c implementation that was reverse engineered
from the Hauppauge windows driver. Older ivtv versions used i2c-algo-bit,
which whilst fine under most circumstances, had trouble with the Zilog
CPU on the PVR-150 which handles IR functions (occasional inability to
communicate with the chip until it was reset) and also with the i2c
bus being completely unreachable when multiple PVR cards were present.
The implementation is very similar to i2c-algo-bit, but there are enough
subtle differences that the two are hard to merge. The general strategy
employed by i2c-algo-bit is to use udelay() to implement the timing
when putting out bits on the scl/sda lines. The general strategy taken
here is to poll the lines for state changes (see ivtv_waitscl and
ivtv_waitsda). In addition there are small delays at various locations
which poll the SCL line 5 times (ivtv_scldelay). I would guess that
since this is memory-mapped I/O, the length of those delays is tied
to the PCI bus clock. There is some extra code to do with recovery
and retries. Since it is not known what causes the actual i2c problems
in the first place, the only goal if one was to attempt to use
i2c-algo-bit would be to try to make it follow the same code path.
This would be a lot of work, and I'm also not convinced that it would
provide a generic benefit to i2c-algo-bit. Therefore consider this
an engineering solution -- not pretty, but it works.
Some more general comments about what we are doing:
The i2c bus is a 2 wire serial bus, with clock (SCL) and data (SDA)
lines. To communicate on the bus (as a master, we don't act as a slave),
we first initiate a start condition (ivtv_start). We then write the
address of the device that we want to communicate with, along with a flag
that indicates whether this is a read or a write. The slave then issues
an ACK signal (ivtv_ack), which tells us that it is ready for reading /
writing. We then proceed with reading or writing (ivtv_read/ivtv_write),
and finally issue a stop condition (ivtv_stop) to make the bus available
to other masters.
There is an additional form of transaction where a write may be
immediately followed by a read. In this case, there is no intervening
stop condition. (Only the msp3400 chip uses this method of data transfer).
*/
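/* In terms of the helpers below, a write transaction is roughly
   ivtv_start() -> ivtv_sendbyte(addr << 1) -> ivtv_sendbyte(data...) ->
   ivtv_stop(), and a read is ivtv_start() -> ivtv_sendbyte((addr << 1) | 1) ->
   ivtv_readbyte(...) -> ivtv_stop(); ivtv_sendbyte() ends by waiting for the
   slave ACK via ivtv_ack(). */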
#include "ivtv-driver.h"
#include "ivtv-cards.h"
#include "ivtv-gpio.h"
#include "ivtv-i2c.h"
#include <media/drv-intf/cx25840.h>
/* i2c implementation for cx23415/6 chip, ivtv project.
* Author: Kevin Thayer (nufan_wfk at yahoo.com)
*/
/* i2c stuff */
#define IVTV_REG_I2C_SETSCL_OFFSET 0x7000
#define IVTV_REG_I2C_SETSDA_OFFSET 0x7004
#define IVTV_REG_I2C_GETSCL_OFFSET 0x7008
#define IVTV_REG_I2C_GETSDA_OFFSET 0x700c
#define IVTV_CS53L32A_I2C_ADDR 0x11
#define IVTV_M52790_I2C_ADDR 0x48
#define IVTV_CX25840_I2C_ADDR 0x44
#define IVTV_SAA7115_I2C_ADDR 0x21
#define IVTV_SAA7127_I2C_ADDR 0x44
#define IVTV_SAA717x_I2C_ADDR 0x21
#define IVTV_MSP3400_I2C_ADDR 0x40
#define IVTV_HAUPPAUGE_I2C_ADDR 0x50
#define IVTV_WM8739_I2C_ADDR 0x1a
#define IVTV_WM8775_I2C_ADDR 0x1b
#define IVTV_TEA5767_I2C_ADDR 0x60
#define IVTV_UPD64031A_I2C_ADDR 0x12
#define IVTV_UPD64083_I2C_ADDR 0x5c
#define IVTV_VP27SMPX_I2C_ADDR 0x5b
#define IVTV_M52790_I2C_ADDR 0x48
#define IVTV_AVERMEDIA_IR_RX_I2C_ADDR 0x40
#define IVTV_HAUP_EXT_IR_RX_I2C_ADDR 0x1a
#define IVTV_HAUP_INT_IR_RX_I2C_ADDR 0x18
#define IVTV_Z8F0811_IR_TX_I2C_ADDR 0x70
#define IVTV_Z8F0811_IR_RX_I2C_ADDR 0x71
#define IVTV_ADAPTEC_IR_ADDR 0x6b
/* This array should match the IVTV_HW_ defines */
static const u8 hw_addrs[IVTV_HW_MAX_BITS] = {
IVTV_CX25840_I2C_ADDR,
IVTV_SAA7115_I2C_ADDR,
IVTV_SAA7127_I2C_ADDR,
IVTV_MSP3400_I2C_ADDR,
0,
IVTV_WM8775_I2C_ADDR,
IVTV_CS53L32A_I2C_ADDR,
0,
IVTV_SAA7115_I2C_ADDR,
IVTV_UPD64031A_I2C_ADDR,
IVTV_UPD64083_I2C_ADDR,
IVTV_SAA717x_I2C_ADDR,
IVTV_WM8739_I2C_ADDR,
IVTV_VP27SMPX_I2C_ADDR,
IVTV_M52790_I2C_ADDR,
0, /* IVTV_HW_GPIO dummy driver ID */
IVTV_AVERMEDIA_IR_RX_I2C_ADDR, /* IVTV_HW_I2C_IR_RX_AVER */
IVTV_HAUP_EXT_IR_RX_I2C_ADDR, /* IVTV_HW_I2C_IR_RX_HAUP_EXT */
IVTV_HAUP_INT_IR_RX_I2C_ADDR, /* IVTV_HW_I2C_IR_RX_HAUP_INT */
IVTV_Z8F0811_IR_RX_I2C_ADDR, /* IVTV_HW_Z8F0811_IR_HAUP */
IVTV_ADAPTEC_IR_ADDR, /* IVTV_HW_I2C_IR_RX_ADAPTEC */
};
/* This array should match the IVTV_HW_ defines */
static const char * const hw_devicenames[IVTV_HW_MAX_BITS] = {
"cx25840",
"saa7115",
"saa7127_auto", /* saa7127 or saa7129 */
"msp3400",
"tuner",
"wm8775",
"cs53l32a",
"tveeprom",
"saa7114",
"upd64031a",
"upd64083",
"saa717x",
"wm8739",
"vp27smpx",
"m52790",
"gpio",
"ir_video", /* IVTV_HW_I2C_IR_RX_AVER */
"ir_video", /* IVTV_HW_I2C_IR_RX_HAUP_EXT */
"ir_video", /* IVTV_HW_I2C_IR_RX_HAUP_INT */
"ir_z8f0811_haup", /* IVTV_HW_Z8F0811_IR_HAUP */
"ir_video", /* IVTV_HW_I2C_IR_RX_ADAPTEC */
};
static int get_key_adaptec(struct IR_i2c *ir, enum rc_proto *protocol,
u32 *scancode, u8 *toggle)
{
unsigned char keybuf[4];
keybuf[0] = 0x00;
i2c_master_send(ir->c, keybuf, 1);
/* poll IR chip */
if (i2c_master_recv(ir->c, keybuf, sizeof(keybuf)) != sizeof(keybuf)) {
return 0;
}
/* key pressed ? */
if (keybuf[2] == 0xff)
return 0;
/* remove repeat bit */
keybuf[2] &= 0x7f;
keybuf[3] |= 0x80;
*protocol = RC_PROTO_UNKNOWN;
*scancode = keybuf[3] | keybuf[2] << 8 | keybuf[1] << 16 | keybuf[0] << 24;
*toggle = 0;
return 1;
}
static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr)
{
struct i2c_board_info info;
struct i2c_adapter *adap = &itv->i2c_adap;
struct IR_i2c_init_data *init_data = &itv->ir_i2c_init_data;
unsigned short addr_list[2] = { addr, I2C_CLIENT_END };
/* Only allow one IR receiver to be registered per board */
if (itv->hw_flags & IVTV_HW_IR_ANY)
return -1;
/* Our default information for ir-kbd-i2c.c to use */
switch (hw) {
case IVTV_HW_I2C_IR_RX_AVER:
init_data->ir_codes = RC_MAP_AVERMEDIA_CARDBUS;
init_data->internal_get_key_func =
IR_KBD_GET_KEY_AVERMEDIA_CARDBUS;
init_data->type = RC_PROTO_BIT_OTHER;
init_data->name = "AVerMedia AVerTV card";
break;
case IVTV_HW_I2C_IR_RX_HAUP_EXT:
case IVTV_HW_I2C_IR_RX_HAUP_INT:
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP;
init_data->type = RC_PROTO_BIT_RC5;
init_data->name = itv->card_name;
break;
case IVTV_HW_Z8F0811_IR_HAUP:
/* Default to grey remote */
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
init_data->type = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE |
RC_PROTO_BIT_RC6_6A_32;
init_data->name = itv->card_name;
break;
case IVTV_HW_I2C_IR_RX_ADAPTEC:
init_data->get_key = get_key_adaptec;
init_data->name = itv->card_name;
/* FIXME: The protocol and RC_MAP needs to be corrected */
init_data->ir_codes = RC_MAP_EMPTY;
init_data->type = RC_PROTO_BIT_UNKNOWN;
break;
}
memset(&info, 0, sizeof(struct i2c_board_info));
info.platform_data = init_data;
strscpy(info.type, type, I2C_NAME_SIZE);
return IS_ERR(i2c_new_scanned_device(adap, &info, addr_list, NULL)) ?
-1 : 0;
}
/* Instantiate the IR receiver device using probing -- undesirable */
void ivtv_i2c_new_ir_legacy(struct ivtv *itv)
{
struct i2c_board_info info;
/*
* The external IR receiver is at i2c address 0x34.
* The internal IR receiver is at i2c address 0x30.
*
* In theory, both can be fitted, and Hauppauge suggests an external
* overrides an internal. That's why we probe 0x1a (~0x34) first. CB
*
* Some of these addresses we probe may collide with other i2c address
* allocations, so this function must be called after all other i2c
* devices we care about are registered.
*/
static const unsigned short addr_list[] = {
0x1a, /* Hauppauge IR external - collides with WM8739 */
0x18, /* Hauppauge IR internal */
I2C_CLIENT_END
};
memset(&info, 0, sizeof(struct i2c_board_info));
strscpy(info.type, "ir_video", I2C_NAME_SIZE);
i2c_new_scanned_device(&itv->i2c_adap, &info, addr_list, NULL);
}
int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
{
struct i2c_adapter *adap = &itv->i2c_adap;
struct v4l2_subdev *sd;
const char *type;
u32 hw;
if (idx >= IVTV_HW_MAX_BITS)
return -ENODEV;
type = hw_devicenames[idx];
hw = 1 << idx;
if (hw == IVTV_HW_TUNER) {
/* special tuner handling */
sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, type, 0,
itv->card_i2c->radio);
if (sd)
sd->grp_id = 1 << idx;
sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, type, 0,
itv->card_i2c->demod);
if (sd)
sd->grp_id = 1 << idx;
sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, type, 0,
itv->card_i2c->tv);
if (sd)
sd->grp_id = 1 << idx;
return sd ? 0 : -1;
}
if (hw & IVTV_HW_IR_ANY)
return ivtv_i2c_new_ir(itv, hw, type, hw_addrs[idx]);
/* Is it not an I2C device or one we do not wish to register? */
if (!hw_addrs[idx])
return -1;
/* It's an I2C device other than an analog tuner or IR chip */
if (hw == IVTV_HW_UPD64031A || hw == IVTV_HW_UPD6408X) {
sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
adap, type, 0, I2C_ADDRS(hw_addrs[idx]));
} else if (hw == IVTV_HW_CX25840) {
struct cx25840_platform_data pdata;
struct i2c_board_info cx25840_info = {
.type = "cx25840",
.addr = hw_addrs[idx],
.platform_data = &pdata,
};
memset(&pdata, 0, sizeof(pdata));
pdata.pvr150_workaround = itv->pvr150_workaround;
sd = v4l2_i2c_new_subdev_board(&itv->v4l2_dev, adap,
&cx25840_info, NULL);
} else {
sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
adap, type, hw_addrs[idx], NULL);
}
if (sd)
sd->grp_id = 1 << idx;
return sd ? 0 : -1;
}
struct v4l2_subdev *ivtv_find_hw(struct ivtv *itv, u32 hw)
{
struct v4l2_subdev *result = NULL;
struct v4l2_subdev *sd;
spin_lock(&itv->v4l2_dev.lock);
v4l2_device_for_each_subdev(sd, &itv->v4l2_dev) {
if (sd->grp_id == hw) {
result = sd;
break;
}
}
spin_unlock(&itv->v4l2_dev.lock);
return result;
}
/* Set the serial clock line to the desired state */
static void ivtv_setscl(struct ivtv *itv, int state)
{
/* write them out */
/* write bits are inverted */
write_reg(~state, IVTV_REG_I2C_SETSCL_OFFSET);
}
/* Set the serial data line to the desired state */
static void ivtv_setsda(struct ivtv *itv, int state)
{
/* write them out */
/* write bits are inverted */
write_reg(~state & 1, IVTV_REG_I2C_SETSDA_OFFSET);
}
/* Read the serial clock line */
static int ivtv_getscl(struct ivtv *itv)
{
return read_reg(IVTV_REG_I2C_GETSCL_OFFSET) & 1;
}
/* Read the serial data line */
static int ivtv_getsda(struct ivtv *itv)
{
return read_reg(IVTV_REG_I2C_GETSDA_OFFSET) & 1;
}
/* Implement a short delay by polling the serial clock line */
static void ivtv_scldelay(struct ivtv *itv)
{
int i;
for (i = 0; i < 5; ++i)
ivtv_getscl(itv);
}
/* Wait for the serial clock line to become set to a specific value */
static int ivtv_waitscl(struct ivtv *itv, int val)
{
int i;
ivtv_scldelay(itv);
for (i = 0; i < 1000; ++i) {
if (ivtv_getscl(itv) == val)
return 1;
}
return 0;
}
/* Wait for the serial data line to become set to a specific value */
static int ivtv_waitsda(struct ivtv *itv, int val)
{
int i;
ivtv_scldelay(itv);
for (i = 0; i < 1000; ++i) {
if (ivtv_getsda(itv) == val)
return 1;
}
return 0;
}
/* Wait for the slave to issue an ACK */
static int ivtv_ack(struct ivtv *itv)
{
int ret = 0;
if (ivtv_getscl(itv) == 1) {
IVTV_DEBUG_HI_I2C("SCL was high starting an ack\n");
ivtv_setscl(itv, 0);
if (!ivtv_waitscl(itv, 0)) {
IVTV_DEBUG_I2C("Could not set SCL low starting an ack\n");
return -EREMOTEIO;
}
}
ivtv_setsda(itv, 1);
ivtv_scldelay(itv);
ivtv_setscl(itv, 1);
if (!ivtv_waitsda(itv, 0)) {
IVTV_DEBUG_I2C("Slave did not ack\n");
ret = -EREMOTEIO;
}
ivtv_setscl(itv, 0);
if (!ivtv_waitscl(itv, 0)) {
IVTV_DEBUG_I2C("Failed to set SCL low after ACK\n");
ret = -EREMOTEIO;
}
return ret;
}
/* Write a single byte to the i2c bus and wait for the slave to ACK */
static int ivtv_sendbyte(struct ivtv *itv, unsigned char byte)
{
int i, bit;
IVTV_DEBUG_HI_I2C("write %x\n",byte);
for (i = 0; i < 8; ++i, byte<<=1) {
ivtv_setscl(itv, 0);
if (!ivtv_waitscl(itv, 0)) {
IVTV_DEBUG_I2C("Error setting SCL low\n");
return -EREMOTEIO;
}
bit = (byte>>7)&1;
ivtv_setsda(itv, bit);
if (!ivtv_waitsda(itv, bit)) {
IVTV_DEBUG_I2C("Error setting SDA\n");
return -EREMOTEIO;
}
ivtv_setscl(itv, 1);
if (!ivtv_waitscl(itv, 1)) {
IVTV_DEBUG_I2C("Slave not ready for bit\n");
return -EREMOTEIO;
}
}
ivtv_setscl(itv, 0);
if (!ivtv_waitscl(itv, 0)) {
IVTV_DEBUG_I2C("Error setting SCL low\n");
return -EREMOTEIO;
}
return ivtv_ack(itv);
}
/* Read a byte from the i2c bus and send a NACK if applicable (i.e. for the
final byte) */
static int ivtv_readbyte(struct ivtv *itv, unsigned char *byte, int nack)
{
int i;
*byte = 0;
ivtv_setsda(itv, 1);
ivtv_scldelay(itv);
for (i = 0; i < 8; ++i) {
ivtv_setscl(itv, 0);
ivtv_scldelay(itv);
ivtv_setscl(itv, 1);
if (!ivtv_waitscl(itv, 1)) {
IVTV_DEBUG_I2C("Error setting SCL high\n");
return -EREMOTEIO;
}
*byte = ((*byte)<<1)|ivtv_getsda(itv);
}
ivtv_setscl(itv, 0);
ivtv_scldelay(itv);
ivtv_setsda(itv, nack);
ivtv_scldelay(itv);
ivtv_setscl(itv, 1);
ivtv_scldelay(itv);
ivtv_setscl(itv, 0);
ivtv_scldelay(itv);
IVTV_DEBUG_HI_I2C("read %x\n",*byte);
return 0;
}
/* Issue a start condition on the i2c bus to alert slaves to prepare for
an address write */
static int ivtv_start(struct ivtv *itv)
{
int sda;
sda = ivtv_getsda(itv);
if (sda != 1) {
IVTV_DEBUG_HI_I2C("SDA was low at start\n");
ivtv_setsda(itv, 1);
if (!ivtv_waitsda(itv, 1)) {
IVTV_DEBUG_I2C("SDA stuck low\n");
return -EREMOTEIO;
}
}
if (ivtv_getscl(itv) != 1) {
ivtv_setscl(itv, 1);
if (!ivtv_waitscl(itv, 1)) {
IVTV_DEBUG_I2C("SCL stuck low at start\n");
return -EREMOTEIO;
}
}
ivtv_setsda(itv, 0);
ivtv_scldelay(itv);
return 0;
}
/* Issue a stop condition on the i2c bus to release it */
static int ivtv_stop(struct ivtv *itv)
{
int i;
if (ivtv_getscl(itv) != 0) {
IVTV_DEBUG_HI_I2C("SCL not low when stopping\n");
ivtv_setscl(itv, 0);
if (!ivtv_waitscl(itv, 0)) {
IVTV_DEBUG_I2C("SCL could not be set low\n");
}
}
ivtv_setsda(itv, 0);
ivtv_scldelay(itv);
ivtv_setscl(itv, 1);
if (!ivtv_waitscl(itv, 1)) {
IVTV_DEBUG_I2C("SCL could not be set high\n");
return -EREMOTEIO;
}
ivtv_scldelay(itv);
ivtv_setsda(itv, 1);
if (!ivtv_waitsda(itv, 1)) {
IVTV_DEBUG_I2C("resetting I2C\n");
for (i = 0; i < 16; ++i) {
ivtv_setscl(itv, 0);
ivtv_scldelay(itv);
ivtv_setscl(itv, 1);
ivtv_scldelay(itv);
ivtv_setsda(itv, 1);
}
ivtv_waitsda(itv, 1);
return -EREMOTEIO;
}
return 0;
}
/* Write a message to the given i2c slave. do_stop may be 0 to prevent
issuing the i2c stop condition (when following with a read) */
static int ivtv_write(struct ivtv *itv, unsigned char addr, unsigned char *data, u32 len, int do_stop)
{
int retry, ret = -EREMOTEIO;
u32 i;
for (retry = 0; ret != 0 && retry < 8; ++retry) {
ret = ivtv_start(itv);
if (ret == 0) {
ret = ivtv_sendbyte(itv, addr<<1);
for (i = 0; ret == 0 && i < len; ++i)
ret = ivtv_sendbyte(itv, data[i]);
}
if (ret != 0 || do_stop) {
ivtv_stop(itv);
}
}
if (ret)
IVTV_DEBUG_I2C("i2c write to %x failed\n", addr);
return ret;
}
/* Read data from the given i2c slave. A stop condition is always issued. */
static int ivtv_read(struct ivtv *itv, unsigned char addr, unsigned char *data, u32 len)
{
int retry, ret = -EREMOTEIO;
u32 i;
for (retry = 0; ret != 0 && retry < 8; ++retry) {
ret = ivtv_start(itv);
if (ret == 0)
ret = ivtv_sendbyte(itv, (addr << 1) | 1);
for (i = 0; ret == 0 && i < len; ++i) {
ret = ivtv_readbyte(itv, &data[i], i == len - 1);
}
ivtv_stop(itv);
}
if (ret)
IVTV_DEBUG_I2C("i2c read from %x failed\n", addr);
return ret;
}
/* Kernel i2c transfer implementation. Takes a number of messages to be read
or written. If a read follows a write, this will occur without an
intervening stop condition */
static int ivtv_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num)
{
struct v4l2_device *v4l2_dev = i2c_get_adapdata(i2c_adap);
struct ivtv *itv = to_ivtv(v4l2_dev);
int retval;
int i;
mutex_lock(&itv->i2c_bus_lock);
for (i = retval = 0; retval == 0 && i < num; i++) {
if (msgs[i].flags & I2C_M_RD)
retval = ivtv_read(itv, msgs[i].addr, msgs[i].buf, msgs[i].len);
else {
/* if followed by a read, don't stop */
int stop = !(i + 1 < num && msgs[i + 1].flags == I2C_M_RD);
retval = ivtv_write(itv, msgs[i].addr, msgs[i].buf, msgs[i].len, stop);
}
}
mutex_unlock(&itv->i2c_bus_lock);
return retval ? retval : num;
}
/* Kernel i2c capabilities */
static u32 ivtv_functionality(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm ivtv_algo = {
.master_xfer = ivtv_xfer,
.functionality = ivtv_functionality,
};
/* template for our bit-banger */
static const struct i2c_adapter ivtv_i2c_adap_hw_template = {
.name = "ivtv i2c driver",
.algo = &ivtv_algo,
.algo_data = NULL, /* filled from template */
.owner = THIS_MODULE,
};
static void ivtv_setscl_old(void *data, int state)
{
struct ivtv *itv = (struct ivtv *)data;
if (state)
itv->i2c_state |= 0x01;
else
itv->i2c_state &= ~0x01;
/* write them out */
/* write bits are inverted */
write_reg(~itv->i2c_state, IVTV_REG_I2C_SETSCL_OFFSET);
}
static void ivtv_setsda_old(void *data, int state)
{
struct ivtv *itv = (struct ivtv *)data;
if (state)
itv->i2c_state |= 0x01;
else
itv->i2c_state &= ~0x01;
/* write them out */
/* write bits are inverted */
write_reg(~itv->i2c_state, IVTV_REG_I2C_SETSDA_OFFSET);
}
static int ivtv_getscl_old(void *data)
{
struct ivtv *itv = (struct ivtv *)data;
return read_reg(IVTV_REG_I2C_GETSCL_OFFSET) & 1;
}
static int ivtv_getsda_old(void *data)
{
struct ivtv *itv = (struct ivtv *)data;
return read_reg(IVTV_REG_I2C_GETSDA_OFFSET) & 1;
}
/* template for i2c-bit-algo */
static const struct i2c_adapter ivtv_i2c_adap_template = {
.name = "ivtv i2c driver",
.algo = NULL, /* set by i2c-algo-bit */
.algo_data = NULL, /* filled from template */
.owner = THIS_MODULE,
};
#define IVTV_ALGO_BIT_TIMEOUT (2) /* seconds */
static const struct i2c_algo_bit_data ivtv_i2c_algo_template = {
.setsda = ivtv_setsda_old,
.setscl = ivtv_setscl_old,
.getsda = ivtv_getsda_old,
.getscl = ivtv_getscl_old,
.udelay = IVTV_DEFAULT_I2C_CLOCK_PERIOD / 2, /* microseconds */
.timeout = IVTV_ALGO_BIT_TIMEOUT * HZ, /* jiffies */
};
static const struct i2c_client ivtv_i2c_client_template = {
.name = "ivtv internal",
};
/* init + register i2c adapter */
int init_ivtv_i2c(struct ivtv *itv)
{
int retval;
IVTV_DEBUG_I2C("i2c init\n");
/* Sanity checks for the I2C hardware arrays. They must be the
* same size.
*/
if (ARRAY_SIZE(hw_devicenames) != ARRAY_SIZE(hw_addrs)) {
IVTV_ERR("Mismatched I2C hardware arrays\n");
return -ENODEV;
}
if (itv->options.newi2c > 0) {
itv->i2c_adap = ivtv_i2c_adap_hw_template;
} else {
itv->i2c_adap = ivtv_i2c_adap_template;
itv->i2c_algo = ivtv_i2c_algo_template;
}
itv->i2c_algo.udelay = itv->options.i2c_clock_period / 2;
itv->i2c_algo.data = itv;
itv->i2c_adap.algo_data = &itv->i2c_algo;
sprintf(itv->i2c_adap.name + strlen(itv->i2c_adap.name), " #%d",
itv->instance);
i2c_set_adapdata(&itv->i2c_adap, &itv->v4l2_dev);
itv->i2c_client = ivtv_i2c_client_template;
itv->i2c_client.adapter = &itv->i2c_adap;
itv->i2c_adap.dev.parent = &itv->pdev->dev;
IVTV_DEBUG_I2C("setting scl and sda to 1\n");
ivtv_setscl(itv, 1);
ivtv_setsda(itv, 1);
if (itv->options.newi2c > 0)
retval = i2c_add_adapter(&itv->i2c_adap);
else
retval = i2c_bit_add_bus(&itv->i2c_adap);
return retval;
}
void exit_ivtv_i2c(struct ivtv *itv)
{
IVTV_DEBUG_I2C("i2c exit\n");
i2c_del_adapter(&itv->i2c_adap);
}
| linux-master | drivers/media/pci/ivtv/ivtv-i2c.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
buffer queues.
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <[email protected]>
Copyright (C) 2005-2007 Hans Verkuil <[email protected]>
*/
#include "ivtv-driver.h"
#include "ivtv-queue.h"
int ivtv_buf_copy_from_user(struct ivtv_stream *s, struct ivtv_buffer *buf, const char __user *src, int copybytes)
{
if (s->buf_size - buf->bytesused < copybytes)
copybytes = s->buf_size - buf->bytesused;
if (copy_from_user(buf->buf + buf->bytesused, src, copybytes)) {
return -EFAULT;
}
buf->bytesused += copybytes;
return copybytes;
}
void ivtv_buf_swap(struct ivtv_buffer *buf)
{
int i;
for (i = 0; i < buf->bytesused; i += 4)
swab32s((u32 *)(buf->buf + i));
}
void ivtv_queue_init(struct ivtv_queue *q)
{
INIT_LIST_HEAD(&q->list);
q->buffers = 0;
q->length = 0;
q->bytesused = 0;
}
void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q)
{
unsigned long flags;
/* clear the buffer if it is going to be enqueued to the free queue */
if (q == &s->q_free) {
buf->bytesused = 0;
buf->readpos = 0;
buf->b_flags = 0;
buf->dma_xfer_cnt = 0;
}
spin_lock_irqsave(&s->qlock, flags);
list_add_tail(&buf->list, &q->list);
q->buffers++;
q->length += s->buf_size;
q->bytesused += buf->bytesused - buf->readpos;
spin_unlock_irqrestore(&s->qlock, flags);
}
struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q)
{
struct ivtv_buffer *buf = NULL;
unsigned long flags;
spin_lock_irqsave(&s->qlock, flags);
if (!list_empty(&q->list)) {
buf = list_entry(q->list.next, struct ivtv_buffer, list);
list_del_init(q->list.next);
q->buffers--;
q->length -= s->buf_size;
q->bytesused -= buf->bytesused - buf->readpos;
}
spin_unlock_irqrestore(&s->qlock, flags);
return buf;
}
static void ivtv_queue_move_buf(struct ivtv_stream *s, struct ivtv_queue *from,
struct ivtv_queue *to, int clear)
{
struct ivtv_buffer *buf = list_entry(from->list.next, struct ivtv_buffer, list);
list_move_tail(from->list.next, &to->list);
from->buffers--;
from->length -= s->buf_size;
from->bytesused -= buf->bytesused - buf->readpos;
/* special handling for q_free */
if (clear)
buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
to->buffers++;
to->length += s->buf_size;
to->bytesused += buf->bytesused - buf->readpos;
}
/* Move 'needed_bytes' worth of buffers from queue 'from' into queue 'to'.
If 'needed_bytes' == 0, then move all buffers from 'from' into 'to'.
If 'steal' != NULL, then buffers may also be taken from that queue if
needed, but only if 'from' is the free queue.
The buffer is automatically cleared if it goes to the free queue. It is
also cleared if buffers need to be taken from the 'steal' queue and
the 'from' queue is the free queue.
When 'from' is q_free, then needed_bytes is compared to the total
available buffer length, otherwise needed_bytes is compared to the
bytesused value. For the 'steal' queue the total available buffer
length is always used.
-ENOMEM is returned if the buffers could not be obtained, 0 if all
buffers were obtained from the 'from' list and if non-zero then
the number of stolen buffers is returned. */
int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from, struct ivtv_queue *steal,
struct ivtv_queue *to, int needed_bytes)
{
unsigned long flags;
int rc = 0;
int from_free = from == &s->q_free;
int to_free = to == &s->q_free;
int bytes_available, bytes_steal;
spin_lock_irqsave(&s->qlock, flags);
if (needed_bytes == 0) {
from_free = 1;
needed_bytes = from->length;
}
bytes_available = from_free ? from->length : from->bytesused;
bytes_steal = (from_free && steal) ? steal->length : 0;
if (bytes_available + bytes_steal < needed_bytes) {
spin_unlock_irqrestore(&s->qlock, flags);
return -ENOMEM;
}
while (steal && bytes_available < needed_bytes) {
struct ivtv_buffer *buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
u16 dma_xfer_cnt = buf->dma_xfer_cnt;
/* move buffers from the tail of the 'steal' queue to the tail of the
'from' queue. Always copy all the buffers with the same dma_xfer_cnt
value, this ensures that you do not end up with partial frame data
if one frame is stored in multiple buffers. */
while (dma_xfer_cnt == buf->dma_xfer_cnt) {
list_move_tail(steal->list.prev, &from->list);
rc++;
steal->buffers--;
steal->length -= s->buf_size;
steal->bytesused -= buf->bytesused - buf->readpos;
buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
from->buffers++;
from->length += s->buf_size;
bytes_available += s->buf_size;
if (list_empty(&steal->list))
break;
buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
}
}
if (from_free) {
u32 old_length = to->length;
while (to->length - old_length < needed_bytes) {
ivtv_queue_move_buf(s, from, to, 1);
}
}
else {
u32 old_bytesused = to->bytesused;
while (to->bytesused - old_bytesused < needed_bytes) {
ivtv_queue_move_buf(s, from, to, to_free);
}
}
spin_unlock_irqrestore(&s->qlock, flags);
return rc;
}
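/*
 * Standalone arithmetic illustration (hypothetical helper, not driver
 * code): when 'from' is q_free, ivtv_queue_move() compares needed_bytes
 * against total buffer length, so the number of buffers that will move
 * is simply a round-up division by the stream buffer size.
 */
#include <assert.h>

static int demo_buffers_needed(int needed_bytes, int buf_size)
{
	return (needed_bytes + buf_size - 1) / buf_size;	/* round up */
}

static void demo_buffers_needed_selftest(void)
{
	/* e.g. a 64 KiB request with 32 KiB buffers moves two buffers */
	assert(demo_buffers_needed(65536, 32768) == 2);
	assert(demo_buffers_needed(1, 32768) == 1);
	assert(demo_buffers_needed(0, 32768) == 0);
}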
void ivtv_flush_queues(struct ivtv_stream *s)
{
ivtv_queue_move(s, &s->q_io, NULL, &s->q_free, 0);
ivtv_queue_move(s, &s->q_full, NULL, &s->q_free, 0);
ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
}
int ivtv_stream_alloc(struct ivtv_stream *s)
{
struct ivtv *itv = s->itv;
int SGsize = sizeof(struct ivtv_sg_host_element) * s->buffers;
int i;
if (s->buffers == 0)
return 0;
IVTV_DEBUG_INFO("Allocate %s%s stream: %d x %d buffers (%dkB total)\n",
s->dma != DMA_NONE ? "DMA " : "",
s->name, s->buffers, s->buf_size, s->buffers * s->buf_size / 1024);
s->sg_pending = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
if (s->sg_pending == NULL) {
IVTV_ERR("Could not allocate sg_pending for %s stream\n", s->name);
return -ENOMEM;
}
s->sg_pending_size = 0;
s->sg_processing = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
if (s->sg_processing == NULL) {
IVTV_ERR("Could not allocate sg_processing for %s stream\n", s->name);
kfree(s->sg_pending);
s->sg_pending = NULL;
return -ENOMEM;
}
s->sg_processing_size = 0;
s->sg_dma = kzalloc(sizeof(struct ivtv_sg_element),
GFP_KERNEL|__GFP_NOWARN);
if (s->sg_dma == NULL) {
IVTV_ERR("Could not allocate sg_dma for %s stream\n", s->name);
kfree(s->sg_pending);
s->sg_pending = NULL;
kfree(s->sg_processing);
s->sg_processing = NULL;
return -ENOMEM;
}
if (ivtv_might_use_dma(s)) {
s->sg_handle = dma_map_single(&itv->pdev->dev, s->sg_dma,
sizeof(struct ivtv_sg_element),
DMA_TO_DEVICE);
ivtv_stream_sync_for_cpu(s);
}
/* allocate stream buffers. Initially all buffers are in q_free. */
for (i = 0; i < s->buffers; i++) {
struct ivtv_buffer *buf = kzalloc(sizeof(struct ivtv_buffer),
GFP_KERNEL|__GFP_NOWARN);
if (buf == NULL)
break;
buf->buf = kmalloc(s->buf_size + 256, GFP_KERNEL|__GFP_NOWARN);
if (buf->buf == NULL) {
kfree(buf);
break;
}
INIT_LIST_HEAD(&buf->list);
if (ivtv_might_use_dma(s)) {
buf->dma_handle = dma_map_single(&s->itv->pdev->dev,
buf->buf, s->buf_size + 256, s->dma);
ivtv_buf_sync_for_cpu(s, buf);
}
ivtv_enqueue(s, buf, &s->q_free);
}
if (i == s->buffers)
return 0;
IVTV_ERR("Couldn't allocate buffers for %s stream\n", s->name);
ivtv_stream_free(s);
return -ENOMEM;
}
void ivtv_stream_free(struct ivtv_stream *s)
{
struct ivtv_buffer *buf;
/* move all buffers to q_free */
ivtv_flush_queues(s);
/* empty q_free */
while ((buf = ivtv_dequeue(s, &s->q_free))) {
if (ivtv_might_use_dma(s))
dma_unmap_single(&s->itv->pdev->dev, buf->dma_handle,
s->buf_size + 256, s->dma);
kfree(buf->buf);
kfree(buf);
}
/* Free SG Array/Lists */
if (s->sg_dma != NULL) {
if (s->sg_handle != IVTV_DMA_UNMAPPED) {
dma_unmap_single(&s->itv->pdev->dev, s->sg_handle,
sizeof(struct ivtv_sg_element),
DMA_TO_DEVICE);
s->sg_handle = IVTV_DMA_UNMAPPED;
}
kfree(s->sg_pending);
kfree(s->sg_processing);
kfree(s->sg_dma);
s->sg_pending = NULL;
s->sg_processing = NULL;
s->sg_dma = NULL;
s->sg_pending_size = 0;
s->sg_processing_size = 0;
}
}
| linux-master | drivers/media/pci/ivtv/ivtv-queue.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ngene-i2c.c: nGene PCIe bridge driver i2c functions
*
* Copyright (C) 2005-2007 Micronas
*
* Copyright (C) 2008-2009 Ralph Metzler <[email protected]>
* Modifications for new nGene firmware,
* support for EEPROM-copying,
* support for new dual DVB-S2 card prototype
*/
/* FIXME - some of these can probably be removed */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/io.h>
#include <asm/div64.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/timer.h>
#include <linux/byteorder/generic.h>
#include <linux/firmware.h>
#include <linux/vmalloc.h>
#include "ngene.h"
/* Firmware command for i2c operations */
static int ngene_command_i2c_read(struct ngene *dev, u8 adr,
u8 *out, u8 outlen, u8 *in, u8 inlen, int flag)
{
struct ngene_command com;
com.cmd.hdr.Opcode = CMD_I2C_READ;
com.cmd.hdr.Length = outlen + 3;
com.cmd.I2CRead.Device = adr << 1;
memcpy(com.cmd.I2CRead.Data, out, outlen);
com.cmd.I2CRead.Data[outlen] = inlen;
com.cmd.I2CRead.Data[outlen + 1] = 0;
com.in_len = outlen + 3;
com.out_len = inlen + 1;
if (ngene_command(dev, &com) < 0)
return -EIO;
if ((com.cmd.raw8[0] >> 1) != adr)
return -EIO;
if (flag)
memcpy(in, com.cmd.raw8, inlen + 1);
else
memcpy(in, com.cmd.raw8 + 1, inlen);
return 0;
}
static int ngene_command_i2c_write(struct ngene *dev, u8 adr,
u8 *out, u8 outlen)
{
struct ngene_command com;
com.cmd.hdr.Opcode = CMD_I2C_WRITE;
com.cmd.hdr.Length = outlen + 1;
com.cmd.I2CRead.Device = adr << 1;
memcpy(com.cmd.I2CRead.Data, out, outlen);
com.in_len = outlen + 1;
com.out_len = 1;
if (ngene_command(dev, &com) < 0)
return -EIO;
if (com.cmd.raw8[0] == 1)
return -EIO;
return 0;
}
static void ngene_i2c_set_bus(struct ngene *dev, int bus)
{
if (!(dev->card_info->i2c_access & 2))
return;
if (dev->i2c_current_bus == bus)
return;
switch (bus) {
case 0:
ngene_command_gpio_set(dev, 3, 0);
ngene_command_gpio_set(dev, 2, 1);
break;
case 1:
ngene_command_gpio_set(dev, 2, 0);
ngene_command_gpio_set(dev, 3, 1);
break;
}
dev->i2c_current_bus = bus;
}
static int ngene_i2c_master_xfer(struct i2c_adapter *adapter,
struct i2c_msg msg[], int num)
{
struct ngene_channel *chan =
(struct ngene_channel *)i2c_get_adapdata(adapter);
struct ngene *dev = chan->dev;
mutex_lock(&dev->i2c_switch_mutex);
ngene_i2c_set_bus(dev, chan->number);
if (num == 2 && msg[1].flags & I2C_M_RD && !(msg[0].flags & I2C_M_RD))
if (!ngene_command_i2c_read(dev, msg[0].addr,
msg[0].buf, msg[0].len,
msg[1].buf, msg[1].len, 0))
goto done;
if (num == 1 && !(msg[0].flags & I2C_M_RD))
if (!ngene_command_i2c_write(dev, msg[0].addr,
msg[0].buf, msg[0].len))
goto done;
if (num == 1 && (msg[0].flags & I2C_M_RD))
if (!ngene_command_i2c_read(dev, msg[0].addr, NULL, 0,
msg[0].buf, msg[0].len, 0))
goto done;
mutex_unlock(&dev->i2c_switch_mutex);
return -EIO;
done:
mutex_unlock(&dev->i2c_switch_mutex);
return num;
}
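/*
 * Illustrative classification helper (hypothetical, not driver code):
 * ngene_i2c_master_xfer() above only accepts three transaction shapes,
 * because the firmware command interface has no general repeated-start
 * support. A standalone sketch of the same pattern matching:
 */
#include <stdint.h>

enum demo_xfer_shape {
	DEMO_XFER_UNSUPPORTED,
	DEMO_XFER_WRITE_THEN_READ,	/* e.g. register sub-address + data */
	DEMO_XFER_WRITE,
	DEMO_XFER_READ,
};

static enum demo_xfer_shape demo_classify(int num, const uint16_t *flags)
{
	const uint16_t rd = 0x0001;	/* same bit meaning as I2C_M_RD */

	if (num == 2 && !(flags[0] & rd) && (flags[1] & rd))
		return DEMO_XFER_WRITE_THEN_READ;
	if (num == 1 && !(flags[0] & rd))
		return DEMO_XFER_WRITE;
	if (num == 1 && (flags[0] & rd))
		return DEMO_XFER_READ;
	return DEMO_XFER_UNSUPPORTED;	/* master_xfer would return -EIO */
}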
static u32 ngene_i2c_functionality(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm ngene_i2c_algo = {
.master_xfer = ngene_i2c_master_xfer,
.functionality = ngene_i2c_functionality,
};
int ngene_i2c_init(struct ngene *dev, int dev_nr)
{
struct i2c_adapter *adap = &(dev->channel[dev_nr].i2c_adapter);
i2c_set_adapdata(adap, &(dev->channel[dev_nr]));
strscpy(adap->name, "nGene", sizeof(adap->name));
adap->algo = &ngene_i2c_algo;
adap->algo_data = (void *)&(dev->channel[dev_nr]);
adap->dev.parent = &dev->pci_dev->dev;
return i2c_add_adapter(adap);
}
| linux-master | drivers/media/pci/ngene/ngene-i2c.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ngene-cards.c: nGene PCIe bridge driver - card specific info
*
* Copyright (C) 2005-2007 Micronas
*
* Copyright (C) 2008-2009 Ralph Metzler <[email protected]>
* Modifications for new nGene firmware,
* support for EEPROM-copying,
* support for new dual DVB-S2 card prototype
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include "ngene.h"
/* demods/tuners */
#include "stv6110x.h"
#include "stv090x.h"
#include "lnbh24.h"
#include "lgdt330x.h"
#include "mt2131.h"
#include "tda18271c2dd.h"
#include "drxk.h"
#include "drxd.h"
#include "dvb-pll.h"
#include "stv0367.h"
#include "stv0367_priv.h"
#include "tda18212.h"
#include "cxd2841er.h"
#include "stv0910.h"
#include "stv6111.h"
#include "lnbh25.h"
/****************************************************************************/
/* I2C transfer functions used for demod/tuner probing***********************/
/****************************************************************************/
static int i2c_io(struct i2c_adapter *adapter, u8 adr,
u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen)
{
struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
.buf = wbuf, .len = wlen },
{.addr = adr, .flags = I2C_M_RD,
.buf = rbuf, .len = rlen } };
return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
}
static int i2c_write(struct i2c_adapter *adap, u8 adr, u8 *data, int len)
{
struct i2c_msg msg = {.addr = adr, .flags = 0,
.buf = data, .len = len};
return (i2c_transfer(adap, &msg, 1) == 1) ? 0 : -1;
}
static int i2c_write_reg(struct i2c_adapter *adap, u8 adr,
u8 reg, u8 val)
{
u8 msg[2] = {reg, val};
return i2c_write(adap, adr, msg, 2);
}
static int i2c_read(struct i2c_adapter *adapter, u8 adr, u8 *val)
{
struct i2c_msg msgs[1] = {{.addr = adr, .flags = I2C_M_RD,
.buf = val, .len = 1 } };
return (i2c_transfer(adapter, msgs, 1) == 1) ? 0 : -1;
}
static int i2c_read_reg16(struct i2c_adapter *adapter, u8 adr,
u16 reg, u8 *val)
{
u8 msg[2] = {reg >> 8, reg & 0xff};
struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
.buf = msg, .len = 2},
{.addr = adr, .flags = I2C_M_RD,
.buf = val, .len = 1} };
return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
}
static int i2c_read_regs(struct i2c_adapter *adapter,
u8 adr, u8 reg, u8 *val, u8 len)
{
struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
.buf = &reg, .len = 1},
{.addr = adr, .flags = I2C_M_RD,
.buf = val, .len = len} };
return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
}
static int i2c_read_reg(struct i2c_adapter *adapter, u8 adr, u8 reg, u8 *val)
{
return i2c_read_regs(adapter, adr, reg, val, 1);
}
/****************************************************************************/
/* Demod/tuner attachment ***************************************************/
/****************************************************************************/
static struct i2c_adapter *i2c_adapter_from_chan(struct ngene_channel *chan)
{
/* tuner 1+2: i2c adapter #0, tuner 3+4: i2c adapter #1 */
if (chan->number < 2)
return &chan->dev->channel[0].i2c_adapter;
return &chan->dev->channel[1].i2c_adapter;
}
static int tuner_attach_stv6110(struct ngene_channel *chan)
{
struct device *pdev = &chan->dev->pci_dev->dev;
struct i2c_adapter *i2c = i2c_adapter_from_chan(chan);
struct stv090x_config *feconf = (struct stv090x_config *)
chan->dev->card_info->fe_config[chan->number];
struct stv6110x_config *tunerconf = (struct stv6110x_config *)
chan->dev->card_info->tuner_config[chan->number];
const struct stv6110x_devctl *ctl;
ctl = dvb_attach(stv6110x_attach, chan->fe, tunerconf, i2c);
if (ctl == NULL) {
dev_err(pdev, "No STV6110X found!\n");
return -ENODEV;
}
feconf->tuner_init = ctl->tuner_init;
feconf->tuner_sleep = ctl->tuner_sleep;
feconf->tuner_set_mode = ctl->tuner_set_mode;
feconf->tuner_set_frequency = ctl->tuner_set_frequency;
feconf->tuner_get_frequency = ctl->tuner_get_frequency;
feconf->tuner_set_bandwidth = ctl->tuner_set_bandwidth;
feconf->tuner_get_bandwidth = ctl->tuner_get_bandwidth;
feconf->tuner_set_bbgain = ctl->tuner_set_bbgain;
feconf->tuner_get_bbgain = ctl->tuner_get_bbgain;
feconf->tuner_set_refclk = ctl->tuner_set_refclk;
feconf->tuner_get_status = ctl->tuner_get_status;
return 0;
}
static int tuner_attach_stv6111(struct ngene_channel *chan)
{
struct device *pdev = &chan->dev->pci_dev->dev;
struct i2c_adapter *i2c = i2c_adapter_from_chan(chan);
struct dvb_frontend *fe;
u8 adr = 4 + ((chan->number & 1) ? 0x63 : 0x60);
fe = dvb_attach(stv6111_attach, chan->fe, i2c, adr);
if (!fe) {
fe = dvb_attach(stv6111_attach, chan->fe, i2c, adr & ~4);
if (!fe) {
dev_err(pdev, "stv6111_attach() failed!\n");
return -ENODEV;
}
}
return 0;
}
static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct ngene_channel *chan = fe->sec_priv;
int status;
if (enable) {
down(&chan->dev->pll_mutex);
status = chan->gate_ctrl(fe, 1);
} else {
status = chan->gate_ctrl(fe, 0);
up(&chan->dev->pll_mutex);
}
return status;
}
static int tuner_attach_tda18271(struct ngene_channel *chan)
{
struct device *pdev = &chan->dev->pci_dev->dev;
struct i2c_adapter *i2c = i2c_adapter_from_chan(chan);
struct dvb_frontend *fe;
if (chan->fe->ops.i2c_gate_ctrl)
chan->fe->ops.i2c_gate_ctrl(chan->fe, 1);
fe = dvb_attach(tda18271c2dd_attach, chan->fe, i2c, 0x60);
if (chan->fe->ops.i2c_gate_ctrl)
chan->fe->ops.i2c_gate_ctrl(chan->fe, 0);
if (!fe) {
dev_err(pdev, "No TDA18271 found!\n");
return -ENODEV;
}
return 0;
}
static int tuner_tda18212_ping(struct ngene_channel *chan,
struct i2c_adapter *i2c,
unsigned short adr)
{
struct device *pdev = &chan->dev->pci_dev->dev;
u8 tda_id[2];
u8 subaddr = 0x00;
dev_dbg(pdev, "stv0367-tda18212 tuner ping\n");
if (chan->fe->ops.i2c_gate_ctrl)
chan->fe->ops.i2c_gate_ctrl(chan->fe, 1);
if (i2c_read_regs(i2c, adr, subaddr, tda_id, sizeof(tda_id)) < 0)
dev_dbg(pdev, "tda18212 ping 1 fail\n");
if (i2c_read_regs(i2c, adr, subaddr, tda_id, sizeof(tda_id)) < 0)
dev_warn(pdev, "tda18212 ping failed, expect problems\n");
if (chan->fe->ops.i2c_gate_ctrl)
chan->fe->ops.i2c_gate_ctrl(chan->fe, 0);
return 0;
}
static int tuner_attach_tda18212(struct ngene_channel *chan, u32 dmdtype)
{
struct device *pdev = &chan->dev->pci_dev->dev;
struct i2c_adapter *i2c = i2c_adapter_from_chan(chan);
struct i2c_client *client;
struct tda18212_config config = {
.fe = chan->fe,
.if_dvbt_6 = 3550,
.if_dvbt_7 = 3700,
.if_dvbt_8 = 4150,
.if_dvbt2_6 = 3250,
.if_dvbt2_7 = 4000,
.if_dvbt2_8 = 4000,
.if_dvbc = 5000,
};
u8 addr = (chan->number & 1) ? 0x63 : 0x60;
/*
* due to a hardware quirk with the I2C gate on the stv0367+tda18212
* combo, the tda18212 must be probed by reading its id _twice_ when
* cold started, or it very likely will fail.
*/
if (dmdtype == DEMOD_TYPE_STV0367)
tuner_tda18212_ping(chan, i2c, addr);
/* perform tuner probe/init/attach */
client = dvb_module_probe("tda18212", NULL, i2c, addr, &config);
if (!client)
goto err;
chan->i2c_client[0] = client;
chan->i2c_client_fe = 1;
return 0;
err:
dev_err(pdev, "TDA18212 tuner not found. Device is not fully operational.\n");
return -ENODEV;
}
static int tuner_attach_probe(struct ngene_channel *chan)
{
switch (chan->demod_type) {
case DEMOD_TYPE_STV090X:
return tuner_attach_stv6110(chan);
case DEMOD_TYPE_DRXK:
return tuner_attach_tda18271(chan);
case DEMOD_TYPE_STV0367:
case DEMOD_TYPE_SONY_CT2:
case DEMOD_TYPE_SONY_ISDBT:
case DEMOD_TYPE_SONY_C2T2:
case DEMOD_TYPE_SONY_C2T2I:
return tuner_attach_tda18212(chan, chan->demod_type);
case DEMOD_TYPE_STV0910:
return tuner_attach_stv6111(chan);
}
return -EINVAL;
}
static int demod_attach_stv0900(struct ngene_channel *chan)
{
struct device *pdev = &chan->dev->pci_dev->dev;
struct i2c_adapter *i2c = i2c_adapter_from_chan(chan);
struct stv090x_config *feconf = (struct stv090x_config *)
chan->dev->card_info->fe_config[chan->number];
chan->fe = dvb_attach(stv090x_attach, feconf, i2c,
(chan->number & 1) == 0 ? STV090x_DEMODULATOR_0
: STV090x_DEMODULATOR_1);
if (chan->fe == NULL) {
dev_err(pdev, "No STV0900 found!\n");
return -ENODEV;
}
/* store channel info */
if (feconf->tuner_i2c_lock)
chan->fe->analog_demod_priv = chan;
if (!dvb_attach(lnbh24_attach, chan->fe, i2c, 0,
0, chan->dev->card_info->lnb[chan->number])) {
dev_err(pdev, "No LNBH24 found!\n");
dvb_frontend_detach(chan->fe);
chan->fe = NULL;
return -ENODEV;
}
return 0;
}
static struct stv0910_cfg stv0910_p = {
.adr = 0x68,
.parallel = 1,
.rptlvl = 4,
.clk = 30000000,
.tsspeed = 0x28,
};
static struct lnbh25_config lnbh25_cfg = {
.i2c_address = 0x0c << 1,
.data2_config = LNBH25_TEN
};
static int demod_attach_stv0910(struct ngene_channel *chan,
struct i2c_adapter *i2c)
{
struct device *pdev = &chan->dev->pci_dev->dev;
struct stv0910_cfg cfg = stv0910_p;
struct lnbh25_config lnbcfg = lnbh25_cfg;
chan->fe = dvb_attach(stv0910_attach, i2c, &cfg, (chan->number & 1));
if (!chan->fe) {
cfg.adr = 0x6c;
chan->fe = dvb_attach(stv0910_attach, i2c,
&cfg, (chan->number & 1));
}
if (!chan->fe) {
dev_err(pdev, "stv0910_attach() failed!\n");
return -ENODEV;
}
/*
* attach lnbh25 - leftshift by one as the lnbh25 driver expects 8bit
* i2c addresses
*/
lnbcfg.i2c_address = (((chan->number & 1) ? 0x0d : 0x0c) << 1);
if (!dvb_attach(lnbh25_attach, chan->fe, &lnbcfg, i2c)) {
lnbcfg.i2c_address = (((chan->number & 1) ? 0x09 : 0x08) << 1);
if (!dvb_attach(lnbh25_attach, chan->fe, &lnbcfg, i2c)) {
dev_err(pdev, "lnbh25_attach() failed!\n");
dvb_frontend_detach(chan->fe);
chan->fe = NULL;
return -ENODEV;
}
}
return 0;
}
static struct stv0367_config ddb_stv0367_config[] = {
{
.demod_address = 0x1f,
.xtal = 27000000,
.if_khz = 0,
.if_iq_mode = FE_TER_NORMAL_IF_TUNER,
.ts_mode = STV0367_SERIAL_PUNCT_CLOCK,
.clk_pol = STV0367_CLOCKPOLARITY_DEFAULT,
}, {
.demod_address = 0x1e,
.xtal = 27000000,
.if_khz = 0,
.if_iq_mode = FE_TER_NORMAL_IF_TUNER,
.ts_mode = STV0367_SERIAL_PUNCT_CLOCK,
.clk_pol = STV0367_CLOCKPOLARITY_DEFAULT,
},
};
static int demod_attach_stv0367(struct ngene_channel *chan,
struct i2c_adapter *i2c)
{
struct device *pdev = &chan->dev->pci_dev->dev;
chan->fe = dvb_attach(stv0367ddb_attach,
&ddb_stv0367_config[(chan->number & 1)], i2c);
if (!chan->fe) {
dev_err(pdev, "stv0367ddb_attach() failed!\n");
return -ENODEV;
}
chan->fe->sec_priv = chan;
chan->gate_ctrl = chan->fe->ops.i2c_gate_ctrl;
chan->fe->ops.i2c_gate_ctrl = drxk_gate_ctrl;
return 0;
}
static int demod_attach_cxd28xx(struct ngene_channel *chan,
struct i2c_adapter *i2c, int osc24)
{
struct device *pdev = &chan->dev->pci_dev->dev;
struct cxd2841er_config cfg;
/* the cxd2841er driver expects 8bit/shifted I2C addresses */
cfg.i2c_addr = ((chan->number & 1) ? 0x6d : 0x6c) << 1;
cfg.xtal = osc24 ? SONY_XTAL_24000 : SONY_XTAL_20500;
cfg.flags = CXD2841ER_AUTO_IFHZ | CXD2841ER_EARLY_TUNE |
CXD2841ER_NO_WAIT_LOCK | CXD2841ER_NO_AGCNEG |
CXD2841ER_TSBITS | CXD2841ER_TS_SERIAL;
/* attach frontend */
chan->fe = dvb_attach(cxd2841er_attach_t_c, &cfg, i2c);
if (!chan->fe) {
dev_err(pdev, "CXD28XX attach failed!\n");
return -ENODEV;
}
chan->fe->sec_priv = chan;
chan->gate_ctrl = chan->fe->ops.i2c_gate_ctrl;
chan->fe->ops.i2c_gate_ctrl = drxk_gate_ctrl;
return 0;
}
static void cineS2_tuner_i2c_lock(struct dvb_frontend *fe, int lock)
{
struct ngene_channel *chan = fe->analog_demod_priv;
if (lock)
down(&chan->dev->pll_mutex);
else
up(&chan->dev->pll_mutex);
}
static int port_has_stv0900(struct i2c_adapter *i2c, int port)
{
u8 val;
if (i2c_read_reg16(i2c, 0x68+port/2, 0xf100, &val) < 0)
return 0;
return 1;
}
static int port_has_drxk(struct i2c_adapter *i2c, int port)
{
u8 val;
if (i2c_read(i2c, 0x29+port, &val) < 0)
return 0;
return 1;
}
static int port_has_stv0367(struct i2c_adapter *i2c)
{
u8 val;
if (i2c_read_reg16(i2c, 0x1e, 0xf000, &val) < 0)
return 0;
if (val != 0x60)
return 0;
if (i2c_read_reg16(i2c, 0x1f, 0xf000, &val) < 0)
return 0;
if (val != 0x60)
return 0;
return 1;
}
int ngene_port_has_cxd2099(struct i2c_adapter *i2c, u8 *type)
{
u8 val;
u8 probe[4] = { 0xe0, 0x00, 0x00, 0x00 }, data[4];
struct i2c_msg msgs[2] = {{ .addr = 0x40, .flags = 0,
.buf = probe, .len = 4 },
{ .addr = 0x40, .flags = I2C_M_RD,
.buf = data, .len = 4 } };
val = i2c_transfer(i2c, msgs, 2);
if (val != 2)
return 0;
if (data[0] == 0x02 && data[1] == 0x2b && data[3] == 0x43)
*type = 2;
else
*type = 1;
return 1;
}
static int demod_attach_drxk(struct ngene_channel *chan,
struct i2c_adapter *i2c)
{
struct device *pdev = &chan->dev->pci_dev->dev;
struct drxk_config config;
memset(&config, 0, sizeof(config));
config.microcode_name = "drxk_a3.mc";
config.qam_demod_parameter_count = 4;
config.adr = 0x29 + (chan->number ^ 2);
chan->fe = dvb_attach(drxk_attach, &config, i2c);
if (!chan->fe) {
dev_err(pdev, "No DRXK found!\n");
return -ENODEV;
}
chan->fe->sec_priv = chan;
chan->gate_ctrl = chan->fe->ops.i2c_gate_ctrl;
chan->fe->ops.i2c_gate_ctrl = drxk_gate_ctrl;
return 0;
}
/****************************************************************************/
/* XO2 related lists and functions ******************************************/
/****************************************************************************/
static char *xo2names[] = {
"DUAL DVB-S2",
"DUAL DVB-C/T/T2",
"DUAL DVB-ISDBT",
"DUAL DVB-C/C2/T/T2",
"DUAL ATSC",
"DUAL DVB-C/C2/T/T2/I",
};
static int init_xo2(struct ngene_channel *chan, struct i2c_adapter *i2c)
{
struct device *pdev = &chan->dev->pci_dev->dev;
u8 addr = 0x10;
u8 val, data[2];
int res;
res = i2c_read_regs(i2c, addr, 0x04, data, 2);
if (res < 0)
return res;
if (data[0] != 0x01) {
dev_info(pdev, "Invalid XO2 on channel %d\n", chan->number);
return -1;
}
i2c_read_reg(i2c, addr, 0x08, &val);
if (val != 0) {
i2c_write_reg(i2c, addr, 0x08, 0x00);
msleep(100);
}
/* Enable tuner power, disable pll, reset demods */
i2c_write_reg(i2c, addr, 0x08, 0x04);
usleep_range(2000, 3000);
/* Release demod resets */
i2c_write_reg(i2c, addr, 0x08, 0x07);
/*
* speed: 0=55,1=75,2=90,3=104 MBit/s
* Note: The ngene hardware must be run at 75 MBit/s compared
* to more modern ddbridge hardware which runs at 90 MBit/s,
* else there will be issues with the data transport and non-
* working secondary/slave demods/tuners.
*/
i2c_write_reg(i2c, addr, 0x09, 1);
i2c_write_reg(i2c, addr, 0x0a, 0x01);
i2c_write_reg(i2c, addr, 0x0b, 0x01);
usleep_range(2000, 3000);
/* Start XO2 PLL */
i2c_write_reg(i2c, addr, 0x08, 0x87);
return 0;
}
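/*
 * Tiny lookup sketch (hypothetical helper, not driver code) for the XO2
 * link-speed register encoding described in the comment above: register
 * 0x09 selects 0=55, 1=75, 2=90, 3=104 MBit/s; the nGene bridge uses
 * index 1 (75 MBit/s), newer ddbridge hardware uses index 2 (90 MBit/s).
 */
static const unsigned int demo_xo2_speed_mbit[4] = { 55, 75, 90, 104 };

static unsigned int demo_xo2_speed(unsigned int reg09)
{
	return reg09 < 4 ? demo_xo2_speed_mbit[reg09] : 0;	/* 0 = invalid */
}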
static int port_has_xo2(struct i2c_adapter *i2c, u8 *type, u8 *id)
{
u8 probe[1] = { 0x00 }, data[4];
u8 addr = 0x10;
*type = NGENE_XO2_TYPE_NONE;
if (i2c_io(i2c, addr, probe, 1, data, 4))
return 0;
if (data[0] == 'D' && data[1] == 'F') {
*id = data[2];
*type = NGENE_XO2_TYPE_DUOFLEX;
return 1;
}
if (data[0] == 'C' && data[1] == 'I') {
*id = data[2];
*type = NGENE_XO2_TYPE_CI;
return 1;
}
return 0;
}
/****************************************************************************/
/* Probing and port/channel handling ****************************************/
/****************************************************************************/
static int cineS2_probe(struct ngene_channel *chan)
{
struct device *pdev = &chan->dev->pci_dev->dev;
struct i2c_adapter *i2c = i2c_adapter_from_chan(chan);
struct stv090x_config *fe_conf;
u8 buf[3];
u8 xo2_type, xo2_id, xo2_demodtype;
u8 sony_osc24 = 0;
struct i2c_msg i2c_msg = { .flags = 0, .buf = buf };
int rc;
if (port_has_xo2(i2c, &xo2_type, &xo2_id)) {
xo2_id >>= 2;
dev_dbg(pdev, "XO2 on channel %d (type %d, id %d)\n",
chan->number, xo2_type, xo2_id);
switch (xo2_type) {
case NGENE_XO2_TYPE_DUOFLEX:
if (chan->number & 1)
dev_dbg(pdev,
"skipping XO2 init on odd channel %d",
chan->number);
else
init_xo2(chan, i2c);
xo2_demodtype = DEMOD_TYPE_XO2 + xo2_id;
switch (xo2_demodtype) {
case DEMOD_TYPE_SONY_CT2:
case DEMOD_TYPE_SONY_ISDBT:
case DEMOD_TYPE_SONY_C2T2:
case DEMOD_TYPE_SONY_C2T2I:
dev_info(pdev, "%s (XO2) on channel %d\n",
xo2names[xo2_id], chan->number);
chan->demod_type = xo2_demodtype;
if (xo2_demodtype == DEMOD_TYPE_SONY_C2T2I)
sony_osc24 = 1;
demod_attach_cxd28xx(chan, i2c, sony_osc24);
break;
case DEMOD_TYPE_STV0910:
dev_info(pdev, "%s (XO2) on channel %d\n",
xo2names[xo2_id], chan->number);
chan->demod_type = xo2_demodtype;
demod_attach_stv0910(chan, i2c);
break;
default:
dev_warn(pdev,
"Unsupported XO2 module on channel %d\n",
chan->number);
return -ENODEV;
}
break;
case NGENE_XO2_TYPE_CI:
dev_info(pdev, "DuoFlex CI modules not supported\n");
return -ENODEV;
default:
dev_info(pdev, "Unsupported XO2 module type\n");
return -ENODEV;
}
} else if (port_has_stv0900(i2c, chan->number)) {
chan->demod_type = DEMOD_TYPE_STV090X;
fe_conf = chan->dev->card_info->fe_config[chan->number];
/* demod found, attach it */
rc = demod_attach_stv0900(chan);
if (rc < 0 || chan->number < 2)
return rc;
/* demod #2: reprogram outputs DPN1 & DPN2 */
i2c_msg.addr = fe_conf->address;
i2c_msg.len = 3;
buf[0] = 0xf1;
switch (chan->number) {
case 2:
buf[1] = 0x5c;
buf[2] = 0xc2;
break;
case 3:
buf[1] = 0x61;
buf[2] = 0xcc;
break;
default:
return -ENODEV;
}
rc = i2c_transfer(i2c, &i2c_msg, 1);
if (rc != 1) {
dev_err(pdev, "Could not setup DPNx\n");
return -EIO;
}
} else if (port_has_drxk(i2c, chan->number^2)) {
chan->demod_type = DEMOD_TYPE_DRXK;
demod_attach_drxk(chan, i2c);
} else if (port_has_stv0367(i2c)) {
chan->demod_type = DEMOD_TYPE_STV0367;
dev_info(pdev, "STV0367 on channel %d\n", chan->number);
demod_attach_stv0367(chan, i2c);
} else {
dev_info(pdev, "No demod found on chan %d\n", chan->number);
return -ENODEV;
}
return 0;
}
static struct lgdt330x_config aver_m780 = {
.demod_chip = LGDT3303,
.serial_mpeg = 0x00, /* PARALLEL */
.clock_polarity_flip = 1,
};
static struct mt2131_config m780_tunerconfig = {
0xc0 >> 1
};
/* A single func to attach the demod and tuner, rather than
 * using two separate funcs like the current design mandates.
*/
static int demod_attach_lg330x(struct ngene_channel *chan)
{
struct device *pdev = &chan->dev->pci_dev->dev;
chan->fe = dvb_attach(lgdt330x_attach, &aver_m780,
0xb2 >> 1, &chan->i2c_adapter);
if (chan->fe == NULL) {
dev_err(pdev, "No LGDT330x found!\n");
return -ENODEV;
}
dvb_attach(mt2131_attach, chan->fe, &chan->i2c_adapter,
&m780_tunerconfig, 0);
return (chan->fe) ? 0 : -ENODEV;
}
static int demod_attach_drxd(struct ngene_channel *chan)
{
struct device *pdev = &chan->dev->pci_dev->dev;
struct drxd_config *feconf;
feconf = chan->dev->card_info->fe_config[chan->number];
chan->fe = dvb_attach(drxd_attach, feconf, chan,
&chan->i2c_adapter, &chan->dev->pci_dev->dev);
if (!chan->fe) {
dev_err(pdev, "No DRXD found!\n");
return -ENODEV;
}
return 0;
}
static int tuner_attach_dtt7520x(struct ngene_channel *chan)
{
struct device *pdev = &chan->dev->pci_dev->dev;
struct drxd_config *feconf;
feconf = chan->dev->card_info->fe_config[chan->number];
if (!dvb_attach(dvb_pll_attach, chan->fe, feconf->pll_address,
&chan->i2c_adapter,
feconf->pll_type)) {
dev_err(pdev, "No pll(%d) found!\n", feconf->pll_type);
return -ENODEV;
}
return 0;
}
/****************************************************************************/
/* EEPROM TAGS **************************************************************/
/****************************************************************************/
#define MICNG_EE_START 0x0100
#define MICNG_EE_END 0x0FF0
#define MICNG_EETAG_END0 0x0000
#define MICNG_EETAG_END1 0xFFFF
/* 0x0001 - 0x000F reserved for housekeeping */
/* 0xFFFF - 0xFFFE reserved for housekeeping */
/* Micronas assigned tags
EEProm tags for hardware support */
#define MICNG_EETAG_DRXD1_OSCDEVIATION 0x1000 /* 2 Bytes data */
#define MICNG_EETAG_DRXD2_OSCDEVIATION 0x1001 /* 2 Bytes data */
#define MICNG_EETAG_MT2060_1_1STIF 0x1100 /* 2 Bytes data */
#define MICNG_EETAG_MT2060_2_1STIF 0x1101 /* 2 Bytes data */
/* Tag range for OEMs */
#define MICNG_EETAG_OEM_FIRST 0xC000
#define MICNG_EETAG_OEM_LAST 0xFFEF
static int i2c_write_eeprom(struct i2c_adapter *adapter,
u8 adr, u16 reg, u8 data)
{
struct device *pdev = adapter->dev.parent;
u8 m[3] = {(reg >> 8), (reg & 0xff), data};
struct i2c_msg msg = {.addr = adr, .flags = 0, .buf = m,
.len = sizeof(m)};
if (i2c_transfer(adapter, &msg, 1) != 1) {
dev_err(pdev, "Error writing EEPROM!\n");
return -EIO;
}
return 0;
}
static int i2c_read_eeprom(struct i2c_adapter *adapter,
u8 adr, u16 reg, u8 *data, int len)
{
struct device *pdev = adapter->dev.parent;
u8 msg[2] = {(reg >> 8), (reg & 0xff)};
struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
.buf = msg, .len = 2 },
{.addr = adr, .flags = I2C_M_RD,
.buf = data, .len = len} };
if (i2c_transfer(adapter, msgs, 2) != 2) {
dev_err(pdev, "Error reading EEPROM\n");
return -EIO;
}
return 0;
}
static int ReadEEProm(struct i2c_adapter *adapter,
u16 Tag, u32 MaxLen, u8 *data, u32 *pLength)
{
struct device *pdev = adapter->dev.parent;
int status = 0;
u16 Addr = MICNG_EE_START, Length, tag = 0;
u8 EETag[3];
while (Addr + sizeof(u16) + 1 < MICNG_EE_END) {
if (i2c_read_eeprom(adapter, 0x50, Addr, EETag, sizeof(EETag)))
return -1;
tag = (EETag[0] << 8) | EETag[1];
if (tag == MICNG_EETAG_END0 || tag == MICNG_EETAG_END1)
return -1;
if (tag == Tag)
break;
Addr += sizeof(u16) + 1 + EETag[2];
}
if (Addr + sizeof(u16) + 1 + EETag[2] > MICNG_EE_END) {
dev_err(pdev, "Reached EOEE @ Tag = %04x Length = %3d\n",
tag, EETag[2]);
return -1;
}
Length = EETag[2];
if (Length > MaxLen)
Length = (u16) MaxLen;
if (Length > 0) {
Addr += sizeof(u16) + 1;
status = i2c_read_eeprom(adapter, 0x50, Addr, data, Length);
if (!status) {
*pLength = EETag[2];
#if 0
if (Length < EETag[2])
status = STATUS_BUFFER_OVERFLOW;
#endif
}
}
return status;
}
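/*
 * Standalone sketch of the tag walk used by ReadEEProm()/WriteEEProm():
 * the EEPROM directory holds records of the form [tag_hi][tag_lo][len]
 * followed by 'len' data bytes, terminated by tag 0x0000 or 0xFFFF.
 * demo_find_tag() below is hypothetical demonstration code operating on
 * an in-memory copy, not on the real device.
 */
#include <stdint.h>
#include <stddef.h>

static int demo_find_tag(const uint8_t *ee, size_t size, uint16_t want,
			 size_t *data_off, uint8_t *data_len)
{
	size_t pos = 0;

	while (pos + 3 <= size) {
		uint16_t tag = (uint16_t)((ee[pos] << 8) | ee[pos + 1]);
		uint8_t len = ee[pos + 2];

		if (tag == 0x0000 || tag == 0xFFFF)
			return -1;		/* end of directory */
		if (tag == want) {
			*data_off = pos + 3;	/* payload starts here */
			*data_len = len;
			return 0;
		}
		pos += 3 + len;			/* skip header + payload */
	}
	return -1;				/* ran off the end */
}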
static int WriteEEProm(struct i2c_adapter *adapter,
u16 Tag, u32 Length, u8 *data)
{
struct device *pdev = adapter->dev.parent;
int status = 0;
u16 Addr = MICNG_EE_START;
u8 EETag[3];
u16 tag = 0;
int retry, i;
while (Addr + sizeof(u16) + 1 < MICNG_EE_END) {
if (i2c_read_eeprom(adapter, 0x50, Addr, EETag, sizeof(EETag)))
return -1;
tag = (EETag[0] << 8) | EETag[1];
if (tag == MICNG_EETAG_END0 || tag == MICNG_EETAG_END1)
return -1;
if (tag == Tag)
break;
Addr += sizeof(u16) + 1 + EETag[2];
}
if (Addr + sizeof(u16) + 1 + EETag[2] > MICNG_EE_END) {
dev_err(pdev, "Reached EOEE @ Tag = %04x Length = %3d\n",
tag, EETag[2]);
return -1;
}
if (Length > EETag[2])
return -EINVAL;
/* Note: We write the data one byte at a time to avoid
issues with page sizes. (which are different for
each manufacturer and EEPROM size)
*/
Addr += sizeof(u16) + 1;
for (i = 0; i < Length; i++, Addr++) {
status = i2c_write_eeprom(adapter, 0x50, Addr, data[i]);
if (status)
break;
/* Poll for finishing write cycle */
retry = 10;
while (retry) {
u8 Tmp;
msleep(50);
status = i2c_read_eeprom(adapter, 0x50, Addr, &Tmp, 1);
if (status)
break;
if (Tmp != data[i])
dev_err(pdev, "eeprom write error\n");
retry -= 1;
}
if (status) {
dev_err(pdev, "Timeout polling eeprom\n");
break;
}
}
return status;
}
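/*
 * Standalone sketch (hypothetical callbacks, not driver code) of the
 * byte-wise write-then-verify idea used above, simplified so that it
 * stops polling as soon as the readback matches instead of always
 * looping the full retry count.
 */
#include <stdint.h>

typedef int (*demo_rd_fn)(void *ctx, uint16_t addr, uint8_t *val);
typedef int (*demo_wr_fn)(void *ctx, uint16_t addr, uint8_t val);

static int demo_write_verified(void *ctx, demo_wr_fn wr, demo_rd_fn rd,
			       void (*delay_ms)(unsigned int),
			       uint16_t addr, uint8_t val)
{
	int retry;
	uint8_t tmp;

	if (wr(ctx, addr, val))
		return -1;
	for (retry = 0; retry < 10; retry++) {
		delay_ms(50);			/* wait for the write cycle */
		if (rd(ctx, addr, &tmp))
			return -1;
		if (tmp == val)
			return 0;		/* readback matches */
	}
	return -1;				/* timed out */
}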
static int eeprom_read_ushort(struct i2c_adapter *adapter, u16 tag, u16 *data)
{
int stat;
u8 buf[2];
u32 len = 0;
stat = ReadEEProm(adapter, tag, 2, buf, &len);
if (stat)
return stat;
if (len != 2)
return -EINVAL;
*data = (buf[0] << 8) | buf[1];
return 0;
}
static int eeprom_write_ushort(struct i2c_adapter *adapter, u16 tag, u16 data)
{
u8 buf[2];
buf[0] = data >> 8;
buf[1] = data & 0xff;
return WriteEEProm(adapter, tag, 2, buf);
}
static s16 osc_deviation(void *priv, s16 deviation, int flag)
{
struct ngene_channel *chan = priv;
struct device *pdev = &chan->dev->pci_dev->dev;
struct i2c_adapter *adap = &chan->i2c_adapter;
u16 data = 0;
if (flag) {
data = (u16) deviation;
dev_info(pdev, "write deviation %d\n",
deviation);
eeprom_write_ushort(adap, 0x1000 + chan->number, data);
} else {
if (eeprom_read_ushort(adap, 0x1000 + chan->number, &data))
data = 0;
dev_info(pdev, "read deviation %d\n",
(s16)data);
}
return (s16) data;
}
/****************************************************************************/
/* Switch control (I2C gates, etc.) *****************************************/
/****************************************************************************/
static struct stv090x_config fe_cineS2 = {
.device = STV0900,
.demod_mode = STV090x_DUAL,
.clk_mode = STV090x_CLK_EXT,
.xtal = 27000000,
.address = 0x68,
.ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
.ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
.repeater_level = STV090x_RPTLEVEL_16,
.adc1_range = STV090x_ADC_1Vpp,
.adc2_range = STV090x_ADC_1Vpp,
.diseqc_envelope_mode = true,
.tuner_i2c_lock = cineS2_tuner_i2c_lock,
};
static struct stv090x_config fe_cineS2_2 = {
.device = STV0900,
.demod_mode = STV090x_DUAL,
.clk_mode = STV090x_CLK_EXT,
.xtal = 27000000,
.address = 0x69,
.ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
.ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
.repeater_level = STV090x_RPTLEVEL_16,
.adc1_range = STV090x_ADC_1Vpp,
.adc2_range = STV090x_ADC_1Vpp,
.diseqc_envelope_mode = true,
.tuner_i2c_lock = cineS2_tuner_i2c_lock,
};
static struct stv6110x_config tuner_cineS2_0 = {
.addr = 0x60,
.refclk = 27000000,
.clk_div = 1,
};
static struct stv6110x_config tuner_cineS2_1 = {
.addr = 0x63,
.refclk = 27000000,
.clk_div = 1,
};
static const struct ngene_info ngene_info_cineS2 = {
.type = NGENE_SIDEWINDER,
.name = "Linux4Media cineS2 DVB-S2 Twin Tuner",
.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
.demod_attach = {demod_attach_stv0900, demod_attach_stv0900},
.tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110},
.fe_config = {&fe_cineS2, &fe_cineS2},
.tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
.lnb = {0x0b, 0x08},
.tsf = {3, 3},
.fw_version = 18,
.msi_supported = true,
};
static const struct ngene_info ngene_info_satixS2 = {
.type = NGENE_SIDEWINDER,
.name = "Mystique SaTiX-S2 Dual",
.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
.demod_attach = {demod_attach_stv0900, demod_attach_stv0900},
.tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110},
.fe_config = {&fe_cineS2, &fe_cineS2},
.tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
.lnb = {0x0b, 0x08},
.tsf = {3, 3},
.fw_version = 18,
.msi_supported = true,
};
static const struct ngene_info ngene_info_satixS2v2 = {
.type = NGENE_SIDEWINDER,
.name = "Mystique SaTiX-S2 Dual (v2)",
.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
NGENE_IO_TSOUT},
.demod_attach = {demod_attach_stv0900, demod_attach_stv0900, cineS2_probe, cineS2_probe},
.tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_probe, tuner_attach_probe},
.fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2},
.tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1},
.lnb = {0x0a, 0x08, 0x0b, 0x09},
.tsf = {3, 3},
.fw_version = 18,
.msi_supported = true,
};
static const struct ngene_info ngene_info_cineS2v5 = {
.type = NGENE_SIDEWINDER,
.name = "Linux4Media cineS2 DVB-S2 Twin Tuner (v5)",
.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
NGENE_IO_TSOUT},
.demod_attach = {demod_attach_stv0900, demod_attach_stv0900, cineS2_probe, cineS2_probe},
.tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_probe, tuner_attach_probe},
.fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2},
.tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1},
.lnb = {0x0a, 0x08, 0x0b, 0x09},
.tsf = {3, 3},
.fw_version = 18,
.msi_supported = true,
};
static const struct ngene_info ngene_info_duoFlex = {
.type = NGENE_SIDEWINDER,
.name = "Digital Devices DuoFlex PCIe or miniPCIe",
.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
NGENE_IO_TSOUT},
.demod_attach = {cineS2_probe, cineS2_probe, cineS2_probe, cineS2_probe},
.tuner_attach = {tuner_attach_probe, tuner_attach_probe, tuner_attach_probe, tuner_attach_probe},
.fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2},
.tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1},
.lnb = {0x0a, 0x08, 0x0b, 0x09},
.tsf = {3, 3},
.fw_version = 18,
.msi_supported = true,
};
static const struct ngene_info ngene_info_m780 = {
.type = NGENE_APP,
.name = "Aver M780 ATSC/QAM-B",
/* Channel 0 is analog, which is currently unsupported */
.io_type = { NGENE_IO_NONE, NGENE_IO_TSIN },
.demod_attach = { NULL, demod_attach_lg330x },
/* Ensure these are NULL, else the framework will call them (as functions) */
.tuner_attach = { NULL, NULL, NULL, NULL },
.fe_config = { NULL, &aver_m780 },
.avf = { 0 },
/* A custom electrical interface config for the demod to bridge */
.tsf = { 4, 4 },
.fw_version = 15,
};
static struct drxd_config fe_terratec_dvbt_0 = {
.index = 0,
.demod_address = 0x70,
.demod_revision = 0xa2,
.demoda_address = 0x00,
.pll_address = 0x60,
.pll_type = DVB_PLL_THOMSON_DTT7520X,
.clock = 20000,
.osc_deviation = osc_deviation,
};
static struct drxd_config fe_terratec_dvbt_1 = {
.index = 1,
.demod_address = 0x71,
.demod_revision = 0xa2,
.demoda_address = 0x00,
.pll_address = 0x60,
.pll_type = DVB_PLL_THOMSON_DTT7520X,
.clock = 20000,
.osc_deviation = osc_deviation,
};
static const struct ngene_info ngene_info_terratec = {
.type = NGENE_TERRATEC,
.name = "Terratec Integra/Cinergy2400i Dual DVB-T",
.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
.demod_attach = {demod_attach_drxd, demod_attach_drxd},
.tuner_attach = {tuner_attach_dtt7520x, tuner_attach_dtt7520x},
.fe_config = {&fe_terratec_dvbt_0, &fe_terratec_dvbt_1},
.i2c_access = 1,
};
/****************************************************************************/
/****************************************************************************/
/* PCI Subsystem ID *********************************************************/
/****************************************************************************/
#define NGENE_ID(_subvend, _subdev, _driverdata) { \
.vendor = NGENE_VID, .device = NGENE_PID, \
.subvendor = _subvend, .subdevice = _subdev, \
.driver_data = (unsigned long) &_driverdata }
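/*
 * For illustration (assumption: NGENE_VID/NGENE_PID are the bridge's PCI
 * vendor/device IDs defined elsewhere in the driver), an entry such as
 * NGENE_ID(0x18c3, 0xab04, ngene_info_cineS2) expands roughly to:
 *
 *   { .vendor = NGENE_VID, .device = NGENE_PID,
 *     .subvendor = 0x18c3, .subdevice = 0xab04,
 *     .driver_data = (unsigned long)&ngene_info_cineS2 }
 */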
/****************************************************************************/
static const struct pci_device_id ngene_id_tbl[] = {
NGENE_ID(0x18c3, 0xab04, ngene_info_cineS2),
NGENE_ID(0x18c3, 0xab05, ngene_info_cineS2v5),
NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
NGENE_ID(0x18c3, 0xdb02, ngene_info_satixS2v2),
NGENE_ID(0x18c3, 0xdd00, ngene_info_cineS2v5),
NGENE_ID(0x18c3, 0xdd10, ngene_info_duoFlex),
NGENE_ID(0x18c3, 0xdd20, ngene_info_duoFlex),
NGENE_ID(0x1461, 0x062e, ngene_info_m780),
NGENE_ID(0x153b, 0x1167, ngene_info_terratec),
{0}
};
MODULE_DEVICE_TABLE(pci, ngene_id_tbl);
/****************************************************************************/
/* Init/Exit ****************************************************************/
/****************************************************************************/
static pci_ers_result_t ngene_error_detected(struct pci_dev *dev,
pci_channel_state_t state)
{
dev_err(&dev->dev, "PCI error\n");
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
if (state == pci_channel_io_frozen)
return PCI_ERS_RESULT_NEED_RESET;
return PCI_ERS_RESULT_CAN_RECOVER;
}
static pci_ers_result_t ngene_slot_reset(struct pci_dev *dev)
{
dev_info(&dev->dev, "slot reset\n");
return 0;
}
static void ngene_resume(struct pci_dev *dev)
{
dev_info(&dev->dev, "resume\n");
}
static const struct pci_error_handlers ngene_errors = {
.error_detected = ngene_error_detected,
.slot_reset = ngene_slot_reset,
.resume = ngene_resume,
};
static struct pci_driver ngene_pci_driver = {
.name = "ngene",
.id_table = ngene_id_tbl,
.probe = ngene_probe,
.remove = ngene_remove,
.err_handler = &ngene_errors,
.shutdown = ngene_shutdown,
};
static __init int module_init_ngene(void)
{
/* pr_*() since we don't have a device to use with dev_*() yet */
pr_info("nGene PCIE bridge driver, Copyright (C) 2005-2007 Micronas\n");
return pci_register_driver(&ngene_pci_driver);
}
static __exit void module_exit_ngene(void)
{
pci_unregister_driver(&ngene_pci_driver);
}
module_init(module_init_ngene);
module_exit(module_exit_ngene);
MODULE_DESCRIPTION("nGene");
MODULE_AUTHOR("Micronas, Ralph Metzler, Manfred Voelkel");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/pci/ngene/ngene-cards.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ngene-dvb.c: nGene PCIe bridge driver - DVB functions
*
* Copyright (C) 2005-2007 Micronas
*
* Copyright (C) 2008-2009 Ralph Metzler <[email protected]>
* Modifications for new nGene firmware,
* support for EEPROM-copying,
* support for new dual DVB-S2 card prototype
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/io.h>
#include <asm/div64.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/byteorder/generic.h>
#include <linux/firmware.h>
#include <linux/vmalloc.h>
#include "ngene.h"
static int ci_tsfix = 1;
module_param(ci_tsfix, int, 0444);
MODULE_PARM_DESC(ci_tsfix, "Detect and fix TS buffer offset shifts in conjunction with CI expansions (default: 1/enabled)");
/****************************************************************************/
/* COMMAND API interface ****************************************************/
/****************************************************************************/
static ssize_t ts_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct dvb_device *dvbdev = file->private_data;
struct ngene_channel *chan = dvbdev->priv;
struct ngene *dev = chan->dev;
if (wait_event_interruptible(dev->tsout_rbuf.queue,
dvb_ringbuffer_free
(&dev->tsout_rbuf) >= count) < 0)
return 0;
dvb_ringbuffer_write_user(&dev->tsout_rbuf, buf, count);
return count;
}
static ssize_t ts_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct dvb_device *dvbdev = file->private_data;
struct ngene_channel *chan = dvbdev->priv;
struct ngene *dev = chan->dev;
int left, avail;
left = count;
while (left) {
if (wait_event_interruptible(
dev->tsin_rbuf.queue,
dvb_ringbuffer_avail(&dev->tsin_rbuf) > 0) < 0)
return -EAGAIN;
avail = dvb_ringbuffer_avail(&dev->tsin_rbuf);
if (avail > left)
avail = left;
dvb_ringbuffer_read_user(&dev->tsin_rbuf, buf, avail);
left -= avail;
buf += avail;
}
return count;
}
static __poll_t ts_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct ngene_channel *chan = dvbdev->priv;
struct ngene *dev = chan->dev;
struct dvb_ringbuffer *rbuf = &dev->tsin_rbuf;
struct dvb_ringbuffer *wbuf = &dev->tsout_rbuf;
__poll_t mask = 0;
poll_wait(file, &rbuf->queue, wait);
poll_wait(file, &wbuf->queue, wait);
if (!dvb_ringbuffer_empty(rbuf))
mask |= EPOLLIN | EPOLLRDNORM;
if (dvb_ringbuffer_free(wbuf) >= 188)
mask |= EPOLLOUT | EPOLLWRNORM;
return mask;
}
static const struct file_operations ci_fops = {
.owner = THIS_MODULE,
.read = ts_read,
.write = ts_write,
.open = dvb_generic_open,
.release = dvb_generic_release,
.poll = ts_poll,
.mmap = NULL,
};
struct dvb_device ngene_dvbdev_ci = {
.priv = NULL,
.readers = 1,
.writers = 1,
.users = 2,
.fops = &ci_fops,
};
/****************************************************************************/
/* DVB functions and API interface ******************************************/
/****************************************************************************/
static void swap_buffer(u32 *p, u32 len)
{
while (len) {
*p = swab32(*p);
p++;
len -= 4;
}
}
/* start of filler packet */
static u8 fill_ts[] = { 0x47, 0x1f, 0xff, 0x10, TS_FILLER };
static int tsin_find_offset(void *buf, u32 len)
{
int i, l;
l = len - sizeof(fill_ts);
if (l <= 0)
return -1;
for (i = 0; i < l; i++) {
if (((char *)buf)[i] == 0x47) {
if (!memcmp(buf + i, fill_ts, sizeof(fill_ts)))
return i % 188;
}
}
return -1;
}
static inline void tsin_copy_stripped(struct ngene *dev, void *buf)
{
if (memcmp(buf, fill_ts, sizeof(fill_ts)) != 0) {
if (dvb_ringbuffer_free(&dev->tsin_rbuf) >= 188) {
dvb_ringbuffer_write(&dev->tsin_rbuf, buf, 188);
wake_up(&dev->tsin_rbuf.queue);
}
}
}
void *tsin_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags)
{
struct ngene_channel *chan = priv;
struct ngene *dev = chan->dev;
int tsoff;
if (flags & DF_SWAP32)
swap_buffer(buf, len);
if (dev->ci.en && chan->number == 2) {
/* blindly copy buffers if ci_tsfix is disabled */
if (!ci_tsfix) {
while (len >= 188) {
tsin_copy_stripped(dev, buf);
buf += 188;
len -= 188;
}
return NULL;
}
/* ci_tsfix = 1 */
/*
* since the remainder of the TS packet which got cut off
* in the previous tsin_exchange() run is at the beginning
* of the new TS buffer, append this to the temp buffer and
* send it to the DVB ringbuffer afterwards.
*/
if (chan->tsin_offset) {
memcpy(&chan->tsin_buffer[(188 - chan->tsin_offset)],
buf, chan->tsin_offset);
tsin_copy_stripped(dev, &chan->tsin_buffer);
buf += chan->tsin_offset;
len -= chan->tsin_offset;
}
/*
* copy TS packets to the DVB ringbuffer and detect new offset
* shifts by checking for a valid TS SYNC byte
*/
while (len >= 188) {
if (*((char *)buf) != 0x47) {
/*
* no SYNC header, find new offset shift
* (max. 188 bytes, tsoff will be mod 188)
*/
tsoff = tsin_find_offset(buf, len);
if (tsoff > 0) {
chan->tsin_offset += tsoff;
chan->tsin_offset %= 188;
buf += tsoff;
len -= tsoff;
dev_info(&dev->pci_dev->dev,
"%s(): tsin_offset shift by %d on channel %d\n",
__func__, tsoff,
chan->number);
/*
* offset corrected. re-check remaining
* len for a full TS frame, break and
* skip to fragment handling if < 188.
*/
if (len < 188)
break;
}
}
tsin_copy_stripped(dev, buf);
buf += 188;
len -= 188;
}
/*
* if a fragment is left, copy to temp buffer. The remainder
* will be appended in the next tsin_exchange() iteration.
*/
if (len > 0 && len < 188)
memcpy(&chan->tsin_buffer, buf, len);
return NULL;
}
if (chan->users > 0)
dvb_dmx_swfilter(&chan->demux, buf, len);
return NULL;
}
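/*
 * Illustrative, standalone resync sketch (hypothetical helper, not driver
 * code): generic MPEG-TS realignment looks for a 0x47 sync byte that
 * repeats every 188 bytes, whereas tsin_find_offset() above matches the
 * known filler packet instead.
 */
#include <stdint.h>
#include <stddef.h>

#define DEMO_TS_PKT 188

static int demo_ts_sync_offset(const uint8_t *buf, size_t len)
{
	size_t off, probe;

	for (off = 0; off < DEMO_TS_PKT && off < len; off++) {
		for (probe = off;
		     probe < len && buf[probe] == 0x47;
		     probe += DEMO_TS_PKT)
			;
		if (probe >= len)	/* every probed packet had a sync byte */
			return (int)off;
	}
	return -1;			/* no consistent alignment found */
}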
void *tsout_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags)
{
struct ngene_channel *chan = priv;
struct ngene *dev = chan->dev;
u32 alen;
alen = dvb_ringbuffer_avail(&dev->tsout_rbuf);
alen -= alen % 188;
if (alen < len)
FillTSBuffer(buf + alen, len - alen, flags);
else
alen = len;
dvb_ringbuffer_read(&dev->tsout_rbuf, buf, alen);
if (flags & DF_SWAP32)
swap_buffer((u32 *)buf, alen);
wake_up_interruptible(&dev->tsout_rbuf.queue);
return buf;
}
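/*
 * Standalone arithmetic sketch (hypothetical helper, not driver code):
 * tsout_exchange() above only sends whole 188-byte TS packets from the
 * ring buffer and pads the rest of the DMA buffer with filler packets,
 * so the split is a simple mod-188 computation.
 */
#include <stdint.h>

struct demo_tsout_split {
	uint32_t payload;	/* bytes taken from the ring buffer */
	uint32_t filler;	/* bytes to pad with stuffing packets */
};

static struct demo_tsout_split demo_tsout_split(uint32_t avail, uint32_t wanted)
{
	struct demo_tsout_split s;

	avail -= avail % 188;			/* whole packets only */
	s.payload = avail < wanted ? avail : wanted;
	s.filler = wanted - s.payload;
	return s;
}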
int ngene_start_feed(struct dvb_demux_feed *dvbdmxfeed)
{
struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
struct ngene_channel *chan = dvbdmx->priv;
if (chan->users == 0) {
if (!chan->dev->cmd_timeout_workaround || !chan->running)
set_transfer(chan, 1);
}
return ++chan->users;
}
int ngene_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{
struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
struct ngene_channel *chan = dvbdmx->priv;
if (--chan->users)
return chan->users;
if (!chan->dev->cmd_timeout_workaround)
set_transfer(chan, 0);
return 0;
}
int my_dvb_dmx_ts_card_init(struct dvb_demux *dvbdemux, char *id,
int (*start_feed)(struct dvb_demux_feed *),
int (*stop_feed)(struct dvb_demux_feed *),
void *priv)
{
dvbdemux->priv = priv;
dvbdemux->filternum = 256;
dvbdemux->feednum = 256;
dvbdemux->start_feed = start_feed;
dvbdemux->stop_feed = stop_feed;
dvbdemux->write_to_decoder = NULL;
dvbdemux->dmx.capabilities = (DMX_TS_FILTERING |
DMX_SECTION_FILTERING |
DMX_MEMORY_BASED_FILTERING);
return dvb_dmx_init(dvbdemux);
}
int my_dvb_dmxdev_ts_card_init(struct dmxdev *dmxdev,
struct dvb_demux *dvbdemux,
struct dmx_frontend *hw_frontend,
struct dmx_frontend *mem_frontend,
struct dvb_adapter *dvb_adapter)
{
int ret;
dmxdev->filternum = 256;
dmxdev->demux = &dvbdemux->dmx;
dmxdev->capabilities = 0;
ret = dvb_dmxdev_init(dmxdev, dvb_adapter);
if (ret < 0)
return ret;
hw_frontend->source = DMX_FRONTEND_0;
dvbdemux->dmx.add_frontend(&dvbdemux->dmx, hw_frontend);
mem_frontend->source = DMX_MEMORY_FE;
dvbdemux->dmx.add_frontend(&dvbdemux->dmx, mem_frontend);
return dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, hw_frontend);
}
| linux-master | drivers/media/pci/ngene/ngene-dvb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ngene.c: nGene PCIe bridge driver
*
* Copyright (C) 2005-2007 Micronas
*
* Copyright (C) 2008-2009 Ralph Metzler <[email protected]>
* Modifications for new nGene firmware,
* support for EEPROM-copying,
* support for new dual DVB-S2 card prototype
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/poll.h>
#include <linux/io.h>
#include <asm/div64.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/byteorder/generic.h>
#include <linux/firmware.h>
#include <linux/vmalloc.h>
#include "ngene.h"
static int one_adapter;
module_param(one_adapter, int, 0444);
MODULE_PARM_DESC(one_adapter, "Use only one adapter.");
static int shutdown_workaround;
module_param(shutdown_workaround, int, 0644);
MODULE_PARM_DESC(shutdown_workaround, "Activate workaround for shutdown problem with some chipsets.");
static int debug;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Print debugging information.");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
#define ngwriteb(dat, adr) writeb((dat), dev->iomem + (adr))
#define ngwritel(dat, adr) writel((dat), dev->iomem + (adr))
#define ngreadl(adr) readl(dev->iomem + (adr))
#define ngreadb(adr) readb(dev->iomem + (adr))
#define ngcpyto(adr, src, count) memcpy_toio(dev->iomem + (adr), (src), (count))
#define ngcpyfrom(dst, adr, count) memcpy_fromio((dst), dev->iomem + (adr), (count))
/****************************************************************************/
/* nGene interrupt handler **************************************************/
/****************************************************************************/
static void event_tasklet(struct tasklet_struct *t)
{
struct ngene *dev = from_tasklet(dev, t, event_tasklet);
while (dev->EventQueueReadIndex != dev->EventQueueWriteIndex) {
struct EVENT_BUFFER Event =
dev->EventQueue[dev->EventQueueReadIndex];
dev->EventQueueReadIndex =
(dev->EventQueueReadIndex + 1) & (EVENT_QUEUE_SIZE - 1);
if ((Event.UARTStatus & 0x01) && (dev->TxEventNotify))
dev->TxEventNotify(dev, Event.TimeStamp);
if ((Event.UARTStatus & 0x02) && (dev->RxEventNotify))
dev->RxEventNotify(dev, Event.TimeStamp,
Event.RXCharacter);
}
}
static void demux_tasklet(struct tasklet_struct *t)
{
struct ngene_channel *chan = from_tasklet(chan, t, demux_tasklet);
struct device *pdev = &chan->dev->pci_dev->dev;
struct SBufferHeader *Cur = chan->nextBuffer;
spin_lock_irq(&chan->state_lock);
while (Cur->ngeneBuffer.SR.Flags & 0x80) {
if (chan->mode & NGENE_IO_TSOUT) {
u32 Flags = chan->DataFormatFlags;
if (Cur->ngeneBuffer.SR.Flags & 0x20)
Flags |= BEF_OVERFLOW;
if (chan->pBufferExchange) {
if (!chan->pBufferExchange(chan,
Cur->Buffer1,
chan->Capture1Length,
Cur->ngeneBuffer.SR.Clock,
Flags)) {
/*
 * We didn't get data. Clear the in-service flag so
 * we get called again on the next interrupt, but
 * leave the fill/empty (0x80) flag alone to avoid
 * the hardware running out of buffers during
 * startup; we only hold off in run state (the
 * source may be late delivering data).
 */
if (chan->HWState == HWSTATE_RUN) {
Cur->ngeneBuffer.SR.Flags &= ~0x40;
break; /* Stop processing stream */
}
} else {
/* We got a valid buffer,
so switch to run state */
chan->HWState = HWSTATE_RUN;
}
} else {
dev_err(pdev, "OOPS\n");
if (chan->HWState == HWSTATE_RUN) {
Cur->ngeneBuffer.SR.Flags &= ~0x40;
break; /* Stop processing stream */
}
}
if (chan->AudioDTOUpdated) {
dev_info(pdev, "Update AudioDTO = %d\n",
chan->AudioDTOValue);
Cur->ngeneBuffer.SR.DTOUpdate =
chan->AudioDTOValue;
chan->AudioDTOUpdated = 0;
}
} else {
if (chan->HWState == HWSTATE_RUN) {
u32 Flags = chan->DataFormatFlags;
IBufferExchange *exch1 = chan->pBufferExchange;
IBufferExchange *exch2 = chan->pBufferExchange2;
if (Cur->ngeneBuffer.SR.Flags & 0x01)
Flags |= BEF_EVEN_FIELD;
if (Cur->ngeneBuffer.SR.Flags & 0x20)
Flags |= BEF_OVERFLOW;
spin_unlock_irq(&chan->state_lock);
if (exch1)
exch1(chan, Cur->Buffer1,
chan->Capture1Length,
Cur->ngeneBuffer.SR.Clock,
Flags);
if (exch2)
exch2(chan, Cur->Buffer2,
chan->Capture2Length,
Cur->ngeneBuffer.SR.Clock,
Flags);
spin_lock_irq(&chan->state_lock);
} else if (chan->HWState != HWSTATE_STOP)
chan->HWState = HWSTATE_RUN;
}
Cur->ngeneBuffer.SR.Flags = 0x00;
Cur = Cur->Next;
}
chan->nextBuffer = Cur;
spin_unlock_irq(&chan->state_lock);
}
static irqreturn_t irq_handler(int irq, void *dev_id)
{
struct ngene *dev = (struct ngene *)dev_id;
struct device *pdev = &dev->pci_dev->dev;
u32 icounts = 0;
irqreturn_t rc = IRQ_NONE;
u32 i = MAX_STREAM;
u8 *tmpCmdDoneByte;
if (dev->BootFirmware) {
icounts = ngreadl(NGENE_INT_COUNTS);
if (icounts != dev->icounts) {
ngwritel(0, FORCE_NMI);
dev->cmd_done = 1;
wake_up(&dev->cmd_wq);
dev->icounts = icounts;
rc = IRQ_HANDLED;
}
return rc;
}
ngwritel(0, FORCE_NMI);
spin_lock(&dev->cmd_lock);
tmpCmdDoneByte = dev->CmdDoneByte;
if (tmpCmdDoneByte &&
(*tmpCmdDoneByte ||
(dev->ngenetohost[0] == 1 && dev->ngenetohost[1] != 0))) {
dev->CmdDoneByte = NULL;
dev->cmd_done = 1;
wake_up(&dev->cmd_wq);
rc = IRQ_HANDLED;
}
spin_unlock(&dev->cmd_lock);
if (dev->EventBuffer->EventStatus & 0x80) {
u8 nextWriteIndex =
(dev->EventQueueWriteIndex + 1) &
(EVENT_QUEUE_SIZE - 1);
if (nextWriteIndex != dev->EventQueueReadIndex) {
dev->EventQueue[dev->EventQueueWriteIndex] =
*(dev->EventBuffer);
dev->EventQueueWriteIndex = nextWriteIndex;
} else {
dev_err(pdev, "event overflow\n");
dev->EventQueueOverflowCount += 1;
dev->EventQueueOverflowFlag = 1;
}
dev->EventBuffer->EventStatus &= ~0x80;
tasklet_schedule(&dev->event_tasklet);
rc = IRQ_HANDLED;
}
while (i > 0) {
i--;
spin_lock(&dev->channel[i].state_lock);
/* if (dev->channel[i].State>=KSSTATE_RUN) { */
if (dev->channel[i].nextBuffer) {
if ((dev->channel[i].nextBuffer->ngeneBuffer.SR.Flags & 0xC0) == 0x80) {
dev->channel[i].nextBuffer->ngeneBuffer.SR.Flags |= 0x40;
tasklet_schedule(&dev->channel[i].demux_tasklet);
rc = IRQ_HANDLED;
}
}
spin_unlock(&dev->channel[i].state_lock);
}
/* Request might have been processed by a previous call. */
return IRQ_HANDLED;
}
/****************************************************************************/
/* nGene command interface **************************************************/
/****************************************************************************/
static void dump_command_io(struct ngene *dev)
{
struct device *pdev = &dev->pci_dev->dev;
u8 buf[8], *b;
ngcpyfrom(buf, HOST_TO_NGENE, 8);
dev_err(pdev, "host_to_ngene (%04x): %*ph\n", HOST_TO_NGENE, 8, buf);
ngcpyfrom(buf, NGENE_TO_HOST, 8);
dev_err(pdev, "ngene_to_host (%04x): %*ph\n", NGENE_TO_HOST, 8, buf);
b = dev->hosttongene;
dev_err(pdev, "dev->hosttongene (%p): %*ph\n", b, 8, b);
b = dev->ngenetohost;
dev_err(pdev, "dev->ngenetohost (%p): %*ph\n", b, 8, b);
}
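/*
 * Command handshake (ngene_command_mutex() below, called with cmd_mutex
 * held): the command is copied into the DMA-visible FW interface buffer
 * (and, while still booting firmware, directly into the HOST_TO_NGENE
 * window), the completion byte in the ngenetohost area is cleared, and the
 * 8051 is notified via FORCE_INT. The interrupt handler sets dev->cmd_done
 * once the completion byte changes; we wait up to two seconds for that
 * before reporting a command timeout.
 */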
static int ngene_command_mutex(struct ngene *dev, struct ngene_command *com)
{
struct device *pdev = &dev->pci_dev->dev;
int ret;
u8 *tmpCmdDoneByte;
dev->cmd_done = 0;
if (com->cmd.hdr.Opcode == CMD_FWLOAD_PREPARE) {
dev->BootFirmware = 1;
dev->icounts = ngreadl(NGENE_INT_COUNTS);
ngwritel(0, NGENE_COMMAND);
ngwritel(0, NGENE_COMMAND_HI);
ngwritel(0, NGENE_STATUS);
ngwritel(0, NGENE_STATUS_HI);
ngwritel(0, NGENE_EVENT);
ngwritel(0, NGENE_EVENT_HI);
} else if (com->cmd.hdr.Opcode == CMD_FWLOAD_FINISH) {
u64 fwio = dev->PAFWInterfaceBuffer;
ngwritel(fwio & 0xffffffff, NGENE_COMMAND);
ngwritel(fwio >> 32, NGENE_COMMAND_HI);
ngwritel((fwio + 256) & 0xffffffff, NGENE_STATUS);
ngwritel((fwio + 256) >> 32, NGENE_STATUS_HI);
ngwritel((fwio + 512) & 0xffffffff, NGENE_EVENT);
ngwritel((fwio + 512) >> 32, NGENE_EVENT_HI);
}
memcpy(dev->FWInterfaceBuffer, com->cmd.raw8, com->in_len + 2);
if (dev->BootFirmware)
ngcpyto(HOST_TO_NGENE, com->cmd.raw8, com->in_len + 2);
spin_lock_irq(&dev->cmd_lock);
tmpCmdDoneByte = dev->ngenetohost + com->out_len;
if (!com->out_len)
tmpCmdDoneByte++;
*tmpCmdDoneByte = 0;
dev->ngenetohost[0] = 0;
dev->ngenetohost[1] = 0;
dev->CmdDoneByte = tmpCmdDoneByte;
spin_unlock_irq(&dev->cmd_lock);
/* Notify 8051. */
ngwritel(1, FORCE_INT);
ret = wait_event_timeout(dev->cmd_wq, dev->cmd_done == 1, 2 * HZ);
if (!ret) {
/*ngwritel(0, FORCE_NMI);*/
dev_err(pdev, "Command timeout cmd=%02x prev=%02x\n",
com->cmd.hdr.Opcode, dev->prev_cmd);
dump_command_io(dev);
return -1;
}
if (com->cmd.hdr.Opcode == CMD_FWLOAD_FINISH)
dev->BootFirmware = 0;
dev->prev_cmd = com->cmd.hdr.Opcode;
if (!com->out_len)
return 0;
memcpy(com->cmd.raw8, dev->ngenetohost, com->out_len);
return 0;
}
int ngene_command(struct ngene *dev, struct ngene_command *com)
{
int result;
mutex_lock(&dev->cmd_mutex);
result = ngene_command_mutex(dev, com);
mutex_unlock(&dev->cmd_mutex);
return result;
}
static int ngene_command_load_firmware(struct ngene *dev,
u8 *ngene_fw, u32 size)
{
#define FIRSTCHUNK (1024)
u32 cleft;
struct ngene_command com;
com.cmd.hdr.Opcode = CMD_FWLOAD_PREPARE;
com.cmd.hdr.Length = 0;
com.in_len = 0;
com.out_len = 0;
ngene_command(dev, &com);
cleft = (size + 3) & ~3;
if (cleft > FIRSTCHUNK) {
ngcpyto(PROGRAM_SRAM + FIRSTCHUNK, ngene_fw + FIRSTCHUNK,
cleft - FIRSTCHUNK);
cleft = FIRSTCHUNK;
}
ngcpyto(DATA_FIFO_AREA, ngene_fw, cleft);
memset(&com, 0, sizeof(struct ngene_command));
com.cmd.hdr.Opcode = CMD_FWLOAD_FINISH;
com.cmd.hdr.Length = 4;
com.cmd.FWLoadFinish.Address = DATA_FIFO_AREA;
com.cmd.FWLoadFinish.Length = (unsigned short)cleft;
com.in_len = 4;
com.out_len = 0;
return ngene_command(dev, &com);
}
static int ngene_command_config_buf(struct ngene *dev, u8 config)
{
struct ngene_command com;
com.cmd.hdr.Opcode = CMD_CONFIGURE_BUFFER;
com.cmd.hdr.Length = 1;
com.cmd.ConfigureBuffers.config = config;
com.in_len = 1;
com.out_len = 0;
if (ngene_command(dev, &com) < 0)
return -EIO;
return 0;
}
static int ngene_command_config_free_buf(struct ngene *dev, u8 *config)
{
struct ngene_command com;
com.cmd.hdr.Opcode = CMD_CONFIGURE_FREE_BUFFER;
com.cmd.hdr.Length = 6;
memcpy(&com.cmd.ConfigureFreeBuffers.config, config, 6);
com.in_len = 6;
com.out_len = 0;
if (ngene_command(dev, &com) < 0)
return -EIO;
return 0;
}
int ngene_command_gpio_set(struct ngene *dev, u8 select, u8 level)
{
struct ngene_command com;
com.cmd.hdr.Opcode = CMD_SET_GPIO_PIN;
com.cmd.hdr.Length = 1;
com.cmd.SetGpioPin.select = select | (level << 7);
com.in_len = 1;
com.out_len = 0;
return ngene_command(dev, &com);
}
/*
 * 02000640 is sample on rising edge.
 * 02000740 is sample on falling edge.
 * 02000040 is ignore "valid" signal.
 *
 * 0: FD_CTL1     bits 7,6 must be 0,1
 *                7: disable (fw controlled)
 *                6: 0 = AUX, 1 = TS
 *                5: 0 = par, 1 = ser
 *                4: 0 = lsb, 1 = msb
 *                3,2: reserved
 *                1,0: 0 = no sync, 1 = use ext. start, 2 = use 0x47, 3 = both
 * 1: FD_CTL2     3: valid must be high, 2: use valid, 1: edge
 * 2: FD_STA      read-only, 0: sync
 * 3: FD_INSYNC   number of 0x47s to trigger "in sync"
 * 4: FD_OUTSYNC  number of 0x47s to trigger "out of sync"
 * 5: FD_MAXBYTE1 low-order byte of bytes per packet
 * 6: FD_MAXBYTE2 high-order byte of bytes per packet
 * 7: top byte is unused
 */
/****************************************************************************/
static u8 TSFeatureDecoderSetup[8 * 5] = {
0x42, 0x00, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00,
0x40, 0x06, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* DRXH */
0x71, 0x07, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* DRXHser */
0x72, 0x00, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* S2ser */
0x40, 0x07, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* LGDT3303 */
};
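/*
 * Each 8-byte row above is one register set in the FD_CTL1..FD_MAXBYTE2
 * layout described in the comment before this table; the row used for a
 * given stream is selected via dev->card_info->tsf[stream] in
 * ngene_command_stream_control().
 */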
/* Set NGENE I2S Config to 16 bit packed */
static u8 I2SConfiguration[] = {
0x00, 0x10, 0x00, 0x00,
0x80, 0x10, 0x00, 0x00,
};
static u8 SPDIFConfiguration[10] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
/* Set NGENE I2S Config to transport stream compatible mode */
static u8 TS_I2SConfiguration[4] = { 0x3E, 0x18, 0x00, 0x00 };
static u8 TS_I2SOutConfiguration[4] = { 0x80, 0x04, 0x00, 0x00 };
static u8 ITUDecoderSetup[4][16] = {
{0x1c, 0x13, 0x01, 0x68, 0x3d, 0x90, 0x14, 0x20, /* SDTV */
0x00, 0x00, 0x01, 0xb0, 0x9c, 0x00, 0x00, 0x00},
{0x9c, 0x03, 0x23, 0xC0, 0x60, 0x0E, 0x13, 0x00,
0x00, 0x00, 0x00, 0x01, 0xB0, 0x00, 0x00, 0x00},
{0x9f, 0x00, 0x23, 0xC0, 0x60, 0x0F, 0x13, 0x00, /* HDTV 1080i50 */
0x00, 0x00, 0x00, 0x01, 0xB0, 0x00, 0x00, 0x00},
{0x9c, 0x01, 0x23, 0xC0, 0x60, 0x0E, 0x13, 0x00, /* HDTV 1080i60 */
0x00, 0x00, 0x00, 0x01, 0xB0, 0x00, 0x00, 0x00},
};
/*
* 50 48 60 identical
* 27p50 9f 00 22 80 42 69 18 ...
* 27p60 93 00 22 80 82 69 1c ...
*/
/* Maxbyte to 1144 (for raw data) */
static u8 ITUFeatureDecoderSetup[8] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x04, 0x00
};
void FillTSBuffer(void *Buffer, int Length, u32 Flags)
{
u32 *ptr = Buffer;
memset(Buffer, TS_FILLER, Length);
while (Length > 0) {
if (Flags & DF_SWAP32)
*ptr = 0x471FFF10;
else
*ptr = 0x10FF1F47;
ptr += (188 / 4);
Length -= 188;
}
}
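/*
 * Note on FillTSBuffer(): the buffer is filled with TS_FILLER and the first
 * four bytes of every 188-byte packet are overwritten with a null-packet
 * header (sync byte 0x47, PID 0x1fff), stored byte-swapped when DF_SWAP32
 * is set, so the TS output keeps sending valid idle packets.
 */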
static void flush_buffers(struct ngene_channel *chan)
{
u8 val;
do {
msleep(1);
spin_lock_irq(&chan->state_lock);
val = chan->nextBuffer->ngeneBuffer.SR.Flags & 0x80;
spin_unlock_irq(&chan->state_lock);
} while (val);
}
static void clear_buffers(struct ngene_channel *chan)
{
struct SBufferHeader *Cur = chan->nextBuffer;
do {
memset(&Cur->ngeneBuffer.SR, 0, sizeof(Cur->ngeneBuffer.SR));
if (chan->mode & NGENE_IO_TSOUT)
FillTSBuffer(Cur->Buffer1,
chan->Capture1Length,
chan->DataFormatFlags);
Cur = Cur->Next;
} while (Cur != chan->nextBuffer);
if (chan->mode & NGENE_IO_TSOUT) {
chan->nextBuffer->ngeneBuffer.SR.DTOUpdate =
chan->AudioDTOValue;
chan->AudioDTOUpdated = 0;
Cur = chan->TSIdleBuffer.Head;
do {
memset(&Cur->ngeneBuffer.SR, 0,
sizeof(Cur->ngeneBuffer.SR));
FillTSBuffer(Cur->Buffer1,
chan->Capture1Length,
chan->DataFormatFlags);
Cur = Cur->Next;
} while (Cur != chan->TSIdleBuffer.Head);
}
}
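/*
 * Stream start/stop command: bit 7 of 'control' selects start (set) vs.
 * stop (clear). On stop, a running channel is moved back to KSSTATE_ACQUIRE
 * and HWSTATE_STOP and flush_buffers() waits for the hardware to release
 * its current buffer. On start, the per-mode setup block (I2S, SPDIF, TS
 * feature decoder or ITU decoder table) is copied into the command and the
 * ring buffers are cleared before the channel is switched to KSSTATE_RUN.
 */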
static int ngene_command_stream_control(struct ngene *dev, u8 stream,
u8 control, u8 mode, u8 flags)
{
struct device *pdev = &dev->pci_dev->dev;
struct ngene_channel *chan = &dev->channel[stream];
struct ngene_command com;
u16 BsUVI = ((stream & 1) ? 0x9400 : 0x9300);
u16 BsSDI = ((stream & 1) ? 0x9600 : 0x9500);
u16 BsSPI = ((stream & 1) ? 0x9800 : 0x9700);
u16 BsSDO = 0x9B00;
memset(&com, 0, sizeof(com));
com.cmd.hdr.Opcode = CMD_CONTROL;
com.cmd.hdr.Length = sizeof(struct FW_STREAM_CONTROL) - 2;
com.cmd.StreamControl.Stream = stream | (control ? 8 : 0);
if (chan->mode & NGENE_IO_TSOUT)
com.cmd.StreamControl.Stream |= 0x07;
com.cmd.StreamControl.Control = control |
(flags & SFLAG_ORDER_LUMA_CHROMA);
com.cmd.StreamControl.Mode = mode;
com.in_len = sizeof(struct FW_STREAM_CONTROL);
com.out_len = 0;
dev_dbg(pdev, "Stream=%02x, Control=%02x, Mode=%02x\n",
com.cmd.StreamControl.Stream, com.cmd.StreamControl.Control,
com.cmd.StreamControl.Mode);
chan->Mode = mode;
if (!(control & 0x80)) {
spin_lock_irq(&chan->state_lock);
if (chan->State == KSSTATE_RUN) {
chan->State = KSSTATE_ACQUIRE;
chan->HWState = HWSTATE_STOP;
spin_unlock_irq(&chan->state_lock);
if (ngene_command(dev, &com) < 0)
return -1;
/* clear_buffers(chan); */
flush_buffers(chan);
return 0;
}
spin_unlock_irq(&chan->state_lock);
return 0;
}
if (mode & SMODE_AUDIO_CAPTURE) {
com.cmd.StreamControl.CaptureBlockCount =
chan->Capture1Length / AUDIO_BLOCK_SIZE;
com.cmd.StreamControl.Buffer_Address = chan->RingBuffer.PAHead;
} else if (mode & SMODE_TRANSPORT_STREAM) {
com.cmd.StreamControl.CaptureBlockCount =
chan->Capture1Length / TS_BLOCK_SIZE;
com.cmd.StreamControl.MaxLinesPerField =
chan->Capture1Length / TS_BLOCK_SIZE;
com.cmd.StreamControl.Buffer_Address =
chan->TSRingBuffer.PAHead;
if (chan->mode & NGENE_IO_TSOUT) {
com.cmd.StreamControl.BytesPerVBILine =
chan->Capture1Length / TS_BLOCK_SIZE;
com.cmd.StreamControl.Stream |= 0x07;
}
} else {
com.cmd.StreamControl.BytesPerVideoLine = chan->nBytesPerLine;
com.cmd.StreamControl.MaxLinesPerField = chan->nLines;
com.cmd.StreamControl.MinLinesPerField = 100;
com.cmd.StreamControl.Buffer_Address = chan->RingBuffer.PAHead;
if (mode & SMODE_VBI_CAPTURE) {
com.cmd.StreamControl.MaxVBILinesPerField =
chan->nVBILines;
com.cmd.StreamControl.MinVBILinesPerField = 0;
com.cmd.StreamControl.BytesPerVBILine =
chan->nBytesPerVBILine;
}
if (flags & SFLAG_COLORBAR)
com.cmd.StreamControl.Stream |= 0x04;
}
spin_lock_irq(&chan->state_lock);
if (mode & SMODE_AUDIO_CAPTURE) {
chan->nextBuffer = chan->RingBuffer.Head;
if (mode & SMODE_AUDIO_SPDIF) {
com.cmd.StreamControl.SetupDataLen =
sizeof(SPDIFConfiguration);
com.cmd.StreamControl.SetupDataAddr = BsSPI;
memcpy(com.cmd.StreamControl.SetupData,
SPDIFConfiguration, sizeof(SPDIFConfiguration));
} else {
com.cmd.StreamControl.SetupDataLen = 4;
com.cmd.StreamControl.SetupDataAddr = BsSDI;
memcpy(com.cmd.StreamControl.SetupData,
I2SConfiguration +
4 * dev->card_info->i2s[stream], 4);
}
} else if (mode & SMODE_TRANSPORT_STREAM) {
chan->nextBuffer = chan->TSRingBuffer.Head;
if (stream >= STREAM_AUDIOIN1) {
if (chan->mode & NGENE_IO_TSOUT) {
com.cmd.StreamControl.SetupDataLen =
sizeof(TS_I2SOutConfiguration);
com.cmd.StreamControl.SetupDataAddr = BsSDO;
memcpy(com.cmd.StreamControl.SetupData,
TS_I2SOutConfiguration,
sizeof(TS_I2SOutConfiguration));
} else {
com.cmd.StreamControl.SetupDataLen =
sizeof(TS_I2SConfiguration);
com.cmd.StreamControl.SetupDataAddr = BsSDI;
memcpy(com.cmd.StreamControl.SetupData,
TS_I2SConfiguration,
sizeof(TS_I2SConfiguration));
}
} else {
com.cmd.StreamControl.SetupDataLen = 8;
com.cmd.StreamControl.SetupDataAddr = BsUVI + 0x10;
memcpy(com.cmd.StreamControl.SetupData,
TSFeatureDecoderSetup +
8 * dev->card_info->tsf[stream], 8);
}
} else {
chan->nextBuffer = chan->RingBuffer.Head;
com.cmd.StreamControl.SetupDataLen =
16 + sizeof(ITUFeatureDecoderSetup);
com.cmd.StreamControl.SetupDataAddr = BsUVI;
memcpy(com.cmd.StreamControl.SetupData,
ITUDecoderSetup[chan->itumode], 16);
memcpy(com.cmd.StreamControl.SetupData + 16,
ITUFeatureDecoderSetup, sizeof(ITUFeatureDecoderSetup));
}
clear_buffers(chan);
chan->State = KSSTATE_RUN;
if (mode & SMODE_TRANSPORT_STREAM)
chan->HWState = HWSTATE_RUN;
else
chan->HWState = HWSTATE_STARTUP;
spin_unlock_irq(&chan->state_lock);
if (ngene_command(dev, &com) < 0)
return -1;
return 0;
}
void set_transfer(struct ngene_channel *chan, int state)
{
struct device *pdev = &chan->dev->pci_dev->dev;
u8 control = 0, mode = 0, flags = 0;
struct ngene *dev = chan->dev;
int ret;
/*
dev_info(pdev, "st %d\n", state);
msleep(100);
*/
if (state) {
if (chan->running) {
dev_info(pdev, "already running\n");
return;
}
} else {
if (!chan->running) {
dev_info(pdev, "already stopped\n");
return;
}
}
if (dev->card_info->switch_ctrl)
dev->card_info->switch_ctrl(chan, 1, state ^ 1);
if (state) {
spin_lock_irq(&chan->state_lock);
/* dev_info(pdev, "lock=%08x\n",
ngreadl(0x9310)); */
dvb_ringbuffer_flush(&dev->tsout_rbuf);
control = 0x80;
if (chan->mode & (NGENE_IO_TSIN | NGENE_IO_TSOUT)) {
chan->Capture1Length = 512 * 188;
mode = SMODE_TRANSPORT_STREAM;
}
if (chan->mode & NGENE_IO_TSOUT) {
chan->pBufferExchange = tsout_exchange;
/* 0x66666666 = 50MHz *2^33 /250MHz */
chan->AudioDTOValue = 0x80000000;
chan->AudioDTOUpdated = 1;
}
if (chan->mode & NGENE_IO_TSIN)
chan->pBufferExchange = tsin_exchange;
spin_unlock_irq(&chan->state_lock);
}
/* else dev_info(pdev, "lock=%08x\n",
ngreadl(0x9310)); */
mutex_lock(&dev->stream_mutex);
ret = ngene_command_stream_control(dev, chan->number,
control, mode, flags);
mutex_unlock(&dev->stream_mutex);
if (!ret)
chan->running = state;
else
dev_err(pdev, "%s %d failed\n", __func__, state);
if (!state) {
spin_lock_irq(&chan->state_lock);
chan->pBufferExchange = NULL;
dvb_ringbuffer_flush(&dev->tsout_rbuf);
spin_unlock_irq(&chan->state_lock);
}
}
/****************************************************************************/
/* nGene hardware init and release functions ********************************/
/****************************************************************************/
static void free_ringbuffer(struct ngene *dev, struct SRingBufferDescriptor *rb)
{
struct SBufferHeader *Cur = rb->Head;
u32 j;
if (!Cur)
return;
for (j = 0; j < rb->NumBuffers; j++, Cur = Cur->Next) {
if (Cur->Buffer1)
dma_free_coherent(&dev->pci_dev->dev,
rb->Buffer1Length, Cur->Buffer1,
Cur->scList1->Address);
if (Cur->Buffer2)
dma_free_coherent(&dev->pci_dev->dev,
rb->Buffer2Length, Cur->Buffer2,
Cur->scList2->Address);
}
if (rb->SCListMem)
dma_free_coherent(&dev->pci_dev->dev, rb->SCListMemSize,
rb->SCListMem, rb->PASCListMem);
dma_free_coherent(&dev->pci_dev->dev, rb->MemSize, rb->Head,
rb->PAHead);
}
static void free_idlebuffer(struct ngene *dev,
struct SRingBufferDescriptor *rb,
struct SRingBufferDescriptor *tb)
{
int j;
struct SBufferHeader *Cur = tb->Head;
if (!rb->Head)
return;
free_ringbuffer(dev, rb);
for (j = 0; j < tb->NumBuffers; j++, Cur = Cur->Next) {
Cur->Buffer2 = NULL;
Cur->scList2 = NULL;
Cur->ngeneBuffer.Address_of_first_entry_2 = 0;
Cur->ngeneBuffer.Number_of_entries_2 = 0;
}
}
static void free_common_buffers(struct ngene *dev)
{
u32 i;
struct ngene_channel *chan;
for (i = STREAM_VIDEOIN1; i < MAX_STREAM; i++) {
chan = &dev->channel[i];
free_idlebuffer(dev, &chan->TSIdleBuffer, &chan->TSRingBuffer);
free_ringbuffer(dev, &chan->RingBuffer);
free_ringbuffer(dev, &chan->TSRingBuffer);
}
if (dev->OverflowBuffer)
dma_free_coherent(&dev->pci_dev->dev, OVERFLOW_BUFFER_SIZE,
dev->OverflowBuffer, dev->PAOverflowBuffer);
if (dev->FWInterfaceBuffer)
dma_free_coherent(&dev->pci_dev->dev, 4096,
dev->FWInterfaceBuffer,
dev->PAFWInterfaceBuffer);
}
/****************************************************************************/
/* Ring buffer handling *****************************************************/
/****************************************************************************/
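/*
 * create_ring_buffer() allocates all SBufferHeaders of one ring in a single
 * coherent DMA block and links them into a circular list twice: once via
 * the CPU pointers (Cur->Next) and once via the bus addresses
 * (ngeneBuffer.Next) that the hardware follows.
 */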
static int create_ring_buffer(struct pci_dev *pci_dev,
struct SRingBufferDescriptor *descr, u32 NumBuffers)
{
dma_addr_t tmp;
struct SBufferHeader *Head;
u32 i;
u32 MemSize = SIZEOF_SBufferHeader * NumBuffers;
u64 PARingBufferHead;
u64 PARingBufferCur;
u64 PARingBufferNext;
struct SBufferHeader *Cur, *Next;
descr->Head = NULL;
descr->MemSize = 0;
descr->PAHead = 0;
descr->NumBuffers = 0;
if (MemSize < 4096)
MemSize = 4096;
Head = dma_alloc_coherent(&pci_dev->dev, MemSize, &tmp, GFP_KERNEL);
PARingBufferHead = tmp;
if (!Head)
return -ENOMEM;
PARingBufferCur = PARingBufferHead;
Cur = Head;
for (i = 0; i < NumBuffers - 1; i++) {
Next = (struct SBufferHeader *)
(((u8 *) Cur) + SIZEOF_SBufferHeader);
PARingBufferNext = PARingBufferCur + SIZEOF_SBufferHeader;
Cur->Next = Next;
Cur->ngeneBuffer.Next = PARingBufferNext;
Cur = Next;
PARingBufferCur = PARingBufferNext;
}
/* Last Buffer points back to first one */
Cur->Next = Head;
Cur->ngeneBuffer.Next = PARingBufferHead;
descr->Head = Head;
descr->MemSize = MemSize;
descr->PAHead = PARingBufferHead;
descr->NumBuffers = NumBuffers;
return 0;
}
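/*
 * AllocateRingBuffers() attaches data buffers and scatter/gather lists to a
 * ring created above. For every buffer the first SG entry points at the
 * real data buffer; the remaining NUM_SCATTER_GATHER_ENTRIES - 1 entries
 * all point at the shared overflow buffer, so any overrun ends up in the
 * overflow area instead of unrelated memory.
 */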
static int AllocateRingBuffers(struct pci_dev *pci_dev,
dma_addr_t of,
struct SRingBufferDescriptor *pRingBuffer,
u32 Buffer1Length, u32 Buffer2Length)
{
dma_addr_t tmp;
u32 i, j;
u32 SCListMemSize = pRingBuffer->NumBuffers
* ((Buffer2Length != 0) ? (NUM_SCATTER_GATHER_ENTRIES * 2) :
NUM_SCATTER_GATHER_ENTRIES)
* sizeof(struct HW_SCATTER_GATHER_ELEMENT);
u64 PASCListMem;
struct HW_SCATTER_GATHER_ELEMENT *SCListEntry;
u64 PASCListEntry;
struct SBufferHeader *Cur;
void *SCListMem;
if (SCListMemSize < 4096)
SCListMemSize = 4096;
SCListMem = dma_alloc_coherent(&pci_dev->dev, SCListMemSize, &tmp,
GFP_KERNEL);
PASCListMem = tmp;
if (SCListMem == NULL)
return -ENOMEM;
pRingBuffer->SCListMem = SCListMem;
pRingBuffer->PASCListMem = PASCListMem;
pRingBuffer->SCListMemSize = SCListMemSize;
pRingBuffer->Buffer1Length = Buffer1Length;
pRingBuffer->Buffer2Length = Buffer2Length;
SCListEntry = SCListMem;
PASCListEntry = PASCListMem;
Cur = pRingBuffer->Head;
for (i = 0; i < pRingBuffer->NumBuffers; i += 1, Cur = Cur->Next) {
u64 PABuffer;
void *Buffer = dma_alloc_coherent(&pci_dev->dev,
Buffer1Length, &tmp, GFP_KERNEL);
PABuffer = tmp;
if (Buffer == NULL)
return -ENOMEM;
Cur->Buffer1 = Buffer;
SCListEntry->Address = PABuffer;
SCListEntry->Length = Buffer1Length;
Cur->scList1 = SCListEntry;
Cur->ngeneBuffer.Address_of_first_entry_1 = PASCListEntry;
Cur->ngeneBuffer.Number_of_entries_1 =
NUM_SCATTER_GATHER_ENTRIES;
SCListEntry += 1;
PASCListEntry += sizeof(struct HW_SCATTER_GATHER_ELEMENT);
#if NUM_SCATTER_GATHER_ENTRIES > 1
for (j = 0; j < NUM_SCATTER_GATHER_ENTRIES - 1; j += 1) {
SCListEntry->Address = of;
SCListEntry->Length = OVERFLOW_BUFFER_SIZE;
SCListEntry += 1;
PASCListEntry +=
sizeof(struct HW_SCATTER_GATHER_ELEMENT);
}
#endif
if (!Buffer2Length)
continue;
Buffer = dma_alloc_coherent(&pci_dev->dev, Buffer2Length,
&tmp, GFP_KERNEL);
PABuffer = tmp;
if (Buffer == NULL)
return -ENOMEM;
Cur->Buffer2 = Buffer;
SCListEntry->Address = PABuffer;
SCListEntry->Length = Buffer2Length;
Cur->scList2 = SCListEntry;
Cur->ngeneBuffer.Address_of_first_entry_2 = PASCListEntry;
Cur->ngeneBuffer.Number_of_entries_2 =
NUM_SCATTER_GATHER_ENTRIES;
SCListEntry += 1;
PASCListEntry += sizeof(struct HW_SCATTER_GATHER_ELEMENT);
#if NUM_SCATTER_GATHER_ENTRIES > 1
for (j = 0; j < NUM_SCATTER_GATHER_ENTRIES - 1; j++) {
SCListEntry->Address = of;
SCListEntry->Length = OVERFLOW_BUFFER_SIZE;
SCListEntry += 1;
PASCListEntry +=
sizeof(struct HW_SCATTER_GATHER_ELEMENT);
}
#endif
}
return 0;
}
static int FillTSIdleBuffer(struct SRingBufferDescriptor *pIdleBuffer,
struct SRingBufferDescriptor *pRingBuffer)
{
/* Copy pointer to scatter gather list in TSRingbuffer
structure for buffer 2
Load number of buffer
*/
u32 n = pRingBuffer->NumBuffers;
/* Point to first buffer entry */
struct SBufferHeader *Cur = pRingBuffer->Head;
int i;
/* Loop through all buffer and set Buffer 2 pointers to TSIdlebuffer */
for (i = 0; i < n; i++) {
Cur->Buffer2 = pIdleBuffer->Head->Buffer1;
Cur->scList2 = pIdleBuffer->Head->scList1;
Cur->ngeneBuffer.Address_of_first_entry_2 =
pIdleBuffer->Head->ngeneBuffer.Address_of_first_entry_1;
Cur->ngeneBuffer.Number_of_entries_2 =
pIdleBuffer->Head->ngeneBuffer.Number_of_entries_1;
Cur = Cur->Next;
}
return 0;
}
static u32 RingBufferSizes[MAX_STREAM] = {
RING_SIZE_VIDEO,
RING_SIZE_VIDEO,
RING_SIZE_AUDIO,
RING_SIZE_AUDIO,
RING_SIZE_AUDIO,
};
static u32 Buffer1Sizes[MAX_STREAM] = {
MAX_VIDEO_BUFFER_SIZE,
MAX_VIDEO_BUFFER_SIZE,
MAX_AUDIO_BUFFER_SIZE,
MAX_AUDIO_BUFFER_SIZE,
MAX_AUDIO_BUFFER_SIZE
};
static u32 Buffer2Sizes[MAX_STREAM] = {
MAX_VBI_BUFFER_SIZE,
MAX_VBI_BUFFER_SIZE,
0,
0,
0
};
static int AllocCommonBuffers(struct ngene *dev)
{
int status = 0, i;
dev->FWInterfaceBuffer = dma_alloc_coherent(&dev->pci_dev->dev, 4096,
&dev->PAFWInterfaceBuffer,
GFP_KERNEL);
if (!dev->FWInterfaceBuffer)
return -ENOMEM;
dev->hosttongene = dev->FWInterfaceBuffer;
dev->ngenetohost = dev->FWInterfaceBuffer + 256;
dev->EventBuffer = dev->FWInterfaceBuffer + 512;
dev->OverflowBuffer = dma_alloc_coherent(&dev->pci_dev->dev,
OVERFLOW_BUFFER_SIZE,
&dev->PAOverflowBuffer, GFP_KERNEL);
if (!dev->OverflowBuffer)
return -ENOMEM;
for (i = STREAM_VIDEOIN1; i < MAX_STREAM; i++) {
int type = dev->card_info->io_type[i];
dev->channel[i].State = KSSTATE_STOP;
if (type & (NGENE_IO_TV | NGENE_IO_HDTV | NGENE_IO_AIN)) {
status = create_ring_buffer(dev->pci_dev,
&dev->channel[i].RingBuffer,
RingBufferSizes[i]);
if (status < 0)
break;
if (type & (NGENE_IO_TV | NGENE_IO_AIN)) {
status = AllocateRingBuffers(dev->pci_dev,
dev->PAOverflowBuffer,
&dev->channel[i].RingBuffer,
Buffer1Sizes[i],
Buffer2Sizes[i]);
if (status < 0)
break;
} else if (type & NGENE_IO_HDTV) {
status = AllocateRingBuffers(dev->pci_dev,
dev->PAOverflowBuffer,
&dev->channel[i].RingBuffer,
MAX_HDTV_BUFFER_SIZE, 0);
if (status < 0)
break;
}
}
if (type & (NGENE_IO_TSIN | NGENE_IO_TSOUT)) {
status = create_ring_buffer(dev->pci_dev,
&dev->channel[i].TSRingBuffer,
RING_SIZE_TS);
if (status < 0)
break;
status = AllocateRingBuffers(dev->pci_dev,
dev->PAOverflowBuffer,
&dev->channel[i].TSRingBuffer,
MAX_TS_BUFFER_SIZE, 0);
if (status)
break;
}
if (type & NGENE_IO_TSOUT) {
status = create_ring_buffer(dev->pci_dev,
&dev->channel[i].TSIdleBuffer, 1);
if (status < 0)
break;
status = AllocateRingBuffers(dev->pci_dev,
dev->PAOverflowBuffer,
&dev->channel[i].TSIdleBuffer,
MAX_TS_BUFFER_SIZE, 0);
if (status)
break;
FillTSIdleBuffer(&dev->channel[i].TSIdleBuffer,
&dev->channel[i].TSRingBuffer);
}
}
return status;
}
static void ngene_release_buffers(struct ngene *dev)
{
if (dev->iomem)
iounmap(dev->iomem);
free_common_buffers(dev);
vfree(dev->tsout_buf);
vfree(dev->tsin_buf);
vfree(dev->ain_buf);
vfree(dev->vin_buf);
vfree(dev);
}
static int ngene_get_buffers(struct ngene *dev)
{
if (AllocCommonBuffers(dev))
return -ENOMEM;
if (dev->card_info->io_type[4] & NGENE_IO_TSOUT) {
dev->tsout_buf = vmalloc(TSOUT_BUF_SIZE);
if (!dev->tsout_buf)
return -ENOMEM;
dvb_ringbuffer_init(&dev->tsout_rbuf,
dev->tsout_buf, TSOUT_BUF_SIZE);
}
if (dev->card_info->io_type[2]&NGENE_IO_TSIN) {
dev->tsin_buf = vmalloc(TSIN_BUF_SIZE);
if (!dev->tsin_buf)
return -ENOMEM;
dvb_ringbuffer_init(&dev->tsin_rbuf,
dev->tsin_buf, TSIN_BUF_SIZE);
}
if (dev->card_info->io_type[2] & NGENE_IO_AIN) {
dev->ain_buf = vmalloc(AIN_BUF_SIZE);
if (!dev->ain_buf)
return -ENOMEM;
dvb_ringbuffer_init(&dev->ain_rbuf, dev->ain_buf, AIN_BUF_SIZE);
}
if (dev->card_info->io_type[0] & NGENE_IO_HDTV) {
dev->vin_buf = vmalloc(VIN_BUF_SIZE);
if (!dev->vin_buf)
return -ENOMEM;
dvb_ringbuffer_init(&dev->vin_rbuf, dev->vin_buf, VIN_BUF_SIZE);
}
dev->iomem = ioremap(pci_resource_start(dev->pci_dev, 0),
pci_resource_len(dev->pci_dev, 0));
if (!dev->iomem)
return -ENOMEM;
return 0;
}
static void ngene_init(struct ngene *dev)
{
struct device *pdev = &dev->pci_dev->dev;
int i;
tasklet_setup(&dev->event_tasklet, event_tasklet);
memset_io(dev->iomem + 0xc000, 0x00, 0x220);
memset_io(dev->iomem + 0xc400, 0x00, 0x100);
for (i = 0; i < MAX_STREAM; i++) {
dev->channel[i].dev = dev;
dev->channel[i].number = i;
}
dev->fw_interface_version = 0;
ngwritel(0, NGENE_INT_ENABLE);
dev->icounts = ngreadl(NGENE_INT_COUNTS);
dev->device_version = ngreadl(DEV_VER) & 0x0f;
dev_info(pdev, "Device version %d\n", dev->device_version);
}
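/*
 * Firmware loading: versions 15-17 ship with a fixed, known image size and
 * need the command timeout workaround; for version 18 and later the size is
 * left at 0, which means the size of the loaded firmware file is accepted
 * as-is.
 */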
static int ngene_load_firm(struct ngene *dev)
{
struct device *pdev = &dev->pci_dev->dev;
u32 size;
const struct firmware *fw = NULL;
u8 *ngene_fw;
char *fw_name;
int err, version;
version = dev->card_info->fw_version;
switch (version) {
default:
case 15:
version = 15;
size = 23466;
fw_name = "ngene_15.fw";
dev->cmd_timeout_workaround = true;
break;
case 16:
size = 23498;
fw_name = "ngene_16.fw";
dev->cmd_timeout_workaround = true;
break;
case 17:
size = 24446;
fw_name = "ngene_17.fw";
dev->cmd_timeout_workaround = true;
break;
case 18:
size = 0;
fw_name = "ngene_18.fw";
break;
}
if (request_firmware(&fw, fw_name, &dev->pci_dev->dev) < 0) {
dev_err(pdev, "Could not load firmware file %s.\n", fw_name);
dev_info(pdev, "Copy %s to your hotplug directory!\n",
fw_name);
return -1;
}
if (size == 0)
size = fw->size;
if (size != fw->size) {
dev_err(pdev, "Firmware %s has invalid size!", fw_name);
err = -1;
} else {
dev_info(pdev, "Loading firmware file %s.\n", fw_name);
ngene_fw = (u8 *) fw->data;
err = ngene_command_load_firmware(dev, ngene_fw, size);
}
release_firmware(fw);
return err;
}
static void ngene_stop(struct ngene *dev)
{
mutex_destroy(&dev->cmd_mutex);
i2c_del_adapter(&(dev->channel[0].i2c_adapter));
i2c_del_adapter(&(dev->channel[1].i2c_adapter));
ngwritel(0, NGENE_INT_ENABLE);
ngwritel(0, NGENE_COMMAND);
ngwritel(0, NGENE_COMMAND_HI);
ngwritel(0, NGENE_STATUS);
ngwritel(0, NGENE_STATUS_HI);
ngwritel(0, NGENE_EVENT);
ngwritel(0, NGENE_EVENT_HI);
free_irq(dev->pci_dev->irq, dev);
#ifdef CONFIG_PCI_MSI
if (dev->msi_enabled)
pci_disable_msi(dev->pci_dev);
#endif
}
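/*
 * Buffer configuration: firmware >= 17 takes a 6-byte free-buffer
 * configuration, chosen here from which TS inputs/outputs the card
 * actually has; older firmware only knows the fixed BUFFER_CONFIG_* splits.
 */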
static int ngene_buffer_config(struct ngene *dev)
{
int stat;
if (dev->card_info->fw_version >= 17) {
u8 tsin12_config[6] = { 0x60, 0x60, 0x00, 0x00, 0x00, 0x00 };
u8 tsin1234_config[6] = { 0x30, 0x30, 0x00, 0x30, 0x30, 0x00 };
u8 tsio1235_config[6] = { 0x30, 0x30, 0x00, 0x28, 0x00, 0x38 };
u8 *bconf = tsin12_config;
if (dev->card_info->io_type[2]&NGENE_IO_TSIN &&
dev->card_info->io_type[3]&NGENE_IO_TSIN) {
bconf = tsin1234_config;
if (dev->card_info->io_type[4]&NGENE_IO_TSOUT &&
dev->ci.en)
bconf = tsio1235_config;
}
stat = ngene_command_config_free_buf(dev, bconf);
} else {
int bconf = BUFFER_CONFIG_4422;
if (dev->card_info->io_type[3] == NGENE_IO_TSIN)
bconf = BUFFER_CONFIG_3333;
stat = ngene_command_config_buf(dev, bconf);
}
return stat;
}
static int ngene_start(struct ngene *dev)
{
int stat;
int i;
pci_set_master(dev->pci_dev);
ngene_init(dev);
stat = request_irq(dev->pci_dev->irq, irq_handler,
IRQF_SHARED, "nGene",
(void *)dev);
if (stat < 0)
return stat;
init_waitqueue_head(&dev->cmd_wq);
init_waitqueue_head(&dev->tx_wq);
init_waitqueue_head(&dev->rx_wq);
mutex_init(&dev->cmd_mutex);
mutex_init(&dev->stream_mutex);
sema_init(&dev->pll_mutex, 1);
mutex_init(&dev->i2c_switch_mutex);
spin_lock_init(&dev->cmd_lock);
for (i = 0; i < MAX_STREAM; i++)
spin_lock_init(&dev->channel[i].state_lock);
ngwritel(1, TIMESTAMPS);
ngwritel(1, NGENE_INT_ENABLE);
stat = ngene_load_firm(dev);
if (stat < 0)
goto fail;
#ifdef CONFIG_PCI_MSI
/* enable MSI if kernel and card support it */
if (pci_msi_enabled() && dev->card_info->msi_supported) {
struct device *pdev = &dev->pci_dev->dev;
unsigned long flags;
ngwritel(0, NGENE_INT_ENABLE);
free_irq(dev->pci_dev->irq, dev);
stat = pci_enable_msi(dev->pci_dev);
if (stat) {
dev_info(pdev, "MSI not available\n");
flags = IRQF_SHARED;
} else {
flags = 0;
dev->msi_enabled = true;
}
stat = request_irq(dev->pci_dev->irq, irq_handler,
flags, "nGene", dev);
if (stat < 0)
goto fail2;
ngwritel(1, NGENE_INT_ENABLE);
}
#endif
stat = ngene_i2c_init(dev, 0);
if (stat < 0)
goto fail;
stat = ngene_i2c_init(dev, 1);
if (stat < 0)
goto fail;
return 0;
fail:
ngwritel(0, NGENE_INT_ENABLE);
free_irq(dev->pci_dev->irq, dev);
#ifdef CONFIG_PCI_MSI
fail2:
if (dev->msi_enabled)
pci_disable_msi(dev->pci_dev);
#endif
return stat;
}
/****************************************************************************/
/****************************************************************************/
/****************************************************************************/
static void release_channel(struct ngene_channel *chan)
{
struct dvb_demux *dvbdemux = &chan->demux;
struct ngene *dev = chan->dev;
if (chan->running)
set_transfer(chan, 0);
tasklet_kill(&chan->demux_tasklet);
if (chan->ci_dev) {
dvb_unregister_device(chan->ci_dev);
chan->ci_dev = NULL;
}
if (chan->fe2)
dvb_unregister_frontend(chan->fe2);
if (chan->fe) {
dvb_unregister_frontend(chan->fe);
/* release I2C client (tuner) if needed */
if (chan->i2c_client_fe) {
dvb_module_release(chan->i2c_client[0]);
chan->i2c_client[0] = NULL;
}
dvb_frontend_detach(chan->fe);
chan->fe = NULL;
}
if (chan->has_demux) {
dvb_net_release(&chan->dvbnet);
dvbdemux->dmx.close(&dvbdemux->dmx);
dvbdemux->dmx.remove_frontend(&dvbdemux->dmx,
&chan->hw_frontend);
dvbdemux->dmx.remove_frontend(&dvbdemux->dmx,
&chan->mem_frontend);
dvb_dmxdev_release(&chan->dmxdev);
dvb_dmx_release(&chan->demux);
chan->has_demux = false;
}
if (chan->has_adapter) {
dvb_unregister_adapter(&dev->adapter[chan->number]);
chan->has_adapter = false;
}
}
static int init_channel(struct ngene_channel *chan)
{
int ret = 0, nr = chan->number;
struct dvb_adapter *adapter = NULL;
struct dvb_demux *dvbdemux = &chan->demux;
struct ngene *dev = chan->dev;
struct ngene_info *ni = dev->card_info;
int io = ni->io_type[nr];
tasklet_setup(&chan->demux_tasklet, demux_tasklet);
chan->users = 0;
chan->type = io;
chan->mode = chan->type; /* for now only one mode */
chan->i2c_client_fe = 0; /* be sure this is set to zero */
if (io & NGENE_IO_TSIN) {
chan->fe = NULL;
if (ni->demod_attach[nr]) {
ret = ni->demod_attach[nr](chan);
if (ret < 0)
goto err;
}
if (chan->fe && ni->tuner_attach[nr]) {
ret = ni->tuner_attach[nr](chan);
if (ret < 0)
goto err;
}
}
if (!dev->ci.en && (io & NGENE_IO_TSOUT))
return 0;
if (io & (NGENE_IO_TSIN | NGENE_IO_TSOUT)) {
if (nr >= STREAM_AUDIOIN1)
chan->DataFormatFlags = DF_SWAP32;
if (nr == 0 || !one_adapter || dev->first_adapter == NULL) {
adapter = &dev->adapter[nr];
ret = dvb_register_adapter(adapter, "nGene",
THIS_MODULE,
&chan->dev->pci_dev->dev,
adapter_nr);
if (ret < 0)
goto err;
if (dev->first_adapter == NULL)
dev->first_adapter = adapter;
chan->has_adapter = true;
} else
adapter = dev->first_adapter;
}
if (dev->ci.en && (io & NGENE_IO_TSOUT)) {
dvb_ca_en50221_init(adapter, dev->ci.en, 0, 1);
set_transfer(chan, 1);
chan->dev->channel[2].DataFormatFlags = DF_SWAP32;
set_transfer(&chan->dev->channel[2], 1);
dvb_register_device(adapter, &chan->ci_dev,
&ngene_dvbdev_ci, (void *) chan,
DVB_DEVICE_SEC, 0);
if (!chan->ci_dev)
goto err;
}
if (chan->fe) {
if (dvb_register_frontend(adapter, chan->fe) < 0)
goto err;
chan->has_demux = true;
}
if (chan->fe2) {
if (dvb_register_frontend(adapter, chan->fe2) < 0)
goto err;
if (chan->fe) {
chan->fe2->tuner_priv = chan->fe->tuner_priv;
memcpy(&chan->fe2->ops.tuner_ops,
&chan->fe->ops.tuner_ops,
sizeof(struct dvb_tuner_ops));
}
}
if (chan->has_demux) {
ret = my_dvb_dmx_ts_card_init(dvbdemux, "SW demux",
ngene_start_feed,
ngene_stop_feed, chan);
ret = my_dvb_dmxdev_ts_card_init(&chan->dmxdev, &chan->demux,
&chan->hw_frontend,
&chan->mem_frontend, adapter);
ret = dvb_net_init(adapter, &chan->dvbnet, &chan->demux.dmx);
}
return ret;
err:
if (chan->fe) {
dvb_frontend_detach(chan->fe);
chan->fe = NULL;
}
release_channel(chan);
return 0;
}
static int init_channels(struct ngene *dev)
{
int i, j;
for (i = 0; i < MAX_STREAM; i++) {
dev->channel[i].number = i;
if (init_channel(&dev->channel[i]) < 0) {
for (j = i - 1; j >= 0; j--)
release_channel(&dev->channel[j]);
return -1;
}
}
return 0;
}
static const struct cxd2099_cfg cxd_cfgtmpl = {
.bitrate = 62000,
.polarity = 0,
.clock_mode = 0,
};
static void cxd_attach(struct ngene *dev)
{
struct device *pdev = &dev->pci_dev->dev;
struct ngene_ci *ci = &dev->ci;
struct cxd2099_cfg cxd_cfg = cxd_cfgtmpl;
struct i2c_client *client;
int ret;
u8 type;
/* check for CXD2099AR presence before attaching */
ret = ngene_port_has_cxd2099(&dev->channel[0].i2c_adapter, &type);
if (!ret) {
dev_dbg(pdev, "No CXD2099AR found\n");
return;
}
if (type != 1) {
dev_warn(pdev, "CXD2099AR is uninitialized!\n");
return;
}
cxd_cfg.en = &ci->en;
client = dvb_module_probe("cxd2099", NULL,
&dev->channel[0].i2c_adapter,
0x40, &cxd_cfg);
if (!client)
goto err;
ci->dev = dev;
dev->channel[0].i2c_client[0] = client;
return;
err:
dev_err(pdev, "CXD2099AR attach failed\n");
return;
}
static void cxd_detach(struct ngene *dev)
{
struct ngene_ci *ci = &dev->ci;
dvb_ca_en50221_release(ci->en);
dvb_module_release(dev->channel[0].i2c_client[0]);
dev->channel[0].i2c_client[0] = NULL;
ci->en = NULL;
}
/***********************************/
/* workaround for shutdown failure */
/***********************************/
static void ngene_unlink(struct ngene *dev)
{
struct ngene_command com;
com.cmd.hdr.Opcode = CMD_MEM_WRITE;
com.cmd.hdr.Length = 3;
com.cmd.MemoryWrite.address = 0x910c;
com.cmd.MemoryWrite.data = 0xff;
com.in_len = 3;
com.out_len = 1;
mutex_lock(&dev->cmd_mutex);
ngwritel(0, NGENE_INT_ENABLE);
ngene_command_mutex(dev, &com);
mutex_unlock(&dev->cmd_mutex);
}
void ngene_shutdown(struct pci_dev *pdev)
{
struct ngene *dev = pci_get_drvdata(pdev);
if (!dev || !shutdown_workaround)
return;
dev_info(&pdev->dev, "shutdown workaround...\n");
ngene_unlink(dev);
pci_disable_device(pdev);
}
/****************************************************************************/
/* device probe/remove calls ************************************************/
/****************************************************************************/
void ngene_remove(struct pci_dev *pdev)
{
struct ngene *dev = pci_get_drvdata(pdev);
int i;
tasklet_kill(&dev->event_tasklet);
for (i = MAX_STREAM - 1; i >= 0; i--)
release_channel(&dev->channel[i]);
if (dev->ci.en)
cxd_detach(dev);
ngene_stop(dev);
ngene_release_buffers(dev);
pci_disable_device(pdev);
}
int ngene_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
struct ngene *dev;
int stat = 0;
if (pci_enable_device(pci_dev) < 0)
return -ENODEV;
dev = vzalloc(sizeof(struct ngene));
if (dev == NULL) {
stat = -ENOMEM;
goto fail0;
}
dev->pci_dev = pci_dev;
dev->card_info = (struct ngene_info *)id->driver_data;
dev_info(&pci_dev->dev, "Found %s\n", dev->card_info->name);
pci_set_drvdata(pci_dev, dev);
/* Alloc buffers and start nGene */
stat = ngene_get_buffers(dev);
if (stat < 0)
goto fail1;
stat = ngene_start(dev);
if (stat < 0)
goto fail1;
cxd_attach(dev);
stat = ngene_buffer_config(dev);
if (stat < 0)
goto fail1;
dev->i2c_current_bus = -1;
/* Register DVB adapters and devices for both channels */
stat = init_channels(dev);
if (stat < 0)
goto fail2;
return 0;
fail2:
ngene_stop(dev);
fail1:
ngene_release_buffers(dev);
fail0:
pci_disable_device(pci_dev);
return stat;
}
| linux-master | drivers/media/pci/ngene/ngene-core.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* budget.c: driver for the SAA7146 based Budget DVB cards
*
* Compiled from various sources by Michael Hunold <[email protected]>
*
* Copyright (C) 2002 Ralph Metzler <[email protected]>
*
* Copyright (C) 1999-2002 Ralph Metzler
* & Marcus Metzler for convergence integrated media GmbH
*
* 26feb2004 Support for FS Activy Card (Grundig tuner) by
* Michael Dreher <[email protected]>,
* Oliver Endriss <[email protected]> and
* Andreas 'randy' Weinberger
*
* the project's page is at https://linuxtv.org
*/
#include "budget.h"
#include "stv0299.h"
#include "ves1x93.h"
#include "ves1820.h"
#include "l64781.h"
#include "tda8083.h"
#include "s5h1420.h"
#include "tda10086.h"
#include "tda826x.h"
#include "lnbp21.h"
#include "bsru6.h"
#include "bsbe1.h"
#include "tdhd1.h"
#include "stv6110x.h"
#include "stv090x.h"
#include "isl6423.h"
#include "lnbh24.h"
static int diseqc_method;
module_param(diseqc_method, int, 0444);
MODULE_PARM_DESC(diseqc_method, "Select DiSEqC method for subsystem id 13c2:1003, 0: default, 1: more reliable (for newer revisions only)");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static void Set22K (struct budget *budget, int state)
{
struct saa7146_dev *dev=budget->dev;
dprintk(2, "budget: %p\n", budget);
saa7146_setgpio(dev, 3, (state ? SAA7146_GPIO_OUTHI : SAA7146_GPIO_OUTLO));
}
/*
 * DiSEqC functions, only for the TT Budget card; taken from the Skyvision
 * DVB driver by Ralph Metzler <[email protected]>
 */
static void DiseqcSendBit (struct budget *budget, int data)
{
struct saa7146_dev *dev=budget->dev;
dprintk(2, "budget: %p\n", budget);
saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI);
udelay(data ? 500 : 1000);
saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO);
udelay(data ? 1000 : 500);
}
static void DiseqcSendByte (struct budget *budget, int data)
{
int i, par=1, d;
dprintk(2, "budget: %p\n", budget);
for (i=7; i>=0; i--) {
d = (data>>i)&1;
par ^= d;
DiseqcSendBit(budget, d);
}
DiseqcSendBit(budget, par);
}
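/*
 * DiseqcSendByte() clocks out the eight data bits MSB first and appends an
 * odd parity bit; DiseqcSendBit() produces the DiSEqC timing by gating
 * GPIO 3: a "0" is 1 ms high + 0.5 ms low, a "1" is 0.5 ms high + 1 ms low.
 */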
static int SendDiSEqCMsg (struct budget *budget, int len, u8 *msg, unsigned long burst)
{
struct saa7146_dev *dev=budget->dev;
int i;
dprintk(2, "budget: %p\n", budget);
saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO);
mdelay(16);
for (i=0; i<len; i++)
DiseqcSendByte(budget, msg[i]);
mdelay(16);
if (burst!=-1) {
if (burst)
DiseqcSendByte(budget, 0xff);
else {
saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI);
mdelay(12);
udelay(500);
saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO);
}
msleep(20);
}
return 0;
}
/*
* Routines for the Fujitsu Siemens Activy budget card
* 22 kHz tone and DiSEqC are handled by the frontend.
* Voltage must be set here.
* GPIO 1: LNBP EN, GPIO 2: LNBP VSEL
*/
static int SetVoltage_Activy(struct budget *budget,
enum fe_sec_voltage voltage)
{
struct saa7146_dev *dev=budget->dev;
dprintk(2, "budget: %p\n", budget);
switch (voltage) {
case SEC_VOLTAGE_13:
saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI);
saa7146_setgpio(dev, 2, SAA7146_GPIO_OUTLO);
break;
case SEC_VOLTAGE_18:
saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI);
saa7146_setgpio(dev, 2, SAA7146_GPIO_OUTHI);
break;
case SEC_VOLTAGE_OFF:
saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTLO);
break;
default:
return -EINVAL;
}
return 0;
}
static int siemens_budget_set_voltage(struct dvb_frontend *fe,
enum fe_sec_voltage voltage)
{
struct budget *budget = fe->dvb->priv;
return SetVoltage_Activy (budget, voltage);
}
static int budget_set_tone(struct dvb_frontend *fe,
enum fe_sec_tone_mode tone)
{
struct budget *budget = fe->dvb->priv;
switch (tone) {
case SEC_TONE_ON:
Set22K (budget, 1);
break;
case SEC_TONE_OFF:
Set22K (budget, 0);
break;
default:
return -EINVAL;
}
return 0;
}
static int budget_diseqc_send_master_cmd(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd* cmd)
{
struct budget *budget = fe->dvb->priv;
SendDiSEqCMsg (budget, cmd->msg_len, cmd->msg, 0);
return 0;
}
static int budget_diseqc_send_burst(struct dvb_frontend *fe,
enum fe_sec_mini_cmd minicmd)
{
struct budget *budget = fe->dvb->priv;
SendDiSEqCMsg (budget, 0, NULL, minicmd);
return 0;
}
static int alps_bsrv2_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct budget *budget = fe->dvb->priv;
u8 pwr = 0;
u8 buf[4];
struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf) };
u32 div = (c->frequency + 479500) / 125;
if (c->frequency > 2000000)
pwr = 3;
else if (c->frequency > 1800000)
pwr = 2;
else if (c->frequency > 1600000)
pwr = 1;
else if (c->frequency > 1200000)
pwr = 0;
else if (c->frequency >= 1100000)
pwr = 1;
else pwr = 2;
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
buf[2] = ((div & 0x18000) >> 10) | 0x95;
buf[3] = (pwr << 6) | 0x30;
// NOTE: since we're using a prescaler of 2, we set the
// divisor frequency to 62.5kHz and divide by 125 above
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO;
return 0;
}
static struct ves1x93_config alps_bsrv2_config =
{
.demod_address = 0x08,
.xin = 90100000UL,
.invert_pwm = 0,
};
static int alps_tdbe2_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct budget *budget = fe->dvb->priv;
u32 div;
u8 data[4];
struct i2c_msg msg = { .addr = 0x62, .flags = 0, .buf = data, .len = sizeof(data) };
div = (c->frequency + 35937500 + 31250) / 62500;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
data[2] = 0x85 | ((div >> 10) & 0x60);
data[3] = (c->frequency < 174000000 ? 0x88 : c->frequency < 470000000 ? 0x84 : 0x81);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO;
return 0;
}
static struct ves1820_config alps_tdbe2_config = {
.demod_address = 0x09,
.xin = 57840000UL,
.invert = 1,
.selagc = VES1820_SELAGC_SIGNAMPERR,
};
static int grundig_29504_401_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct budget *budget = fe->dvb->priv;
u8 *tuner_addr = fe->tuner_priv;
u32 div;
u8 cfg, cpump, band_select;
u8 data[4];
struct i2c_msg msg = { .flags = 0, .buf = data, .len = sizeof(data) };
if (tuner_addr)
msg.addr = *tuner_addr;
else
msg.addr = 0x61;
div = (36125000 + c->frequency) / 166666;
cfg = 0x88;
if (c->frequency < 175000000)
cpump = 2;
else if (c->frequency < 390000000)
cpump = 1;
else if (c->frequency < 470000000)
cpump = 2;
else if (c->frequency < 750000000)
cpump = 1;
else
cpump = 3;
if (c->frequency < 175000000)
band_select = 0x0e;
else if (c->frequency < 470000000)
band_select = 0x05;
else
band_select = 0x03;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
data[2] = ((div >> 10) & 0x60) | cfg;
data[3] = (cpump << 6) | band_select;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO;
return 0;
}
static struct l64781_config grundig_29504_401_config = {
.demod_address = 0x55,
};
static struct l64781_config grundig_29504_401_config_activy = {
.demod_address = 0x54,
};
static u8 tuner_address_grundig_29504_401_activy = 0x60;
static int grundig_29504_451_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct budget *budget = fe->dvb->priv;
u32 div;
u8 data[4];
struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
div = c->frequency / 125;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
data[2] = 0x8e;
data[3] = 0x00;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO;
return 0;
}
static struct tda8083_config grundig_29504_451_config = {
.demod_address = 0x68,
};
static int s5h1420_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct budget *budget = fe->dvb->priv;
u32 div;
u8 data[4];
struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
div = c->frequency / 1000;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
data[2] = 0xc2;
if (div < 1450)
data[3] = 0x00;
else if (div < 1850)
data[3] = 0x40;
else if (div < 2000)
data[3] = 0x80;
else
data[3] = 0xc0;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO;
return 0;
}
static struct s5h1420_config s5h1420_config = {
.demod_address = 0x53,
.invert = 1,
.cdclk_polarity = 1,
};
static struct tda10086_config tda10086_config = {
.demod_address = 0x0e,
.invert = 0,
.diseqc_tone = 1,
.xtal_freq = TDA10086_XTAL_16M,
};
static const struct stv0299_config alps_bsru6_config_activy = {
.demod_address = 0x68,
.inittab = alps_bsru6_inittab,
.mclk = 88000000UL,
.invert = 1,
.op0_off = 1,
.min_delay_ms = 100,
.set_symbol_rate = alps_bsru6_set_symbol_rate,
};
static const struct stv0299_config alps_bsbe1_config_activy = {
.demod_address = 0x68,
.inittab = alps_bsbe1_inittab,
.mclk = 88000000UL,
.invert = 1,
.op0_off = 1,
.min_delay_ms = 100,
.set_symbol_rate = alps_bsbe1_set_symbol_rate,
};
static int alps_tdhd1_204_request_firmware(struct dvb_frontend *fe, const struct firmware **fw, char *name)
{
struct budget *budget = fe->dvb->priv;
return request_firmware(fw, name, &budget->dev->pci->dev);
}
static int i2c_readreg(struct i2c_adapter *i2c, u8 adr, u8 reg)
{
u8 val;
struct i2c_msg msg[] = {
{ .addr = adr, .flags = 0, .buf = &reg, .len = 1 },
{ .addr = adr, .flags = I2C_M_RD, .buf = &val, .len = 1 }
};
return (i2c_transfer(i2c, msg, 2) != 2) ? -EIO : val;
}
static u8 read_pwm(struct budget* budget)
{
u8 b = 0xff;
u8 pwm;
struct i2c_msg msg[] = {
{ .addr = 0x50, .flags = 0, .buf = &b, .len = 1 },
{ .addr = 0x50, .flags = I2C_M_RD, .buf = &pwm, .len = 1 }
};
if ((i2c_transfer(&budget->i2c_adap, msg, 2) != 2) || (pwm == 0xff))
pwm = 0x48;
return pwm;
}
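/*
 * read_pwm() reads one byte from the card EEPROM at I2C address 0x50; the
 * value is handed to ves1820_attach() below as the PWM setting, with 0x48
 * used as a fallback when the read fails or the cell is erased (0xff).
 */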
static struct stv090x_config tt1600_stv090x_config = {
.device = STV0903,
.demod_mode = STV090x_SINGLE,
.clk_mode = STV090x_CLK_EXT,
.xtal = 13500000,
.address = 0x68,
.ts1_mode = STV090x_TSMODE_DVBCI,
.ts2_mode = STV090x_TSMODE_SERIAL_CONTINUOUS,
.repeater_level = STV090x_RPTLEVEL_16,
.tuner_init = NULL,
.tuner_sleep = NULL,
.tuner_set_mode = NULL,
.tuner_set_frequency = NULL,
.tuner_get_frequency = NULL,
.tuner_set_bandwidth = NULL,
.tuner_get_bandwidth = NULL,
.tuner_set_bbgain = NULL,
.tuner_get_bbgain = NULL,
.tuner_set_refclk = NULL,
.tuner_get_status = NULL,
};
static struct stv6110x_config tt1600_stv6110x_config = {
.addr = 0x60,
.refclk = 27000000,
.clk_div = 2,
};
static struct isl6423_config tt1600_isl6423_config = {
.current_max = SEC_CURRENT_515m,
.curlim = SEC_CURRENT_LIM_ON,
.mod_extern = 1,
.addr = 0x08,
};
static void frontend_init(struct budget *budget)
{
(void)alps_bsbe1_config; /* avoid warning */
switch(budget->dev->pci->subsystem_device) {
case 0x1003: // Hauppauge/TT Nova budget (stv0299/ALPS BSRU6(tsa5059) OR ves1893/ALPS BSRV2(sp5659))
case 0x1013:
// try the ALPS BSRV2 first of all
budget->dvb_frontend = dvb_attach(ves1x93_attach, &alps_bsrv2_config, &budget->i2c_adap);
if (budget->dvb_frontend) {
budget->dvb_frontend->ops.tuner_ops.set_params = alps_bsrv2_tuner_set_params;
budget->dvb_frontend->ops.diseqc_send_master_cmd = budget_diseqc_send_master_cmd;
budget->dvb_frontend->ops.diseqc_send_burst = budget_diseqc_send_burst;
budget->dvb_frontend->ops.set_tone = budget_set_tone;
break;
}
// try the ALPS BSRU6 now
budget->dvb_frontend = dvb_attach(stv0299_attach, &alps_bsru6_config, &budget->i2c_adap);
if (budget->dvb_frontend) {
budget->dvb_frontend->ops.tuner_ops.set_params = alps_bsru6_tuner_set_params;
budget->dvb_frontend->tuner_priv = &budget->i2c_adap;
if (budget->dev->pci->subsystem_device == 0x1003 && diseqc_method == 0) {
budget->dvb_frontend->ops.diseqc_send_master_cmd = budget_diseqc_send_master_cmd;
budget->dvb_frontend->ops.diseqc_send_burst = budget_diseqc_send_burst;
budget->dvb_frontend->ops.set_tone = budget_set_tone;
}
break;
}
break;
case 0x1004: // Hauppauge/TT DVB-C budget (ves1820/ALPS TDBE2(sp5659))
budget->dvb_frontend = dvb_attach(ves1820_attach, &alps_tdbe2_config, &budget->i2c_adap, read_pwm(budget));
if (budget->dvb_frontend) {
budget->dvb_frontend->ops.tuner_ops.set_params = alps_tdbe2_tuner_set_params;
break;
}
break;
case 0x1005: // Hauppauge/TT Nova-T budget (L64781/Grundig 29504-401(tsa5060))
budget->dvb_frontend = dvb_attach(l64781_attach, &grundig_29504_401_config, &budget->i2c_adap);
if (budget->dvb_frontend) {
budget->dvb_frontend->ops.tuner_ops.set_params = grundig_29504_401_tuner_set_params;
budget->dvb_frontend->tuner_priv = NULL;
break;
}
break;
case 0x4f52: /* Cards based on Philips Semi Sylt PCI ref. design */
budget->dvb_frontend = dvb_attach(stv0299_attach, &alps_bsru6_config, &budget->i2c_adap);
if (budget->dvb_frontend) {
printk(KERN_INFO "budget: tuner ALPS BSRU6 in Philips Semi. Sylt detected\n");
budget->dvb_frontend->ops.tuner_ops.set_params = alps_bsru6_tuner_set_params;
budget->dvb_frontend->tuner_priv = &budget->i2c_adap;
break;
}
break;
case 0x4f60: /* Fujitsu Siemens Activy Budget-S PCI rev AL (stv0299/tsa5059) */
{
int subtype = i2c_readreg(&budget->i2c_adap, 0x50, 0x67);
if (subtype < 0)
break;
/* fixme: find a better way to identify the card */
if (subtype < 0x36) {
/* assume ALPS BSRU6 */
budget->dvb_frontend = dvb_attach(stv0299_attach, &alps_bsru6_config_activy, &budget->i2c_adap);
if (budget->dvb_frontend) {
printk(KERN_INFO "budget: tuner ALPS BSRU6 detected\n");
budget->dvb_frontend->ops.tuner_ops.set_params = alps_bsru6_tuner_set_params;
budget->dvb_frontend->tuner_priv = &budget->i2c_adap;
budget->dvb_frontend->ops.set_voltage = siemens_budget_set_voltage;
budget->dvb_frontend->ops.dishnetwork_send_legacy_command = NULL;
break;
}
} else {
/* assume ALPS BSBE1 */
/* reset tuner */
saa7146_setgpio(budget->dev, 3, SAA7146_GPIO_OUTLO);
msleep(50);
saa7146_setgpio(budget->dev, 3, SAA7146_GPIO_OUTHI);
msleep(250);
budget->dvb_frontend = dvb_attach(stv0299_attach, &alps_bsbe1_config_activy, &budget->i2c_adap);
if (budget->dvb_frontend) {
printk(KERN_INFO "budget: tuner ALPS BSBE1 detected\n");
budget->dvb_frontend->ops.tuner_ops.set_params = alps_bsbe1_tuner_set_params;
budget->dvb_frontend->tuner_priv = &budget->i2c_adap;
budget->dvb_frontend->ops.set_voltage = siemens_budget_set_voltage;
budget->dvb_frontend->ops.dishnetwork_send_legacy_command = NULL;
break;
}
}
break;
}
case 0x4f61: // Fujitsu Siemens Activy Budget-S PCI rev GR (tda8083/Grundig 29504-451(tsa5522))
budget->dvb_frontend = dvb_attach(tda8083_attach, &grundig_29504_451_config, &budget->i2c_adap);
if (budget->dvb_frontend) {
budget->dvb_frontend->ops.tuner_ops.set_params = grundig_29504_451_tuner_set_params;
budget->dvb_frontend->ops.set_voltage = siemens_budget_set_voltage;
budget->dvb_frontend->ops.dishnetwork_send_legacy_command = NULL;
}
break;
case 0x5f60: /* Fujitsu Siemens Activy Budget-T PCI rev AL (tda10046/ALPS TDHD1-204A) */
budget->dvb_frontend = dvb_attach(tda10046_attach, &alps_tdhd1_204a_config, &budget->i2c_adap);
if (budget->dvb_frontend) {
budget->dvb_frontend->ops.tuner_ops.set_params = alps_tdhd1_204a_tuner_set_params;
budget->dvb_frontend->tuner_priv = &budget->i2c_adap;
}
break;
case 0x5f61: /* Fujitsu Siemens Activy Budget-T PCI rev GR (L64781/Grundig 29504-401(tsa5060)) */
budget->dvb_frontend = dvb_attach(l64781_attach, &grundig_29504_401_config_activy, &budget->i2c_adap);
if (budget->dvb_frontend) {
budget->dvb_frontend->tuner_priv = &tuner_address_grundig_29504_401_activy;
budget->dvb_frontend->ops.tuner_ops.set_params = grundig_29504_401_tuner_set_params;
}
break;
case 0x1016: // Hauppauge/TT Nova-S SE (samsung s5h1420/????(tda8260))
{
struct dvb_frontend *fe;
fe = dvb_attach(s5h1420_attach, &s5h1420_config, &budget->i2c_adap);
if (fe) {
fe->ops.tuner_ops.set_params = s5h1420_tuner_set_params;
budget->dvb_frontend = fe;
if (dvb_attach(lnbp21_attach, fe, &budget->i2c_adap,
0, 0) == NULL) {
printk("%s: No LNBP21 found!\n", __func__);
goto error_out;
}
break;
}
}
fallthrough;
case 0x1018: // TT Budget-S-1401 (philips tda10086/philips tda8262)
{
struct dvb_frontend *fe;
// gpio2 is connected to CLB - reset it + leave it high
saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTLO);
msleep(1);
saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTHI);
msleep(1);
fe = dvb_attach(tda10086_attach, &tda10086_config, &budget->i2c_adap);
if (fe) {
budget->dvb_frontend = fe;
if (dvb_attach(tda826x_attach, fe, 0x60,
&budget->i2c_adap, 0) == NULL)
printk("%s: No tda826x found!\n", __func__);
if (dvb_attach(lnbp21_attach, fe,
&budget->i2c_adap, 0, 0) == NULL) {
printk("%s: No LNBP21 found!\n", __func__);
goto error_out;
}
break;
}
}
fallthrough;
case 0x101c: { /* TT S2-1600 */
const struct stv6110x_devctl *ctl;
saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTLO);
msleep(50);
saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTHI);
msleep(250);
budget->dvb_frontend = dvb_attach(stv090x_attach,
&tt1600_stv090x_config,
&budget->i2c_adap,
STV090x_DEMODULATOR_0);
if (budget->dvb_frontend) {
ctl = dvb_attach(stv6110x_attach,
budget->dvb_frontend,
&tt1600_stv6110x_config,
&budget->i2c_adap);
if (ctl) {
tt1600_stv090x_config.tuner_init = ctl->tuner_init;
tt1600_stv090x_config.tuner_sleep = ctl->tuner_sleep;
tt1600_stv090x_config.tuner_set_mode = ctl->tuner_set_mode;
tt1600_stv090x_config.tuner_set_frequency = ctl->tuner_set_frequency;
tt1600_stv090x_config.tuner_get_frequency = ctl->tuner_get_frequency;
tt1600_stv090x_config.tuner_set_bandwidth = ctl->tuner_set_bandwidth;
tt1600_stv090x_config.tuner_get_bandwidth = ctl->tuner_get_bandwidth;
tt1600_stv090x_config.tuner_set_bbgain = ctl->tuner_set_bbgain;
tt1600_stv090x_config.tuner_get_bbgain = ctl->tuner_get_bbgain;
tt1600_stv090x_config.tuner_set_refclk = ctl->tuner_set_refclk;
tt1600_stv090x_config.tuner_get_status = ctl->tuner_get_status;
/* call the init function once to initialize
tuner's clock output divider and demod's
master clock */
if (budget->dvb_frontend->ops.init)
budget->dvb_frontend->ops.init(budget->dvb_frontend);
if (dvb_attach(isl6423_attach,
budget->dvb_frontend,
&budget->i2c_adap,
&tt1600_isl6423_config) == NULL) {
printk(KERN_ERR "%s: No Intersil ISL6423 found!\n", __func__);
goto error_out;
}
} else {
printk(KERN_ERR "%s: No STV6110(A) Silicon Tuner found!\n", __func__);
goto error_out;
}
}
}
break;
case 0x1020: { /* Omicom S2 */
const struct stv6110x_devctl *ctl;
saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTLO);
msleep(50);
saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTHI);
msleep(250);
budget->dvb_frontend = dvb_attach(stv090x_attach,
&tt1600_stv090x_config,
&budget->i2c_adap,
STV090x_DEMODULATOR_0);
if (budget->dvb_frontend) {
printk(KERN_INFO "budget: Omicom S2 detected\n");
ctl = dvb_attach(stv6110x_attach,
budget->dvb_frontend,
&tt1600_stv6110x_config,
&budget->i2c_adap);
if (ctl) {
tt1600_stv090x_config.tuner_init = ctl->tuner_init;
tt1600_stv090x_config.tuner_sleep = ctl->tuner_sleep;
tt1600_stv090x_config.tuner_set_mode = ctl->tuner_set_mode;
tt1600_stv090x_config.tuner_set_frequency = ctl->tuner_set_frequency;
tt1600_stv090x_config.tuner_get_frequency = ctl->tuner_get_frequency;
tt1600_stv090x_config.tuner_set_bandwidth = ctl->tuner_set_bandwidth;
tt1600_stv090x_config.tuner_get_bandwidth = ctl->tuner_get_bandwidth;
tt1600_stv090x_config.tuner_set_bbgain = ctl->tuner_set_bbgain;
tt1600_stv090x_config.tuner_get_bbgain = ctl->tuner_get_bbgain;
tt1600_stv090x_config.tuner_set_refclk = ctl->tuner_set_refclk;
tt1600_stv090x_config.tuner_get_status = ctl->tuner_get_status;
/* call the init function once to initialize
tuner's clock output divider and demod's
master clock */
if (budget->dvb_frontend->ops.init)
budget->dvb_frontend->ops.init(budget->dvb_frontend);
if (dvb_attach(lnbh24_attach,
budget->dvb_frontend,
&budget->i2c_adap,
LNBH24_PCL | LNBH24_TTX,
LNBH24_TEN, 0x14>>1) == NULL) {
printk(KERN_ERR
"No LNBH24 found!\n");
goto error_out;
}
} else {
printk(KERN_ERR "%s: No STV6110(A) Silicon Tuner found!\n", __func__);
goto error_out;
}
}
}
break;
}
if (budget->dvb_frontend == NULL) {
printk("budget: A frontend driver was not found for device [%04x:%04x] subsystem [%04x:%04x]\n",
budget->dev->pci->vendor,
budget->dev->pci->device,
budget->dev->pci->subsystem_vendor,
budget->dev->pci->subsystem_device);
} else {
if (dvb_register_frontend(&budget->dvb_adapter, budget->dvb_frontend))
goto error_out;
}
return;
error_out:
printk("budget: Frontend registration failed!\n");
dvb_frontend_detach(budget->dvb_frontend);
budget->dvb_frontend = NULL;
return;
}
static int budget_attach (struct saa7146_dev* dev, struct saa7146_pci_extension_data *info)
{
struct budget *budget = NULL;
int err;
budget = kmalloc(sizeof(struct budget), GFP_KERNEL);
	if (!budget)
		return -ENOMEM;
dprintk(2, "dev:%p, info:%p, budget:%p\n", dev, info, budget);
dev->ext_priv = budget;
err = ttpci_budget_init(budget, dev, info, THIS_MODULE, adapter_nr);
if (err) {
printk("==> failed\n");
		kfree(budget);
return err;
}
budget->dvb_adapter.priv = budget;
frontend_init(budget);
ttpci_budget_init_hooks(budget);
return 0;
}
static int budget_detach (struct saa7146_dev* dev)
{
struct budget *budget = dev->ext_priv;
int err;
if (budget->dvb_frontend) {
dvb_unregister_frontend(budget->dvb_frontend);
dvb_frontend_detach(budget->dvb_frontend);
}
	err = ttpci_budget_deinit(budget);
	kfree(budget);
dev->ext_priv = NULL;
return err;
}
static struct saa7146_extension budget_extension;
MAKE_BUDGET_INFO(ttbs, "TT-Budget/WinTV-NOVA-S PCI", BUDGET_TT);
MAKE_BUDGET_INFO(ttbc, "TT-Budget/WinTV-NOVA-C PCI", BUDGET_TT);
MAKE_BUDGET_INFO(ttbt, "TT-Budget/WinTV-NOVA-T PCI", BUDGET_TT);
MAKE_BUDGET_INFO(satel, "SATELCO Multimedia PCI", BUDGET_TT_HW_DISEQC);
MAKE_BUDGET_INFO(ttbs1401, "TT-Budget-S-1401 PCI", BUDGET_TT);
MAKE_BUDGET_INFO(tt1600, "TT-Budget S2-1600 PCI", BUDGET_TT);
MAKE_BUDGET_INFO(fsacs0, "Fujitsu Siemens Activy Budget-S PCI (rev GR/grundig frontend)", BUDGET_FS_ACTIVY);
MAKE_BUDGET_INFO(fsacs1, "Fujitsu Siemens Activy Budget-S PCI (rev AL/alps frontend)", BUDGET_FS_ACTIVY);
MAKE_BUDGET_INFO(fsact, "Fujitsu Siemens Activy Budget-T PCI (rev GR/Grundig frontend)", BUDGET_FS_ACTIVY);
MAKE_BUDGET_INFO(fsact1, "Fujitsu Siemens Activy Budget-T PCI (rev AL/ALPS TDHD1-204A)", BUDGET_FS_ACTIVY);
MAKE_BUDGET_INFO(omicom, "Omicom S2 PCI", BUDGET_TT);
MAKE_BUDGET_INFO(sylt, "Philips Semi Sylt PCI", BUDGET_TT_HW_DISEQC);
static const struct pci_device_id pci_tbl[] = {
MAKE_EXTENSION_PCI(ttbs, 0x13c2, 0x1003),
MAKE_EXTENSION_PCI(ttbc, 0x13c2, 0x1004),
MAKE_EXTENSION_PCI(ttbt, 0x13c2, 0x1005),
MAKE_EXTENSION_PCI(satel, 0x13c2, 0x1013),
MAKE_EXTENSION_PCI(ttbs, 0x13c2, 0x1016),
MAKE_EXTENSION_PCI(ttbs1401, 0x13c2, 0x1018),
MAKE_EXTENSION_PCI(tt1600, 0x13c2, 0x101c),
	MAKE_EXTENSION_PCI(fsacs1, 0x1131, 0x4f60),
	MAKE_EXTENSION_PCI(fsacs0, 0x1131, 0x4f61),
MAKE_EXTENSION_PCI(fsact1, 0x1131, 0x5f60),
MAKE_EXTENSION_PCI(fsact, 0x1131, 0x5f61),
MAKE_EXTENSION_PCI(omicom, 0x14c4, 0x1020),
MAKE_EXTENSION_PCI(sylt, 0x1131, 0x4f52),
{
.vendor = 0,
}
};
MODULE_DEVICE_TABLE(pci, pci_tbl);
static struct saa7146_extension budget_extension = {
.name = "budget dvb",
.flags = SAA7146_USE_I2C_IRQ,
.module = THIS_MODULE,
.pci_tbl = pci_tbl,
.attach = budget_attach,
.detach = budget_detach,
.irq_mask = MASK_10,
.irq_func = ttpci_budget_irq10_handler,
};
static int __init budget_init(void)
{
return saa7146_register_extension(&budget_extension);
}
static void __exit budget_exit(void)
{
saa7146_unregister_extension(&budget_extension);
}
module_init(budget_init);
module_exit(budget_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralph Metzler, Marcus Metzler, Michael Hunold, others");
MODULE_DESCRIPTION("driver for the SAA7146 based so-called budget PCI DVB cards by Siemens, Technotrend, Hauppauge");
| linux-master | drivers/media/pci/ttpci/budget.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* budget-core.c: driver for the SAA7146 based Budget DVB cards
*
* Compiled from various sources by Michael Hunold <[email protected]>
*
* Copyright (C) 2002 Ralph Metzler <[email protected]>
*
* Copyright (C) 1999-2002 Ralph Metzler
* & Marcus Metzler for convergence integrated media GmbH
*
* 26feb2004 Support for FS Activy Card (Grundig tuner) by
* Michael Dreher <[email protected]>,
* Oliver Endriss <[email protected]>,
* Andreas 'randy' Weinberger
*
* the project's page is at https://linuxtv.org
*/
#include "budget.h"
#include "ttpci-eeprom.h"
#define TS_WIDTH (2 * TS_SIZE)
#define TS_WIDTH_ACTIVY TS_SIZE
#define TS_WIDTH_DVBC TS_SIZE
#define TS_HEIGHT_MASK 0xf00
#define TS_HEIGHT_MASK_ACTIVY 0xc00
#define TS_HEIGHT_MASK_DVBC 0xe00
#define TS_MIN_BUFSIZE_K 188
#define TS_MAX_BUFSIZE_K 1410
#define TS_MAX_BUFSIZE_K_ACTIVY 564
#define TS_MAX_BUFSIZE_K_DVBC 1316
#define BUFFER_WARNING_WAIT (30*HZ)
int budget_debug;
static int dma_buffer_size = TS_MIN_BUFSIZE_K;
module_param_named(debug, budget_debug, int, 0644);
module_param_named(bufsize, dma_buffer_size, int, 0444);
MODULE_PARM_DESC(debug, "Turn on/off budget debugging (default:off).");
MODULE_PARM_DESC(bufsize, "DMA buffer size in KB, default: 188, min: 188, max: 1410 (Activy: 564)");
/****************************************************************************
* TT budget / WinTV Nova
****************************************************************************/
static int stop_ts_capture(struct budget *budget)
{
dprintk(2, "budget: %p\n", budget);
saa7146_write(budget->dev, MC1, MASK_20); // DMA3 off
SAA7146_IER_DISABLE(budget->dev, MASK_10);
return 0;
}
static int start_ts_capture(struct budget *budget)
{
struct saa7146_dev *dev = budget->dev;
dprintk(2, "budget: %p\n", budget);
if (!budget->feeding || !budget->fe_synced)
return 0;
saa7146_write(dev, MC1, MASK_20); // DMA3 off
memset(budget->grabbing, 0x00, budget->buffer_size);
saa7146_write(dev, PCI_BT_V1, 0x001c0000 | (saa7146_read(dev, PCI_BT_V1) & ~0x001f0000));
budget->ttbp = 0;
/*
* Signal path on the Activy:
*
* tuner -> SAA7146 port A -> SAA7146 BRS -> SAA7146 DMA3 -> memory
*
	 * Since the tuner feeds 204-byte packets into the SAA7146,
* DMA3 is configured to strip the trailing 16 FEC bytes:
* Pitch: 188, NumBytes3: 188, NumLines3: 1024
*/
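	/*
	 * Illustrative numbers (a sketch, assuming the default bufsize module
	 * parameter of 188 KiB): on an Activy card buffer_width = 188 and
	 * buffer_height = 1024, so the DMA3 window programmed further below
	 * (PITCH3 / NUM_LINE_BYTE3 / PROT_ADDR3) is 1024 lines of 188 bytes,
	 * i.e. one stripped TS packet per line in a 192512-byte ring that
	 * vpeirq() later drains in whole packets.
	 */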
switch(budget->card->type) {
case BUDGET_FS_ACTIVY:
saa7146_write(dev, DD1_INIT, 0x04000000);
saa7146_write(dev, MC2, (MASK_09 | MASK_25));
saa7146_write(dev, BRS_CTRL, 0x00000000);
break;
case BUDGET_PATCH:
saa7146_write(dev, DD1_INIT, 0x00000200);
saa7146_write(dev, MC2, (MASK_10 | MASK_26));
saa7146_write(dev, BRS_CTRL, 0x60000000);
break;
case BUDGET_CIN1200C_MK3:
case BUDGET_KNC1C_MK3:
case BUDGET_KNC1C_TDA10024:
case BUDGET_KNC1CP_MK3:
if (budget->video_port == BUDGET_VIDEO_PORTA) {
saa7146_write(dev, DD1_INIT, 0x06000200);
saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
saa7146_write(dev, BRS_CTRL, 0x00000000);
} else {
saa7146_write(dev, DD1_INIT, 0x00000600);
saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
saa7146_write(dev, BRS_CTRL, 0x60000000);
}
break;
default:
if (budget->video_port == BUDGET_VIDEO_PORTA) {
saa7146_write(dev, DD1_INIT, 0x06000200);
saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
saa7146_write(dev, BRS_CTRL, 0x00000000);
} else {
saa7146_write(dev, DD1_INIT, 0x02000600);
saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
saa7146_write(dev, BRS_CTRL, 0x60000000);
}
}
saa7146_write(dev, MC2, (MASK_08 | MASK_24));
mdelay(10);
saa7146_write(dev, BASE_ODD3, 0);
if (budget->buffer_size > budget->buffer_height * budget->buffer_width) {
// using odd/even buffers
saa7146_write(dev, BASE_EVEN3, budget->buffer_height * budget->buffer_width);
} else {
// using a single buffer
saa7146_write(dev, BASE_EVEN3, 0);
}
saa7146_write(dev, PROT_ADDR3, budget->buffer_size);
saa7146_write(dev, BASE_PAGE3, budget->pt.dma | ME1 | 0x90);
saa7146_write(dev, PITCH3, budget->buffer_width);
saa7146_write(dev, NUM_LINE_BYTE3,
(budget->buffer_height << 16) | budget->buffer_width);
saa7146_write(dev, MC2, (MASK_04 | MASK_20));
SAA7146_ISR_CLEAR(budget->dev, MASK_10); /* VPE */
SAA7146_IER_ENABLE(budget->dev, MASK_10); /* VPE */
saa7146_write(dev, MC1, (MASK_04 | MASK_20)); /* DMA3 on */
return 0;
}
static int budget_read_fe_status(struct dvb_frontend *fe,
enum fe_status *status)
{
struct budget *budget = fe->dvb->priv;
int synced;
int ret;
if (budget->read_fe_status)
ret = budget->read_fe_status(fe, status);
else
ret = -EINVAL;
if (!ret) {
synced = (*status & FE_HAS_LOCK);
if (synced != budget->fe_synced) {
budget->fe_synced = synced;
spin_lock(&budget->feedlock);
if (synced)
start_ts_capture(budget);
else
stop_ts_capture(budget);
spin_unlock(&budget->feedlock);
}
}
return ret;
}
static void vpeirq(struct tasklet_struct *t)
{
struct budget *budget = from_tasklet(budget, t, vpe_tasklet);
u8 *mem = (u8 *) (budget->grabbing);
u32 olddma = budget->ttbp;
u32 newdma = saa7146_read(budget->dev, PCI_VDP3);
u32 count;
/* Ensure streamed PCI data is synced to CPU */
dma_sync_sg_for_cpu(&budget->dev->pci->dev, budget->pt.slist,
budget->pt.nents, DMA_FROM_DEVICE);
/* nearest lower position divisible by 188 */
newdma -= newdma % 188;
if (newdma >= budget->buffer_size)
return;
budget->ttbp = newdma;
if (budget->feeding == 0 || newdma == olddma)
return;
if (newdma > olddma) { /* no wraparound, dump olddma..newdma */
count = newdma - olddma;
dvb_dmx_swfilter_packets(&budget->demux, mem + olddma, count / 188);
} else { /* wraparound, dump olddma..buflen and 0..newdma */
count = budget->buffer_size - olddma;
dvb_dmx_swfilter_packets(&budget->demux, mem + olddma, count / 188);
count += newdma;
dvb_dmx_swfilter_packets(&budget->demux, mem, newdma / 188);
}
if (count > budget->buffer_warning_threshold)
budget->buffer_warnings++;
if (budget->buffer_warnings && time_after(jiffies, budget->buffer_warning_time)) {
printk("%s %s: used %d times >80%% of buffer (%u bytes now)\n",
budget->dev->name, __func__, budget->buffer_warnings, count);
budget->buffer_warning_time = jiffies + BUFFER_WARNING_WAIT;
budget->buffer_warnings = 0;
}
}
static int ttpci_budget_debiread_nolock(struct budget *budget, u32 config,
int addr, int count, int nobusyloop)
{
struct saa7146_dev *saa = budget->dev;
int result;
result = saa7146_wait_for_debi_done(saa, nobusyloop);
if (result < 0)
return result;
saa7146_write(saa, DEBI_COMMAND, (count << 17) | 0x10000 | (addr & 0xffff));
saa7146_write(saa, DEBI_CONFIG, config);
saa7146_write(saa, DEBI_PAGE, 0);
saa7146_write(saa, MC2, (2 << 16) | 2);
result = saa7146_wait_for_debi_done(saa, nobusyloop);
if (result < 0)
return result;
result = saa7146_read(saa, DEBI_AD);
result &= (0xffffffffUL >> ((4 - count) * 8));
return result;
}
int ttpci_budget_debiread(struct budget *budget, u32 config, int addr, int count,
int uselocks, int nobusyloop)
{
if (count > 4 || count <= 0)
return 0;
if (uselocks) {
unsigned long flags;
int result;
spin_lock_irqsave(&budget->debilock, flags);
result = ttpci_budget_debiread_nolock(budget, config, addr,
count, nobusyloop);
spin_unlock_irqrestore(&budget->debilock, flags);
return result;
}
return ttpci_budget_debiread_nolock(budget, config, addr,
count, nobusyloop);
}
static int ttpci_budget_debiwrite_nolock(struct budget *budget, u32 config,
int addr, int count, u32 value, int nobusyloop)
{
struct saa7146_dev *saa = budget->dev;
int result;
result = saa7146_wait_for_debi_done(saa, nobusyloop);
if (result < 0)
return result;
saa7146_write(saa, DEBI_COMMAND, (count << 17) | 0x00000 | (addr & 0xffff));
saa7146_write(saa, DEBI_CONFIG, config);
saa7146_write(saa, DEBI_PAGE, 0);
saa7146_write(saa, DEBI_AD, value);
saa7146_write(saa, MC2, (2 << 16) | 2);
result = saa7146_wait_for_debi_done(saa, nobusyloop);
return result < 0 ? result : 0;
}
int ttpci_budget_debiwrite(struct budget *budget, u32 config, int addr,
int count, u32 value, int uselocks, int nobusyloop)
{
if (count > 4 || count <= 0)
return 0;
if (uselocks) {
unsigned long flags;
int result;
spin_lock_irqsave(&budget->debilock, flags);
result = ttpci_budget_debiwrite_nolock(budget, config, addr,
count, value, nobusyloop);
spin_unlock_irqrestore(&budget->debilock, flags);
return result;
}
return ttpci_budget_debiwrite_nolock(budget, config, addr,
count, value, nobusyloop);
}
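/*
 * Usage sketch: the CI code in budget-av.c further below reads one byte of
 * CAM attribute memory through the DEBI bus roughly like this:
 *
 *	val = ttpci_budget_debiread(budget, DEBICICAM, addr & 0xfff, 1, 0, 1);
 *
 * i.e. a 1-byte transfer with uselocks = 0 (the CA poll thread is assumed to
 * be the only caller there) and nobusyloop = 1 so the DEBI wait may sleep.
 */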
/****************************************************************************
* DVB API SECTION
****************************************************************************/
static int budget_start_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
struct budget *budget = demux->priv;
int status = 0;
dprintk(2, "budget: %p\n", budget);
if (!demux->dmx.frontend)
return -EINVAL;
spin_lock(&budget->feedlock);
feed->pusi_seen = false; /* have a clean section start */
if (budget->feeding++ == 0)
status = start_ts_capture(budget);
spin_unlock(&budget->feedlock);
return status;
}
static int budget_stop_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
struct budget *budget = demux->priv;
int status = 0;
dprintk(2, "budget: %p\n", budget);
spin_lock(&budget->feedlock);
if (--budget->feeding == 0)
status = stop_ts_capture(budget);
spin_unlock(&budget->feedlock);
return status;
}
static int budget_register(struct budget *budget)
{
struct dvb_demux *dvbdemux = &budget->demux;
int ret;
dprintk(2, "budget: %p\n", budget);
dvbdemux->priv = (void *) budget;
dvbdemux->filternum = 256;
dvbdemux->feednum = 256;
dvbdemux->start_feed = budget_start_feed;
dvbdemux->stop_feed = budget_stop_feed;
dvbdemux->write_to_decoder = NULL;
dvbdemux->dmx.capabilities = (DMX_TS_FILTERING | DMX_SECTION_FILTERING |
DMX_MEMORY_BASED_FILTERING);
dvb_dmx_init(&budget->demux);
budget->dmxdev.filternum = 256;
budget->dmxdev.demux = &dvbdemux->dmx;
budget->dmxdev.capabilities = 0;
dvb_dmxdev_init(&budget->dmxdev, &budget->dvb_adapter);
budget->hw_frontend.source = DMX_FRONTEND_0;
ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->hw_frontend);
if (ret < 0)
goto err_release_dmx;
budget->mem_frontend.source = DMX_MEMORY_FE;
ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->mem_frontend);
if (ret < 0)
goto err_release_dmx;
ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &budget->hw_frontend);
if (ret < 0)
goto err_release_dmx;
dvb_net_init(&budget->dvb_adapter, &budget->dvb_net, &dvbdemux->dmx);
return 0;
err_release_dmx:
dvb_dmxdev_release(&budget->dmxdev);
dvb_dmx_release(&budget->demux);
return ret;
}
static void budget_unregister(struct budget *budget)
{
struct dvb_demux *dvbdemux = &budget->demux;
dprintk(2, "budget: %p\n", budget);
dvb_net_release(&budget->dvb_net);
dvbdemux->dmx.close(&dvbdemux->dmx);
dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &budget->hw_frontend);
dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &budget->mem_frontend);
dvb_dmxdev_release(&budget->dmxdev);
dvb_dmx_release(&budget->demux);
}
int ttpci_budget_init(struct budget *budget, struct saa7146_dev *dev,
struct saa7146_pci_extension_data *info,
struct module *owner, short *adapter_nums)
{
int ret = 0;
struct budget_info *bi = info->ext_priv;
int max_bufsize;
int height_mask;
memset(budget, 0, sizeof(struct budget));
dprintk(2, "dev: %p, budget: %p\n", dev, budget);
budget->card = bi;
budget->dev = (struct saa7146_dev *) dev;
switch(budget->card->type) {
case BUDGET_FS_ACTIVY:
budget->buffer_width = TS_WIDTH_ACTIVY;
max_bufsize = TS_MAX_BUFSIZE_K_ACTIVY;
height_mask = TS_HEIGHT_MASK_ACTIVY;
break;
case BUDGET_KNC1C:
case BUDGET_KNC1CP:
case BUDGET_CIN1200C:
case BUDGET_KNC1C_MK3:
case BUDGET_KNC1C_TDA10024:
case BUDGET_KNC1CP_MK3:
case BUDGET_CIN1200C_MK3:
budget->buffer_width = TS_WIDTH_DVBC;
max_bufsize = TS_MAX_BUFSIZE_K_DVBC;
height_mask = TS_HEIGHT_MASK_DVBC;
break;
default:
budget->buffer_width = TS_WIDTH;
max_bufsize = TS_MAX_BUFSIZE_K;
height_mask = TS_HEIGHT_MASK;
}
if (dma_buffer_size < TS_MIN_BUFSIZE_K)
dma_buffer_size = TS_MIN_BUFSIZE_K;
else if (dma_buffer_size > max_bufsize)
dma_buffer_size = max_bufsize;
budget->buffer_height = dma_buffer_size * 1024 / budget->buffer_width;
if (budget->buffer_height > 0xfff) {
budget->buffer_height /= 2;
budget->buffer_height &= height_mask;
budget->buffer_size = 2 * budget->buffer_height * budget->buffer_width;
} else {
budget->buffer_height &= height_mask;
budget->buffer_size = budget->buffer_height * budget->buffer_width;
}
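	/*
	 * Worked example (sketch): a plain TT budget card has buffer_width =
	 * TS_WIDTH = 376; with the default bufsize of 188 KiB this gives
	 * buffer_height = 188 * 1024 / 376 = 512, which fits into 12 bits and
	 * is unchanged by TS_HEIGHT_MASK (0x200 & 0xf00 = 0x200), so a single
	 * buffer of 512 * 376 = 192512 bytes is used. Only when the height
	 * would exceed 0xfff is it halved and an odd/even double buffer set up.
	 */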
budget->buffer_warning_threshold = budget->buffer_size * 80/100;
budget->buffer_warnings = 0;
budget->buffer_warning_time = jiffies;
dprintk(2, "%s: buffer type = %s, width = %d, height = %d\n",
budget->dev->name,
budget->buffer_size > budget->buffer_width * budget->buffer_height ? "odd/even" : "single",
budget->buffer_width, budget->buffer_height);
printk("%s: dma buffer size %u\n", budget->dev->name, budget->buffer_size);
ret = dvb_register_adapter(&budget->dvb_adapter, budget->card->name,
owner, &budget->dev->pci->dev, adapter_nums);
if (ret < 0)
return ret;
/* set dd1 stream a & b */
saa7146_write(dev, DD1_STREAM_B, 0x00000000);
saa7146_write(dev, MC2, (MASK_09 | MASK_25));
saa7146_write(dev, MC2, (MASK_10 | MASK_26));
saa7146_write(dev, DD1_INIT, 0x02000000);
saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
if (bi->type != BUDGET_FS_ACTIVY)
budget->video_port = BUDGET_VIDEO_PORTB;
else
budget->video_port = BUDGET_VIDEO_PORTA;
spin_lock_init(&budget->feedlock);
spin_lock_init(&budget->debilock);
	/* The Siemens DVB card needs this if the i2c chips are to be
	   recognized before the main driver is loaded */
if (bi->type != BUDGET_FS_ACTIVY)
saa7146_write(dev, GPIO_CTRL, 0x500000); /* GPIO 3 = 1 */
strscpy(budget->i2c_adap.name, budget->card->name,
sizeof(budget->i2c_adap.name));
saa7146_i2c_adapter_prepare(dev, &budget->i2c_adap, SAA7146_I2C_BUS_BIT_RATE_120);
strscpy(budget->i2c_adap.name, budget->card->name,
sizeof(budget->i2c_adap.name));
if (i2c_add_adapter(&budget->i2c_adap) < 0) {
ret = -ENOMEM;
goto err_dvb_unregister;
}
ttpci_eeprom_parse_mac(&budget->i2c_adap, budget->dvb_adapter.proposed_mac);
budget->grabbing = saa7146_vmalloc_build_pgtable(dev->pci, budget->buffer_size, &budget->pt);
if (NULL == budget->grabbing) {
ret = -ENOMEM;
goto err_del_i2c;
}
saa7146_write(dev, PCI_BT_V1, 0x001c0000);
/* upload all */
saa7146_write(dev, GPIO_CTRL, 0x000000);
tasklet_setup(&budget->vpe_tasklet, vpeirq);
/* frontend power on */
if (bi->type != BUDGET_FS_ACTIVY)
saa7146_setgpio(dev, 2, SAA7146_GPIO_OUTHI);
if ((ret = budget_register(budget)) == 0)
return 0; /* Everything OK */
/* An error occurred, cleanup resources */
saa7146_vfree_destroy_pgtable(dev->pci, budget->grabbing, &budget->pt);
err_del_i2c:
i2c_del_adapter(&budget->i2c_adap);
err_dvb_unregister:
dvb_unregister_adapter(&budget->dvb_adapter);
return ret;
}
void ttpci_budget_init_hooks(struct budget *budget)
{
if (budget->dvb_frontend && !budget->read_fe_status) {
budget->read_fe_status = budget->dvb_frontend->ops.read_status;
budget->dvb_frontend->ops.read_status = budget_read_fe_status;
}
}
int ttpci_budget_deinit(struct budget *budget)
{
struct saa7146_dev *dev = budget->dev;
dprintk(2, "budget: %p\n", budget);
budget_unregister(budget);
tasklet_kill(&budget->vpe_tasklet);
saa7146_vfree_destroy_pgtable(dev->pci, budget->grabbing, &budget->pt);
i2c_del_adapter(&budget->i2c_adap);
dvb_unregister_adapter(&budget->dvb_adapter);
return 0;
}
void ttpci_budget_irq10_handler(struct saa7146_dev *dev, u32 * isr)
{
struct budget *budget = dev->ext_priv;
dprintk(8, "dev: %p, budget: %p\n", dev, budget);
if (*isr & MASK_10)
tasklet_schedule(&budget->vpe_tasklet);
}
void ttpci_budget_set_video_port(struct saa7146_dev *dev, int video_port)
{
struct budget *budget = dev->ext_priv;
spin_lock(&budget->feedlock);
budget->video_port = video_port;
if (budget->feeding) {
stop_ts_capture(budget);
start_ts_capture(budget);
}
spin_unlock(&budget->feedlock);
}
EXPORT_SYMBOL_GPL(ttpci_budget_debiread);
EXPORT_SYMBOL_GPL(ttpci_budget_debiwrite);
EXPORT_SYMBOL_GPL(ttpci_budget_init);
EXPORT_SYMBOL_GPL(ttpci_budget_init_hooks);
EXPORT_SYMBOL_GPL(ttpci_budget_deinit);
EXPORT_SYMBOL_GPL(ttpci_budget_irq10_handler);
EXPORT_SYMBOL_GPL(ttpci_budget_set_video_port);
EXPORT_SYMBOL_GPL(budget_debug);
MODULE_LICENSE("GPL");
| linux-master | drivers/media/pci/ttpci/budget-core.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* budget-av.c: driver for the SAA7146 based Budget DVB cards
* with analog video in
*
* Compiled from various sources by Michael Hunold <[email protected]>
*
* CI interface support (c) 2004 Olivier Gournet <[email protected]> &
* Andrew de Quincey <[email protected]>
*
* Copyright (C) 2002 Ralph Metzler <[email protected]>
*
* Copyright (C) 1999-2002 Ralph Metzler
* & Marcus Metzler for convergence integrated media GmbH
*
* the project's page is at https://linuxtv.org
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "budget.h"
#include "stv0299.h"
#include "stb0899_drv.h"
#include "stb0899_reg.h"
#include "stb0899_cfg.h"
#include "tda8261.h"
#include "tda8261_cfg.h"
#include "tda1002x.h"
#include "tda1004x.h"
#include "tua6100.h"
#include "dvb-pll.h"
#include <media/drv-intf/saa7146_vv.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/spinlock.h>
#include <media/dvb_ca_en50221.h>
#define DEBICICAM 0x02420000
#define SLOTSTATUS_NONE 1
#define SLOTSTATUS_PRESENT 2
#define SLOTSTATUS_RESET 4
#define SLOTSTATUS_READY 8
#define SLOTSTATUS_OCCUPIED (SLOTSTATUS_PRESENT|SLOTSTATUS_RESET|SLOTSTATUS_READY)
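/*
 * Rough slot-status lifecycle as implemented by the ciintf_* callbacks below:
 * NONE -> PRESENT (the card-detect GPIO reads active, or a DEBI read
 * succeeds), PRESENT -> RESET (ciintf_slot_reset), RESET -> READY (attribute
 * memory offset 0 reads back 0x1d); a DEBI timeout at any point drops the
 * slot back to NONE via ciintf_slot_shutdown().
 */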
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct budget_av {
struct budget budget;
struct video_device vd;
int cur_input;
int has_saa7113;
struct tasklet_struct ciintf_irq_tasklet;
int slot_status;
struct dvb_ca_en50221 ca;
u8 reinitialise_demod:1;
};
static int ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int slot);
/* GPIO Connections:
* 0 - Vcc/Reset (Reset is controlled by capacitor). Resets the frontend *AS WELL*!
* 1 - CI memory select 0=>IO memory, 1=>Attribute Memory
* 2 - CI Card Enable (Active Low)
* 3 - CI Card Detect
*/
/****************************************************************************
* INITIALIZATION
****************************************************************************/
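/*
 * Note on the I2C helpers below: they take the 8-bit (datasheet) chip
 * address, while struct i2c_msg expects the 7-bit address, hence the
 * "id / 2". For example the SAA7113 video decoder is passed as 0x4a here,
 * which becomes the 7-bit bus address 0x25.
 */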
static u8 i2c_readreg(struct i2c_adapter *i2c, u8 id, u8 reg)
{
u8 mm1[] = { 0x00 };
u8 mm2[] = { 0x00 };
struct i2c_msg msgs[2];
msgs[0].flags = 0;
msgs[1].flags = I2C_M_RD;
msgs[0].addr = msgs[1].addr = id / 2;
mm1[0] = reg;
msgs[0].len = 1;
msgs[1].len = 1;
msgs[0].buf = mm1;
msgs[1].buf = mm2;
i2c_transfer(i2c, msgs, 2);
return mm2[0];
}
static int i2c_readregs(struct i2c_adapter *i2c, u8 id, u8 reg, u8 * buf, u8 len)
{
u8 mm1[] = { reg };
struct i2c_msg msgs[2] = {
{.addr = id / 2,.flags = 0,.buf = mm1,.len = 1},
{.addr = id / 2,.flags = I2C_M_RD,.buf = buf,.len = len}
};
if (i2c_transfer(i2c, msgs, 2) != 2)
return -EIO;
return 0;
}
static int i2c_writereg(struct i2c_adapter *i2c, u8 id, u8 reg, u8 val)
{
u8 msg[2] = { reg, val };
struct i2c_msg msgs;
msgs.flags = 0;
msgs.addr = id / 2;
msgs.len = 2;
msgs.buf = msg;
return i2c_transfer(i2c, &msgs, 1);
}
static int ciintf_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address)
{
struct budget_av *budget_av = ca->data;
int result;
if (slot != 0)
return -EINVAL;
saa7146_setgpio(budget_av->budget.dev, 1, SAA7146_GPIO_OUTHI);
udelay(1);
result = ttpci_budget_debiread(&budget_av->budget, DEBICICAM, address & 0xfff, 1, 0, 1);
if (result == -ETIMEDOUT) {
ciintf_slot_shutdown(ca, slot);
pr_info("cam ejected 1\n");
}
return result;
}
static int ciintf_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address, u8 value)
{
struct budget_av *budget_av = ca->data;
int result;
if (slot != 0)
return -EINVAL;
saa7146_setgpio(budget_av->budget.dev, 1, SAA7146_GPIO_OUTHI);
udelay(1);
result = ttpci_budget_debiwrite(&budget_av->budget, DEBICICAM, address & 0xfff, 1, value, 0, 1);
if (result == -ETIMEDOUT) {
ciintf_slot_shutdown(ca, slot);
pr_info("cam ejected 2\n");
}
return result;
}
static int ciintf_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address)
{
struct budget_av *budget_av = ca->data;
int result;
if (slot != 0)
return -EINVAL;
saa7146_setgpio(budget_av->budget.dev, 1, SAA7146_GPIO_OUTLO);
udelay(1);
result = ttpci_budget_debiread(&budget_av->budget, DEBICICAM, address & 3, 1, 0, 0);
if (result == -ETIMEDOUT) {
ciintf_slot_shutdown(ca, slot);
pr_info("cam ejected 3\n");
return -ETIMEDOUT;
}
return result;
}
static int ciintf_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address, u8 value)
{
struct budget_av *budget_av = ca->data;
int result;
if (slot != 0)
return -EINVAL;
saa7146_setgpio(budget_av->budget.dev, 1, SAA7146_GPIO_OUTLO);
udelay(1);
result = ttpci_budget_debiwrite(&budget_av->budget, DEBICICAM, address & 3, 1, value, 0, 0);
if (result == -ETIMEDOUT) {
ciintf_slot_shutdown(ca, slot);
pr_info("cam ejected 5\n");
}
return result;
}
static int ciintf_slot_reset(struct dvb_ca_en50221 *ca, int slot)
{
struct budget_av *budget_av = ca->data;
struct saa7146_dev *saa = budget_av->budget.dev;
if (slot != 0)
return -EINVAL;
dprintk(1, "ciintf_slot_reset\n");
budget_av->slot_status = SLOTSTATUS_RESET;
saa7146_setgpio(saa, 2, SAA7146_GPIO_OUTHI); /* disable card */
saa7146_setgpio(saa, 0, SAA7146_GPIO_OUTHI); /* Vcc off */
msleep(2);
saa7146_setgpio(saa, 0, SAA7146_GPIO_OUTLO); /* Vcc on */
msleep(20); /* 20 ms Vcc settling time */
saa7146_setgpio(saa, 2, SAA7146_GPIO_OUTLO); /* enable card */
ttpci_budget_set_video_port(saa, BUDGET_VIDEO_PORTB);
msleep(20);
/* reinitialise the frontend if necessary */
if (budget_av->reinitialise_demod)
dvb_frontend_reinitialise(budget_av->budget.dvb_frontend);
return 0;
}
static int ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
{
struct budget_av *budget_av = ca->data;
struct saa7146_dev *saa = budget_av->budget.dev;
if (slot != 0)
return -EINVAL;
dprintk(1, "ciintf_slot_shutdown\n");
ttpci_budget_set_video_port(saa, BUDGET_VIDEO_PORTB);
budget_av->slot_status = SLOTSTATUS_NONE;
return 0;
}
static int ciintf_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
{
struct budget_av *budget_av = ca->data;
struct saa7146_dev *saa = budget_av->budget.dev;
if (slot != 0)
return -EINVAL;
dprintk(1, "ciintf_slot_ts_enable: %d\n", budget_av->slot_status);
ttpci_budget_set_video_port(saa, BUDGET_VIDEO_PORTA);
return 0;
}
static int ciintf_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
{
struct budget_av *budget_av = ca->data;
struct saa7146_dev *saa = budget_av->budget.dev;
int result;
if (slot != 0)
return -EINVAL;
/* test the card detect line - needs to be done carefully
* since it never goes high for some CAMs on this interface (e.g. topuptv) */
if (budget_av->slot_status == SLOTSTATUS_NONE) {
saa7146_setgpio(saa, 3, SAA7146_GPIO_INPUT);
udelay(1);
if (saa7146_read(saa, PSR) & MASK_06) {
if (budget_av->slot_status == SLOTSTATUS_NONE) {
budget_av->slot_status = SLOTSTATUS_PRESENT;
pr_info("cam inserted A\n");
}
}
saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTLO);
}
	/* We also try to read from IO memory to work around the above detection bug. If
	 * there is no CAM, we will get a timeout. This is only done if no CAM is
	 * present, since the test actually breaks some CAMs :(
	 *
	 * If the CI interface is not open, we also do the above test, since we
	 * don't care if the CAM has problems - we'll be resetting it on open() anyway */
if ((budget_av->slot_status == SLOTSTATUS_NONE) || (!open)) {
saa7146_setgpio(budget_av->budget.dev, 1, SAA7146_GPIO_OUTLO);
result = ttpci_budget_debiread(&budget_av->budget, DEBICICAM, 0, 1, 0, 1);
if ((result >= 0) && (budget_av->slot_status == SLOTSTATUS_NONE)) {
budget_av->slot_status = SLOTSTATUS_PRESENT;
pr_info("cam inserted B\n");
} else if (result < 0) {
if (budget_av->slot_status != SLOTSTATUS_NONE) {
ciintf_slot_shutdown(ca, slot);
pr_info("cam ejected 5\n");
return 0;
}
}
}
/* read from attribute memory in reset/ready state to know when the CAM is ready */
if (budget_av->slot_status == SLOTSTATUS_RESET) {
result = ciintf_read_attribute_mem(ca, slot, 0);
if (result == 0x1d) {
budget_av->slot_status = SLOTSTATUS_READY;
}
}
/* work out correct return code */
if (budget_av->slot_status != SLOTSTATUS_NONE) {
if (budget_av->slot_status & SLOTSTATUS_READY) {
return DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY;
}
return DVB_CA_EN50221_POLL_CAM_PRESENT;
}
return 0;
}
static int ciintf_init(struct budget_av *budget_av)
{
struct saa7146_dev *saa = budget_av->budget.dev;
int result;
memset(&budget_av->ca, 0, sizeof(struct dvb_ca_en50221));
saa7146_setgpio(saa, 0, SAA7146_GPIO_OUTLO);
saa7146_setgpio(saa, 1, SAA7146_GPIO_OUTLO);
saa7146_setgpio(saa, 2, SAA7146_GPIO_OUTLO);
saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTLO);
/* Enable DEBI pins */
saa7146_write(saa, MC1, MASK_27 | MASK_11);
/* register CI interface */
budget_av->ca.owner = THIS_MODULE;
budget_av->ca.read_attribute_mem = ciintf_read_attribute_mem;
budget_av->ca.write_attribute_mem = ciintf_write_attribute_mem;
budget_av->ca.read_cam_control = ciintf_read_cam_control;
budget_av->ca.write_cam_control = ciintf_write_cam_control;
budget_av->ca.slot_reset = ciintf_slot_reset;
budget_av->ca.slot_shutdown = ciintf_slot_shutdown;
budget_av->ca.slot_ts_enable = ciintf_slot_ts_enable;
budget_av->ca.poll_slot_status = ciintf_poll_slot_status;
budget_av->ca.data = budget_av;
budget_av->budget.ci_present = 1;
budget_av->slot_status = SLOTSTATUS_NONE;
if ((result = dvb_ca_en50221_init(&budget_av->budget.dvb_adapter,
&budget_av->ca, 0, 1)) != 0) {
pr_err("ci initialisation failed\n");
goto error;
}
pr_info("ci interface initialised\n");
return 0;
error:
saa7146_write(saa, MC1, MASK_27);
return result;
}
static void ciintf_deinit(struct budget_av *budget_av)
{
struct saa7146_dev *saa = budget_av->budget.dev;
saa7146_setgpio(saa, 0, SAA7146_GPIO_INPUT);
saa7146_setgpio(saa, 1, SAA7146_GPIO_INPUT);
saa7146_setgpio(saa, 2, SAA7146_GPIO_INPUT);
saa7146_setgpio(saa, 3, SAA7146_GPIO_INPUT);
/* release the CA device */
dvb_ca_en50221_release(&budget_av->ca);
/* disable DEBI pins */
saa7146_write(saa, MC1, MASK_27);
}
static const u8 saa7113_tab[] = {
0x01, 0x08,
0x02, 0xc0,
0x03, 0x33,
0x04, 0x00,
0x05, 0x00,
0x06, 0xeb,
0x07, 0xe0,
0x08, 0x28,
0x09, 0x00,
0x0a, 0x80,
0x0b, 0x47,
0x0c, 0x40,
0x0d, 0x00,
0x0e, 0x01,
0x0f, 0x44,
0x10, 0x08,
0x11, 0x0c,
0x12, 0x7b,
0x13, 0x00,
0x15, 0x00, 0x16, 0x00, 0x17, 0x00,
0x57, 0xff,
0x40, 0x82, 0x58, 0x00, 0x59, 0x54, 0x5a, 0x07,
0x5b, 0x83, 0x5e, 0x00,
0xff
};
static int saa7113_init(struct budget_av *budget_av)
{
struct budget *budget = &budget_av->budget;
struct saa7146_dev *saa = budget->dev;
const u8 *data = saa7113_tab;
saa7146_setgpio(saa, 0, SAA7146_GPIO_OUTHI);
msleep(200);
if (i2c_writereg(&budget->i2c_adap, 0x4a, 0x01, 0x08) != 1) {
dprintk(1, "saa7113 not found on KNC card\n");
return -ENODEV;
}
dprintk(1, "saa7113 detected and initializing\n");
while (*data != 0xff) {
i2c_writereg(&budget->i2c_adap, 0x4a, *data, *(data + 1));
data += 2;
}
dprintk(1, "saa7113 status=%02x\n", i2c_readreg(&budget->i2c_adap, 0x4a, 0x1f));
return 0;
}
static int saa7113_setinput(struct budget_av *budget_av, int input)
{
struct budget *budget = &budget_av->budget;
if (1 != budget_av->has_saa7113)
return -ENODEV;
if (input == 1) {
i2c_writereg(&budget->i2c_adap, 0x4a, 0x02, 0xc7);
i2c_writereg(&budget->i2c_adap, 0x4a, 0x09, 0x80);
} else if (input == 0) {
i2c_writereg(&budget->i2c_adap, 0x4a, 0x02, 0xc0);
i2c_writereg(&budget->i2c_adap, 0x4a, 0x09, 0x00);
} else
return -EINVAL;
budget_av->cur_input = input;
return 0;
}
static int philips_su1278_ty_ci_set_symbol_rate(struct dvb_frontend *fe, u32 srate, u32 ratio)
{
u8 aclk = 0;
u8 bclk = 0;
u8 m1;
aclk = 0xb5;
if (srate < 2000000)
bclk = 0x86;
else if (srate < 5000000)
bclk = 0x89;
else if (srate < 15000000)
bclk = 0x8f;
else if (srate < 45000000)
bclk = 0x95;
m1 = 0x14;
if (srate < 4000000)
m1 = 0x10;
stv0299_writereg(fe, 0x13, aclk);
stv0299_writereg(fe, 0x14, bclk);
stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff);
stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff);
stv0299_writereg(fe, 0x21, (ratio) & 0xf0);
stv0299_writereg(fe, 0x0f, 0x80 | m1);
return 0;
}
static int philips_su1278_ty_ci_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u32 div;
u8 buf[4];
struct budget *budget = fe->dvb->priv;
struct i2c_msg msg = {.addr = 0x61,.flags = 0,.buf = buf,.len = sizeof(buf) };
if ((c->frequency < 950000) || (c->frequency > 2150000))
return -EINVAL;
div = (c->frequency + (125 - 1)) / 125; /* round correctly */
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
buf[2] = 0x80 | ((div & 0x18000) >> 10) | 4;
buf[3] = 0x20;
if (c->symbol_rate < 4000000)
buf[3] |= 1;
if (c->frequency < 1250000)
buf[3] |= 0;
else if (c->frequency < 1550000)
buf[3] |= 0x40;
else if (c->frequency < 2050000)
buf[3] |= 0x80;
else if (c->frequency < 2150000)
buf[3] |= 0xC0;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget->i2c_adap, &msg, 1) != 1)
return -EIO;
return 0;
}
static u8 typhoon_cinergy1200s_inittab[] = {
0x01, 0x15,
0x02, 0x30,
0x03, 0x00,
0x04, 0x7d, /* F22FR = 0x7d, F22 = f_VCO / 128 / 0x7d = 22 kHz */
0x05, 0x35, /* I2CT = 0, SCLT = 1, SDAT = 1 */
	0x06, 0x40, /* DAC not used, set to high impedance mode */
0x07, 0x00, /* DAC LSB */
0x08, 0x40, /* DiSEqC off */
0x09, 0x00, /* FIFO */
0x0c, 0x51, /* OP1 ctl = Normal, OP1 val = 1 (LNB Power ON) */
0x0d, 0x82, /* DC offset compensation = ON, beta_agc1 = 2 */
0x0e, 0x23, /* alpha_tmg = 2, beta_tmg = 3 */
0x10, 0x3f, // AGC2 0x3d
0x11, 0x84,
0x12, 0xb9,
0x15, 0xc9, // lock detector threshold
0x16, 0x00,
0x17, 0x00,
0x18, 0x00,
0x19, 0x00,
0x1a, 0x00,
0x1f, 0x50,
0x20, 0x00,
0x21, 0x00,
0x22, 0x00,
0x23, 0x00,
0x28, 0x00, // out imp: normal out type: parallel FEC mode:0
0x29, 0x1e, // 1/2 threshold
0x2a, 0x14, // 2/3 threshold
0x2b, 0x0f, // 3/4 threshold
0x2c, 0x09, // 5/6 threshold
0x2d, 0x05, // 7/8 threshold
0x2e, 0x01,
0x31, 0x1f, // test all FECs
0x32, 0x19, // viterbi and synchro search
0x33, 0xfc, // rs control
0x34, 0x93, // error control
0x0f, 0x92,
0xff, 0xff
};
static const struct stv0299_config typhoon_config = {
.demod_address = 0x68,
.inittab = typhoon_cinergy1200s_inittab,
.mclk = 88000000UL,
.invert = 0,
.skip_reinit = 0,
.lock_output = STV0299_LOCKOUTPUT_1,
.volt13_op0_op1 = STV0299_VOLT13_OP0,
.min_delay_ms = 100,
.set_symbol_rate = philips_su1278_ty_ci_set_symbol_rate,
};
static const struct stv0299_config cinergy_1200s_config = {
.demod_address = 0x68,
.inittab = typhoon_cinergy1200s_inittab,
.mclk = 88000000UL,
.invert = 0,
.skip_reinit = 0,
.lock_output = STV0299_LOCKOUTPUT_0,
.volt13_op0_op1 = STV0299_VOLT13_OP0,
.min_delay_ms = 100,
.set_symbol_rate = philips_su1278_ty_ci_set_symbol_rate,
};
static const struct stv0299_config cinergy_1200s_1894_0010_config = {
.demod_address = 0x68,
.inittab = typhoon_cinergy1200s_inittab,
.mclk = 88000000UL,
.invert = 1,
.skip_reinit = 0,
.lock_output = STV0299_LOCKOUTPUT_1,
.volt13_op0_op1 = STV0299_VOLT13_OP0,
.min_delay_ms = 100,
.set_symbol_rate = philips_su1278_ty_ci_set_symbol_rate,
};
static int philips_cu1216_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct budget *budget = fe->dvb->priv;
u8 buf[6];
struct i2c_msg msg = {.addr = 0x60,.flags = 0,.buf = buf,.len = sizeof(buf) };
int i;
#define CU1216_IF 36125000
#define TUNER_MUL 62500
u32 div = (c->frequency + CU1216_IF + TUNER_MUL / 2) / TUNER_MUL;
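	/*
	 * Worked example (sketch): for a 346 MHz channel,
	 * div = (346000000 + 36125000 + 31250) / 62500 = 6114 with integer
	 * division, i.e. the PLL is programmed in 62.5 kHz steps to the RF
	 * frequency plus the 36.125 MHz IF.
	 */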
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
buf[2] = 0xce;
buf[3] = (c->frequency < 150000000 ? 0x01 :
c->frequency < 445000000 ? 0x02 : 0x04);
buf[4] = 0xde;
buf[5] = 0x20;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget->i2c_adap, &msg, 1) != 1)
return -EIO;
/* wait for the pll lock */
msg.flags = I2C_M_RD;
msg.len = 1;
for (i = 0; i < 20; i++) {
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget->i2c_adap, &msg, 1) == 1 && (buf[0] & 0x40))
break;
msleep(10);
}
/* switch the charge pump to the lower current */
msg.flags = 0;
msg.len = 2;
msg.buf = &buf[2];
buf[2] &= ~0x40;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget->i2c_adap, &msg, 1) != 1)
return -EIO;
return 0;
}
static struct tda1002x_config philips_cu1216_config = {
.demod_address = 0x0c,
.invert = 1,
};
static struct tda1002x_config philips_cu1216_config_altaddress = {
.demod_address = 0x0d,
.invert = 0,
};
static struct tda10023_config philips_cu1216_tda10023_config = {
.demod_address = 0x0c,
.invert = 1,
};
static int philips_tu1216_tuner_init(struct dvb_frontend *fe)
{
struct budget *budget = fe->dvb->priv;
static u8 tu1216_init[] = { 0x0b, 0xf5, 0x85, 0xab };
struct i2c_msg tuner_msg = {.addr = 0x60,.flags = 0,.buf = tu1216_init,.len = sizeof(tu1216_init) };
// setup PLL configuration
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget->i2c_adap, &tuner_msg, 1) != 1)
return -EIO;
msleep(1);
return 0;
}
static int philips_tu1216_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct budget *budget = fe->dvb->priv;
u8 tuner_buf[4];
struct i2c_msg tuner_msg = {.addr = 0x60,.flags = 0,.buf = tuner_buf,.len =
sizeof(tuner_buf) };
int tuner_frequency = 0;
u8 band, cp, filter;
// determine charge pump
tuner_frequency = c->frequency + 36166000;
if (tuner_frequency < 87000000)
return -EINVAL;
else if (tuner_frequency < 130000000)
cp = 3;
else if (tuner_frequency < 160000000)
cp = 5;
else if (tuner_frequency < 200000000)
cp = 6;
else if (tuner_frequency < 290000000)
cp = 3;
else if (tuner_frequency < 420000000)
cp = 5;
else if (tuner_frequency < 480000000)
cp = 6;
else if (tuner_frequency < 620000000)
cp = 3;
else if (tuner_frequency < 830000000)
cp = 5;
else if (tuner_frequency < 895000000)
cp = 7;
else
return -EINVAL;
// determine band
if (c->frequency < 49000000)
return -EINVAL;
else if (c->frequency < 161000000)
band = 1;
else if (c->frequency < 444000000)
band = 2;
else if (c->frequency < 861000000)
band = 4;
else
return -EINVAL;
// setup PLL filter
switch (c->bandwidth_hz) {
case 6000000:
filter = 0;
break;
case 7000000:
filter = 0;
break;
case 8000000:
filter = 1;
break;
default:
return -EINVAL;
}
// calculate divisor
// ((36166000+((1000000/6)/2)) + Finput)/(1000000/6)
tuner_frequency = (((c->frequency / 1000) * 6) + 217496) / 1000;
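	// worked example (sketch): for c->frequency = 482000000 Hz the line
	// above gives (482000 * 6 + 217496) / 1000 = 3109, i.e. RF plus the
	// 36.166 MHz IF expressed in 1/6 MHz (166.67 kHz) tuner steps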
// setup tuner buffer
tuner_buf[0] = (tuner_frequency >> 8) & 0x7f;
tuner_buf[1] = tuner_frequency & 0xff;
tuner_buf[2] = 0xca;
tuner_buf[3] = (cp << 5) | (filter << 3) | band;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget->i2c_adap, &tuner_msg, 1) != 1)
return -EIO;
msleep(1);
return 0;
}
static int philips_tu1216_request_firmware(struct dvb_frontend *fe,
const struct firmware **fw, char *name)
{
struct budget *budget = fe->dvb->priv;
return request_firmware(fw, name, &budget->dev->pci->dev);
}
static struct tda1004x_config philips_tu1216_config = {
.demod_address = 0x8,
.invert = 1,
.invert_oclk = 1,
.xtal_freq = TDA10046_XTAL_4M,
.agc_config = TDA10046_AGC_DEFAULT,
.if_freq = TDA10046_FREQ_3617,
.request_firmware = philips_tu1216_request_firmware,
};
static u8 philips_sd1878_inittab[] = {
0x01, 0x15,
0x02, 0x30,
0x03, 0x00,
0x04, 0x7d,
0x05, 0x35,
0x06, 0x40,
0x07, 0x00,
0x08, 0x43,
0x09, 0x02,
0x0C, 0x51,
0x0D, 0x82,
0x0E, 0x23,
0x10, 0x3f,
0x11, 0x84,
0x12, 0xb9,
0x15, 0xc9,
0x16, 0x19,
0x17, 0x8c,
0x18, 0x59,
0x19, 0xf8,
0x1a, 0xfe,
0x1c, 0x7f,
0x1d, 0x00,
0x1e, 0x00,
0x1f, 0x50,
0x20, 0x00,
0x21, 0x00,
0x22, 0x00,
0x23, 0x00,
0x28, 0x00,
0x29, 0x28,
0x2a, 0x14,
0x2b, 0x0f,
0x2c, 0x09,
0x2d, 0x09,
0x31, 0x1f,
0x32, 0x19,
0x33, 0xfc,
0x34, 0x93,
0xff, 0xff
};
static int philips_sd1878_ci_set_symbol_rate(struct dvb_frontend *fe,
u32 srate, u32 ratio)
{
u8 aclk = 0;
u8 bclk = 0;
u8 m1;
aclk = 0xb5;
if (srate < 2000000)
bclk = 0x86;
else if (srate < 5000000)
bclk = 0x89;
else if (srate < 15000000)
bclk = 0x8f;
else if (srate < 45000000)
bclk = 0x95;
m1 = 0x14;
if (srate < 4000000)
m1 = 0x10;
stv0299_writereg(fe, 0x0e, 0x23);
stv0299_writereg(fe, 0x0f, 0x94);
stv0299_writereg(fe, 0x10, 0x39);
stv0299_writereg(fe, 0x13, aclk);
stv0299_writereg(fe, 0x14, bclk);
stv0299_writereg(fe, 0x15, 0xc9);
stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff);
stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff);
stv0299_writereg(fe, 0x21, (ratio) & 0xf0);
stv0299_writereg(fe, 0x0f, 0x80 | m1);
return 0;
}
static const struct stv0299_config philips_sd1878_config = {
.demod_address = 0x68,
.inittab = philips_sd1878_inittab,
.mclk = 88000000UL,
.invert = 0,
.skip_reinit = 0,
.lock_output = STV0299_LOCKOUTPUT_1,
.volt13_op0_op1 = STV0299_VOLT13_OP0,
.min_delay_ms = 100,
.set_symbol_rate = philips_sd1878_ci_set_symbol_rate,
};
/* KNC1 DVB-S (STB0899) Inittab */
static const struct stb0899_s1_reg knc1_stb0899_s1_init_1[] = {
{ STB0899_DEV_ID , 0x81 },
{ STB0899_DISCNTRL1 , 0x32 },
{ STB0899_DISCNTRL2 , 0x80 },
{ STB0899_DISRX_ST0 , 0x04 },
{ STB0899_DISRX_ST1 , 0x00 },
{ STB0899_DISPARITY , 0x00 },
{ STB0899_DISSTATUS , 0x20 },
{ STB0899_DISF22 , 0x8c },
{ STB0899_DISF22RX , 0x9a },
{ STB0899_SYSREG , 0x0b },
{ STB0899_ACRPRESC , 0x11 },
{ STB0899_ACRDIV1 , 0x0a },
{ STB0899_ACRDIV2 , 0x05 },
{ STB0899_DACR1 , 0x00 },
{ STB0899_DACR2 , 0x00 },
{ STB0899_OUTCFG , 0x00 },
{ STB0899_MODECFG , 0x00 },
{ STB0899_IRQSTATUS_3 , 0x30 },
{ STB0899_IRQSTATUS_2 , 0x00 },
{ STB0899_IRQSTATUS_1 , 0x00 },
{ STB0899_IRQSTATUS_0 , 0x00 },
{ STB0899_IRQMSK_3 , 0xf3 },
{ STB0899_IRQMSK_2 , 0xfc },
{ STB0899_IRQMSK_1 , 0xff },
{ STB0899_IRQMSK_0 , 0xff },
{ STB0899_IRQCFG , 0x00 },
{ STB0899_I2CCFG , 0x88 },
{ STB0899_I2CRPT , 0x58 }, /* Repeater=8, Stop=disabled */
{ STB0899_IOPVALUE5 , 0x00 },
{ STB0899_IOPVALUE4 , 0x20 },
{ STB0899_IOPVALUE3 , 0xc9 },
{ STB0899_IOPVALUE2 , 0x90 },
{ STB0899_IOPVALUE1 , 0x40 },
{ STB0899_IOPVALUE0 , 0x00 },
{ STB0899_GPIO00CFG , 0x82 },
{ STB0899_GPIO01CFG , 0x82 },
{ STB0899_GPIO02CFG , 0x82 },
{ STB0899_GPIO03CFG , 0x82 },
{ STB0899_GPIO04CFG , 0x82 },
{ STB0899_GPIO05CFG , 0x82 },
{ STB0899_GPIO06CFG , 0x82 },
{ STB0899_GPIO07CFG , 0x82 },
{ STB0899_GPIO08CFG , 0x82 },
{ STB0899_GPIO09CFG , 0x82 },
{ STB0899_GPIO10CFG , 0x82 },
{ STB0899_GPIO11CFG , 0x82 },
{ STB0899_GPIO12CFG , 0x82 },
{ STB0899_GPIO13CFG , 0x82 },
{ STB0899_GPIO14CFG , 0x82 },
{ STB0899_GPIO15CFG , 0x82 },
{ STB0899_GPIO16CFG , 0x82 },
{ STB0899_GPIO17CFG , 0x82 },
{ STB0899_GPIO18CFG , 0x82 },
{ STB0899_GPIO19CFG , 0x82 },
{ STB0899_GPIO20CFG , 0x82 },
{ STB0899_SDATCFG , 0xb8 },
{ STB0899_SCLTCFG , 0xba },
{ STB0899_AGCRFCFG , 0x08 }, /* 0x1c */
{ STB0899_GPIO22 , 0x82 }, /* AGCBB2CFG */
{ STB0899_GPIO21 , 0x91 }, /* AGCBB1CFG */
{ STB0899_DIRCLKCFG , 0x82 },
{ STB0899_CLKOUT27CFG , 0x7e },
{ STB0899_STDBYCFG , 0x82 },
{ STB0899_CS0CFG , 0x82 },
{ STB0899_CS1CFG , 0x82 },
{ STB0899_DISEQCOCFG , 0x20 },
{ STB0899_GPIO32CFG , 0x82 },
{ STB0899_GPIO33CFG , 0x82 },
{ STB0899_GPIO34CFG , 0x82 },
{ STB0899_GPIO35CFG , 0x82 },
{ STB0899_GPIO36CFG , 0x82 },
{ STB0899_GPIO37CFG , 0x82 },
{ STB0899_GPIO38CFG , 0x82 },
{ STB0899_GPIO39CFG , 0x82 },
{ STB0899_NCOARSE , 0x15 }, /* 0x15 = 27 Mhz Clock, F/3 = 198MHz, F/6 = 99MHz */
{ STB0899_SYNTCTRL , 0x02 }, /* 0x00 = CLK from CLKI, 0x02 = CLK from XTALI */
{ STB0899_FILTCTRL , 0x00 },
{ STB0899_SYSCTRL , 0x00 },
{ STB0899_STOPCLK1 , 0x20 },
{ STB0899_STOPCLK2 , 0x00 },
{ STB0899_INTBUFSTATUS , 0x00 },
{ STB0899_INTBUFCTRL , 0x0a },
{ 0xffff , 0xff },
};
static const struct stb0899_s1_reg knc1_stb0899_s1_init_3[] = {
{ STB0899_DEMOD , 0x00 },
{ STB0899_RCOMPC , 0xc9 },
{ STB0899_AGC1CN , 0x41 },
{ STB0899_AGC1REF , 0x08 },
{ STB0899_RTC , 0x7a },
{ STB0899_TMGCFG , 0x4e },
{ STB0899_AGC2REF , 0x33 },
{ STB0899_TLSR , 0x84 },
{ STB0899_CFD , 0xee },
{ STB0899_ACLC , 0x87 },
{ STB0899_BCLC , 0x94 },
{ STB0899_EQON , 0x41 },
{ STB0899_LDT , 0xdd },
{ STB0899_LDT2 , 0xc9 },
{ STB0899_EQUALREF , 0xb4 },
{ STB0899_TMGRAMP , 0x10 },
{ STB0899_TMGTHD , 0x30 },
{ STB0899_IDCCOMP , 0xfb },
{ STB0899_QDCCOMP , 0x03 },
{ STB0899_POWERI , 0x3b },
{ STB0899_POWERQ , 0x3d },
{ STB0899_RCOMP , 0x81 },
{ STB0899_AGCIQIN , 0x80 },
{ STB0899_AGC2I1 , 0x04 },
{ STB0899_AGC2I2 , 0xf5 },
{ STB0899_TLIR , 0x25 },
{ STB0899_RTF , 0x80 },
{ STB0899_DSTATUS , 0x00 },
{ STB0899_LDI , 0xca },
{ STB0899_CFRM , 0xf1 },
{ STB0899_CFRL , 0xf3 },
{ STB0899_NIRM , 0x2a },
{ STB0899_NIRL , 0x05 },
{ STB0899_ISYMB , 0x17 },
{ STB0899_QSYMB , 0xfa },
{ STB0899_SFRH , 0x2f },
{ STB0899_SFRM , 0x68 },
{ STB0899_SFRL , 0x40 },
{ STB0899_SFRUPH , 0x2f },
{ STB0899_SFRUPM , 0x68 },
{ STB0899_SFRUPL , 0x40 },
{ STB0899_EQUAI1 , 0xfd },
{ STB0899_EQUAQ1 , 0x04 },
{ STB0899_EQUAI2 , 0x0f },
{ STB0899_EQUAQ2 , 0xff },
{ STB0899_EQUAI3 , 0xdf },
{ STB0899_EQUAQ3 , 0xfa },
{ STB0899_EQUAI4 , 0x37 },
{ STB0899_EQUAQ4 , 0x0d },
{ STB0899_EQUAI5 , 0xbd },
{ STB0899_EQUAQ5 , 0xf7 },
{ STB0899_DSTATUS2 , 0x00 },
{ STB0899_VSTATUS , 0x00 },
{ STB0899_VERROR , 0xff },
{ STB0899_IQSWAP , 0x2a },
{ STB0899_ECNT1M , 0x00 },
{ STB0899_ECNT1L , 0x00 },
{ STB0899_ECNT2M , 0x00 },
{ STB0899_ECNT2L , 0x00 },
{ STB0899_ECNT3M , 0x00 },
{ STB0899_ECNT3L , 0x00 },
{ STB0899_FECAUTO1 , 0x06 },
{ STB0899_FECM , 0x01 },
{ STB0899_VTH12 , 0xf0 },
{ STB0899_VTH23 , 0xa0 },
{ STB0899_VTH34 , 0x78 },
{ STB0899_VTH56 , 0x4e },
{ STB0899_VTH67 , 0x48 },
{ STB0899_VTH78 , 0x38 },
{ STB0899_PRVIT , 0xff },
{ STB0899_VITSYNC , 0x19 },
{ STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */
{ STB0899_TSULC , 0x42 },
{ STB0899_RSLLC , 0x40 },
{ STB0899_TSLPL , 0x12 },
{ STB0899_TSCFGH , 0x0c },
{ STB0899_TSCFGM , 0x00 },
{ STB0899_TSCFGL , 0x0c },
{ STB0899_TSOUT , 0x4d }, /* 0x0d for CAM */
{ STB0899_RSSYNCDEL , 0x00 },
{ STB0899_TSINHDELH , 0x02 },
{ STB0899_TSINHDELM , 0x00 },
{ STB0899_TSINHDELL , 0x00 },
{ STB0899_TSLLSTKM , 0x00 },
{ STB0899_TSLLSTKL , 0x00 },
{ STB0899_TSULSTKM , 0x00 },
{ STB0899_TSULSTKL , 0xab },
{ STB0899_PCKLENUL , 0x00 },
{ STB0899_PCKLENLL , 0xcc },
{ STB0899_RSPCKLEN , 0xcc },
{ STB0899_TSSTATUS , 0x80 },
{ STB0899_ERRCTRL1 , 0xb6 },
{ STB0899_ERRCTRL2 , 0x96 },
{ STB0899_ERRCTRL3 , 0x89 },
{ STB0899_DMONMSK1 , 0x27 },
{ STB0899_DMONMSK0 , 0x03 },
{ STB0899_DEMAPVIT , 0x5c },
{ STB0899_PLPARM , 0x1f },
{ STB0899_PDELCTRL , 0x48 },
{ STB0899_PDELCTRL2 , 0x00 },
{ STB0899_BBHCTRL1 , 0x00 },
{ STB0899_BBHCTRL2 , 0x00 },
{ STB0899_HYSTTHRESH , 0x77 },
{ STB0899_MATCSTM , 0x00 },
{ STB0899_MATCSTL , 0x00 },
{ STB0899_UPLCSTM , 0x00 },
{ STB0899_UPLCSTL , 0x00 },
{ STB0899_DFLCSTM , 0x00 },
{ STB0899_DFLCSTL , 0x00 },
{ STB0899_SYNCCST , 0x00 },
{ STB0899_SYNCDCSTM , 0x00 },
{ STB0899_SYNCDCSTL , 0x00 },
{ STB0899_ISI_ENTRY , 0x00 },
{ STB0899_ISI_BIT_EN , 0x00 },
{ STB0899_MATSTRM , 0x00 },
{ STB0899_MATSTRL , 0x00 },
{ STB0899_UPLSTRM , 0x00 },
{ STB0899_UPLSTRL , 0x00 },
{ STB0899_DFLSTRM , 0x00 },
{ STB0899_DFLSTRL , 0x00 },
{ STB0899_SYNCSTR , 0x00 },
{ STB0899_SYNCDSTRM , 0x00 },
{ STB0899_SYNCDSTRL , 0x00 },
{ STB0899_CFGPDELSTATUS1 , 0x10 },
{ STB0899_CFGPDELSTATUS2 , 0x00 },
{ STB0899_BBFERRORM , 0x00 },
{ STB0899_BBFERRORL , 0x00 },
{ STB0899_UPKTERRORM , 0x00 },
{ STB0899_UPKTERRORL , 0x00 },
{ 0xffff , 0xff },
};
/* STB0899 demodulator config for the KNC1 and clones */
static struct stb0899_config knc1_dvbs2_config = {
.init_dev = knc1_stb0899_s1_init_1,
.init_s2_demod = stb0899_s2_init_2,
.init_s1_demod = knc1_stb0899_s1_init_3,
.init_s2_fec = stb0899_s2_init_4,
.init_tst = stb0899_s1_init_5,
.postproc = NULL,
.demod_address = 0x68,
// .ts_output_mode = STB0899_OUT_PARALLEL, /* types = SERIAL/PARALLEL */
.block_sync_mode = STB0899_SYNC_FORCED, /* DSS, SYNC_FORCED/UNSYNCED */
// .ts_pfbit_toggle = STB0899_MPEG_NORMAL, /* DirecTV, MPEG toggling seq */
.xtal_freq = 27000000,
.inversion = IQ_SWAP_OFF,
.lo_clk = 76500000,
.hi_clk = 90000000,
.esno_ave = STB0899_DVBS2_ESNO_AVE,
.esno_quant = STB0899_DVBS2_ESNO_QUANT,
.avframes_coarse = STB0899_DVBS2_AVFRAMES_COARSE,
.avframes_fine = STB0899_DVBS2_AVFRAMES_FINE,
.miss_threshold = STB0899_DVBS2_MISS_THRESHOLD,
.uwp_threshold_acq = STB0899_DVBS2_UWP_THRESHOLD_ACQ,
.uwp_threshold_track = STB0899_DVBS2_UWP_THRESHOLD_TRACK,
.uwp_threshold_sof = STB0899_DVBS2_UWP_THRESHOLD_SOF,
.sof_search_timeout = STB0899_DVBS2_SOF_SEARCH_TIMEOUT,
.btr_nco_bits = STB0899_DVBS2_BTR_NCO_BITS,
.btr_gain_shift_offset = STB0899_DVBS2_BTR_GAIN_SHIFT_OFFSET,
.crl_nco_bits = STB0899_DVBS2_CRL_NCO_BITS,
.ldpc_max_iter = STB0899_DVBS2_LDPC_MAX_ITER,
.tuner_get_frequency = tda8261_get_frequency,
.tuner_set_frequency = tda8261_set_frequency,
.tuner_set_bandwidth = NULL,
.tuner_get_bandwidth = tda8261_get_bandwidth,
.tuner_set_rfsiggain = NULL
};
/*
* SD1878/SHA tuner config
* 1F, Single I/P, Horizontal mount, High Sensitivity
*/
static const struct tda8261_config sd1878c_config = {
// .name = "SD1878/SHA",
.addr = 0x60,
.step_size = TDA8261_STEP_1000 /* kHz */
};
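/*
 * read_pwm() below fetches what appears to be the tuner PWM/AGC byte that
 * KNC1-style cards store in the on-board EEPROM at I2C address 0x50; if the
 * read fails or returns 0xff, a default of 0x48 is used. The value ends up
 * as the "pwm" argument of the tda10021/tda10023 attach calls in
 * frontend_init().
 */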
static u8 read_pwm(struct budget_av *budget_av)
{
u8 b = 0xff;
u8 pwm;
struct i2c_msg msg[] = { {.addr = 0x50,.flags = 0,.buf = &b,.len = 1},
{.addr = 0x50,.flags = I2C_M_RD,.buf = &pwm,.len = 1}
};
if ((i2c_transfer(&budget_av->budget.i2c_adap, msg, 2) != 2)
|| (pwm == 0xff))
pwm = 0x48;
return pwm;
}
#define SUBID_DVBS_KNC1 0x0010
#define SUBID_DVBS_KNC1_PLUS 0x0011
#define SUBID_DVBS_TYPHOON 0x4f56
#define SUBID_DVBS_CINERGY1200 0x1154
#define SUBID_DVBS_CYNERGY1200N 0x1155
#define SUBID_DVBS_TV_STAR 0x0014
#define SUBID_DVBS_TV_STAR_PLUS_X4 0x0015
#define SUBID_DVBS_TV_STAR_CI 0x0016
#define SUBID_DVBS2_KNC1 0x0018
#define SUBID_DVBS2_KNC1_OEM 0x0019
#define SUBID_DVBS_EASYWATCH_1 0x001a
#define SUBID_DVBS_EASYWATCH_2 0x001b
#define SUBID_DVBS2_EASYWATCH 0x001d
#define SUBID_DVBS_EASYWATCH 0x001e
#define SUBID_DVBC_EASYWATCH 0x002a
#define SUBID_DVBC_EASYWATCH_MK3 0x002c
#define SUBID_DVBC_KNC1 0x0020
#define SUBID_DVBC_KNC1_PLUS 0x0021
#define SUBID_DVBC_KNC1_MK3 0x0022
#define SUBID_DVBC_KNC1_TDA10024 0x0028
#define SUBID_DVBC_KNC1_PLUS_MK3 0x0023
#define SUBID_DVBC_CINERGY1200 0x1156
#define SUBID_DVBC_CINERGY1200_MK3 0x1176
#define SUBID_DVBT_EASYWATCH 0x003a
#define SUBID_DVBT_KNC1_PLUS 0x0031
#define SUBID_DVBT_KNC1 0x0030
#define SUBID_DVBT_CINERGY1200 0x1157
static void frontend_init(struct budget_av *budget_av)
{
struct saa7146_dev * saa = budget_av->budget.dev;
struct dvb_frontend * fe = NULL;
/* Enable / PowerON Frontend */
saa7146_setgpio(saa, 0, SAA7146_GPIO_OUTLO);
/* Wait for PowerON */
msleep(100);
/* additional setup necessary for the PLUS cards */
switch (saa->pci->subsystem_device) {
case SUBID_DVBS_KNC1_PLUS:
case SUBID_DVBC_KNC1_PLUS:
case SUBID_DVBT_KNC1_PLUS:
case SUBID_DVBC_EASYWATCH:
case SUBID_DVBC_KNC1_PLUS_MK3:
case SUBID_DVBS2_KNC1:
case SUBID_DVBS2_KNC1_OEM:
case SUBID_DVBS2_EASYWATCH:
saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTHI);
break;
}
switch (saa->pci->subsystem_device) {
case SUBID_DVBS_KNC1:
/*
		 * Maybe that setting is needed for other DVB-S cards as well,
		 * but so far it has only been confirmed for this type.
*/
budget_av->reinitialise_demod = 1;
fallthrough;
case SUBID_DVBS_KNC1_PLUS:
case SUBID_DVBS_EASYWATCH_1:
if (saa->pci->subsystem_vendor == 0x1894) {
fe = dvb_attach(stv0299_attach, &cinergy_1200s_1894_0010_config,
&budget_av->budget.i2c_adap);
if (fe) {
dvb_attach(tua6100_attach, fe, 0x60, &budget_av->budget.i2c_adap);
}
} else {
fe = dvb_attach(stv0299_attach, &typhoon_config,
&budget_av->budget.i2c_adap);
if (fe) {
fe->ops.tuner_ops.set_params = philips_su1278_ty_ci_tuner_set_params;
}
}
break;
case SUBID_DVBS_TV_STAR:
case SUBID_DVBS_TV_STAR_PLUS_X4:
case SUBID_DVBS_TV_STAR_CI:
case SUBID_DVBS_CYNERGY1200N:
case SUBID_DVBS_EASYWATCH:
case SUBID_DVBS_EASYWATCH_2:
fe = dvb_attach(stv0299_attach, &philips_sd1878_config,
&budget_av->budget.i2c_adap);
if (fe) {
dvb_attach(dvb_pll_attach, fe, 0x60,
&budget_av->budget.i2c_adap,
DVB_PLL_PHILIPS_SD1878_TDA8261);
}
break;
case SUBID_DVBS_TYPHOON:
fe = dvb_attach(stv0299_attach, &typhoon_config,
&budget_av->budget.i2c_adap);
if (fe) {
fe->ops.tuner_ops.set_params = philips_su1278_ty_ci_tuner_set_params;
}
break;
case SUBID_DVBS2_KNC1:
case SUBID_DVBS2_KNC1_OEM:
case SUBID_DVBS2_EASYWATCH:
budget_av->reinitialise_demod = 1;
if ((fe = dvb_attach(stb0899_attach, &knc1_dvbs2_config, &budget_av->budget.i2c_adap)))
dvb_attach(tda8261_attach, fe, &sd1878c_config, &budget_av->budget.i2c_adap);
break;
case SUBID_DVBS_CINERGY1200:
fe = dvb_attach(stv0299_attach, &cinergy_1200s_config,
&budget_av->budget.i2c_adap);
if (fe) {
fe->ops.tuner_ops.set_params = philips_su1278_ty_ci_tuner_set_params;
}
break;
case SUBID_DVBC_KNC1:
case SUBID_DVBC_KNC1_PLUS:
case SUBID_DVBC_CINERGY1200:
case SUBID_DVBC_EASYWATCH:
budget_av->reinitialise_demod = 1;
budget_av->budget.dev->i2c_bitrate = SAA7146_I2C_BUS_BIT_RATE_240;
fe = dvb_attach(tda10021_attach, &philips_cu1216_config,
&budget_av->budget.i2c_adap,
read_pwm(budget_av));
if (fe == NULL)
fe = dvb_attach(tda10021_attach, &philips_cu1216_config_altaddress,
&budget_av->budget.i2c_adap,
read_pwm(budget_av));
if (fe) {
fe->ops.tuner_ops.set_params = philips_cu1216_tuner_set_params;
}
break;
case SUBID_DVBC_EASYWATCH_MK3:
case SUBID_DVBC_CINERGY1200_MK3:
case SUBID_DVBC_KNC1_MK3:
case SUBID_DVBC_KNC1_TDA10024:
case SUBID_DVBC_KNC1_PLUS_MK3:
budget_av->reinitialise_demod = 1;
budget_av->budget.dev->i2c_bitrate = SAA7146_I2C_BUS_BIT_RATE_240;
fe = dvb_attach(tda10023_attach,
&philips_cu1216_tda10023_config,
&budget_av->budget.i2c_adap,
read_pwm(budget_av));
if (fe) {
fe->ops.tuner_ops.set_params = philips_cu1216_tuner_set_params;
}
break;
case SUBID_DVBT_EASYWATCH:
case SUBID_DVBT_KNC1:
case SUBID_DVBT_KNC1_PLUS:
case SUBID_DVBT_CINERGY1200:
budget_av->reinitialise_demod = 1;
fe = dvb_attach(tda10046_attach, &philips_tu1216_config,
&budget_av->budget.i2c_adap);
if (fe) {
fe->ops.tuner_ops.init = philips_tu1216_tuner_init;
fe->ops.tuner_ops.set_params = philips_tu1216_tuner_set_params;
}
break;
}
if (fe == NULL) {
pr_err("A frontend driver was not found for device [%04x:%04x] subsystem [%04x:%04x]\n",
saa->pci->vendor,
saa->pci->device,
saa->pci->subsystem_vendor,
saa->pci->subsystem_device);
return;
}
budget_av->budget.dvb_frontend = fe;
if (dvb_register_frontend(&budget_av->budget.dvb_adapter,
budget_av->budget.dvb_frontend)) {
pr_err("Frontend registration failed!\n");
dvb_frontend_detach(budget_av->budget.dvb_frontend);
budget_av->budget.dvb_frontend = NULL;
}
}
static void budget_av_irq(struct saa7146_dev *dev, u32 * isr)
{
struct budget_av *budget_av = dev->ext_priv;
dprintk(8, "dev: %p, budget_av: %p\n", dev, budget_av);
if (*isr & MASK_10)
ttpci_budget_irq10_handler(dev, isr);
}
static int budget_av_detach(struct saa7146_dev *dev)
{
struct budget_av *budget_av = dev->ext_priv;
int err;
dprintk(2, "dev: %p\n", dev);
if (1 == budget_av->has_saa7113) {
saa7146_setgpio(dev, 0, SAA7146_GPIO_OUTLO);
msleep(200);
saa7146_unregister_device(&budget_av->vd, dev);
saa7146_vv_release(dev);
}
if (budget_av->budget.ci_present)
ciintf_deinit(budget_av);
if (budget_av->budget.dvb_frontend != NULL) {
dvb_unregister_frontend(budget_av->budget.dvb_frontend);
dvb_frontend_detach(budget_av->budget.dvb_frontend);
}
err = ttpci_budget_deinit(&budget_av->budget);
kfree(budget_av);
return err;
}
#define KNC1_INPUTS 2
static struct v4l2_input knc1_inputs[KNC1_INPUTS] = {
{ 0, "Composite", V4L2_INPUT_TYPE_TUNER, 1, 0,
V4L2_STD_PAL_BG | V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
{ 1, "S-Video", V4L2_INPUT_TYPE_CAMERA, 2, 0,
V4L2_STD_PAL_BG | V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
};
static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
{
dprintk(1, "VIDIOC_ENUMINPUT %d\n", i->index);
if (i->index >= KNC1_INPUTS)
return -EINVAL;
memcpy(i, &knc1_inputs[i->index], sizeof(struct v4l2_input));
return 0;
}
static int vidioc_g_input(struct file *file, void *fh, unsigned int *i)
{
struct saa7146_dev *dev = video_drvdata(file);
struct budget_av *budget_av = dev->ext_priv;
*i = budget_av->cur_input;
dprintk(1, "VIDIOC_G_INPUT %d\n", *i);
return 0;
}
static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
{
struct saa7146_dev *dev = video_drvdata(file);
struct budget_av *budget_av = dev->ext_priv;
dprintk(1, "VIDIOC_S_INPUT %d\n", input);
return saa7113_setinput(budget_av, input);
}
static struct saa7146_ext_vv vv_data;
static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info)
{
struct budget_av *budget_av;
u8 *mac;
int err;
dprintk(2, "dev: %p\n", dev);
if (!(budget_av = kzalloc(sizeof(struct budget_av), GFP_KERNEL)))
return -ENOMEM;
budget_av->has_saa7113 = 0;
budget_av->budget.ci_present = 0;
dev->ext_priv = budget_av;
err = ttpci_budget_init(&budget_av->budget, dev, info, THIS_MODULE,
adapter_nr);
if (err) {
kfree(budget_av);
return err;
}
/* knc1 initialization */
saa7146_write(dev, DD1_STREAM_B, 0x04000000);
saa7146_write(dev, DD1_INIT, 0x07000600);
saa7146_write(dev, MC2, MASK_09 | MASK_25 | MASK_10 | MASK_26);
if (saa7113_init(budget_av) == 0) {
budget_av->has_saa7113 = 1;
err = saa7146_vv_init(dev, &vv_data);
if (err != 0) {
/* fixme: proper cleanup here */
ERR("cannot init vv subsystem\n");
return err;
}
vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input;
vv_data.vid_ops.vidioc_g_input = vidioc_g_input;
vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_VIDEO))) {
/* fixme: proper cleanup here */
ERR("cannot register capture v4l2 device\n");
saa7146_vv_release(dev);
return err;
}
/* beware: this modifies dev->vv ... */
saa7146_set_hps_source_and_sync(dev, SAA7146_HPS_SOURCE_PORT_A,
SAA7146_HPS_SYNC_PORT_A);
saa7113_setinput(budget_av, 0);
}
/* fixme: find some sane values here... */
saa7146_write(dev, PCI_BT_V1, 0x1c00101f);
mac = budget_av->budget.dvb_adapter.proposed_mac;
if (i2c_readregs(&budget_av->budget.i2c_adap, 0xa0, 0x30, mac, 6)) {
pr_err("KNC1-%d: Could not read MAC from KNC1 card\n",
budget_av->budget.dvb_adapter.num);
eth_zero_addr(mac);
} else {
pr_info("KNC1-%d: MAC addr = %pM\n",
budget_av->budget.dvb_adapter.num, mac);
}
budget_av->budget.dvb_adapter.priv = budget_av;
frontend_init(budget_av);
ciintf_init(budget_av);
ttpci_budget_init_hooks(&budget_av->budget);
return 0;
}
static struct saa7146_standard standard[] = {
{.name = "PAL",.id = V4L2_STD_PAL,
.v_offset = 0x17,.v_field = 288,
.h_offset = 0x14,.h_pixels = 680,
.v_max_out = 576,.h_max_out = 768 },
{.name = "NTSC",.id = V4L2_STD_NTSC,
.v_offset = 0x16,.v_field = 240,
.h_offset = 0x06,.h_pixels = 708,
.v_max_out = 480,.h_max_out = 640, },
};
static struct saa7146_ext_vv vv_data = {
.inputs = 2,
	.capabilities = 0, // perhaps later: V4L2_CAP_VBI_CAPTURE, but that needs tweaking of the saa7113
.flags = 0,
.stds = &standard[0],
.num_stds = ARRAY_SIZE(standard),
};
static struct saa7146_extension budget_extension;
MAKE_BUDGET_INFO(knc1s, "KNC1 DVB-S", BUDGET_KNC1S);
MAKE_BUDGET_INFO(knc1s2,"KNC1 DVB-S2", BUDGET_KNC1S2);
MAKE_BUDGET_INFO(sates2,"Satelco EasyWatch DVB-S2", BUDGET_KNC1S2);
MAKE_BUDGET_INFO(knc1c, "KNC1 DVB-C", BUDGET_KNC1C);
MAKE_BUDGET_INFO(knc1t, "KNC1 DVB-T", BUDGET_KNC1T);
MAKE_BUDGET_INFO(kncxs, "KNC TV STAR DVB-S", BUDGET_TVSTAR);
MAKE_BUDGET_INFO(satewpls, "Satelco EasyWatch DVB-S light", BUDGET_TVSTAR);
MAKE_BUDGET_INFO(satewpls1, "Satelco EasyWatch DVB-S light", BUDGET_KNC1S);
MAKE_BUDGET_INFO(satewps, "Satelco EasyWatch DVB-S", BUDGET_KNC1S);
MAKE_BUDGET_INFO(satewplc, "Satelco EasyWatch DVB-C", BUDGET_KNC1CP);
MAKE_BUDGET_INFO(satewcmk3, "Satelco EasyWatch DVB-C MK3", BUDGET_KNC1C_MK3);
MAKE_BUDGET_INFO(satewt, "Satelco EasyWatch DVB-T", BUDGET_KNC1T);
MAKE_BUDGET_INFO(knc1sp, "KNC1 DVB-S Plus", BUDGET_KNC1SP);
MAKE_BUDGET_INFO(knc1spx4, "KNC1 DVB-S Plus X4", BUDGET_KNC1SP);
MAKE_BUDGET_INFO(knc1cp, "KNC1 DVB-C Plus", BUDGET_KNC1CP);
MAKE_BUDGET_INFO(knc1cmk3, "KNC1 DVB-C MK3", BUDGET_KNC1C_MK3);
MAKE_BUDGET_INFO(knc1ctda10024, "KNC1 DVB-C TDA10024", BUDGET_KNC1C_TDA10024);
MAKE_BUDGET_INFO(knc1cpmk3, "KNC1 DVB-C Plus MK3", BUDGET_KNC1CP_MK3);
MAKE_BUDGET_INFO(knc1tp, "KNC1 DVB-T Plus", BUDGET_KNC1TP);
MAKE_BUDGET_INFO(cin1200s, "TerraTec Cinergy 1200 DVB-S", BUDGET_CIN1200S);
MAKE_BUDGET_INFO(cin1200sn, "TerraTec Cinergy 1200 DVB-S", BUDGET_CIN1200S);
MAKE_BUDGET_INFO(cin1200c, "Terratec Cinergy 1200 DVB-C", BUDGET_CIN1200C);
MAKE_BUDGET_INFO(cin1200cmk3, "Terratec Cinergy 1200 DVB-C MK3", BUDGET_CIN1200C_MK3);
MAKE_BUDGET_INFO(cin1200t, "Terratec Cinergy 1200 DVB-T", BUDGET_CIN1200T);
static const struct pci_device_id pci_tbl[] = {
MAKE_EXTENSION_PCI(knc1s, 0x1131, 0x4f56),
MAKE_EXTENSION_PCI(knc1s, 0x1131, 0x0010),
MAKE_EXTENSION_PCI(knc1s, 0x1894, 0x0010),
MAKE_EXTENSION_PCI(knc1sp, 0x1131, 0x0011),
MAKE_EXTENSION_PCI(knc1sp, 0x1894, 0x0011),
MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0014),
MAKE_EXTENSION_PCI(knc1spx4, 0x1894, 0x0015),
MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0016),
MAKE_EXTENSION_PCI(knc1s2, 0x1894, 0x0018),
MAKE_EXTENSION_PCI(knc1s2, 0x1894, 0x0019),
MAKE_EXTENSION_PCI(sates2, 0x1894, 0x001d),
MAKE_EXTENSION_PCI(satewpls, 0x1894, 0x001e),
MAKE_EXTENSION_PCI(satewpls1, 0x1894, 0x001a),
MAKE_EXTENSION_PCI(satewps, 0x1894, 0x001b),
MAKE_EXTENSION_PCI(satewplc, 0x1894, 0x002a),
MAKE_EXTENSION_PCI(satewcmk3, 0x1894, 0x002c),
MAKE_EXTENSION_PCI(satewt, 0x1894, 0x003a),
MAKE_EXTENSION_PCI(knc1c, 0x1894, 0x0020),
MAKE_EXTENSION_PCI(knc1cp, 0x1894, 0x0021),
MAKE_EXTENSION_PCI(knc1cmk3, 0x1894, 0x0022),
MAKE_EXTENSION_PCI(knc1ctda10024, 0x1894, 0x0028),
MAKE_EXTENSION_PCI(knc1cpmk3, 0x1894, 0x0023),
MAKE_EXTENSION_PCI(knc1t, 0x1894, 0x0030),
MAKE_EXTENSION_PCI(knc1tp, 0x1894, 0x0031),
MAKE_EXTENSION_PCI(cin1200s, 0x153b, 0x1154),
MAKE_EXTENSION_PCI(cin1200sn, 0x153b, 0x1155),
MAKE_EXTENSION_PCI(cin1200c, 0x153b, 0x1156),
MAKE_EXTENSION_PCI(cin1200cmk3, 0x153b, 0x1176),
MAKE_EXTENSION_PCI(cin1200t, 0x153b, 0x1157),
{
.vendor = 0,
}
};
MODULE_DEVICE_TABLE(pci, pci_tbl);
static struct saa7146_extension budget_extension = {
.name = "budget_av",
.flags = SAA7146_USE_I2C_IRQ,
.pci_tbl = pci_tbl,
.module = THIS_MODULE,
.attach = budget_av_attach,
.detach = budget_av_detach,
.irq_mask = MASK_10,
.irq_func = budget_av_irq,
};
static int __init budget_av_init(void)
{
return saa7146_register_extension(&budget_extension);
}
static void __exit budget_av_exit(void)
{
saa7146_unregister_extension(&budget_extension);
}
module_init(budget_av_init);
module_exit(budget_av_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralph Metzler, Marcus Metzler, Michael Hunold, others");
MODULE_DESCRIPTION("driver for the SAA7146 based so-called budget PCI DVB w/ analog input and CI-module (e.g. the KNC cards)");
| linux-master | drivers/media/pci/ttpci/budget-av.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* budget-ci.c: driver for the SAA7146 based Budget DVB cards
*
* Compiled from various sources by Michael Hunold <[email protected]>
*
* msp430 IR support contributed by Jack Thomasson <[email protected]>
* partially based on the Siemens DVB driver by Ralph+Marcus Metzler
*
* CI interface support (c) 2004 Andrew de Quincey <[email protected]>
*
* the project's page is at https://linuxtv.org
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <media/rc-core.h>
#include "budget.h"
#include <media/dvb_ca_en50221.h>
#include "stv0299.h"
#include "stv0297.h"
#include "tda1004x.h"
#include "stb0899_drv.h"
#include "stb0899_reg.h"
#include "stb0899_cfg.h"
#include "stb6100.h"
#include "stb6100_cfg.h"
#include "lnbp21.h"
#include "bsbe1.h"
#include "bsru6.h"
#include "tda1002x.h"
#include "tda827x.h"
#include "bsbe1-d01a.h"
#define MODULE_NAME "budget_ci"
/*
* Regarding DEBIADDR_IR:
* Some CI modules hang if random addresses are read.
* Using address 0x4000 for the IR read means that we
* use the same address as for CI version, which should
* be a safe default.
*/
#define DEBIADDR_IR 0x4000
#define DEBIADDR_CICONTROL 0x0000
#define DEBIADDR_CIVERSION 0x4000
#define DEBIADDR_IO 0x1000
#define DEBIADDR_ATTR 0x3000
#define CICONTROL_RESET 0x01
#define CICONTROL_ENABLETS 0x02
#define CICONTROL_CAMDETECT 0x08
#define DEBICICTL 0x00420000
#define DEBICICAM 0x02420000
#define SLOTSTATUS_NONE 1
#define SLOTSTATUS_PRESENT 2
#define SLOTSTATUS_RESET 4
#define SLOTSTATUS_READY 8
#define SLOTSTATUS_OCCUPIED (SLOTSTATUS_PRESENT|SLOTSTATUS_RESET|SLOTSTATUS_READY)
/* RC5 device wildcard */
#define IR_DEVICE_ANY 255
static int rc5_device = -1;
module_param(rc5_device, int, 0644);
MODULE_PARM_DESC(rc5_device, "only accept IR commands for the given RC5 device (device = 0 - 31, any device = 255, default: autodetect)");
static int ir_debug;
module_param(ir_debug, int, 0644);
MODULE_PARM_DESC(ir_debug, "enable debugging information for IR decoding");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct budget_ci_ir {
struct rc_dev *dev;
struct tasklet_struct msp430_irq_tasklet;
char name[72]; /* 40 + 32 for (struct saa7146_dev).name */
char phys[32];
int rc5_device;
u32 ir_key;
bool have_command;
bool full_rc5; /* Outputs a full RC5 code */
};
struct budget_ci {
struct budget budget;
struct tasklet_struct ciintf_irq_tasklet;
int slot_status;
int ci_irq;
struct dvb_ca_en50221 ca;
struct budget_ci_ir ir;
u8 tuner_pll_address; /* used for philips_tdm1316l configs */
};
static void msp430_ir_interrupt(struct tasklet_struct *t)
{
struct budget_ci_ir *ir = from_tasklet(ir, t, msp430_irq_tasklet);
struct budget_ci *budget_ci = container_of(ir, typeof(*budget_ci), ir);
struct rc_dev *dev = budget_ci->ir.dev;
u32 command = ttpci_budget_debiread(&budget_ci->budget, DEBINOSWAP, DEBIADDR_IR, 2, 1, 0) >> 8;
/*
* The msp430 chip can generate two different bytes, command and device
*
* type1: X1CCCCCC, C = command bits (0 - 63)
* type2: X0TDDDDD, D = device bits (0 - 31), T = RC5 toggle bit
*
* Each signal from the remote control can generate one or more command
* bytes and one or more device bytes. For the repeated bytes, the
* highest bit (X) is set. The first command byte is always generated
* before the first device byte. Other than that, no specific order
* seems to apply. To make life interesting, bytes can also be lost.
*
* Only when we have a command and device byte, a keypress is
* generated.
*/
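	/*
	 * Illustrative walk-through of the decode logic below (example bytes
	 * are made up for clarity, not taken from a real capture):
	 *
	 *   raw byte 0x52 (01010010b): bit 6 set -> command byte,
	 *                              ir_key = 0x52 & 0x3f = 0x12
	 *   raw byte 0xbe (10111110b): repeat bit (bit 7) stripped -> 0x3e,
	 *                              bit 6 clear -> device byte,
	 *                              device = 0x3e & 0x1f = 0x1e,
	 *                              toggle = !!(0x3e & 0x20) = 1
	 *
	 * Once both halves have been seen, a keypress for device 0x1e /
	 * key 0x12 is reported via rc_keydown().
	 */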
if (ir_debug)
printk("budget_ci: received byte 0x%02x\n", command);
/* Remove repeat bit, we use every command */
command = command & 0x7f;
/* Is this a RC5 command byte? */
if (command & 0x40) {
budget_ci->ir.have_command = true;
budget_ci->ir.ir_key = command & 0x3f;
return;
}
/* It's a RC5 device byte */
if (!budget_ci->ir.have_command)
return;
budget_ci->ir.have_command = false;
if (budget_ci->ir.rc5_device != IR_DEVICE_ANY &&
budget_ci->ir.rc5_device != (command & 0x1f))
return;
if (budget_ci->ir.full_rc5) {
rc_keydown(dev, RC_PROTO_RC5,
RC_SCANCODE_RC5(budget_ci->ir.rc5_device, budget_ci->ir.ir_key),
!!(command & 0x20));
return;
}
/* FIXME: We should generate complete scancodes for all devices */
rc_keydown(dev, RC_PROTO_UNKNOWN, budget_ci->ir.ir_key,
!!(command & 0x20));
}
static int msp430_ir_init(struct budget_ci *budget_ci)
{
struct saa7146_dev *saa = budget_ci->budget.dev;
struct rc_dev *dev;
int error;
dev = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!dev) {
printk(KERN_ERR "budget_ci: IR interface initialisation failed\n");
return -ENOMEM;
}
snprintf(budget_ci->ir.name, sizeof(budget_ci->ir.name),
"Budget-CI dvb ir receiver %s", saa->name);
snprintf(budget_ci->ir.phys, sizeof(budget_ci->ir.phys),
"pci-%s/ir0", pci_name(saa->pci));
dev->driver_name = MODULE_NAME;
dev->device_name = budget_ci->ir.name;
dev->input_phys = budget_ci->ir.phys;
dev->input_id.bustype = BUS_PCI;
dev->input_id.version = 1;
if (saa->pci->subsystem_vendor) {
dev->input_id.vendor = saa->pci->subsystem_vendor;
dev->input_id.product = saa->pci->subsystem_device;
} else {
dev->input_id.vendor = saa->pci->vendor;
dev->input_id.product = saa->pci->device;
}
dev->dev.parent = &saa->pci->dev;
if (rc5_device < 0)
budget_ci->ir.rc5_device = IR_DEVICE_ANY;
else
budget_ci->ir.rc5_device = rc5_device;
/* Select keymap and address */
switch (budget_ci->budget.dev->pci->subsystem_device) {
case 0x100c:
case 0x100f:
case 0x1011:
case 0x1012:
/* The hauppauge keymap is a superset of these remotes */
dev->map_name = RC_MAP_HAUPPAUGE;
budget_ci->ir.full_rc5 = true;
if (rc5_device < 0)
budget_ci->ir.rc5_device = 0x1f;
break;
case 0x1010:
case 0x1017:
case 0x1019:
case 0x101a:
case 0x101b:
/* for the Technotrend 1500 bundled remote */
dev->map_name = RC_MAP_TT_1500;
break;
default:
/* unknown remote */
dev->map_name = RC_MAP_BUDGET_CI_OLD;
break;
}
if (!budget_ci->ir.full_rc5)
dev->scancode_mask = 0xff;
error = rc_register_device(dev);
if (error) {
printk(KERN_ERR "budget_ci: could not init driver for IR device (code %d)\n", error);
rc_free_device(dev);
return error;
}
budget_ci->ir.dev = dev;
tasklet_setup(&budget_ci->ir.msp430_irq_tasklet, msp430_ir_interrupt);
SAA7146_IER_ENABLE(saa, MASK_06);
saa7146_setgpio(saa, 3, SAA7146_GPIO_IRQHI);
return 0;
}
static void msp430_ir_deinit(struct budget_ci *budget_ci)
{
struct saa7146_dev *saa = budget_ci->budget.dev;
SAA7146_IER_DISABLE(saa, MASK_06);
saa7146_setgpio(saa, 3, SAA7146_GPIO_INPUT);
tasklet_kill(&budget_ci->ir.msp430_irq_tasklet);
rc_unregister_device(budget_ci->ir.dev);
}
static int ciintf_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address)
{
struct budget_ci *budget_ci = ca->data;
if (slot != 0)
return -EINVAL;
return ttpci_budget_debiread(&budget_ci->budget, DEBICICAM,
DEBIADDR_ATTR | (address & 0xfff), 1, 1, 0);
}
static int ciintf_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address, u8 value)
{
struct budget_ci *budget_ci = ca->data;
if (slot != 0)
return -EINVAL;
return ttpci_budget_debiwrite(&budget_ci->budget, DEBICICAM,
DEBIADDR_ATTR | (address & 0xfff), 1, value, 1, 0);
}
static int ciintf_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address)
{
struct budget_ci *budget_ci = ca->data;
if (slot != 0)
return -EINVAL;
return ttpci_budget_debiread(&budget_ci->budget, DEBICICAM,
DEBIADDR_IO | (address & 3), 1, 1, 0);
}
static int ciintf_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address, u8 value)
{
struct budget_ci *budget_ci = ca->data;
if (slot != 0)
return -EINVAL;
return ttpci_budget_debiwrite(&budget_ci->budget, DEBICICAM,
DEBIADDR_IO | (address & 3), 1, value, 1, 0);
}
static int ciintf_slot_reset(struct dvb_ca_en50221 *ca, int slot)
{
struct budget_ci *budget_ci = ca->data;
struct saa7146_dev *saa = budget_ci->budget.dev;
if (slot != 0)
return -EINVAL;
if (budget_ci->ci_irq) {
// trigger on RISING edge during reset so we know when READY is re-asserted
saa7146_setgpio(saa, 0, SAA7146_GPIO_IRQHI);
}
budget_ci->slot_status = SLOTSTATUS_RESET;
ttpci_budget_debiwrite(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1, 0, 1, 0);
msleep(1);
ttpci_budget_debiwrite(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1,
CICONTROL_RESET, 1, 0);
saa7146_setgpio(saa, 1, SAA7146_GPIO_OUTHI);
ttpci_budget_set_video_port(saa, BUDGET_VIDEO_PORTB);
return 0;
}
static int ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
{
struct budget_ci *budget_ci = ca->data;
struct saa7146_dev *saa = budget_ci->budget.dev;
if (slot != 0)
return -EINVAL;
saa7146_setgpio(saa, 1, SAA7146_GPIO_OUTHI);
ttpci_budget_set_video_port(saa, BUDGET_VIDEO_PORTB);
return 0;
}
static int ciintf_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
{
struct budget_ci *budget_ci = ca->data;
struct saa7146_dev *saa = budget_ci->budget.dev;
int tmp;
if (slot != 0)
return -EINVAL;
saa7146_setgpio(saa, 1, SAA7146_GPIO_OUTLO);
tmp = ttpci_budget_debiread(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1, 1, 0);
ttpci_budget_debiwrite(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1,
tmp | CICONTROL_ENABLETS, 1, 0);
ttpci_budget_set_video_port(saa, BUDGET_VIDEO_PORTA);
return 0;
}
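/*
 * Summary of the CAM slot state handling in ciintf_interrupt() below,
 * derived from the code for readability (not from a datasheet):
 *
 *   CAMDETECT set:   NONE  -> PRESENT  (insertion  -> camchange IRQ)
 *                    RESET -> READY    (reset done -> camready IRQ)
 *                    READY -> READY    (data ready -> FR/DA IRQ)
 *   CAMDETECT clear: any occupied state -> NONE (removal -> camchange IRQ)
 */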
static void ciintf_interrupt(struct tasklet_struct *t)
{
struct budget_ci *budget_ci = from_tasklet(budget_ci, t,
ciintf_irq_tasklet);
struct saa7146_dev *saa = budget_ci->budget.dev;
unsigned int flags;
// ensure we don't get spurious IRQs during initialisation
if (!budget_ci->budget.ci_present)
return;
// read the CAM status
flags = ttpci_budget_debiread(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1, 1, 0);
if (flags & CICONTROL_CAMDETECT) {
// GPIO should be set to trigger on falling edge if a CAM is present
saa7146_setgpio(saa, 0, SAA7146_GPIO_IRQLO);
if (budget_ci->slot_status & SLOTSTATUS_NONE) {
// CAM insertion IRQ
budget_ci->slot_status = SLOTSTATUS_PRESENT;
dvb_ca_en50221_camchange_irq(&budget_ci->ca, 0,
DVB_CA_EN50221_CAMCHANGE_INSERTED);
} else if (budget_ci->slot_status & SLOTSTATUS_RESET) {
// CAM ready (reset completed)
budget_ci->slot_status = SLOTSTATUS_READY;
dvb_ca_en50221_camready_irq(&budget_ci->ca, 0);
} else if (budget_ci->slot_status & SLOTSTATUS_READY) {
// FR/DA IRQ
dvb_ca_en50221_frda_irq(&budget_ci->ca, 0);
}
} else {
// trigger on rising edge if a CAM is not present - when a CAM is inserted, we
// only want to get the IRQ when it sets READY. If we trigger on the falling edge,
// the CAM might not actually be ready yet.
saa7146_setgpio(saa, 0, SAA7146_GPIO_IRQHI);
// generate a CAM removal IRQ if we haven't already
if (budget_ci->slot_status & SLOTSTATUS_OCCUPIED) {
// CAM removal IRQ
budget_ci->slot_status = SLOTSTATUS_NONE;
dvb_ca_en50221_camchange_irq(&budget_ci->ca, 0,
DVB_CA_EN50221_CAMCHANGE_REMOVED);
}
}
}
static int ciintf_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
{
struct budget_ci *budget_ci = ca->data;
unsigned int flags;
// ensure we don't get spurious IRQs during initialisation
if (!budget_ci->budget.ci_present)
return -EINVAL;
// read the CAM status
flags = ttpci_budget_debiread(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1, 1, 0);
if (flags & CICONTROL_CAMDETECT) {
// mark it as present if it wasn't before
if (budget_ci->slot_status & SLOTSTATUS_NONE) {
budget_ci->slot_status = SLOTSTATUS_PRESENT;
}
// during a RESET, we check if we can read from IO memory to see when CAM is ready
if (budget_ci->slot_status & SLOTSTATUS_RESET) {
if (ciintf_read_attribute_mem(ca, slot, 0) == 0x1d) {
budget_ci->slot_status = SLOTSTATUS_READY;
}
}
} else {
budget_ci->slot_status = SLOTSTATUS_NONE;
}
if (budget_ci->slot_status != SLOTSTATUS_NONE) {
if (budget_ci->slot_status & SLOTSTATUS_READY) {
return DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY;
}
return DVB_CA_EN50221_POLL_CAM_PRESENT;
}
return 0;
}
static int ciintf_init(struct budget_ci *budget_ci)
{
struct saa7146_dev *saa = budget_ci->budget.dev;
int flags;
int result;
int ci_version;
int ca_flags;
memset(&budget_ci->ca, 0, sizeof(struct dvb_ca_en50221));
// enable DEBI pins
saa7146_write(saa, MC1, MASK_27 | MASK_11);
// test if it is there
ci_version = ttpci_budget_debiread(&budget_ci->budget, DEBICICTL, DEBIADDR_CIVERSION, 1, 1, 0);
if ((ci_version & 0xa0) != 0xa0) {
result = -ENODEV;
goto error;
}
// determine whether a CAM is present or not
flags = ttpci_budget_debiread(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1, 1, 0);
budget_ci->slot_status = SLOTSTATUS_NONE;
if (flags & CICONTROL_CAMDETECT)
budget_ci->slot_status = SLOTSTATUS_PRESENT;
// version 0xa2 of the CI firmware doesn't generate interrupts
if (ci_version == 0xa2) {
ca_flags = 0;
budget_ci->ci_irq = 0;
} else {
ca_flags = DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE |
DVB_CA_EN50221_FLAG_IRQ_FR |
DVB_CA_EN50221_FLAG_IRQ_DA;
budget_ci->ci_irq = 1;
}
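	/*
	 * In practice: a module reporting CI version 0xa2 is handled by
	 * polling through ciintf_poll_slot_status(); any other detected
	 * version is driven by the slot-change/FR/DA interrupts.
	 */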
// register CI interface
budget_ci->ca.owner = THIS_MODULE;
budget_ci->ca.read_attribute_mem = ciintf_read_attribute_mem;
budget_ci->ca.write_attribute_mem = ciintf_write_attribute_mem;
budget_ci->ca.read_cam_control = ciintf_read_cam_control;
budget_ci->ca.write_cam_control = ciintf_write_cam_control;
budget_ci->ca.slot_reset = ciintf_slot_reset;
budget_ci->ca.slot_shutdown = ciintf_slot_shutdown;
budget_ci->ca.slot_ts_enable = ciintf_slot_ts_enable;
budget_ci->ca.poll_slot_status = ciintf_poll_slot_status;
budget_ci->ca.data = budget_ci;
if ((result = dvb_ca_en50221_init(&budget_ci->budget.dvb_adapter,
&budget_ci->ca,
ca_flags, 1)) != 0) {
printk("budget_ci: CI interface detected, but initialisation failed.\n");
goto error;
}
// Setup CI slot IRQ
if (budget_ci->ci_irq) {
tasklet_setup(&budget_ci->ciintf_irq_tasklet, ciintf_interrupt);
if (budget_ci->slot_status != SLOTSTATUS_NONE) {
saa7146_setgpio(saa, 0, SAA7146_GPIO_IRQLO);
} else {
saa7146_setgpio(saa, 0, SAA7146_GPIO_IRQHI);
}
SAA7146_IER_ENABLE(saa, MASK_03);
}
// enable interface
ttpci_budget_debiwrite(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1,
CICONTROL_RESET, 1, 0);
// success!
printk("budget_ci: CI interface initialised\n");
budget_ci->budget.ci_present = 1;
// forge a fake CI IRQ so the CAM state is setup correctly
if (budget_ci->ci_irq) {
flags = DVB_CA_EN50221_CAMCHANGE_REMOVED;
if (budget_ci->slot_status != SLOTSTATUS_NONE)
flags = DVB_CA_EN50221_CAMCHANGE_INSERTED;
dvb_ca_en50221_camchange_irq(&budget_ci->ca, 0, flags);
}
return 0;
error:
saa7146_write(saa, MC1, MASK_27);
return result;
}
static void ciintf_deinit(struct budget_ci *budget_ci)
{
struct saa7146_dev *saa = budget_ci->budget.dev;
// disable CI interrupts
if (budget_ci->ci_irq) {
SAA7146_IER_DISABLE(saa, MASK_03);
saa7146_setgpio(saa, 0, SAA7146_GPIO_INPUT);
tasklet_kill(&budget_ci->ciintf_irq_tasklet);
}
// reset interface
ttpci_budget_debiwrite(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1, 0, 1, 0);
msleep(1);
ttpci_budget_debiwrite(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1,
CICONTROL_RESET, 1, 0);
// disable TS data stream to CI interface
saa7146_setgpio(saa, 1, SAA7146_GPIO_INPUT);
// release the CA device
dvb_ca_en50221_release(&budget_ci->ca);
// disable DEBI pins
saa7146_write(saa, MC1, MASK_27);
}
static void budget_ci_irq(struct saa7146_dev *dev, u32 * isr)
{
struct budget_ci *budget_ci = dev->ext_priv;
dprintk(8, "dev: %p, budget_ci: %p\n", dev, budget_ci);
if (*isr & MASK_06)
tasklet_schedule(&budget_ci->ir.msp430_irq_tasklet);
if (*isr & MASK_10)
ttpci_budget_irq10_handler(dev, isr);
if ((*isr & MASK_03) && (budget_ci->budget.ci_present) && (budget_ci->ci_irq))
tasklet_schedule(&budget_ci->ciintf_irq_tasklet);
}
static u8 philips_su1278_tt_inittab[] = {
0x01, 0x0f,
0x02, 0x30,
0x03, 0x00,
0x04, 0x5b,
0x05, 0x85,
0x06, 0x02,
0x07, 0x00,
0x08, 0x02,
0x09, 0x00,
0x0C, 0x01,
0x0D, 0x81,
0x0E, 0x44,
0x0f, 0x14,
0x10, 0x3c,
0x11, 0x84,
0x12, 0xda,
0x13, 0x97,
0x14, 0x95,
0x15, 0xc9,
0x16, 0x19,
0x17, 0x8c,
0x18, 0x59,
0x19, 0xf8,
0x1a, 0xfe,
0x1c, 0x7f,
0x1d, 0x00,
0x1e, 0x00,
0x1f, 0x50,
0x20, 0x00,
0x21, 0x00,
0x22, 0x00,
0x23, 0x00,
0x28, 0x00,
0x29, 0x28,
0x2a, 0x14,
0x2b, 0x0f,
0x2c, 0x09,
0x2d, 0x09,
0x31, 0x1f,
0x32, 0x19,
0x33, 0xfc,
0x34, 0x93,
0xff, 0xff
};
static int philips_su1278_tt_set_symbol_rate(struct dvb_frontend *fe, u32 srate, u32 ratio)
{
stv0299_writereg(fe, 0x0e, 0x44);
if (srate >= 10000000) {
stv0299_writereg(fe, 0x13, 0x97);
stv0299_writereg(fe, 0x14, 0x95);
stv0299_writereg(fe, 0x15, 0xc9);
stv0299_writereg(fe, 0x17, 0x8c);
stv0299_writereg(fe, 0x1a, 0xfe);
stv0299_writereg(fe, 0x1c, 0x7f);
stv0299_writereg(fe, 0x2d, 0x09);
} else {
stv0299_writereg(fe, 0x13, 0x99);
stv0299_writereg(fe, 0x14, 0x8d);
stv0299_writereg(fe, 0x15, 0xce);
stv0299_writereg(fe, 0x17, 0x43);
stv0299_writereg(fe, 0x1a, 0x1d);
stv0299_writereg(fe, 0x1c, 0x12);
stv0299_writereg(fe, 0x2d, 0x05);
}
stv0299_writereg(fe, 0x0e, 0x23);
stv0299_writereg(fe, 0x0f, 0x94);
stv0299_writereg(fe, 0x10, 0x39);
stv0299_writereg(fe, 0x15, 0xc9);
stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff);
stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff);
stv0299_writereg(fe, 0x21, (ratio) & 0xf0);
return 0;
}
static int philips_su1278_tt_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct budget_ci *budget_ci = fe->dvb->priv;
u32 div;
u8 buf[4];
struct i2c_msg msg = {.addr = 0x60,.flags = 0,.buf = buf,.len = sizeof(buf) };
if ((p->frequency < 950000) || (p->frequency > 2150000))
return -EINVAL;
div = (p->frequency + (500 - 1)) / 500; /* round correctly */
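	/*
	 * Adding (500 - 1) makes the integer division round up to the next
	 * 500 kHz step. Illustrative example: 1550100 kHz gives
	 * (1550100 + 499) / 500 = 3101, while an exact multiple such as
	 * 1550000 kHz gives 3100.
	 */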
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
buf[2] = 0x80 | ((div & 0x18000) >> 10) | 2;
buf[3] = 0x20;
if (p->symbol_rate < 4000000)
buf[3] |= 1;
if (p->frequency < 1250000)
buf[3] |= 0;
else if (p->frequency < 1550000)
buf[3] |= 0x40;
else if (p->frequency < 2050000)
buf[3] |= 0x80;
else if (p->frequency < 2150000)
buf[3] |= 0xC0;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget_ci->budget.i2c_adap, &msg, 1) != 1)
return -EIO;
return 0;
}
static const struct stv0299_config philips_su1278_tt_config = {
.demod_address = 0x68,
.inittab = philips_su1278_tt_inittab,
.mclk = 64000000UL,
.invert = 0,
.skip_reinit = 1,
.lock_output = STV0299_LOCKOUTPUT_1,
.volt13_op0_op1 = STV0299_VOLT13_OP1,
.min_delay_ms = 50,
.set_symbol_rate = philips_su1278_tt_set_symbol_rate,
};
static int philips_tdm1316l_tuner_init(struct dvb_frontend *fe)
{
struct budget_ci *budget_ci = fe->dvb->priv;
static u8 td1316_init[] = { 0x0b, 0xf5, 0x85, 0xab };
static u8 disable_mc44BC374c[] = { 0x1d, 0x74, 0xa0, 0x68 };
	struct i2c_msg tuner_msg = {
		.addr = budget_ci->tuner_pll_address,
		.flags = 0,
		.buf = td1316_init,
		.len = sizeof(td1316_init)
	};
// setup PLL configuration
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget_ci->budget.i2c_adap, &tuner_msg, 1) != 1)
return -EIO;
msleep(1);
// disable the mc44BC374c (do not check for errors)
tuner_msg.addr = 0x65;
tuner_msg.buf = disable_mc44BC374c;
tuner_msg.len = sizeof(disable_mc44BC374c);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget_ci->budget.i2c_adap, &tuner_msg, 1) != 1) {
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
i2c_transfer(&budget_ci->budget.i2c_adap, &tuner_msg, 1);
}
return 0;
}
static int philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct budget_ci *budget_ci = fe->dvb->priv;
u8 tuner_buf[4];
	struct i2c_msg tuner_msg = {
		.addr = budget_ci->tuner_pll_address,
		.flags = 0,
		.buf = tuner_buf,
		.len = sizeof(tuner_buf)
	};
int tuner_frequency = 0;
u8 band, cp, filter;
// determine charge pump
tuner_frequency = p->frequency + 36130000;
if (tuner_frequency < 87000000)
return -EINVAL;
else if (tuner_frequency < 130000000)
cp = 3;
else if (tuner_frequency < 160000000)
cp = 5;
else if (tuner_frequency < 200000000)
cp = 6;
else if (tuner_frequency < 290000000)
cp = 3;
else if (tuner_frequency < 420000000)
cp = 5;
else if (tuner_frequency < 480000000)
cp = 6;
else if (tuner_frequency < 620000000)
cp = 3;
else if (tuner_frequency < 830000000)
cp = 5;
else if (tuner_frequency < 895000000)
cp = 7;
else
return -EINVAL;
// determine band
if (p->frequency < 49000000)
return -EINVAL;
else if (p->frequency < 159000000)
band = 1;
else if (p->frequency < 444000000)
band = 2;
else if (p->frequency < 861000000)
band = 4;
else
return -EINVAL;
// setup PLL filter and TDA9889
switch (p->bandwidth_hz) {
case 6000000:
tda1004x_writereg(fe, 0x0C, 0x14);
filter = 0;
break;
case 7000000:
tda1004x_writereg(fe, 0x0C, 0x80);
filter = 0;
break;
case 8000000:
tda1004x_writereg(fe, 0x0C, 0x14);
filter = 1;
break;
default:
return -EINVAL;
}
// calculate divisor
// ((36130000+((1000000/6)/2)) + Finput)/(1000000/6)
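	// Illustrative example: p->frequency = 482000000 Hz gives
	// (482000 * 6 + 217280) / 1000 = 3109 programmed into the PLL
	// (one step = 1000000/6 Hz ~= 166.667 kHz).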
tuner_frequency = (((p->frequency / 1000) * 6) + 217280) / 1000;
// setup tuner buffer
tuner_buf[0] = tuner_frequency >> 8;
tuner_buf[1] = tuner_frequency & 0xff;
tuner_buf[2] = 0xca;
tuner_buf[3] = (cp << 5) | (filter << 3) | band;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget_ci->budget.i2c_adap, &tuner_msg, 1) != 1)
return -EIO;
msleep(1);
return 0;
}
static int philips_tdm1316l_request_firmware(struct dvb_frontend *fe,
const struct firmware **fw, char *name)
{
struct budget_ci *budget_ci = fe->dvb->priv;
return request_firmware(fw, name, &budget_ci->budget.dev->pci->dev);
}
static struct tda1004x_config philips_tdm1316l_config = {
.demod_address = 0x8,
.invert = 0,
.invert_oclk = 0,
.xtal_freq = TDA10046_XTAL_4M,
.agc_config = TDA10046_AGC_DEFAULT,
.if_freq = TDA10046_FREQ_3617,
.request_firmware = philips_tdm1316l_request_firmware,
};
static struct tda1004x_config philips_tdm1316l_config_invert = {
.demod_address = 0x8,
.invert = 1,
.invert_oclk = 0,
.xtal_freq = TDA10046_XTAL_4M,
.agc_config = TDA10046_AGC_DEFAULT,
.if_freq = TDA10046_FREQ_3617,
.request_firmware = philips_tdm1316l_request_firmware,
};
static int dvbc_philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct budget_ci *budget_ci = fe->dvb->priv;
u8 tuner_buf[5];
struct i2c_msg tuner_msg = {.addr = budget_ci->tuner_pll_address,
.flags = 0,
.buf = tuner_buf,
.len = sizeof(tuner_buf) };
int tuner_frequency = 0;
u8 band, cp, filter;
// determine charge pump
tuner_frequency = p->frequency + 36125000;
if (tuner_frequency < 87000000)
return -EINVAL;
else if (tuner_frequency < 130000000) {
cp = 3;
band = 1;
} else if (tuner_frequency < 160000000) {
cp = 5;
band = 1;
} else if (tuner_frequency < 200000000) {
cp = 6;
band = 1;
} else if (tuner_frequency < 290000000) {
cp = 3;
band = 2;
} else if (tuner_frequency < 420000000) {
cp = 5;
band = 2;
} else if (tuner_frequency < 480000000) {
cp = 6;
band = 2;
} else if (tuner_frequency < 620000000) {
cp = 3;
band = 4;
} else if (tuner_frequency < 830000000) {
cp = 5;
band = 4;
} else if (tuner_frequency < 895000000) {
cp = 7;
band = 4;
} else
return -EINVAL;
// assume PLL filter should always be 8MHz for the moment.
filter = 1;
// calculate divisor
tuner_frequency = (p->frequency + 36125000 + (62500/2)) / 62500;
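	// Illustrative example: p->frequency = 346000000 Hz gives
	// (346000000 + 36125000 + 31250) / 62500 = 6114, i.e. an LO of
	// 6114 * 62.5 kHz = 382.125 MHz, 36.125 MHz above the input.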
// setup tuner buffer
tuner_buf[0] = tuner_frequency >> 8;
tuner_buf[1] = tuner_frequency & 0xff;
tuner_buf[2] = 0xc8;
tuner_buf[3] = (cp << 5) | (filter << 3) | band;
tuner_buf[4] = 0x80;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget_ci->budget.i2c_adap, &tuner_msg, 1) != 1)
return -EIO;
msleep(50);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget_ci->budget.i2c_adap, &tuner_msg, 1) != 1)
return -EIO;
msleep(1);
return 0;
}
static u8 dvbc_philips_tdm1316l_inittab[] = {
0x80, 0x01,
0x80, 0x00,
0x81, 0x01,
0x81, 0x00,
0x00, 0x09,
0x01, 0x69,
0x03, 0x00,
0x04, 0x00,
0x07, 0x00,
0x08, 0x00,
0x20, 0x00,
0x21, 0x40,
0x22, 0x00,
0x23, 0x00,
0x24, 0x40,
0x25, 0x88,
0x30, 0xff,
0x31, 0x00,
0x32, 0xff,
0x33, 0x00,
0x34, 0x50,
0x35, 0x7f,
0x36, 0x00,
0x37, 0x20,
0x38, 0x00,
0x40, 0x1c,
0x41, 0xff,
0x42, 0x29,
0x43, 0x20,
0x44, 0xff,
0x45, 0x00,
0x46, 0x00,
0x49, 0x04,
0x4a, 0x00,
0x4b, 0x7b,
0x52, 0x30,
0x55, 0xae,
0x56, 0x47,
0x57, 0xe1,
0x58, 0x3a,
0x5a, 0x1e,
0x5b, 0x34,
0x60, 0x00,
0x63, 0x00,
0x64, 0x00,
0x65, 0x00,
0x66, 0x00,
0x67, 0x00,
0x68, 0x00,
0x69, 0x00,
0x6a, 0x02,
0x6b, 0x00,
0x70, 0xff,
0x71, 0x00,
0x72, 0x00,
0x73, 0x00,
0x74, 0x0c,
0x80, 0x00,
0x81, 0x00,
0x82, 0x00,
0x83, 0x00,
0x84, 0x04,
0x85, 0x80,
0x86, 0x24,
0x87, 0x78,
0x88, 0x10,
0x89, 0x00,
0x90, 0x01,
0x91, 0x01,
0xa0, 0x04,
0xa1, 0x00,
0xa2, 0x00,
0xb0, 0x91,
0xb1, 0x0b,
0xc0, 0x53,
0xc1, 0x70,
0xc2, 0x12,
0xd0, 0x00,
0xd1, 0x00,
0xd2, 0x00,
0xd3, 0x00,
0xd4, 0x00,
0xd5, 0x00,
0xde, 0x00,
0xdf, 0x00,
0x61, 0x38,
0x62, 0x0a,
0x53, 0x13,
0x59, 0x08,
0xff, 0xff,
};
static struct stv0297_config dvbc_philips_tdm1316l_config = {
.demod_address = 0x1c,
.inittab = dvbc_philips_tdm1316l_inittab,
.invert = 0,
.stop_during_read = 1,
};
static struct tda10023_config tda10023_config = {
.demod_address = 0xc,
.invert = 0,
.xtal = 16000000,
.pll_m = 11,
.pll_p = 3,
.pll_n = 1,
.deltaf = 0xa511,
};
static struct tda827x_config tda827x_config = {
.config = 0,
};
/* TT S2-3200 DVB-S (STB0899) Inittab */
static const struct stb0899_s1_reg tt3200_stb0899_s1_init_1[] = {
{ STB0899_DEV_ID , 0x81 },
{ STB0899_DISCNTRL1 , 0x32 },
{ STB0899_DISCNTRL2 , 0x80 },
{ STB0899_DISRX_ST0 , 0x04 },
{ STB0899_DISRX_ST1 , 0x00 },
{ STB0899_DISPARITY , 0x00 },
{ STB0899_DISSTATUS , 0x20 },
{ STB0899_DISF22 , 0x8c },
{ STB0899_DISF22RX , 0x9a },
{ STB0899_SYSREG , 0x0b },
{ STB0899_ACRPRESC , 0x11 },
{ STB0899_ACRDIV1 , 0x0a },
{ STB0899_ACRDIV2 , 0x05 },
{ STB0899_DACR1 , 0x00 },
{ STB0899_DACR2 , 0x00 },
{ STB0899_OUTCFG , 0x00 },
{ STB0899_MODECFG , 0x00 },
{ STB0899_IRQSTATUS_3 , 0x30 },
{ STB0899_IRQSTATUS_2 , 0x00 },
{ STB0899_IRQSTATUS_1 , 0x00 },
{ STB0899_IRQSTATUS_0 , 0x00 },
{ STB0899_IRQMSK_3 , 0xf3 },
{ STB0899_IRQMSK_2 , 0xfc },
{ STB0899_IRQMSK_1 , 0xff },
{ STB0899_IRQMSK_0 , 0xff },
{ STB0899_IRQCFG , 0x00 },
{ STB0899_I2CCFG , 0x88 },
{ STB0899_I2CRPT , 0x48 }, /* 12k Pullup, Repeater=16, Stop=disabled */
{ STB0899_IOPVALUE5 , 0x00 },
{ STB0899_IOPVALUE4 , 0x20 },
{ STB0899_IOPVALUE3 , 0xc9 },
{ STB0899_IOPVALUE2 , 0x90 },
{ STB0899_IOPVALUE1 , 0x40 },
{ STB0899_IOPVALUE0 , 0x00 },
{ STB0899_GPIO00CFG , 0x82 },
{ STB0899_GPIO01CFG , 0x82 },
{ STB0899_GPIO02CFG , 0x82 },
{ STB0899_GPIO03CFG , 0x82 },
{ STB0899_GPIO04CFG , 0x82 },
{ STB0899_GPIO05CFG , 0x82 },
{ STB0899_GPIO06CFG , 0x82 },
{ STB0899_GPIO07CFG , 0x82 },
{ STB0899_GPIO08CFG , 0x82 },
{ STB0899_GPIO09CFG , 0x82 },
{ STB0899_GPIO10CFG , 0x82 },
{ STB0899_GPIO11CFG , 0x82 },
{ STB0899_GPIO12CFG , 0x82 },
{ STB0899_GPIO13CFG , 0x82 },
{ STB0899_GPIO14CFG , 0x82 },
{ STB0899_GPIO15CFG , 0x82 },
{ STB0899_GPIO16CFG , 0x82 },
{ STB0899_GPIO17CFG , 0x82 },
{ STB0899_GPIO18CFG , 0x82 },
{ STB0899_GPIO19CFG , 0x82 },
{ STB0899_GPIO20CFG , 0x82 },
{ STB0899_SDATCFG , 0xb8 },
{ STB0899_SCLTCFG , 0xba },
{ STB0899_AGCRFCFG , 0x1c }, /* 0x11 */
{ STB0899_GPIO22 , 0x82 }, /* AGCBB2CFG */
{ STB0899_GPIO21 , 0x91 }, /* AGCBB1CFG */
{ STB0899_DIRCLKCFG , 0x82 },
{ STB0899_CLKOUT27CFG , 0x7e },
{ STB0899_STDBYCFG , 0x82 },
{ STB0899_CS0CFG , 0x82 },
{ STB0899_CS1CFG , 0x82 },
{ STB0899_DISEQCOCFG , 0x20 },
{ STB0899_GPIO32CFG , 0x82 },
{ STB0899_GPIO33CFG , 0x82 },
{ STB0899_GPIO34CFG , 0x82 },
{ STB0899_GPIO35CFG , 0x82 },
{ STB0899_GPIO36CFG , 0x82 },
{ STB0899_GPIO37CFG , 0x82 },
{ STB0899_GPIO38CFG , 0x82 },
{ STB0899_GPIO39CFG , 0x82 },
{ STB0899_NCOARSE , 0x15 }, /* 0x15 = 27 Mhz Clock, F/3 = 198MHz, F/6 = 99MHz */
{ STB0899_SYNTCTRL , 0x02 }, /* 0x00 = CLK from CLKI, 0x02 = CLK from XTALI */
{ STB0899_FILTCTRL , 0x00 },
{ STB0899_SYSCTRL , 0x00 },
{ STB0899_STOPCLK1 , 0x20 },
{ STB0899_STOPCLK2 , 0x00 },
{ STB0899_INTBUFSTATUS , 0x00 },
{ STB0899_INTBUFCTRL , 0x0a },
{ 0xffff , 0xff },
};
static const struct stb0899_s1_reg tt3200_stb0899_s1_init_3[] = {
{ STB0899_DEMOD , 0x00 },
{ STB0899_RCOMPC , 0xc9 },
{ STB0899_AGC1CN , 0x41 },
{ STB0899_AGC1REF , 0x10 },
{ STB0899_RTC , 0x7a },
{ STB0899_TMGCFG , 0x4e },
{ STB0899_AGC2REF , 0x34 },
{ STB0899_TLSR , 0x84 },
{ STB0899_CFD , 0xc7 },
{ STB0899_ACLC , 0x87 },
{ STB0899_BCLC , 0x94 },
{ STB0899_EQON , 0x41 },
{ STB0899_LDT , 0xdd },
{ STB0899_LDT2 , 0xc9 },
{ STB0899_EQUALREF , 0xb4 },
{ STB0899_TMGRAMP , 0x10 },
{ STB0899_TMGTHD , 0x30 },
{ STB0899_IDCCOMP , 0xfb },
{ STB0899_QDCCOMP , 0x03 },
{ STB0899_POWERI , 0x3b },
{ STB0899_POWERQ , 0x3d },
{ STB0899_RCOMP , 0x81 },
{ STB0899_AGCIQIN , 0x80 },
{ STB0899_AGC2I1 , 0x04 },
{ STB0899_AGC2I2 , 0xf5 },
{ STB0899_TLIR , 0x25 },
{ STB0899_RTF , 0x80 },
{ STB0899_DSTATUS , 0x00 },
{ STB0899_LDI , 0xca },
{ STB0899_CFRM , 0xf1 },
{ STB0899_CFRL , 0xf3 },
{ STB0899_NIRM , 0x2a },
{ STB0899_NIRL , 0x05 },
{ STB0899_ISYMB , 0x17 },
{ STB0899_QSYMB , 0xfa },
{ STB0899_SFRH , 0x2f },
{ STB0899_SFRM , 0x68 },
{ STB0899_SFRL , 0x40 },
{ STB0899_SFRUPH , 0x2f },
{ STB0899_SFRUPM , 0x68 },
{ STB0899_SFRUPL , 0x40 },
{ STB0899_EQUAI1 , 0xfd },
{ STB0899_EQUAQ1 , 0x04 },
{ STB0899_EQUAI2 , 0x0f },
{ STB0899_EQUAQ2 , 0xff },
{ STB0899_EQUAI3 , 0xdf },
{ STB0899_EQUAQ3 , 0xfa },
{ STB0899_EQUAI4 , 0x37 },
{ STB0899_EQUAQ4 , 0x0d },
{ STB0899_EQUAI5 , 0xbd },
{ STB0899_EQUAQ5 , 0xf7 },
{ STB0899_DSTATUS2 , 0x00 },
{ STB0899_VSTATUS , 0x00 },
{ STB0899_VERROR , 0xff },
{ STB0899_IQSWAP , 0x2a },
{ STB0899_ECNT1M , 0x00 },
{ STB0899_ECNT1L , 0x00 },
{ STB0899_ECNT2M , 0x00 },
{ STB0899_ECNT2L , 0x00 },
{ STB0899_ECNT3M , 0x00 },
{ STB0899_ECNT3L , 0x00 },
{ STB0899_FECAUTO1 , 0x06 },
{ STB0899_FECM , 0x01 },
{ STB0899_VTH12 , 0xf0 },
{ STB0899_VTH23 , 0xa0 },
{ STB0899_VTH34 , 0x78 },
{ STB0899_VTH56 , 0x4e },
{ STB0899_VTH67 , 0x48 },
{ STB0899_VTH78 , 0x38 },
{ STB0899_PRVIT , 0xff },
{ STB0899_VITSYNC , 0x19 },
{ STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */
{ STB0899_TSULC , 0x42 },
{ STB0899_RSLLC , 0x40 },
{ STB0899_TSLPL , 0x12 },
{ STB0899_TSCFGH , 0x0c },
{ STB0899_TSCFGM , 0x00 },
{ STB0899_TSCFGL , 0x0c },
{ STB0899_TSOUT , 0x4d }, /* 0x0d for CAM */
{ STB0899_RSSYNCDEL , 0x00 },
{ STB0899_TSINHDELH , 0x02 },
{ STB0899_TSINHDELM , 0x00 },
{ STB0899_TSINHDELL , 0x00 },
{ STB0899_TSLLSTKM , 0x00 },
{ STB0899_TSLLSTKL , 0x00 },
{ STB0899_TSULSTKM , 0x00 },
{ STB0899_TSULSTKL , 0xab },
{ STB0899_PCKLENUL , 0x00 },
{ STB0899_PCKLENLL , 0xcc },
{ STB0899_RSPCKLEN , 0xcc },
{ STB0899_TSSTATUS , 0x80 },
{ STB0899_ERRCTRL1 , 0xb6 },
{ STB0899_ERRCTRL2 , 0x96 },
{ STB0899_ERRCTRL3 , 0x89 },
{ STB0899_DMONMSK1 , 0x27 },
{ STB0899_DMONMSK0 , 0x03 },
{ STB0899_DEMAPVIT , 0x5c },
{ STB0899_PLPARM , 0x1f },
{ STB0899_PDELCTRL , 0x48 },
{ STB0899_PDELCTRL2 , 0x00 },
{ STB0899_BBHCTRL1 , 0x00 },
{ STB0899_BBHCTRL2 , 0x00 },
{ STB0899_HYSTTHRESH , 0x77 },
{ STB0899_MATCSTM , 0x00 },
{ STB0899_MATCSTL , 0x00 },
{ STB0899_UPLCSTM , 0x00 },
{ STB0899_UPLCSTL , 0x00 },
{ STB0899_DFLCSTM , 0x00 },
{ STB0899_DFLCSTL , 0x00 },
{ STB0899_SYNCCST , 0x00 },
{ STB0899_SYNCDCSTM , 0x00 },
{ STB0899_SYNCDCSTL , 0x00 },
{ STB0899_ISI_ENTRY , 0x00 },
{ STB0899_ISI_BIT_EN , 0x00 },
{ STB0899_MATSTRM , 0x00 },
{ STB0899_MATSTRL , 0x00 },
{ STB0899_UPLSTRM , 0x00 },
{ STB0899_UPLSTRL , 0x00 },
{ STB0899_DFLSTRM , 0x00 },
{ STB0899_DFLSTRL , 0x00 },
{ STB0899_SYNCSTR , 0x00 },
{ STB0899_SYNCDSTRM , 0x00 },
{ STB0899_SYNCDSTRL , 0x00 },
{ STB0899_CFGPDELSTATUS1 , 0x10 },
{ STB0899_CFGPDELSTATUS2 , 0x00 },
{ STB0899_BBFERRORM , 0x00 },
{ STB0899_BBFERRORL , 0x00 },
{ STB0899_UPKTERRORM , 0x00 },
{ STB0899_UPKTERRORL , 0x00 },
{ 0xffff , 0xff },
};
static struct stb0899_config tt3200_config = {
.init_dev = tt3200_stb0899_s1_init_1,
.init_s2_demod = stb0899_s2_init_2,
.init_s1_demod = tt3200_stb0899_s1_init_3,
.init_s2_fec = stb0899_s2_init_4,
.init_tst = stb0899_s1_init_5,
.postproc = NULL,
.demod_address = 0x68,
.xtal_freq = 27000000,
.inversion = IQ_SWAP_ON,
.lo_clk = 76500000,
.hi_clk = 99000000,
.esno_ave = STB0899_DVBS2_ESNO_AVE,
.esno_quant = STB0899_DVBS2_ESNO_QUANT,
.avframes_coarse = STB0899_DVBS2_AVFRAMES_COARSE,
.avframes_fine = STB0899_DVBS2_AVFRAMES_FINE,
.miss_threshold = STB0899_DVBS2_MISS_THRESHOLD,
.uwp_threshold_acq = STB0899_DVBS2_UWP_THRESHOLD_ACQ,
.uwp_threshold_track = STB0899_DVBS2_UWP_THRESHOLD_TRACK,
.uwp_threshold_sof = STB0899_DVBS2_UWP_THRESHOLD_SOF,
.sof_search_timeout = STB0899_DVBS2_SOF_SEARCH_TIMEOUT,
.btr_nco_bits = STB0899_DVBS2_BTR_NCO_BITS,
.btr_gain_shift_offset = STB0899_DVBS2_BTR_GAIN_SHIFT_OFFSET,
.crl_nco_bits = STB0899_DVBS2_CRL_NCO_BITS,
.ldpc_max_iter = STB0899_DVBS2_LDPC_MAX_ITER,
.tuner_get_frequency = stb6100_get_frequency,
.tuner_set_frequency = stb6100_set_frequency,
.tuner_set_bandwidth = stb6100_set_bandwidth,
.tuner_get_bandwidth = stb6100_get_bandwidth,
.tuner_set_rfsiggain = NULL
};
static struct stb6100_config tt3200_stb6100_config = {
.tuner_address = 0x60,
.refclock = 27000000,
};
static void frontend_init(struct budget_ci *budget_ci)
{
switch (budget_ci->budget.dev->pci->subsystem_device) {
case 0x100c: // Hauppauge/TT Nova-CI budget (stv0299/ALPS BSRU6(tsa5059))
budget_ci->budget.dvb_frontend =
dvb_attach(stv0299_attach, &alps_bsru6_config, &budget_ci->budget.i2c_adap);
if (budget_ci->budget.dvb_frontend) {
budget_ci->budget.dvb_frontend->ops.tuner_ops.set_params = alps_bsru6_tuner_set_params;
budget_ci->budget.dvb_frontend->tuner_priv = &budget_ci->budget.i2c_adap;
break;
}
break;
case 0x100f: // Hauppauge/TT Nova-CI budget (stv0299b/Philips su1278(tsa5059))
budget_ci->budget.dvb_frontend =
dvb_attach(stv0299_attach, &philips_su1278_tt_config, &budget_ci->budget.i2c_adap);
if (budget_ci->budget.dvb_frontend) {
budget_ci->budget.dvb_frontend->ops.tuner_ops.set_params = philips_su1278_tt_tuner_set_params;
break;
}
break;
case 0x1010: // TT DVB-C CI budget (stv0297/Philips tdm1316l(tda6651tt))
budget_ci->tuner_pll_address = 0x61;
budget_ci->budget.dvb_frontend =
dvb_attach(stv0297_attach, &dvbc_philips_tdm1316l_config, &budget_ci->budget.i2c_adap);
if (budget_ci->budget.dvb_frontend) {
budget_ci->budget.dvb_frontend->ops.tuner_ops.set_params = dvbc_philips_tdm1316l_tuner_set_params;
break;
}
break;
case 0x1011: // Hauppauge/TT Nova-T budget (tda10045/Philips tdm1316l(tda6651tt) + TDA9889)
budget_ci->tuner_pll_address = 0x63;
budget_ci->budget.dvb_frontend =
dvb_attach(tda10045_attach, &philips_tdm1316l_config, &budget_ci->budget.i2c_adap);
if (budget_ci->budget.dvb_frontend) {
budget_ci->budget.dvb_frontend->ops.tuner_ops.init = philips_tdm1316l_tuner_init;
budget_ci->budget.dvb_frontend->ops.tuner_ops.set_params = philips_tdm1316l_tuner_set_params;
break;
}
break;
case 0x1012: // TT DVB-T CI budget (tda10046/Philips tdm1316l(tda6651tt))
budget_ci->tuner_pll_address = 0x60;
budget_ci->budget.dvb_frontend =
dvb_attach(tda10046_attach, &philips_tdm1316l_config_invert, &budget_ci->budget.i2c_adap);
if (budget_ci->budget.dvb_frontend) {
budget_ci->budget.dvb_frontend->ops.tuner_ops.init = philips_tdm1316l_tuner_init;
budget_ci->budget.dvb_frontend->ops.tuner_ops.set_params = philips_tdm1316l_tuner_set_params;
break;
}
break;
case 0x1017: // TT S-1500 PCI
budget_ci->budget.dvb_frontend = dvb_attach(stv0299_attach, &alps_bsbe1_config, &budget_ci->budget.i2c_adap);
if (budget_ci->budget.dvb_frontend) {
budget_ci->budget.dvb_frontend->ops.tuner_ops.set_params = alps_bsbe1_tuner_set_params;
budget_ci->budget.dvb_frontend->tuner_priv = &budget_ci->budget.i2c_adap;
budget_ci->budget.dvb_frontend->ops.dishnetwork_send_legacy_command = NULL;
if (dvb_attach(lnbp21_attach, budget_ci->budget.dvb_frontend, &budget_ci->budget.i2c_adap, LNBP21_LLC, 0) == NULL) {
printk("%s: No LNBP21 found!\n", __func__);
dvb_frontend_detach(budget_ci->budget.dvb_frontend);
budget_ci->budget.dvb_frontend = NULL;
}
}
break;
case 0x101a: /* TT Budget-C-1501 (philips tda10023/philips tda8274A) */
budget_ci->budget.dvb_frontend = dvb_attach(tda10023_attach, &tda10023_config, &budget_ci->budget.i2c_adap, 0x48);
if (budget_ci->budget.dvb_frontend) {
if (dvb_attach(tda827x_attach, budget_ci->budget.dvb_frontend, 0x61, &budget_ci->budget.i2c_adap, &tda827x_config) == NULL) {
printk(KERN_ERR "%s: No tda827x found!\n", __func__);
dvb_frontend_detach(budget_ci->budget.dvb_frontend);
budget_ci->budget.dvb_frontend = NULL;
}
}
break;
case 0x101b: /* TT S-1500B (BSBE1-D01A - STV0288/STB6000/LNBP21) */
budget_ci->budget.dvb_frontend = dvb_attach(stv0288_attach, &stv0288_bsbe1_d01a_config, &budget_ci->budget.i2c_adap);
if (budget_ci->budget.dvb_frontend) {
if (dvb_attach(stb6000_attach, budget_ci->budget.dvb_frontend, 0x63, &budget_ci->budget.i2c_adap)) {
if (!dvb_attach(lnbp21_attach, budget_ci->budget.dvb_frontend, &budget_ci->budget.i2c_adap, 0, 0)) {
printk(KERN_ERR "%s: No LNBP21 found!\n", __func__);
dvb_frontend_detach(budget_ci->budget.dvb_frontend);
budget_ci->budget.dvb_frontend = NULL;
}
} else {
printk(KERN_ERR "%s: No STB6000 found!\n", __func__);
dvb_frontend_detach(budget_ci->budget.dvb_frontend);
budget_ci->budget.dvb_frontend = NULL;
}
}
break;
case 0x1019: // TT S2-3200 PCI
		/*
		 * NOTE: on some STB0899 revisions the internal PLL takes longer
		 * to settle (LOCK). Older revisions of the chip do not show
		 * this, but on newer chips the whole clock tree will not be
		 * stable right after power-up.
		 * In that case the STB0899 must be held in RESET (active LOW)
		 * and we have to wait for the PLL to stabilize.
		 *
		 * On the TT S2 3200 and clones, the STB0899 demodulator's
		 * RESETB is connected to the SAA7146 GPIO2 (pin 142).
		 */
/* Reset Demodulator */
saa7146_setgpio(budget_ci->budget.dev, 2, SAA7146_GPIO_OUTLO);
/* Wait for everything to die */
msleep(50);
/* Pull it up out of Reset state */
saa7146_setgpio(budget_ci->budget.dev, 2, SAA7146_GPIO_OUTHI);
/* Wait for PLL to stabilize */
msleep(250);
/*
* PLL state should be stable now. Ideally, we should check
* for PLL LOCK status. But well, never mind!
*/
budget_ci->budget.dvb_frontend = dvb_attach(stb0899_attach, &tt3200_config, &budget_ci->budget.i2c_adap);
if (budget_ci->budget.dvb_frontend) {
if (dvb_attach(stb6100_attach, budget_ci->budget.dvb_frontend, &tt3200_stb6100_config, &budget_ci->budget.i2c_adap)) {
if (!dvb_attach(lnbp21_attach, budget_ci->budget.dvb_frontend, &budget_ci->budget.i2c_adap, 0, 0)) {
printk("%s: No LNBP21 found!\n", __func__);
dvb_frontend_detach(budget_ci->budget.dvb_frontend);
budget_ci->budget.dvb_frontend = NULL;
}
} else {
dvb_frontend_detach(budget_ci->budget.dvb_frontend);
budget_ci->budget.dvb_frontend = NULL;
}
}
break;
}
if (budget_ci->budget.dvb_frontend == NULL) {
printk("budget-ci: A frontend driver was not found for device [%04x:%04x] subsystem [%04x:%04x]\n",
budget_ci->budget.dev->pci->vendor,
budget_ci->budget.dev->pci->device,
budget_ci->budget.dev->pci->subsystem_vendor,
budget_ci->budget.dev->pci->subsystem_device);
} else {
if (dvb_register_frontend
(&budget_ci->budget.dvb_adapter, budget_ci->budget.dvb_frontend)) {
printk("budget-ci: Frontend registration failed!\n");
dvb_frontend_detach(budget_ci->budget.dvb_frontend);
budget_ci->budget.dvb_frontend = NULL;
}
}
}
static int budget_ci_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info)
{
struct budget_ci *budget_ci;
int err;
budget_ci = kzalloc(sizeof(struct budget_ci), GFP_KERNEL);
if (!budget_ci) {
err = -ENOMEM;
goto out1;
}
dprintk(2, "budget_ci: %p\n", budget_ci);
dev->ext_priv = budget_ci;
err = ttpci_budget_init(&budget_ci->budget, dev, info, THIS_MODULE,
adapter_nr);
if (err)
goto out2;
err = msp430_ir_init(budget_ci);
if (err)
goto out3;
ciintf_init(budget_ci);
budget_ci->budget.dvb_adapter.priv = budget_ci;
frontend_init(budget_ci);
ttpci_budget_init_hooks(&budget_ci->budget);
return 0;
out3:
ttpci_budget_deinit(&budget_ci->budget);
out2:
kfree(budget_ci);
out1:
return err;
}
static int budget_ci_detach(struct saa7146_dev *dev)
{
struct budget_ci *budget_ci = dev->ext_priv;
struct saa7146_dev *saa = budget_ci->budget.dev;
int err;
if (budget_ci->budget.ci_present)
ciintf_deinit(budget_ci);
msp430_ir_deinit(budget_ci);
if (budget_ci->budget.dvb_frontend) {
dvb_unregister_frontend(budget_ci->budget.dvb_frontend);
dvb_frontend_detach(budget_ci->budget.dvb_frontend);
}
err = ttpci_budget_deinit(&budget_ci->budget);
// disable frontend and CI interface
saa7146_setgpio(saa, 2, SAA7146_GPIO_INPUT);
kfree(budget_ci);
return err;
}
static struct saa7146_extension budget_extension;
MAKE_BUDGET_INFO(ttbs2, "TT-Budget/S-1500 PCI", BUDGET_TT);
MAKE_BUDGET_INFO(ttbci, "TT-Budget/WinTV-NOVA-CI PCI", BUDGET_TT_HW_DISEQC);
MAKE_BUDGET_INFO(ttbt2, "TT-Budget/WinTV-NOVA-T PCI", BUDGET_TT);
MAKE_BUDGET_INFO(ttbtci, "TT-Budget-T-CI PCI", BUDGET_TT);
MAKE_BUDGET_INFO(ttbcci, "TT-Budget-C-CI PCI", BUDGET_TT);
MAKE_BUDGET_INFO(ttc1501, "TT-Budget C-1501 PCI", BUDGET_TT);
MAKE_BUDGET_INFO(tt3200, "TT-Budget S2-3200 PCI", BUDGET_TT);
MAKE_BUDGET_INFO(ttbs1500b, "TT-Budget S-1500B PCI", BUDGET_TT);
static const struct pci_device_id pci_tbl[] = {
MAKE_EXTENSION_PCI(ttbci, 0x13c2, 0x100c),
MAKE_EXTENSION_PCI(ttbci, 0x13c2, 0x100f),
MAKE_EXTENSION_PCI(ttbcci, 0x13c2, 0x1010),
MAKE_EXTENSION_PCI(ttbt2, 0x13c2, 0x1011),
MAKE_EXTENSION_PCI(ttbtci, 0x13c2, 0x1012),
MAKE_EXTENSION_PCI(ttbs2, 0x13c2, 0x1017),
MAKE_EXTENSION_PCI(ttc1501, 0x13c2, 0x101a),
MAKE_EXTENSION_PCI(tt3200, 0x13c2, 0x1019),
MAKE_EXTENSION_PCI(ttbs1500b, 0x13c2, 0x101b),
{
.vendor = 0,
}
};
MODULE_DEVICE_TABLE(pci, pci_tbl);
static struct saa7146_extension budget_extension = {
.name = "budget_ci dvb",
.flags = SAA7146_USE_I2C_IRQ,
.module = THIS_MODULE,
.pci_tbl = &pci_tbl[0],
.attach = budget_ci_attach,
.detach = budget_ci_detach,
.irq_mask = MASK_03 | MASK_06 | MASK_10,
.irq_func = budget_ci_irq,
};
static int __init budget_ci_init(void)
{
return saa7146_register_extension(&budget_extension);
}
static void __exit budget_ci_exit(void)
{
saa7146_unregister_extension(&budget_extension);
}
module_init(budget_ci_init);
module_exit(budget_ci_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael Hunold, Jack Thomasson, Andrew de Quincey, others");
MODULE_DESCRIPTION("driver for the SAA7146 based so-called budget PCI DVB cards w/ CI-module produced by Siemens, Technotrend, Hauppauge");
| linux-master | drivers/media/pci/ttpci/budget-ci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Earthsoft PT3 driver
*
* Copyright (C) 2014 Akihiro Tsukada <[email protected]>
*/
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/sched/signal.h>
#include <media/dmxdev.h>
#include <media/dvbdev.h>
#include <media/dvb_demux.h>
#include <media/dvb_frontend.h>
#include "pt3.h"
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static bool one_adapter;
module_param(one_adapter, bool, 0444);
MODULE_PARM_DESC(one_adapter, "Place FEs together under one adapter.");
static int num_bufs = 4;
module_param(num_bufs, int, 0444);
MODULE_PARM_DESC(num_bufs, "Number of 188KiB DMA buffers per FE.");
static const struct i2c_algorithm pt3_i2c_algo = {
.master_xfer = &pt3_i2c_master_xfer,
.functionality = &pt3_i2c_functionality,
};
static const struct pt3_adap_config adap_conf[PT3_NUM_FE] = {
{
.demod_info = {
I2C_BOARD_INFO(TC90522_I2C_DEV_SAT, 0x11),
},
.tuner_info = {
I2C_BOARD_INFO("qm1d1c0042", 0x63),
},
.tuner_cfg.qm1d1c0042 = {
.lpf = 1,
},
.init_freq = 1049480 - 300,
},
{
.demod_info = {
I2C_BOARD_INFO(TC90522_I2C_DEV_TER, 0x10),
},
.tuner_info = {
I2C_BOARD_INFO("mxl301rf", 0x62),
},
.init_freq = 515142857,
},
{
.demod_info = {
I2C_BOARD_INFO(TC90522_I2C_DEV_SAT, 0x13),
},
.tuner_info = {
I2C_BOARD_INFO("qm1d1c0042", 0x60),
},
.tuner_cfg.qm1d1c0042 = {
.lpf = 1,
},
.init_freq = 1049480 + 300,
},
{
.demod_info = {
I2C_BOARD_INFO(TC90522_I2C_DEV_TER, 0x12),
},
.tuner_info = {
I2C_BOARD_INFO("mxl301rf", 0x61),
},
.init_freq = 521142857,
},
};
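/*
 * For reference, the table above maps to (demod addr / tuner addr):
 *   adaps[0]: ISDB-S, tc90522 @ 0x11, qm1d1c0042 @ 0x63
 *   adaps[1]: ISDB-T, tc90522 @ 0x10, mxl301rf   @ 0x62
 *   adaps[2]: ISDB-S, tc90522 @ 0x13, qm1d1c0042 @ 0x60
 *   adaps[3]: ISDB-T, tc90522 @ 0x12, mxl301rf   @ 0x61
 */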
struct reg_val {
u8 reg;
u8 val;
};
static int
pt3_demod_write(struct pt3_adapter *adap, const struct reg_val *data, int num)
{
struct i2c_msg msg;
int i, ret;
ret = 0;
msg.addr = adap->i2c_demod->addr;
msg.flags = 0;
msg.len = 2;
for (i = 0; i < num; i++) {
msg.buf = (u8 *)&data[i];
ret = i2c_transfer(adap->i2c_demod->adapter, &msg, 1);
if (ret == 0)
ret = -EREMOTE;
if (ret < 0)
return ret;
}
return 0;
}
static inline void pt3_lnb_ctrl(struct pt3_board *pt3, bool on)
{
iowrite32((on ? 0x0f : 0x0c), pt3->regs[0] + REG_SYSTEM_W);
}
static inline struct pt3_adapter *pt3_find_adapter(struct dvb_frontend *fe)
{
struct pt3_board *pt3;
int i;
if (one_adapter) {
pt3 = fe->dvb->priv;
for (i = 0; i < PT3_NUM_FE; i++)
if (pt3->adaps[i]->fe == fe)
return pt3->adaps[i];
}
return container_of(fe->dvb, struct pt3_adapter, dvb_adap);
}
/*
 * All 4 tuners in the PT3 are packaged in one can module (Sharp VA4M6JC2103).
 * They appear to share the tuner power line and the amplifier power line,
 * and adaps[3] (the last demod) controls both of them.
 */
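/*
 * Resulting values written to demod register 0x1e by pt3_set_tuner_power()
 * below (base value 0x99):
 *   tuner off, amp off: 0x99
 *   tuner on,  amp off: 0xd9  (| 0x40)
 *   tuner on,  amp on : 0xdd  (| 0x40 | 0x04)
 */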
static int
pt3_set_tuner_power(struct pt3_board *pt3, bool tuner_on, bool amp_on)
{
struct reg_val rv = { 0x1e, 0x99 };
if (tuner_on)
rv.val |= 0x40;
if (amp_on)
rv.val |= 0x04;
return pt3_demod_write(pt3->adaps[PT3_NUM_FE - 1], &rv, 1);
}
static int pt3_set_lna(struct dvb_frontend *fe)
{
struct pt3_adapter *adap;
struct pt3_board *pt3;
u32 val;
int ret;
/* LNA is shared btw. 2 TERR-tuners */
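	/*
	 * Example: if both ISDB-T FEs enable the LNA, only the first call
	 * actually powers the amp (lna_on_cnt 0 -> 1); the second call only
	 * bumps the count, and the amp is switched off again only when the
	 * last user disables it (count back to 0).
	 */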
adap = pt3_find_adapter(fe);
val = fe->dtv_property_cache.lna;
if (val == LNA_AUTO || val == adap->cur_lna)
return 0;
pt3 = adap->dvb_adap.priv;
if (mutex_lock_interruptible(&pt3->lock))
return -ERESTARTSYS;
if (val)
pt3->lna_on_cnt++;
else
pt3->lna_on_cnt--;
if (val && pt3->lna_on_cnt <= 1) {
pt3->lna_on_cnt = 1;
ret = pt3_set_tuner_power(pt3, true, true);
} else if (!val && pt3->lna_on_cnt <= 0) {
pt3->lna_on_cnt = 0;
ret = pt3_set_tuner_power(pt3, true, false);
} else
ret = 0;
mutex_unlock(&pt3->lock);
adap->cur_lna = (val != 0);
return ret;
}
static int pt3_set_voltage(struct dvb_frontend *fe, enum fe_sec_voltage volt)
{
struct pt3_adapter *adap;
struct pt3_board *pt3;
bool on;
/* LNB power is shared btw. 2 SAT-tuners */
adap = pt3_find_adapter(fe);
on = (volt != SEC_VOLTAGE_OFF);
if (on == adap->cur_lnb)
return 0;
adap->cur_lnb = on;
pt3 = adap->dvb_adap.priv;
if (mutex_lock_interruptible(&pt3->lock))
return -ERESTARTSYS;
if (on)
pt3->lnb_on_cnt++;
else
pt3->lnb_on_cnt--;
if (on && pt3->lnb_on_cnt <= 1) {
pt3->lnb_on_cnt = 1;
pt3_lnb_ctrl(pt3, true);
} else if (!on && pt3->lnb_on_cnt <= 0) {
pt3->lnb_on_cnt = 0;
pt3_lnb_ctrl(pt3, false);
}
mutex_unlock(&pt3->lock);
return 0;
}
/* register values used in pt3_fe_init() */
static const struct reg_val init0_sat[] = {
{ 0x03, 0x01 },
{ 0x1e, 0x10 },
};
static const struct reg_val init0_ter[] = {
{ 0x01, 0x40 },
{ 0x1c, 0x10 },
};
static const struct reg_val cfg_sat[] = {
{ 0x1c, 0x15 },
{ 0x1f, 0x04 },
};
static const struct reg_val cfg_ter[] = {
{ 0x1d, 0x01 },
};
/*
* pt3_fe_init: initialize demod sub modules and ISDB-T tuners all at once.
*
* As for demod IC (TC90522) and ISDB-T tuners (MxL301RF),
* the i2c sequences for init'ing them are not public and hidden in a ROM,
* and include the board specific configurations as well.
* They are stored in a lump and cannot be taken out / accessed separately,
* thus cannot be moved to the FE/tuner driver.
*/
static int pt3_fe_init(struct pt3_board *pt3)
{
int i, ret;
struct dvb_frontend *fe;
pt3_i2c_reset(pt3);
ret = pt3_init_all_demods(pt3);
if (ret < 0) {
dev_warn(&pt3->pdev->dev, "Failed to init demod chips\n");
return ret;
}
/* additional config? */
for (i = 0; i < PT3_NUM_FE; i++) {
fe = pt3->adaps[i]->fe;
if (fe->ops.delsys[0] == SYS_ISDBS)
ret = pt3_demod_write(pt3->adaps[i],
init0_sat, ARRAY_SIZE(init0_sat));
else
ret = pt3_demod_write(pt3->adaps[i],
init0_ter, ARRAY_SIZE(init0_ter));
if (ret < 0) {
dev_warn(&pt3->pdev->dev,
"demod[%d] failed in init sequence0\n", i);
return ret;
}
ret = fe->ops.init(fe);
if (ret < 0)
return ret;
}
usleep_range(2000, 4000);
ret = pt3_set_tuner_power(pt3, true, false);
if (ret < 0) {
dev_warn(&pt3->pdev->dev, "Failed to control tuner module\n");
return ret;
}
/* output pin configuration */
for (i = 0; i < PT3_NUM_FE; i++) {
fe = pt3->adaps[i]->fe;
if (fe->ops.delsys[0] == SYS_ISDBS)
ret = pt3_demod_write(pt3->adaps[i],
cfg_sat, ARRAY_SIZE(cfg_sat));
else
ret = pt3_demod_write(pt3->adaps[i],
cfg_ter, ARRAY_SIZE(cfg_ter));
if (ret < 0) {
dev_warn(&pt3->pdev->dev,
"demod[%d] failed in init sequence1\n", i);
return ret;
}
}
usleep_range(4000, 6000);
for (i = 0; i < PT3_NUM_FE; i++) {
fe = pt3->adaps[i]->fe;
if (fe->ops.delsys[0] != SYS_ISDBS)
continue;
/* init and wake-up ISDB-S tuners */
ret = fe->ops.tuner_ops.init(fe);
if (ret < 0) {
dev_warn(&pt3->pdev->dev,
"Failed to init SAT-tuner[%d]\n", i);
return ret;
}
}
ret = pt3_init_all_mxl301rf(pt3);
if (ret < 0) {
dev_warn(&pt3->pdev->dev, "Failed to init TERR-tuners\n");
return ret;
}
ret = pt3_set_tuner_power(pt3, true, true);
if (ret < 0) {
dev_warn(&pt3->pdev->dev, "Failed to control tuner module\n");
return ret;
}
	/* Wake up all tuners and do an initial tune, in order to avoid
	 * interference among the tuners in the module, as recommended by
	 * the manufacturer's documentation.
	 */
for (i = 0; i < PT3_NUM_FE; i++) {
fe = pt3->adaps[i]->fe;
ret = 0;
if (fe->ops.delsys[0] == SYS_ISDBT)
ret = fe->ops.tuner_ops.init(fe);
/* set only when called from pt3_probe(), not resume() */
if (ret == 0 && fe->dtv_property_cache.frequency == 0) {
fe->dtv_property_cache.frequency =
adap_conf[i].init_freq;
ret = fe->ops.tuner_ops.set_params(fe);
}
if (ret < 0) {
dev_warn(&pt3->pdev->dev,
"Failed in initial tuning of tuner[%d]\n", i);
return ret;
}
}
/* and sleep again, waiting to be opened by users. */
for (i = 0; i < PT3_NUM_FE; i++) {
fe = pt3->adaps[i]->fe;
if (fe->ops.tuner_ops.sleep)
ret = fe->ops.tuner_ops.sleep(fe);
if (ret < 0)
break;
if (fe->ops.sleep)
ret = fe->ops.sleep(fe);
if (ret < 0)
break;
if (fe->ops.delsys[0] == SYS_ISDBS)
fe->ops.set_voltage = &pt3_set_voltage;
else
fe->ops.set_lna = &pt3_set_lna;
}
if (i < PT3_NUM_FE) {
dev_warn(&pt3->pdev->dev, "FE[%d] failed to standby\n", i);
return ret;
}
return 0;
}
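/*
* pt3_attach_fe: attach the demod and tuner modules for adapter @i.
*
* Probes the TC90522 demod module first; the demod driver fills in
* cfg.fe and cfg.tuner_i2c during dvb_module_probe(). Depending on the
* reported demod type, the matching tuner module (QM1D1C0042 for
* ISDB-S, MxL301RF for ISDB-T) is probed on that tuner I2C adapter,
* and the resulting frontend is registered with the DVB adapter.
*/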
static int pt3_attach_fe(struct pt3_board *pt3, int i)
{
const struct i2c_board_info *info;
struct tc90522_config cfg;
struct i2c_client *cl;
struct dvb_adapter *dvb_adap;
int ret;
info = &adap_conf[i].demod_info;
cfg = adap_conf[i].demod_cfg;
cfg.tuner_i2c = NULL;
ret = -ENODEV;
cl = dvb_module_probe("tc90522", info->type, &pt3->i2c_adap,
info->addr, &cfg);
if (!cl)
return -ENODEV;
pt3->adaps[i]->i2c_demod = cl;
if (!strncmp(cl->name, TC90522_I2C_DEV_SAT,
strlen(TC90522_I2C_DEV_SAT))) {
struct qm1d1c0042_config tcfg;
tcfg = adap_conf[i].tuner_cfg.qm1d1c0042;
tcfg.fe = cfg.fe;
info = &adap_conf[i].tuner_info;
cl = dvb_module_probe("qm1d1c0042", info->type, cfg.tuner_i2c,
info->addr, &tcfg);
} else {
struct mxl301rf_config tcfg;
tcfg = adap_conf[i].tuner_cfg.mxl301rf;
tcfg.fe = cfg.fe;
info = &adap_conf[i].tuner_info;
cl = dvb_module_probe("mxl301rf", info->type, cfg.tuner_i2c,
info->addr, &tcfg);
}
if (!cl)
goto err_demod_module_release;
pt3->adaps[i]->i2c_tuner = cl;
dvb_adap = &pt3->adaps[one_adapter ? 0 : i]->dvb_adap;
ret = dvb_register_frontend(dvb_adap, cfg.fe);
if (ret < 0)
goto err_tuner_module_release;
pt3->adaps[i]->fe = cfg.fe;
return 0;
err_tuner_module_release:
dvb_module_release(pt3->adaps[i]->i2c_tuner);
err_demod_module_release:
dvb_module_release(pt3->adaps[i]->i2c_demod);
return ret;
}
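/*
* DMA fetch thread, one per adapter: polls the DMA buffers roughly
* every PT3_FETCH_DELAY msec and pushes completed TS data to the
* demux via pt3_proc_dma(). The first PT3_INITIAL_BUF_DROPS chunks
* are discarded after start (and after thawing from a freeze) to skip
* stale data.
*/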
static int pt3_fetch_thread(void *data)
{
struct pt3_adapter *adap = data;
ktime_t delay;
bool was_frozen;
#define PT3_INITIAL_BUF_DROPS 4
#define PT3_FETCH_DELAY 10
#define PT3_FETCH_DELAY_DELTA 2
pt3_init_dmabuf(adap);
adap->num_discard = PT3_INITIAL_BUF_DROPS;
dev_dbg(adap->dvb_adap.device, "PT3: [%s] started\n",
adap->thread->comm);
set_freezable();
while (!kthread_freezable_should_stop(&was_frozen)) {
if (was_frozen)
adap->num_discard = PT3_INITIAL_BUF_DROPS;
pt3_proc_dma(adap);
delay = ktime_set(0, PT3_FETCH_DELAY * NSEC_PER_MSEC);
set_current_state(TASK_UNINTERRUPTIBLE|TASK_FREEZABLE);
schedule_hrtimeout_range(&delay,
PT3_FETCH_DELAY_DELTA * NSEC_PER_MSEC,
HRTIMER_MODE_REL);
}
dev_dbg(adap->dvb_adap.device, "PT3: [%s] exited\n",
adap->thread->comm);
return 0;
}
static int pt3_start_streaming(struct pt3_adapter *adap)
{
struct task_struct *thread;
/* start fetching thread */
thread = kthread_run(pt3_fetch_thread, adap, "pt3-ad%i-dmx%i",
adap->dvb_adap.num, adap->dmxdev.dvbdev->id);
if (IS_ERR(thread)) {
int ret = PTR_ERR(thread);
adap->thread = NULL;
dev_warn(adap->dvb_adap.device,
"PT3 (adap:%d, dmx:%d): failed to start kthread\n",
adap->dvb_adap.num, adap->dmxdev.dvbdev->id);
return ret;
}
adap->thread = thread;
return pt3_start_dma(adap);
}
static int pt3_stop_streaming(struct pt3_adapter *adap)
{
int ret;
ret = pt3_stop_dma(adap);
if (ret)
dev_warn(adap->dvb_adap.device,
"PT3: failed to stop streaming of adap:%d/FE:%d\n",
adap->dvb_adap.num, adap->fe->id);
/* kill the fetching thread */
ret = kthread_stop(adap->thread);
adap->thread = NULL;
return ret;
}
static int pt3_start_feed(struct dvb_demux_feed *feed)
{
struct pt3_adapter *adap;
if (signal_pending(current))
return -EINTR;
adap = container_of(feed->demux, struct pt3_adapter, demux);
adap->num_feeds++;
if (adap->num_feeds > 1)
return 0;
return pt3_start_streaming(adap);
}
static int pt3_stop_feed(struct dvb_demux_feed *feed)
{
struct pt3_adapter *adap;
adap = container_of(feed->demux, struct pt3_adapter, demux);
adap->num_feeds--;
if (adap->num_feeds > 0 || !adap->thread)
return 0;
adap->num_feeds = 0;
return pt3_stop_streaming(adap);
}
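/*
* pt3_alloc_adapter: set up one pt3_adapter. Registers a new DVB
* adapter (or reuses adapter 0 when the one_adapter module option is
* set), initializes the demux and dmxdev with 256 feeds/filters, and
* allocates the DMA buffers.
*/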
static int pt3_alloc_adapter(struct pt3_board *pt3, int index)
{
int ret;
struct pt3_adapter *adap;
struct dvb_adapter *da;
adap = kzalloc(sizeof(*adap), GFP_KERNEL);
if (!adap)
return -ENOMEM;
pt3->adaps[index] = adap;
adap->adap_idx = index;
if (index == 0 || !one_adapter) {
ret = dvb_register_adapter(&adap->dvb_adap, "PT3 DVB",
THIS_MODULE, &pt3->pdev->dev, adapter_nr);
if (ret < 0) {
dev_err(&pt3->pdev->dev,
"failed to register adapter dev\n");
goto err_mem;
}
da = &adap->dvb_adap;
} else
da = &pt3->adaps[0]->dvb_adap;
adap->dvb_adap.priv = pt3;
adap->demux.dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING;
adap->demux.priv = adap;
adap->demux.feednum = 256;
adap->demux.filternum = 256;
adap->demux.start_feed = pt3_start_feed;
adap->demux.stop_feed = pt3_stop_feed;
ret = dvb_dmx_init(&adap->demux);
if (ret < 0) {
dev_err(&pt3->pdev->dev, "failed to init dmx dev\n");
goto err_adap;
}
adap->dmxdev.filternum = 256;
adap->dmxdev.demux = &adap->demux.dmx;
ret = dvb_dmxdev_init(&adap->dmxdev, da);
if (ret < 0) {
dev_err(&pt3->pdev->dev, "failed to init dmxdev\n");
goto err_demux;
}
ret = pt3_alloc_dmabuf(adap);
if (ret) {
dev_err(&pt3->pdev->dev, "failed to alloc DMA buffers\n");
goto err_dmabuf;
}
return 0;
err_dmabuf:
pt3_free_dmabuf(adap);
dvb_dmxdev_release(&adap->dmxdev);
err_demux:
dvb_dmx_release(&adap->demux);
err_adap:
if (index == 0 || !one_adapter)
dvb_unregister_adapter(da);
err_mem:
kfree(adap);
pt3->adaps[index] = NULL;
return ret;
}
static void pt3_cleanup_adapter(struct pt3_board *pt3, int index)
{
struct pt3_adapter *adap;
struct dmx_demux *dmx;
adap = pt3->adaps[index];
if (adap == NULL)
return;
/* stop demux kthread */
if (adap->thread)
pt3_stop_streaming(adap);
dmx = &adap->demux.dmx;
dmx->close(dmx);
if (adap->fe) {
adap->fe->callback = NULL;
if (adap->fe->frontend_priv)
dvb_unregister_frontend(adap->fe);
dvb_module_release(adap->i2c_tuner);
dvb_module_release(adap->i2c_demod);
}
pt3_free_dmabuf(adap);
dvb_dmxdev_release(&adap->dmxdev);
dvb_dmx_release(&adap->demux);
if (index == 0 || !one_adapter)
dvb_unregister_adapter(&adap->dvb_adap);
kfree(adap);
pt3->adaps[index] = NULL;
}
#ifdef CONFIG_PM_SLEEP
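/*
* PM callbacks: suspend stops DMA on adapters with active feeds,
* suspends the frontends, frees the DMA buffers and powers down the
* LNB and tuner module; resume re-runs pt3_fe_init(), restores
* tuner/LNB power if it was on, resumes the frontends and reallocates
* the buffers before restarting DMA.
*/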
static int pt3_suspend(struct device *dev)
{
struct pt3_board *pt3 = dev_get_drvdata(dev);
int i;
struct pt3_adapter *adap;
for (i = 0; i < PT3_NUM_FE; i++) {
adap = pt3->adaps[i];
if (adap->num_feeds > 0)
pt3_stop_dma(adap);
dvb_frontend_suspend(adap->fe);
pt3_free_dmabuf(adap);
}
pt3_lnb_ctrl(pt3, false);
pt3_set_tuner_power(pt3, false, false);
return 0;
}
static int pt3_resume(struct device *dev)
{
struct pt3_board *pt3 = dev_get_drvdata(dev);
int i, ret;
struct pt3_adapter *adap;
ret = pt3_fe_init(pt3);
if (ret)
return ret;
if (pt3->lna_on_cnt > 0)
pt3_set_tuner_power(pt3, true, true);
if (pt3->lnb_on_cnt > 0)
pt3_lnb_ctrl(pt3, true);
for (i = 0; i < PT3_NUM_FE; i++) {
adap = pt3->adaps[i];
dvb_frontend_resume(adap->fe);
ret = pt3_alloc_dmabuf(adap);
if (ret) {
dev_err(&pt3->pdev->dev, "failed to alloc DMA bufs\n");
continue;
}
if (adap->num_feeds > 0)
pt3_start_dma(adap);
}
return 0;
}
#endif /* CONFIG_PM_SLEEP */
static void pt3_remove(struct pci_dev *pdev)
{
struct pt3_board *pt3;
int i;
pt3 = pci_get_drvdata(pdev);
for (i = PT3_NUM_FE - 1; i >= 0; i--)
pt3_cleanup_adapter(pt3, i);
i2c_del_adapter(&pt3->i2c_adap);
}
static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
u8 rev;
u32 ver;
int i, ret;
struct pt3_board *pt3;
struct i2c_adapter *i2c;
if (pci_read_config_byte(pdev, PCI_REVISION_ID, &rev) || rev != 1)
return -ENODEV;
ret = pcim_enable_device(pdev);
if (ret < 0)
return -ENODEV;
pci_set_master(pdev);
ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2), DRV_NAME);
if (ret < 0)
return ret;
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(&pdev->dev, "Failed to set DMA mask\n");
return ret;
}
pt3 = devm_kzalloc(&pdev->dev, sizeof(*pt3), GFP_KERNEL);
if (!pt3)
return -ENOMEM;
pci_set_drvdata(pdev, pt3);
pt3->pdev = pdev;
mutex_init(&pt3->lock);
pt3->regs[0] = pcim_iomap_table(pdev)[0];
pt3->regs[1] = pcim_iomap_table(pdev)[2];
ver = ioread32(pt3->regs[0] + REG_VERSION);
if ((ver >> 16) != 0x0301) {
dev_warn(&pdev->dev, "PT%d, I/F-ver.:%d not supported\n",
ver >> 24, (ver & 0x00ff0000) >> 16);
return -ENODEV;
}
pt3->num_bufs = clamp_val(num_bufs, MIN_DATA_BUFS, MAX_DATA_BUFS);
pt3->i2c_buf = devm_kmalloc(&pdev->dev, sizeof(*pt3->i2c_buf), GFP_KERNEL);
if (!pt3->i2c_buf)
return -ENOMEM;
i2c = &pt3->i2c_adap;
i2c->owner = THIS_MODULE;
i2c->algo = &pt3_i2c_algo;
i2c->algo_data = NULL;
i2c->dev.parent = &pdev->dev;
strscpy(i2c->name, DRV_NAME, sizeof(i2c->name));
i2c_set_adapdata(i2c, pt3);
ret = i2c_add_adapter(i2c);
if (ret < 0)
return ret;
for (i = 0; i < PT3_NUM_FE; i++) {
ret = pt3_alloc_adapter(pt3, i);
if (ret < 0)
break;
ret = pt3_attach_fe(pt3, i);
if (ret < 0)
break;
}
if (i < PT3_NUM_FE) {
dev_err(&pdev->dev, "Failed to create FE%d\n", i);
goto err_cleanup_adapters;
}
ret = pt3_fe_init(pt3);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to init frontends\n");
i = PT3_NUM_FE - 1;
goto err_cleanup_adapters;
}
dev_info(&pdev->dev,
"successfully init'ed PT%d (fw:0x%02x, I/F:0x%02x)\n",
ver >> 24, (ver >> 8) & 0xff, (ver >> 16) & 0xff);
return 0;
err_cleanup_adapters:
while (i >= 0)
pt3_cleanup_adapter(pt3, i--);
i2c_del_adapter(i2c);
return ret;
}
static const struct pci_device_id pt3_id_table[] = {
{ PCI_DEVICE_SUB(0x1172, 0x4c15, 0xee8d, 0x0368) },
{ },
};
MODULE_DEVICE_TABLE(pci, pt3_id_table);
static SIMPLE_DEV_PM_OPS(pt3_pm_ops, pt3_suspend, pt3_resume);
static struct pci_driver pt3_driver = {
.name = DRV_NAME,
.probe = pt3_probe,
.remove = pt3_remove,
.id_table = pt3_id_table,
.driver.pm = &pt3_pm_ops,
};
module_pci_driver(pt3_driver);
MODULE_DESCRIPTION("Earthsoft PT3 Driver");
MODULE_AUTHOR("Akihiro TSUKADA");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/pci/pt3/pt3.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Earthsoft PT3 driver
*
* Copyright (C) 2014 Akihiro Tsukada <[email protected]>
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/pci.h>
#include "pt3.h"
#define PT3_I2C_BASE 2048
#define PT3_CMD_ADDR_NORMAL 0
#define PT3_CMD_ADDR_INIT_DEMOD 4096
#define PT3_CMD_ADDR_INIT_TUNER (4096 + 2042)
/* masks for I2C status register */
#define STAT_SEQ_RUNNING 0x1
#define STAT_SEQ_ERROR 0x6
#define STAT_NO_SEQ 0x8
#define PT3_I2C_RUN (1 << 16)
#define PT3_I2C_RESET (1 << 17)
enum ctl_cmd {
I_END,
I_ADDRESS,
I_CLOCK_L,
I_CLOCK_H,
I_DATA_L,
I_DATA_H,
I_RESET,
I_SLEEP,
I_DATA_L_NOP = 0x08,
I_DATA_H_NOP = 0x0c,
I_DATA_H_READ = 0x0d,
I_DATA_H_ACK0 = 0x0e,
I_DATA_H_ACK1 = 0x0f,
};
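/*
* Each ctl_cmd is a 4-bit opcode for the hardware I2C sequencer.
* cmdbuf_add() packs two consecutive commands into one byte of
* cbuf->data, with the earlier command in the low nibble.
*/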
static void cmdbuf_add(struct pt3_i2cbuf *cbuf, enum ctl_cmd cmd)
{
int buf_idx;
if ((cbuf->num_cmds % 2) == 0)
cbuf->tmp = cmd;
else {
cbuf->tmp |= cmd << 4;
buf_idx = cbuf->num_cmds / 2;
if (buf_idx < ARRAY_SIZE(cbuf->data))
cbuf->data[buf_idx] = cbuf->tmp;
}
cbuf->num_cmds++;
}
static void put_end(struct pt3_i2cbuf *cbuf)
{
cmdbuf_add(cbuf, I_END);
if (cbuf->num_cmds % 2)
cmdbuf_add(cbuf, I_END);
}
static void put_start(struct pt3_i2cbuf *cbuf)
{
cmdbuf_add(cbuf, I_DATA_H);
cmdbuf_add(cbuf, I_CLOCK_H);
cmdbuf_add(cbuf, I_DATA_L);
cmdbuf_add(cbuf, I_CLOCK_L);
}
static void put_byte_write(struct pt3_i2cbuf *cbuf, u8 val)
{
u8 mask;
for (mask = 0x80; mask > 0; mask >>= 1)
cmdbuf_add(cbuf, (val & mask) ? I_DATA_H_NOP : I_DATA_L_NOP);
cmdbuf_add(cbuf, I_DATA_H_ACK0);
}
static void put_byte_read(struct pt3_i2cbuf *cbuf, u32 size)
{
int i, j;
for (i = 0; i < size; i++) {
for (j = 0; j < 8; j++)
cmdbuf_add(cbuf, I_DATA_H_READ);
cmdbuf_add(cbuf, (i == size - 1) ? I_DATA_H_NOP : I_DATA_L_NOP);
}
}
static void put_stop(struct pt3_i2cbuf *cbuf)
{
cmdbuf_add(cbuf, I_DATA_L);
cmdbuf_add(cbuf, I_CLOCK_H);
cmdbuf_add(cbuf, I_DATA_H);
}
/* translates msgs to internal commands for bit-banging */
static void translate(struct pt3_i2cbuf *cbuf, struct i2c_msg *msgs, int num)
{
int i, j;
bool rd;
cbuf->num_cmds = 0;
for (i = 0; i < num; i++) {
rd = !!(msgs[i].flags & I2C_M_RD);
put_start(cbuf);
put_byte_write(cbuf, msgs[i].addr << 1 | rd);
if (rd)
put_byte_read(cbuf, msgs[i].len);
else
for (j = 0; j < msgs[i].len; j++)
put_byte_write(cbuf, msgs[i].buf[j]);
}
if (num > 0) {
put_stop(cbuf);
put_end(cbuf);
}
}
static int wait_i2c_result(struct pt3_board *pt3, u32 *result, int max_wait)
{
int i;
u32 v;
for (i = 0; i < max_wait; i++) {
v = ioread32(pt3->regs[0] + REG_I2C_R);
if (!(v & STAT_SEQ_RUNNING))
break;
usleep_range(500, 750);
}
if (i >= max_wait)
return -EIO;
if (result)
*result = v;
return 0;
}
/* send [pre-]translated i2c msgs stored at addr */
static int send_i2c_cmd(struct pt3_board *pt3, u32 addr)
{
u32 ret;
/* make sure that previous transactions had finished */
if (wait_i2c_result(pt3, NULL, 50)) {
dev_warn(&pt3->pdev->dev, "(%s) prev. transaction stalled\n",
__func__);
return -EIO;
}
iowrite32(PT3_I2C_RUN | addr, pt3->regs[0] + REG_I2C_W);
usleep_range(200, 300);
/* wait for the current transaction to finish */
if (wait_i2c_result(pt3, &ret, 500) || (ret & STAT_SEQ_ERROR)) {
dev_warn(&pt3->pdev->dev, "(%s) failed.\n", __func__);
return -EIO;
}
return 0;
}
/* init commands for each demod are combined into one transaction
 * and hidden in ROM at the address PT3_CMD_ADDR_INIT_DEMOD.
 */
int pt3_init_all_demods(struct pt3_board *pt3)
{
ioread32(pt3->regs[0] + REG_I2C_R);
return send_i2c_cmd(pt3, PT3_CMD_ADDR_INIT_DEMOD);
}
/* init commands for two ISDB-T tuners are hidden in ROM. */
int pt3_init_all_mxl301rf(struct pt3_board *pt3)
{
usleep_range(1000, 2000);
return send_i2c_cmd(pt3, PT3_CMD_ADDR_INIT_TUNER);
}
void pt3_i2c_reset(struct pt3_board *pt3)
{
iowrite32(PT3_I2C_RESET, pt3->regs[0] + REG_I2C_W);
}
/*
* I2C algorithm
*/
int
pt3_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
struct pt3_board *pt3;
struct pt3_i2cbuf *cbuf;
int i;
void __iomem *p;
pt3 = i2c_get_adapdata(adap);
cbuf = pt3->i2c_buf;
for (i = 0; i < num; i++)
if (msgs[i].flags & I2C_M_RECV_LEN) {
dev_warn(&pt3->pdev->dev,
"(%s) I2C_M_RECV_LEN not supported.\n",
__func__);
return -EINVAL;
}
translate(cbuf, msgs, num);
memcpy_toio(pt3->regs[1] + PT3_I2C_BASE + PT3_CMD_ADDR_NORMAL / 2,
cbuf->data, cbuf->num_cmds);
if (send_i2c_cmd(pt3, PT3_CMD_ADDR_NORMAL) < 0)
return -EIO;
p = pt3->regs[1] + PT3_I2C_BASE;
for (i = 0; i < num; i++)
if ((msgs[i].flags & I2C_M_RD) && msgs[i].len > 0) {
memcpy_fromio(msgs[i].buf, p, msgs[i].len);
p += msgs[i].len;
}
return num;
}
u32 pt3_i2c_functionality(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C;
}
| linux-master | drivers/media/pci/pt3/pt3_i2c.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Earthsoft PT3 driver
*
* Copyright (C) 2014 Akihiro Tsukada <[email protected]>
*/
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include "pt3.h"
#define PT3_ACCESS_UNIT (TS_PACKET_SZ * 128)
#define PT3_BUF_CANARY (0x74)
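/*
* The DMA register blocks (0x18 bytes each) for adapter indices 1 and
* 2 are swapped in the hardware layout; get_dma_base() maps an adapter
* index to the offset of its register block.
*/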
static u32 get_dma_base(int idx)
{
int i;
i = (idx == 1 || idx == 2) ? 3 - idx : idx;
return REG_DMA_BASE + 0x18 * i;
}
int pt3_stop_dma(struct pt3_adapter *adap)
{
struct pt3_board *pt3 = adap->dvb_adap.priv;
u32 base;
u32 stat;
int retry;
base = get_dma_base(adap->adap_idx);
stat = ioread32(pt3->regs[0] + base + OFST_STATUS);
if (!(stat & 0x01))
return 0;
iowrite32(0x02, pt3->regs[0] + base + OFST_DMA_CTL);
for (retry = 0; retry < 5; retry++) {
stat = ioread32(pt3->regs[0] + base + OFST_STATUS);
if (!(stat & 0x01))
return 0;
msleep(50);
}
return -EIO;
}
int pt3_start_dma(struct pt3_adapter *adap)
{
struct pt3_board *pt3 = adap->dvb_adap.priv;
u32 base = get_dma_base(adap->adap_idx);
iowrite32(0x02, pt3->regs[0] + base + OFST_DMA_CTL);
iowrite32(lower_32_bits(adap->desc_buf[0].b_addr),
pt3->regs[0] + base + OFST_DMA_DESC_L);
iowrite32(upper_32_bits(adap->desc_buf[0].b_addr),
pt3->regs[0] + base + OFST_DMA_DESC_H);
iowrite32(0x01, pt3->regs[0] + base + OFST_DMA_CTL);
return 0;
}
static u8 *next_unit(struct pt3_adapter *adap, int *idx, int *ofs)
{
*ofs += PT3_ACCESS_UNIT;
if (*ofs >= DATA_BUF_SZ) {
*ofs -= DATA_BUF_SZ;
(*idx)++;
if (*idx == adap->num_bufs)
*idx = 0;
}
return &adap->buffer[*idx].data[*ofs];
}
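/*
* pt3_proc_dma: consume data written by the DMA engine. Any
* PT3_ACCESS_UNIT-sized chunk whose first byte no longer holds the
* canary value has been filled by hardware; its TS packets are fed to
* the software demux (or dropped while num_discard > 0), and the
* canary is restored so the chunk can be reused.
*/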
int pt3_proc_dma(struct pt3_adapter *adap)
{
int idx, ofs;
idx = adap->buf_idx;
ofs = adap->buf_ofs;
if (adap->buffer[idx].data[ofs] == PT3_BUF_CANARY)
return 0;
while (*next_unit(adap, &idx, &ofs) != PT3_BUF_CANARY) {
u8 *p;
p = &adap->buffer[adap->buf_idx].data[adap->buf_ofs];
if (adap->num_discard > 0)
adap->num_discard--;
else if (adap->buf_ofs + PT3_ACCESS_UNIT > DATA_BUF_SZ) {
dvb_dmx_swfilter_packets(&adap->demux, p,
(DATA_BUF_SZ - adap->buf_ofs) / TS_PACKET_SZ);
dvb_dmx_swfilter_packets(&adap->demux,
adap->buffer[idx].data, ofs / TS_PACKET_SZ);
} else
dvb_dmx_swfilter_packets(&adap->demux, p,
PT3_ACCESS_UNIT / TS_PACKET_SZ);
*p = PT3_BUF_CANARY;
adap->buf_idx = idx;
adap->buf_ofs = ofs;
}
return 0;
}
void pt3_init_dmabuf(struct pt3_adapter *adap)
{
int idx, ofs;
u8 *p;
idx = 0;
ofs = 0;
p = adap->buffer[0].data;
/* mark all buffers as "not written yet" */
while (idx < adap->num_bufs) {
p[ofs] = PT3_BUF_CANARY;
ofs += PT3_ACCESS_UNIT;
if (ofs >= DATA_BUF_SZ) {
ofs -= DATA_BUF_SZ;
idx++;
p = adap->buffer[idx].data;
}
}
adap->buf_idx = 0;
adap->buf_ofs = 0;
}
void pt3_free_dmabuf(struct pt3_adapter *adap)
{
struct pt3_board *pt3;
int i;
pt3 = adap->dvb_adap.priv;
for (i = 0; i < adap->num_bufs; i++)
dma_free_coherent(&pt3->pdev->dev, DATA_BUF_SZ,
adap->buffer[i].data, adap->buffer[i].b_addr);
adap->num_bufs = 0;
for (i = 0; i < adap->num_desc_bufs; i++)
dma_free_coherent(&pt3->pdev->dev, PAGE_SIZE,
adap->desc_buf[i].descs, adap->desc_buf[i].b_addr);
adap->num_desc_bufs = 0;
}
int pt3_alloc_dmabuf(struct pt3_adapter *adap)
{
struct pt3_board *pt3;
void *p;
int i, j;
int idx, ofs;
int num_desc_bufs;
dma_addr_t data_addr, desc_addr;
struct xfer_desc *d;
pt3 = adap->dvb_adap.priv;
adap->num_bufs = 0;
adap->num_desc_bufs = 0;
for (i = 0; i < pt3->num_bufs; i++) {
p = dma_alloc_coherent(&pt3->pdev->dev, DATA_BUF_SZ,
&adap->buffer[i].b_addr, GFP_KERNEL);
if (p == NULL)
goto failed;
adap->buffer[i].data = p;
adap->num_bufs++;
}
pt3_init_dmabuf(adap);
/* build a circularly-linked chain of descriptors (xfer_desc) pointing to the data buffers */
idx = 0;
ofs = 0;
num_desc_bufs =
DIV_ROUND_UP(adap->num_bufs * DATA_BUF_XFERS, DESCS_IN_PAGE);
for (i = 0; i < num_desc_bufs; i++) {
p = dma_alloc_coherent(&pt3->pdev->dev, PAGE_SIZE,
&desc_addr, GFP_KERNEL);
if (p == NULL)
goto failed;
adap->num_desc_bufs++;
adap->desc_buf[i].descs = p;
adap->desc_buf[i].b_addr = desc_addr;
if (i > 0) {
d = &adap->desc_buf[i - 1].descs[DESCS_IN_PAGE - 1];
d->next_l = lower_32_bits(desc_addr);
d->next_h = upper_32_bits(desc_addr);
}
for (j = 0; j < DESCS_IN_PAGE; j++) {
data_addr = adap->buffer[idx].b_addr + ofs;
d = &adap->desc_buf[i].descs[j];
d->addr_l = lower_32_bits(data_addr);
d->addr_h = upper_32_bits(data_addr);
d->size = DATA_XFER_SZ;
desc_addr += sizeof(struct xfer_desc);
d->next_l = lower_32_bits(desc_addr);
d->next_h = upper_32_bits(desc_addr);
ofs += DATA_XFER_SZ;
if (ofs >= DATA_BUF_SZ) {
ofs -= DATA_BUF_SZ;
idx++;
if (idx >= adap->num_bufs) {
desc_addr = adap->desc_buf[0].b_addr;
d->next_l = lower_32_bits(desc_addr);
d->next_h = upper_32_bits(desc_addr);
return 0;
}
}
}
}
return 0;
failed:
pt3_free_dmabuf(adap);
return -ENOMEM;
}
| linux-master | drivers/media/pci/pt3/pt3_dma.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
Mantis PCI bridge driver
Copyright (C) Manu Abraham ([email protected])
*/
#include <asm/io.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/i2c.h>
#include <media/dmxdev.h>
#include <media/dvbdev.h>
#include <media/dvb_demux.h>
#include <media/dvb_frontend.h>
#include <media/dvb_net.h>
#include "mantis_common.h"
#include "mantis_reg.h"
#include "mantis_i2c.h"
#define TRIALS 10000
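/*
* The MANTIS_I2CDATA_CTL register combines address and data in one
* word: bits 31:25 hold the 7-bit slave address, bit 24 the read flag,
* and bits 15:8 the data byte (bits 23:16 carry the register
* sub-address in the byte-mode fast path). Completion and ack are
* polled via MANTIS_INT_I2CDONE and MANTIS_INT_I2CRACK.
*/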
static int mantis_i2c_read(struct mantis_pci *mantis, const struct i2c_msg *msg)
{
u32 rxd, i, stat, trials;
dprintk(MANTIS_INFO, 0, " %s: Address=[0x%02x] <R>[ ",
__func__, msg->addr);
for (i = 0; i < msg->len; i++) {
rxd = (msg->addr << 25) | (1 << 24)
| MANTIS_I2C_RATE_3
| MANTIS_I2C_STOP
| MANTIS_I2C_PGMODE;
if (i == (msg->len - 1))
rxd &= ~MANTIS_I2C_STOP;
mmwrite(MANTIS_INT_I2CDONE, MANTIS_INT_STAT);
mmwrite(rxd, MANTIS_I2CDATA_CTL);
/* wait for xfer completion */
for (trials = 0; trials < TRIALS; trials++) {
stat = mmread(MANTIS_INT_STAT);
if (stat & MANTIS_INT_I2CDONE)
break;
}
dprintk(MANTIS_TMG, 0, "I2CDONE: trials=%d\n", trials);
/* wait for xfer completion */
for (trials = 0; trials < TRIALS; trials++) {
stat = mmread(MANTIS_INT_STAT);
if (stat & MANTIS_INT_I2CRACK)
break;
}
dprintk(MANTIS_TMG, 0, "I2CRACK: trials=%d\n", trials);
rxd = mmread(MANTIS_I2CDATA_CTL);
msg->buf[i] = (u8)((rxd >> 8) & 0xFF);
dprintk(MANTIS_INFO, 0, "%02x ", msg->buf[i]);
}
dprintk(MANTIS_INFO, 0, "]\n");
return 0;
}
static int mantis_i2c_write(struct mantis_pci *mantis, const struct i2c_msg *msg)
{
int i;
u32 txd = 0, stat, trials;
dprintk(MANTIS_INFO, 0, " %s: Address=[0x%02x] <W>[ ",
__func__, msg->addr);
for (i = 0; i < msg->len; i++) {
dprintk(MANTIS_INFO, 0, "%02x ", msg->buf[i]);
txd = (msg->addr << 25) | (msg->buf[i] << 8)
| MANTIS_I2C_RATE_3
| MANTIS_I2C_STOP
| MANTIS_I2C_PGMODE;
if (i == (msg->len - 1))
txd &= ~MANTIS_I2C_STOP;
mmwrite(MANTIS_INT_I2CDONE, MANTIS_INT_STAT);
mmwrite(txd, MANTIS_I2CDATA_CTL);
/* wait for xfer completion */
for (trials = 0; trials < TRIALS; trials++) {
stat = mmread(MANTIS_INT_STAT);
if (stat & MANTIS_INT_I2CDONE)
break;
}
dprintk(MANTIS_TMG, 0, "I2CDONE: trials=%d\n", trials);
/* wait for xfer completion */
for (trials = 0; trials < TRIALS; trials++) {
stat = mmread(MANTIS_INT_STAT);
if (stat & MANTIS_INT_I2CRACK)
break;
}
dprintk(MANTIS_TMG, 0, "I2CRACK: trials=%d\n", trials);
}
dprintk(MANTIS_INFO, 0, "]\n");
return 0;
}
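/*
* mantis_i2c_xfer: on boards flagged with MANTIS_BYTE_MODE, a
* single-byte write immediately followed by a single-byte read (the
* usual register-read pattern) is folded into one hardware
* transaction; all other messages fall back to mantis_i2c_read() /
* mantis_i2c_write().
*/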
static int mantis_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
{
int ret = 0, i = 0, trials;
u32 stat, data, txd;
struct mantis_pci *mantis;
struct mantis_hwconfig *config;
mantis = i2c_get_adapdata(adapter);
BUG_ON(!mantis);
config = mantis->hwconfig;
BUG_ON(!config);
dprintk(MANTIS_DEBUG, 1, "Messages:%d", num);
mutex_lock(&mantis->i2c_lock);
while (i < num) {
/* Byte MODE */
if ((config->i2c_mode & MANTIS_BYTE_MODE) &&
((i + 1) < num) &&
(msgs[i].len < 2) &&
(msgs[i + 1].len < 2) &&
(msgs[i + 1].flags & I2C_M_RD)) {
dprintk(MANTIS_DEBUG, 0, " Byte MODE:\n");
/* Read operation */
txd = msgs[i].addr << 25 | (0x1 << 24)
| (msgs[i].buf[0] << 16)
| MANTIS_I2C_RATE_3;
mmwrite(txd, MANTIS_I2CDATA_CTL);
/* wait for xfer completion */
for (trials = 0; trials < TRIALS; trials++) {
stat = mmread(MANTIS_INT_STAT);
if (stat & MANTIS_INT_I2CDONE)
break;
}
/* check for xfer completion */
if (stat & MANTIS_INT_I2CDONE) {
/* check xfer was acknowledged */
if (stat & MANTIS_INT_I2CRACK) {
data = mmread(MANTIS_I2CDATA_CTL);
msgs[i + 1].buf[0] = (data >> 8) & 0xff;
dprintk(MANTIS_DEBUG, 0, " Byte <%d> RXD=0x%02x [%02x]\n", 0x0, data, msgs[i + 1].buf[0]);
} else {
/* I/O error */
dprintk(MANTIS_ERROR, 1, " I/O error, LINE:%d", __LINE__);
ret = -EIO;
break;
}
} else {
/* I/O error */
dprintk(MANTIS_ERROR, 1, " I/O error, LINE:%d", __LINE__);
ret = -EIO;
break;
}
i += 2; /* Write/Read operation in one go */
}
if (i < num) {
if (msgs[i].flags & I2C_M_RD)
ret = mantis_i2c_read(mantis, &msgs[i]);
else
ret = mantis_i2c_write(mantis, &msgs[i]);
i++;
if (ret < 0)
goto bail_out;
}
}
mutex_unlock(&mantis->i2c_lock);
return num;
bail_out:
mutex_unlock(&mantis->i2c_lock);
return ret;
}
static u32 mantis_i2c_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm mantis_algo = {
.master_xfer = mantis_i2c_xfer,
.functionality = mantis_i2c_func,
};
int mantis_i2c_init(struct mantis_pci *mantis)
{
u32 intstat;
struct i2c_adapter *i2c_adapter = &mantis->adapter;
struct pci_dev *pdev = mantis->pdev;
init_waitqueue_head(&mantis->i2c_wq);
mutex_init(&mantis->i2c_lock);
strscpy(i2c_adapter->name, "Mantis I2C", sizeof(i2c_adapter->name));
i2c_set_adapdata(i2c_adapter, mantis);
i2c_adapter->owner = THIS_MODULE;
i2c_adapter->algo = &mantis_algo;
i2c_adapter->algo_data = NULL;
i2c_adapter->timeout = 500;
i2c_adapter->retries = 3;
i2c_adapter->dev.parent = &pdev->dev;
mantis->i2c_rc = i2c_add_adapter(i2c_adapter);
if (mantis->i2c_rc < 0)
return mantis->i2c_rc;
dprintk(MANTIS_DEBUG, 1, "Initializing I2C ..");
intstat = mmread(MANTIS_INT_STAT);
mmread(MANTIS_INT_MASK);
mmwrite(intstat, MANTIS_INT_STAT);
dprintk(MANTIS_DEBUG, 1, "Disabling I2C interrupt");
mantis_mask_ints(mantis, MANTIS_INT_I2CDONE);
return 0;
}
EXPORT_SYMBOL_GPL(mantis_i2c_init);
int mantis_i2c_exit(struct mantis_pci *mantis)
{
dprintk(MANTIS_DEBUG, 1, "Disabling I2C interrupt");
mantis_mask_ints(mantis, MANTIS_INT_I2CDONE);
dprintk(MANTIS_DEBUG, 1, "Removing I2C adapter");
i2c_del_adapter(&mantis->adapter);
return 0;
}
EXPORT_SYMBOL_GPL(mantis_i2c_exit);
| linux-master | drivers/media/pci/mantis/mantis_i2c.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
Mantis PCI bridge driver
Copyright (C) Manu Abraham ([email protected])
*/
#include <media/rc-core.h>
#include <linux/pci.h>
#include <media/dmxdev.h>
#include <media/dvbdev.h>
#include <media/dvb_demux.h>
#include <media/dvb_frontend.h>
#include <media/dvb_net.h>
#include "mantis_common.h"
#include "mantis_input.h"
#define MODULE_NAME "mantis_core"
void mantis_input_process(struct mantis_pci *mantis, int scancode)
{
if (mantis->rc)
rc_keydown(mantis->rc, RC_PROTO_UNKNOWN, scancode, 0);
}
int mantis_input_init(struct mantis_pci *mantis)
{
struct rc_dev *dev;
int err;
dev = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!dev) {
dprintk(MANTIS_ERROR, 1, "Remote device allocation failed");
err = -ENOMEM;
goto out;
}
snprintf(mantis->device_name, sizeof(mantis->device_name),
"Mantis %s IR receiver", mantis->hwconfig->model_name);
snprintf(mantis->input_phys, sizeof(mantis->input_phys),
"pci-%s/ir0", pci_name(mantis->pdev));
dev->device_name = mantis->device_name;
dev->input_phys = mantis->input_phys;
dev->input_id.bustype = BUS_PCI;
dev->input_id.vendor = mantis->vendor_id;
dev->input_id.product = mantis->device_id;
dev->input_id.version = 1;
dev->driver_name = MODULE_NAME;
dev->map_name = mantis->rc_map_name ? : RC_MAP_EMPTY;
dev->dev.parent = &mantis->pdev->dev;
err = rc_register_device(dev);
if (err) {
dprintk(MANTIS_ERROR, 1, "IR device registration failed, ret = %d", err);
goto out_dev;
}
mantis->rc = dev;
return 0;
out_dev:
rc_free_device(dev);
out:
return err;
}
EXPORT_SYMBOL_GPL(mantis_input_init);
void mantis_input_exit(struct mantis_pci *mantis)
{
rc_unregister_device(mantis->rc);
}
EXPORT_SYMBOL_GPL(mantis_input_exit);
| linux-master | drivers/media/pci/mantis/mantis_input.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
Mantis PCI bridge driver
Copyright (C) Manu Abraham ([email protected])
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/page.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <asm/irq.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <media/dmxdev.h>
#include <media/dvbdev.h>
#include <media/dvb_demux.h>
#include <media/dvb_frontend.h>
#include <media/dvb_net.h>
#include "mantis_common.h"
#include "mantis_reg.h"
#include "mantis_pci.h"
#define DRIVER_NAME "Mantis Core"
int mantis_pci_init(struct mantis_pci *mantis)
{
u8 latency;
struct mantis_hwconfig *config = mantis->hwconfig;
struct pci_dev *pdev = mantis->pdev;
int err, ret = 0;
dprintk(MANTIS_ERROR, 0, "found a %s PCI %s device on (%02x:%02x.%x),\n",
config->model_name,
config->dev_type,
mantis->pdev->bus->number,
PCI_SLOT(mantis->pdev->devfn),
PCI_FUNC(mantis->pdev->devfn));
err = pci_enable_device(pdev);
if (err != 0) {
ret = -ENODEV;
dprintk(MANTIS_ERROR, 1, "ERROR: PCI enable failed <%i>", err);
goto fail0;
}
err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err != 0) {
dprintk(MANTIS_ERROR, 1, "ERROR: Unable to obtain 32 bit DMA <%i>", err);
ret = -ENOMEM;
goto fail1;
}
pci_set_master(pdev);
if (!request_mem_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0),
DRIVER_NAME)) {
dprintk(MANTIS_ERROR, 1, "ERROR: BAR0 Request failed !");
ret = -ENODEV;
goto fail1;
}
mantis->mmio = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (!mantis->mmio) {
dprintk(MANTIS_ERROR, 1, "ERROR: BAR0 remap failed !");
ret = -ENODEV;
goto fail2;
}
pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency);
mantis->latency = latency;
mantis->revision = pdev->revision;
dprintk(MANTIS_ERROR, 0, " Mantis Rev %d [%04x:%04x], ",
mantis->revision,
mantis->pdev->subsystem_vendor,
mantis->pdev->subsystem_device);
dprintk(MANTIS_ERROR, 0,
"irq: %d, latency: %d\n memory: 0x%lx, mmio: 0x%p\n",
mantis->pdev->irq,
mantis->latency,
mantis->mantis_addr,
mantis->mmio);
err = request_irq(pdev->irq,
config->irq_handler,
IRQF_SHARED,
DRIVER_NAME,
mantis);
if (err != 0) {
dprintk(MANTIS_ERROR, 1, "ERROR: IRQ registration failed ! <%d>", err);
ret = -ENODEV;
goto fail3;
}
pci_set_drvdata(pdev, mantis);
return ret;
/* Error conditions */
fail3:
dprintk(MANTIS_ERROR, 1, "ERROR: <%d> I/O unmap", ret);
if (mantis->mmio)
iounmap(mantis->mmio);
fail2:
dprintk(MANTIS_ERROR, 1, "ERROR: <%d> releasing regions", ret);
release_mem_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
fail1:
dprintk(MANTIS_ERROR, 1, "ERROR: <%d> disabling device", ret);
pci_disable_device(pdev);
fail0:
dprintk(MANTIS_ERROR, 1, "ERROR: <%d> exiting", ret);
return ret;
}
EXPORT_SYMBOL_GPL(mantis_pci_init);
void mantis_pci_exit(struct mantis_pci *mantis)
{
struct pci_dev *pdev = mantis->pdev;
dprintk(MANTIS_NOTICE, 1, " mem: 0x%p", mantis->mmio);
free_irq(pdev->irq, mantis);
if (mantis->mmio) {
iounmap(mantis->mmio);
release_mem_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
}
pci_disable_device(pdev);
}
EXPORT_SYMBOL_GPL(mantis_pci_exit);
MODULE_DESCRIPTION("Mantis PCI DTV bridge driver");
MODULE_AUTHOR("Manu Abraham");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/pci/mantis/mantis_pci.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
Hopper PCI bridge driver
Copyright (C) Manu Abraham ([email protected])
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/irq.h>
#include <linux/interrupt.h>
#include <media/dmxdev.h>
#include <media/dvbdev.h>
#include <media/dvb_demux.h>
#include <media/dvb_frontend.h>
#include <media/dvb_net.h>
#include "mantis_common.h"
#include "hopper_vp3028.h"
#include "mantis_dma.h"
#include "mantis_dvb.h"
#include "mantis_uart.h"
#include "mantis_ioc.h"
#include "mantis_pci.h"
#include "mantis_i2c.h"
#include "mantis_reg.h"
static unsigned int verbose;
module_param(verbose, int, 0644);
MODULE_PARM_DESC(verbose, "verbose startup messages, default is 0 (no)");
#define DRIVER_NAME "Hopper"
static char *label[10] = {
"DMA",
"IRQ-0",
"IRQ-1",
"OCERR",
"PABRT",
"RIPRR",
"PPERR",
"FTRGT",
"RISCI",
"RACK"
};
static int devs;
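/*
* Shared interrupt handler for the Hopper bridge: reads and
* acknowledges MANTIS_INT_STAT and the GPIF status, wakes the CA
* module's HIF write waitqueue and the I2C waitqueue, and schedules
* the UART work and the DMA tasklet as indicated by the status bits.
*/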
static irqreturn_t hopper_irq_handler(int irq, void *dev_id)
{
u32 stat = 0, mask = 0;
u32 rst_stat = 0, rst_mask = 0;
struct mantis_pci *mantis;
struct mantis_ca *ca;
mantis = (struct mantis_pci *) dev_id;
if (unlikely(!mantis))
return IRQ_NONE;
ca = mantis->mantis_ca;
stat = mmread(MANTIS_INT_STAT);
mask = mmread(MANTIS_INT_MASK);
if (!(stat & mask))
return IRQ_NONE;
rst_mask = MANTIS_GPIF_WRACK |
MANTIS_GPIF_OTHERR |
MANTIS_SBUF_WSTO |
MANTIS_GPIF_EXTIRQ;
rst_stat = mmread(MANTIS_GPIF_STATUS);
rst_stat &= rst_mask;
mmwrite(rst_stat, MANTIS_GPIF_STATUS);
mantis->mantis_int_stat = stat;
mantis->mantis_int_mask = mask;
dprintk(MANTIS_DEBUG, 0, "\n-- Stat=<%02x> Mask=<%02x> --", stat, mask);
if (stat & MANTIS_INT_RISCEN) {
dprintk(MANTIS_DEBUG, 0, "<%s>", label[0]);
}
if (stat & MANTIS_INT_IRQ0) {
dprintk(MANTIS_DEBUG, 0, "<%s>", label[1]);
mantis->gpif_status = rst_stat;
wake_up(&ca->hif_write_wq);
schedule_work(&ca->hif_evm_work);
}
if (stat & MANTIS_INT_IRQ1) {
dprintk(MANTIS_DEBUG, 0, "<%s>", label[2]);
spin_lock(&mantis->intmask_lock);
mmwrite(mmread(MANTIS_INT_MASK) & ~MANTIS_INT_IRQ1,
MANTIS_INT_MASK);
spin_unlock(&mantis->intmask_lock);
schedule_work(&mantis->uart_work);
}
if (stat & MANTIS_INT_OCERR) {
dprintk(MANTIS_DEBUG, 0, "<%s>", label[3]);
}
if (stat & MANTIS_INT_PABORT) {
dprintk(MANTIS_DEBUG, 0, "<%s>", label[4]);
}
if (stat & MANTIS_INT_RIPERR) {
dprintk(MANTIS_DEBUG, 0, "<%s>", label[5]);
}
if (stat & MANTIS_INT_PPERR) {
dprintk(MANTIS_DEBUG, 0, "<%s>", label[6]);
}
if (stat & MANTIS_INT_FTRGT) {
dprintk(MANTIS_DEBUG, 0, "<%s>", label[7]);
}
if (stat & MANTIS_INT_RISCI) {
dprintk(MANTIS_DEBUG, 0, "<%s>", label[8]);
mantis->busy_block = (stat & MANTIS_INT_RISCSTAT) >> 28;
tasklet_schedule(&mantis->tasklet);
}
if (stat & MANTIS_INT_I2CDONE) {
dprintk(MANTIS_DEBUG, 0, "<%s>", label[9]);
wake_up(&mantis->i2c_wq);
}
mmwrite(stat, MANTIS_INT_STAT);
stat &= ~(MANTIS_INT_RISCEN | MANTIS_INT_I2CDONE |
MANTIS_INT_I2CRACK | MANTIS_INT_PCMCIA7 |
MANTIS_INT_PCMCIA6 | MANTIS_INT_PCMCIA5 |
MANTIS_INT_PCMCIA4 | MANTIS_INT_PCMCIA3 |
MANTIS_INT_PCMCIA2 | MANTIS_INT_PCMCIA1 |
MANTIS_INT_PCMCIA0 | MANTIS_INT_IRQ1 |
MANTIS_INT_IRQ0 | MANTIS_INT_OCERR |
MANTIS_INT_PABORT | MANTIS_INT_RIPERR |
MANTIS_INT_PPERR | MANTIS_INT_FTRGT |
MANTIS_INT_RISCI);
if (stat)
dprintk(MANTIS_DEBUG, 0, "<Unknown> Stat=<%02x> Mask=<%02x>", stat, mask);
dprintk(MANTIS_DEBUG, 0, "\n");
return IRQ_HANDLED;
}
static int hopper_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
struct mantis_pci_drvdata *drvdata;
struct mantis_pci *mantis;
struct mantis_hwconfig *config;
int err;
mantis = kzalloc(sizeof(*mantis), GFP_KERNEL);
if (!mantis) {
err = -ENOMEM;
goto fail0;
}
drvdata = (void *)pci_id->driver_data;
mantis->num = devs;
mantis->verbose = verbose;
mantis->pdev = pdev;
config = drvdata->hwconfig;
config->irq_handler = &hopper_irq_handler;
mantis->hwconfig = config;
mantis->rc_map_name = drvdata->rc_map_name;
spin_lock_init(&mantis->intmask_lock);
err = mantis_pci_init(mantis);
if (err) {
dprintk(MANTIS_ERROR, 1, "ERROR: Mantis PCI initialization failed <%d>", err);
goto fail1;
}
err = mantis_stream_control(mantis, STREAM_TO_HIF);
if (err < 0) {
dprintk(MANTIS_ERROR, 1, "ERROR: Mantis stream control failed <%d>", err);
goto fail1;
}
err = mantis_i2c_init(mantis);
if (err < 0) {
dprintk(MANTIS_ERROR, 1, "ERROR: Mantis I2C initialization failed <%d>", err);
goto fail2;
}
err = mantis_get_mac(mantis);
if (err < 0) {
dprintk(MANTIS_ERROR, 1, "ERROR: Mantis MAC address read failed <%d>", err);
goto fail2;
}
err = mantis_dma_init(mantis);
if (err < 0) {
dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DMA initialization failed <%d>", err);
goto fail3;
}
err = mantis_dvb_init(mantis);
if (err < 0) {
dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DVB initialization failed <%d>", err);
goto fail4;
}
devs++;
return err;
fail4:
dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DMA exit! <%d>", err);
mantis_dma_exit(mantis);
fail3:
dprintk(MANTIS_ERROR, 1, "ERROR: Mantis I2C exit! <%d>", err);
mantis_i2c_exit(mantis);
fail2:
dprintk(MANTIS_ERROR, 1, "ERROR: Mantis PCI exit! <%d>", err);
mantis_pci_exit(mantis);
fail1:
dprintk(MANTIS_ERROR, 1, "ERROR: Mantis free! <%d>", err);
kfree(mantis);
fail0:
return err;
}
static void hopper_pci_remove(struct pci_dev *pdev)
{
struct mantis_pci *mantis = pci_get_drvdata(pdev);
if (mantis) {
mantis_dvb_exit(mantis);
mantis_dma_exit(mantis);
mantis_i2c_exit(mantis);
mantis_pci_exit(mantis);
kfree(mantis);
}
return;
}
static const struct pci_device_id hopper_pci_table[] = {
MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_3028_DVB_T, &vp3028_config,
NULL),
{ }
};
MODULE_DEVICE_TABLE(pci, hopper_pci_table);
static struct pci_driver hopper_pci_driver = {
.name = DRIVER_NAME,
.id_table = hopper_pci_table,
.probe = hopper_pci_probe,
.remove = hopper_pci_remove,
};
module_pci_driver(hopper_pci_driver);
MODULE_DESCRIPTION("HOPPER driver");
MODULE_AUTHOR("Manu Abraham");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/pci/mantis/hopper_cards.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
Mantis VP-1033 driver
Copyright (C) Manu Abraham ([email protected])
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <media/dmxdev.h>
#include <media/dvbdev.h>
#include <media/dvb_demux.h>
#include <media/dvb_frontend.h>
#include <media/dvb_net.h>
#include "stv0299.h"
#include "mantis_common.h"
#include "mantis_ioc.h"
#include "mantis_dvb.h"
#include "mantis_vp1033.h"
#include "mantis_reg.h"
static u8 lgtdqcs001f_inittab[] = {
0x01, 0x15,
0x02, 0x30,
0x03, 0x00,
0x04, 0x2a,
0x05, 0x85,
0x06, 0x02,
0x07, 0x00,
0x08, 0x00,
0x0c, 0x01,
0x0d, 0x81,
0x0e, 0x44,
0x0f, 0x94,
0x10, 0x3c,
0x11, 0x84,
0x12, 0xb9,
0x13, 0xb5,
0x14, 0x4f,
0x15, 0xc9,
0x16, 0x80,
0x17, 0x36,
0x18, 0xfb,
0x19, 0xcf,
0x1a, 0xbc,
0x1c, 0x2b,
0x1d, 0x27,
0x1e, 0x00,
0x1f, 0x0b,
0x20, 0xa1,
0x21, 0x60,
0x22, 0x00,
0x23, 0x00,
0x28, 0x00,
0x29, 0x28,
0x2a, 0x14,
0x2b, 0x0f,
0x2c, 0x09,
0x2d, 0x05,
0x31, 0x1f,
0x32, 0x19,
0x33, 0xfc,
0x34, 0x13,
0xff, 0xff,
};
#define MANTIS_MODEL_NAME "VP-1033"
#define MANTIS_DEV_TYPE "DVB-S/DSS"
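/*
* lgtdqcs001f_tuner_set: program the tuner PLL at I2C address 0x61.
* The divider is frequency / 250 (frequency in kHz, i.e. a 250 kHz
* step size); bit 2 of the last byte is set for frequencies below
* 1531 MHz (likely a band-select bit).
*/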
static int lgtdqcs001f_tuner_set(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct mantis_pci *mantis = fe->dvb->priv;
struct i2c_adapter *adapter = &mantis->adapter;
u8 buf[4];
u32 div;
struct i2c_msg msg = {.addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf)};
div = p->frequency / 250;
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
buf[2] = 0x83;
buf[3] = 0xc0;
if (p->frequency < 1531000)
buf[3] |= 0x04;
else
buf[3] &= ~0x04;
if (i2c_transfer(adapter, &msg, 1) < 0) {
dprintk(MANTIS_ERROR, 1, "Write: I2C Transfer failed");
return -EIO;
}
msleep_interruptible(100);
return 0;
}
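/*
* Pick the STV0299 ACLK/BCLK divider pair for the symbol-rate band and
* program the derived clock ratio into registers 0x1f-0x21 (the low
* nibble of the last byte is masked off).
*/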
static int lgtdqcs001f_set_symbol_rate(struct dvb_frontend *fe,
u32 srate, u32 ratio)
{
u8 aclk = 0;
u8 bclk = 0;
if (srate < 1500000) {
aclk = 0xb7;
bclk = 0x47;
} else if (srate < 3000000) {
aclk = 0xb7;
bclk = 0x4b;
} else if (srate < 7000000) {
aclk = 0xb7;
bclk = 0x4f;
} else if (srate < 14000000) {
aclk = 0xb7;
bclk = 0x53;
} else if (srate < 30000000) {
aclk = 0xb6;
bclk = 0x53;
} else if (srate < 45000000) {
aclk = 0xb4;
bclk = 0x51;
}
stv0299_writereg(fe, 0x13, aclk);
stv0299_writereg(fe, 0x14, bclk);
stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff);
stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff);
stv0299_writereg(fe, 0x21, ratio & 0xf0);
return 0;
}
static struct stv0299_config lgtdqcs001f_config = {
.demod_address = 0x68,
.inittab = lgtdqcs001f_inittab,
.mclk = 88000000UL,
.invert = 0,
.skip_reinit = 0,
.volt13_op0_op1 = STV0299_VOLT13_OP0,
.min_delay_ms = 100,
.set_symbol_rate = lgtdqcs001f_set_symbol_rate,
};
static int vp1033_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
{
struct i2c_adapter *adapter = &mantis->adapter;
int err = 0;
err = mantis_frontend_power(mantis, POWER_ON);
if (err == 0) {
mantis_frontend_soft_reset(mantis);
msleep(250);
dprintk(MANTIS_ERROR, 1, "Probing for STV0299 (DVB-S)");
fe = dvb_attach(stv0299_attach, &lgtdqcs001f_config, adapter);
if (fe) {
fe->ops.tuner_ops.set_params = lgtdqcs001f_tuner_set;
dprintk(MANTIS_ERROR, 1, "found STV0299 DVB-S frontend @ 0x%02x",
lgtdqcs001f_config.demod_address);
dprintk(MANTIS_ERROR, 1, "Mantis DVB-S STV0299 frontend attach success");
} else {
return -1;
}
} else {
dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
adapter->name,
err);
return -EIO;
}
mantis->fe = fe;
dprintk(MANTIS_ERROR, 1, "Done!");
return 0;
}
struct mantis_hwconfig vp1033_config = {
.model_name = MANTIS_MODEL_NAME,
.dev_type = MANTIS_DEV_TYPE,
.ts_size = MANTIS_TS_204,
.baud_rate = MANTIS_BAUD_9600,
.parity = MANTIS_PARITY_NONE,
.bytes = 0,
.frontend_init = vp1033_frontend_init,
.power = GPIF_A12,
.reset = GPIF_A13,
};
| linux-master | drivers/media/pci/mantis/mantis_vp1033.c |