// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/mite.c
* Hardware driver for NI Mite PCI interface chip
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2002 David A. Schleef <[email protected]>
*/
/*
* The PCI-MIO E series driver was originally written by
* Tomasz Motylewski <...>, and ported to comedi by ds.
*
* References for specifications:
*
* 321747b.pdf Register Level Programmer Manual (obsolete)
* 321747c.pdf Register Level Programmer Manual (new)
* DAQ-STC reference manual
*
* Other possibly relevant info:
*
* 320517c.pdf User manual (obsolete)
* 320517f.pdf User manual (new)
* 320889a.pdf delete
* 320906c.pdf maximum signal ratings
* 321066a.pdf about 16x
* 321791a.pdf discontinuation of at-mio-16e-10 rev. c
* 321808a.pdf about at-mio-16e-10 rev P
* 321837a.pdf discontinuation of at-mio-16de-10 rev d
* 321838a.pdf about at-mio-16de-10 rev N
*
* ISSUES:
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/comedi/comedi_pci.h>
#include "mite.h"
/*
* Mite registers
*/
#define MITE_UNKNOWN_DMA_BURST_REG 0x28
#define UNKNOWN_DMA_BURST_ENABLE_BITS 0x600
#define MITE_PCI_CONFIG_OFFSET 0x300
#define MITE_CSIGR 0x460 /* chip signature */
#define CSIGR_TO_IOWINS(x) (((x) >> 29) & 0x7)
#define CSIGR_TO_WINS(x) (((x) >> 24) & 0x1f)
#define CSIGR_TO_WPDEP(x) (((x) >> 20) & 0x7)
#define CSIGR_TO_DMAC(x) (((x) >> 16) & 0xf)
#define CSIGR_TO_IMODE(x) (((x) >> 12) & 0x3) /* pci=0x3 */
#define CSIGR_TO_MMODE(x) (((x) >> 8) & 0x3) /* minimite=1 */
#define CSIGR_TO_TYPE(x) (((x) >> 4) & 0xf) /* mite=0, minimite=1 */
#define CSIGR_TO_VER(x) (((x) >> 0) & 0xf)
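/*
 * Illustrative decode (hypothetical CSIGR value, not from real hardware):
 * for csigr_bits = 0x22543141 the macros above yield IOWINS = 1, WINS = 2,
 * WPDEP = 5 (i.e. a write port fifo depth of BIT(5) = 32), DMAC = 4 dma
 * channels, IMODE = 3 (pci), MMODE = 1 (minimite), TYPE = 4, VER = 1.
 */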
#define MITE_CHAN(x) (0x500 + 0x100 * (x))
#define MITE_CHOR(x) (0x00 + MITE_CHAN(x)) /* channel operation */
#define CHOR_DMARESET BIT(31)
#define CHOR_SET_SEND_TC BIT(11)
#define CHOR_CLR_SEND_TC BIT(10)
#define CHOR_SET_LPAUSE BIT(9)
#define CHOR_CLR_LPAUSE BIT(8)
#define CHOR_CLRDONE BIT(7)
#define CHOR_CLRRB BIT(6)
#define CHOR_CLRLC BIT(5)
#define CHOR_FRESET BIT(4)
#define CHOR_ABORT BIT(3) /* stop without emptying fifo */
#define CHOR_STOP BIT(2) /* stop after emptying fifo */
#define CHOR_CONT BIT(1)
#define CHOR_START BIT(0)
#define MITE_CHCR(x) (0x04 + MITE_CHAN(x)) /* channel control */
#define CHCR_SET_DMA_IE BIT(31)
#define CHCR_CLR_DMA_IE BIT(30)
#define CHCR_SET_LINKP_IE BIT(29)
#define CHCR_CLR_LINKP_IE BIT(28)
#define CHCR_SET_SAR_IE BIT(27)
#define CHCR_CLR_SAR_IE BIT(26)
#define CHCR_SET_DONE_IE BIT(25)
#define CHCR_CLR_DONE_IE BIT(24)
#define CHCR_SET_MRDY_IE BIT(23)
#define CHCR_CLR_MRDY_IE BIT(22)
#define CHCR_SET_DRDY_IE BIT(21)
#define CHCR_CLR_DRDY_IE BIT(20)
#define CHCR_SET_LC_IE BIT(19)
#define CHCR_CLR_LC_IE BIT(18)
#define CHCR_SET_CONT_RB_IE BIT(17)
#define CHCR_CLR_CONT_RB_IE BIT(16)
#define CHCR_FIFO(x) (((x) & 0x1) << 15)
#define CHCR_FIFODIS CHCR_FIFO(1)
#define CHCR_FIFO_ON CHCR_FIFO(0)
#define CHCR_BURST(x) (((x) & 0x1) << 14)
#define CHCR_BURSTEN CHCR_BURST(1)
#define CHCR_NO_BURSTEN CHCR_BURST(0)
#define CHCR_BYTE_SWAP_DEVICE BIT(6)
#define CHCR_BYTE_SWAP_MEMORY BIT(4)
#define CHCR_DIR(x) (((x) & 0x1) << 3)
#define CHCR_DEV_TO_MEM CHCR_DIR(1)
#define CHCR_MEM_TO_DEV CHCR_DIR(0)
#define CHCR_MODE(x) (((x) & 0x7) << 0)
#define CHCR_NORMAL CHCR_MODE(0)
#define CHCR_CONTINUE CHCR_MODE(1)
#define CHCR_RINGBUFF CHCR_MODE(2)
#define CHCR_LINKSHORT CHCR_MODE(4)
#define CHCR_LINKLONG CHCR_MODE(5)
#define MITE_TCR(x) (0x08 + MITE_CHAN(x)) /* transfer count */
#define MITE_MCR(x) (0x0c + MITE_CHAN(x)) /* memory config */
#define MITE_MAR(x) (0x10 + MITE_CHAN(x)) /* memory address */
#define MITE_DCR(x) (0x14 + MITE_CHAN(x)) /* device config */
#define DCR_NORMAL BIT(29)
#define MITE_DAR(x) (0x18 + MITE_CHAN(x)) /* device address */
#define MITE_LKCR(x) (0x1c + MITE_CHAN(x)) /* link config */
#define MITE_LKAR(x) (0x20 + MITE_CHAN(x)) /* link address */
#define MITE_LLKAR(x) (0x24 + MITE_CHAN(x)) /* see tnt5002 manual */
#define MITE_BAR(x) (0x28 + MITE_CHAN(x)) /* base address */
#define MITE_BCR(x) (0x2c + MITE_CHAN(x)) /* base count */
#define MITE_SAR(x) (0x30 + MITE_CHAN(x)) /* ? address */
#define MITE_WSCR(x) (0x34 + MITE_CHAN(x)) /* ? */
#define MITE_WSER(x) (0x38 + MITE_CHAN(x)) /* ? */
#define MITE_CHSR(x) (0x3c + MITE_CHAN(x)) /* channel status */
#define CHSR_INT BIT(31)
#define CHSR_LPAUSES BIT(29)
#define CHSR_SARS BIT(27)
#define CHSR_DONE BIT(25)
#define CHSR_MRDY BIT(23)
#define CHSR_DRDY BIT(21)
#define CHSR_LINKC BIT(19)
#define CHSR_CONTS_RB BIT(17)
#define CHSR_ERROR BIT(15)
#define CHSR_SABORT BIT(14)
#define CHSR_HABORT BIT(13)
#define CHSR_STOPS BIT(12)
#define CHSR_OPERR(x) (((x) & 0x3) << 10)
#define CHSR_OPERR_MASK CHSR_OPERR(3)
#define CHSR_OPERR_NOERROR CHSR_OPERR(0)
#define CHSR_OPERR_FIFOERROR CHSR_OPERR(1)
#define CHSR_OPERR_LINKERROR CHSR_OPERR(1) /* ??? */
#define CHSR_XFERR BIT(9)
#define CHSR_END BIT(8)
#define CHSR_DRQ1 BIT(7)
#define CHSR_DRQ0 BIT(6)
#define CHSR_LERR(x) (((x) & 0x3) << 4)
#define CHSR_LERR_MASK CHSR_LERR(3)
#define CHSR_LBERR CHSR_LERR(1)
#define CHSR_LRERR CHSR_LERR(2)
#define CHSR_LOERR CHSR_LERR(3)
#define CHSR_MERR(x) (((x) & 0x3) << 2)
#define CHSR_MERR_MASK CHSR_MERR(3)
#define CHSR_MBERR CHSR_MERR(1)
#define CHSR_MRERR CHSR_MERR(2)
#define CHSR_MOERR CHSR_MERR(3)
#define CHSR_DERR(x) (((x) & 0x3) << 0)
#define CHSR_DERR_MASK CHSR_DERR(3)
#define CHSR_DBERR CHSR_DERR(1)
#define CHSR_DRERR CHSR_DERR(2)
#define CHSR_DOERR CHSR_DERR(3)
#define MITE_FCR(x) (0x40 + MITE_CHAN(x)) /* fifo count */
/* common bits for the memory/device/link config registers */
#define CR_RL(x) (((x) & 0x7) << 21)
#define CR_REQS(x) (((x) & 0x7) << 16)
#define CR_REQS_MASK CR_REQS(7)
#define CR_ASEQ(x) (((x) & 0x3) << 10)
#define CR_ASEQDONT CR_ASEQ(0)
#define CR_ASEQUP CR_ASEQ(1)
#define CR_ASEQDOWN CR_ASEQ(2)
#define CR_ASEQ_MASK CR_ASEQ(3)
#define CR_PSIZE(x) (((x) & 0x3) << 8)
#define CR_PSIZE8 CR_PSIZE(1)
#define CR_PSIZE16 CR_PSIZE(2)
#define CR_PSIZE32 CR_PSIZE(3)
#define CR_PORT(x) (((x) & 0x3) << 6)
#define CR_PORTCPU CR_PORT(0)
#define CR_PORTIO CR_PORT(1)
#define CR_PORTVXI CR_PORT(2)
#define CR_PORTMXI CR_PORT(3)
#define CR_AMDEVICE BIT(0)
static unsigned int MITE_IODWBSR_1_WSIZE_bits(unsigned int size)
{
return (ilog2(size) - 1) & 0x1f;
}
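/*
 * Example (assumed values): for a 16 KiB BAR, size = 0x4000, so
 * ilog2(0x4000) = 14 and the function returns (14 - 1) & 0x1f = 0xd.
 */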
static unsigned int mite_retry_limit(unsigned int retry_limit)
{
unsigned int value = 0;
if (retry_limit)
value = 1 + ilog2(retry_limit);
if (value > 0x7)
value = 0x7;
return CR_RL(value);
}
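/*
 * Worked example: mite_retry_limit(64) computes value = 1 + ilog2(64) = 7,
 * which is already the maximum, so it returns CR_RL(7) (7 << 21). A
 * retry_limit of 0 returns CR_RL(0), i.e. no retry limit bits set.
 */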
static unsigned int mite_drq_reqs(unsigned int drq_line)
{
/* This also works on m-series when using channels (drq_line) 4 or 5. */
return CR_REQS((drq_line & 0x3) | 0x4);
}
static unsigned int mite_fifo_size(struct mite *mite, unsigned int channel)
{
unsigned int fcr_bits = readl(mite->mmio + MITE_FCR(channel));
unsigned int empty_count = (fcr_bits >> 16) & 0xff;
unsigned int full_count = fcr_bits & 0xff;
return empty_count + full_count;
}
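/*
 * Example: with the fifo idle, MITE_FCR might read 0x00400000, giving
 * empty_count = 0x40, full_count = 0, and a fifo size of 64.
 */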
static u32 mite_device_bytes_transferred(struct mite_channel *mite_chan)
{
struct mite *mite = mite_chan->mite;
return readl(mite->mmio + MITE_DAR(mite_chan->channel));
}
/**
* mite_bytes_in_transit() - Returns the number of unread bytes in the fifo.
* @mite_chan: MITE dma channel.
*/
u32 mite_bytes_in_transit(struct mite_channel *mite_chan)
{
struct mite *mite = mite_chan->mite;
return readl(mite->mmio + MITE_FCR(mite_chan->channel)) & 0xff;
}
EXPORT_SYMBOL_GPL(mite_bytes_in_transit);
/* returns lower bound for number of bytes transferred from device to memory */
static u32 mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan)
{
u32 device_byte_count;
device_byte_count = mite_device_bytes_transferred(mite_chan);
return device_byte_count - mite_bytes_in_transit(mite_chan);
}
/* returns upper bound for number of bytes transferred from device to memory */
static u32 mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan)
{
u32 in_transit_count;
in_transit_count = mite_bytes_in_transit(mite_chan);
return mite_device_bytes_transferred(mite_chan) - in_transit_count;
}
/* returns lower bound for number of bytes read from memory to device */
static u32 mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan)
{
u32 device_byte_count;
device_byte_count = mite_device_bytes_transferred(mite_chan);
return device_byte_count + mite_bytes_in_transit(mite_chan);
}
/* returns upper bound for number of bytes read from memory to device */
static u32 mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan)
{
u32 in_transit_count;
in_transit_count = mite_bytes_in_transit(mite_chan);
return mite_device_bytes_transferred(mite_chan) + in_transit_count;
}
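/*
 * Note on the lb/ub pairs above (our reading of the code): the DAR and FCR
 * registers are read non-atomically while dma is running, and the device
 * byte count in DAR only increases. Sampling DAR before the in-transit
 * count therefore underestimates the difference (a lower bound), while
 * sampling DAR after it overestimates (an upper bound).
 */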
static void mite_sync_input_dma(struct mite_channel *mite_chan,
struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
int count;
unsigned int nbytes, old_alloc_count;
old_alloc_count = async->buf_write_alloc_count;
/* write alloc as much as we can */
comedi_buf_write_alloc(s, async->prealloc_bufsz);
nbytes = mite_bytes_written_to_memory_lb(mite_chan);
if ((int)(mite_bytes_written_to_memory_ub(mite_chan) -
old_alloc_count) > 0) {
dev_warn(s->device->class_dev,
"mite: DMA overwrite of free area\n");
async->events |= COMEDI_CB_OVERFLOW;
return;
}
count = nbytes - async->buf_write_count;
/*
* it's possible count will be negative due to conservative value
* returned by mite_bytes_written_to_memory_lb
*/
if (count > 0) {
comedi_buf_write_free(s, count);
comedi_inc_scan_progress(s, count);
async->events |= COMEDI_CB_BLOCK;
}
}
static void mite_sync_output_dma(struct mite_channel *mite_chan,
struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
u32 stop_count = cmd->stop_arg * comedi_bytes_per_scan(s);
unsigned int old_alloc_count = async->buf_read_alloc_count;
u32 nbytes_ub, nbytes_lb;
int count;
bool finite_regen = (cmd->stop_src == TRIG_NONE && stop_count != 0);
/* read alloc as much as we can */
comedi_buf_read_alloc(s, async->prealloc_bufsz);
nbytes_lb = mite_bytes_read_from_memory_lb(mite_chan);
if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_lb - stop_count) > 0)
nbytes_lb = stop_count;
nbytes_ub = mite_bytes_read_from_memory_ub(mite_chan);
if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_ub - stop_count) > 0)
nbytes_ub = stop_count;
if ((!finite_regen || stop_count > old_alloc_count) &&
((int)(nbytes_ub - old_alloc_count) > 0)) {
dev_warn(s->device->class_dev, "mite: DMA underrun\n");
async->events |= COMEDI_CB_OVERFLOW;
return;
}
if (finite_regen) {
/*
* This is a special case where we continuously output a finite
* buffer. In this case, we do not free any of the memory,
* hence we expect that old_alloc_count will reach a maximum of
* stop_count bytes.
*/
return;
}
count = nbytes_lb - async->buf_read_count;
if (count > 0) {
comedi_buf_read_free(s, count);
async->events |= COMEDI_CB_BLOCK;
}
}
/**
* mite_sync_dma() - Sync the MITE dma with the COMEDI async buffer.
* @mite_chan: MITE dma channel.
* @s: COMEDI subdevice.
*/
void mite_sync_dma(struct mite_channel *mite_chan, struct comedi_subdevice *s)
{
if (mite_chan->dir == COMEDI_INPUT)
mite_sync_input_dma(mite_chan, s);
else
mite_sync_output_dma(mite_chan, s);
}
EXPORT_SYMBOL_GPL(mite_sync_dma);
static unsigned int mite_get_status(struct mite_channel *mite_chan)
{
struct mite *mite = mite_chan->mite;
unsigned int status;
unsigned long flags;
spin_lock_irqsave(&mite->lock, flags);
status = readl(mite->mmio + MITE_CHSR(mite_chan->channel));
if (status & CHSR_DONE) {
mite_chan->done = 1;
writel(CHOR_CLRDONE,
mite->mmio + MITE_CHOR(mite_chan->channel));
}
spin_unlock_irqrestore(&mite->lock, flags);
return status;
}
/**
* mite_ack_linkc() - Check and ack the LINKC interrupt.
* @mite_chan: MITE dma channel.
* @s: COMEDI subdevice.
* @sync: flag to force a mite_sync_dma().
*
* This will also ack the DONE interrupt if active.
*/
void mite_ack_linkc(struct mite_channel *mite_chan,
struct comedi_subdevice *s,
bool sync)
{
struct mite *mite = mite_chan->mite;
unsigned int status;
status = mite_get_status(mite_chan);
if (status & CHSR_LINKC) {
writel(CHOR_CLRLC, mite->mmio + MITE_CHOR(mite_chan->channel));
sync = true;
}
if (sync)
mite_sync_dma(mite_chan, s);
if (status & CHSR_XFERR) {
dev_err(s->device->class_dev,
"mite: transfer error %08x\n", status);
s->async->events |= COMEDI_CB_ERROR;
}
}
EXPORT_SYMBOL_GPL(mite_ack_linkc);
/**
* mite_done() - Check if a MITE dma transfer is complete.
* @mite_chan: MITE dma channel.
*
* This will also ack the DONE interrupt if active.
*/
int mite_done(struct mite_channel *mite_chan)
{
struct mite *mite = mite_chan->mite;
unsigned long flags;
int done;
mite_get_status(mite_chan);
spin_lock_irqsave(&mite->lock, flags);
done = mite_chan->done;
spin_unlock_irqrestore(&mite->lock, flags);
return done;
}
EXPORT_SYMBOL_GPL(mite_done);
static void mite_dma_reset(struct mite_channel *mite_chan)
{
writel(CHOR_DMARESET | CHOR_FRESET,
mite_chan->mite->mmio + MITE_CHOR(mite_chan->channel));
}
/**
* mite_dma_arm() - Start a MITE dma transfer.
* @mite_chan: MITE dma channel.
*/
void mite_dma_arm(struct mite_channel *mite_chan)
{
struct mite *mite = mite_chan->mite;
unsigned long flags;
/*
* memory barrier is intended to ensure any twiddling with the buffer
* is done before writing to the mite to arm dma transfer
*/
smp_mb();
spin_lock_irqsave(&mite->lock, flags);
mite_chan->done = 0;
/* arm */
writel(CHOR_START, mite->mmio + MITE_CHOR(mite_chan->channel));
spin_unlock_irqrestore(&mite->lock, flags);
}
EXPORT_SYMBOL_GPL(mite_dma_arm);
/**
* mite_dma_disarm() - Stop a MITE dma transfer.
* @mite_chan: MITE dma channel.
*/
void mite_dma_disarm(struct mite_channel *mite_chan)
{
struct mite *mite = mite_chan->mite;
/* disarm */
writel(CHOR_ABORT, mite->mmio + MITE_CHOR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_dma_disarm);
/**
* mite_prep_dma() - Prepare a MITE dma channel for transfers.
* @mite_chan: MITE dma channel.
* @num_device_bits: device transfer size (8, 16, or 32-bits).
* @num_memory_bits: memory transfer size (8, 16, or 32-bits).
*/
void mite_prep_dma(struct mite_channel *mite_chan,
unsigned int num_device_bits, unsigned int num_memory_bits)
{
struct mite *mite = mite_chan->mite;
unsigned int chcr, mcr, dcr, lkcr;
mite_dma_reset(mite_chan);
/* short link chaining mode */
chcr = CHCR_SET_DMA_IE | CHCR_LINKSHORT | CHCR_SET_DONE_IE |
CHCR_BURSTEN;
/*
* Link Complete Interrupt: interrupt every time a link
* in MITE_RING is completed. This can generate a lot of
* extra interrupts, but right now we update the values
* of buf_int_ptr and buf_int_count at each interrupt. A
* better method is to poll the MITE before each user
* "read()" to calculate the number of bytes available.
*/
chcr |= CHCR_SET_LC_IE;
if (num_memory_bits == 32 && num_device_bits == 16) {
/*
* Doing a combined 32 and 16 bit byteswap gets the 16 bit
* samples into the fifo in the right order. Tested doing 32 bit
* memory to 16 bit device transfers to the analog out of a
* pxi-6281, which has mite version = 1, type = 4. This also
* works for dma reads from the counters on e-series boards.
*/
chcr |= CHCR_BYTE_SWAP_DEVICE | CHCR_BYTE_SWAP_MEMORY;
}
if (mite_chan->dir == COMEDI_INPUT)
chcr |= CHCR_DEV_TO_MEM;
writel(chcr, mite->mmio + MITE_CHCR(mite_chan->channel));
/* to/from memory */
mcr = mite_retry_limit(64) | CR_ASEQUP;
switch (num_memory_bits) {
case 8:
mcr |= CR_PSIZE8;
break;
case 16:
mcr |= CR_PSIZE16;
break;
case 32:
mcr |= CR_PSIZE32;
break;
default:
pr_warn("bug! invalid mem bit width for dma transfer\n");
break;
}
writel(mcr, mite->mmio + MITE_MCR(mite_chan->channel));
/* from/to device */
dcr = mite_retry_limit(64) | CR_ASEQUP;
dcr |= CR_PORTIO | CR_AMDEVICE | mite_drq_reqs(mite_chan->channel);
switch (num_device_bits) {
case 8:
dcr |= CR_PSIZE8;
break;
case 16:
dcr |= CR_PSIZE16;
break;
case 32:
dcr |= CR_PSIZE32;
break;
default:
pr_warn("bug! invalid dev bit width for dma transfer\n");
break;
}
writel(dcr, mite->mmio + MITE_DCR(mite_chan->channel));
/* reset the DAR */
writel(0, mite->mmio + MITE_DAR(mite_chan->channel));
/* the link is 32bits */
lkcr = mite_retry_limit(64) | CR_ASEQUP | CR_PSIZE32;
writel(lkcr, mite->mmio + MITE_LKCR(mite_chan->channel));
/* starting address for link chaining */
writel(mite_chan->ring->dma_addr,
mite->mmio + MITE_LKAR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_prep_dma);
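/*
 * Worked example (assumed configuration): preparing a COMEDI_INPUT channel
 * with num_memory_bits = 32 and num_device_bits = 16 builds
 * chcr = CHCR_SET_DMA_IE | CHCR_LINKSHORT | CHCR_SET_DONE_IE | CHCR_BURSTEN
 * | CHCR_SET_LC_IE | CHCR_BYTE_SWAP_DEVICE | CHCR_BYTE_SWAP_MEMORY
 * | CHCR_DEV_TO_MEM, i.e. 0x8208405c is written to MITE_CHCR.
 */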
/**
* mite_request_channel_in_range() - Request a MITE dma channel.
* @mite: MITE device.
* @ring: MITE dma ring.
* @min_channel: minimum channel index to use.
* @max_channel: maximum channel index to use.
*/
struct mite_channel *mite_request_channel_in_range(struct mite *mite,
struct mite_ring *ring,
unsigned int min_channel,
unsigned int max_channel)
{
struct mite_channel *mite_chan = NULL;
unsigned long flags;
int i;
/*
* spin lock so mite_release_channel can be called safely
* from interrupts
*/
spin_lock_irqsave(&mite->lock, flags);
for (i = min_channel; i <= max_channel; ++i) {
mite_chan = &mite->channels[i];
if (!mite_chan->ring) {
mite_chan->ring = ring;
break;
}
mite_chan = NULL;
}
spin_unlock_irqrestore(&mite->lock, flags);
return mite_chan;
}
EXPORT_SYMBOL_GPL(mite_request_channel_in_range);
/**
* mite_request_channel() - Request a MITE dma channel.
* @mite: MITE device.
* @ring: MITE dma ring.
*/
struct mite_channel *mite_request_channel(struct mite *mite,
struct mite_ring *ring)
{
return mite_request_channel_in_range(mite, ring, 0,
mite->num_channels - 1);
}
EXPORT_SYMBOL_GPL(mite_request_channel);
/**
* mite_release_channel() - Release a MITE dma channel.
* @mite_chan: MITE dma channel.
*/
void mite_release_channel(struct mite_channel *mite_chan)
{
struct mite *mite = mite_chan->mite;
unsigned long flags;
/* spin lock to prevent races with mite_request_channel */
spin_lock_irqsave(&mite->lock, flags);
if (mite_chan->ring) {
mite_dma_disarm(mite_chan);
mite_dma_reset(mite_chan);
/*
* disable all channel's interrupts (do it after disarm/reset so
* MITE_CHCR reg isn't changed while dma is still active!)
*/
writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE |
CHCR_CLR_SAR_IE | CHCR_CLR_DONE_IE |
CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
mite->mmio + MITE_CHCR(mite_chan->channel));
mite_chan->ring = NULL;
}
spin_unlock_irqrestore(&mite->lock, flags);
}
EXPORT_SYMBOL_GPL(mite_release_channel);
/**
* mite_init_ring_descriptors() - Initialize a MITE dma ring descriptors.
* @ring: MITE dma ring.
* @s: COMEDI subdevice.
* @nbytes: the size of the dma ring (in bytes).
*
* Initializes the ring buffer descriptors to provide correct DMA transfer
* links to the exact amount of memory required. When the ring buffer is
* allocated by mite_buf_change(), the default is to initialize the ring
* to refer to the entire DMA data buffer. A command may call this function
* later to re-initialize and shorten the amount of memory that will be
* transferred.
*/
int mite_init_ring_descriptors(struct mite_ring *ring,
struct comedi_subdevice *s,
unsigned int nbytes)
{
struct comedi_async *async = s->async;
struct mite_dma_desc *desc = NULL;
unsigned int n_full_links = nbytes >> PAGE_SHIFT;
unsigned int remainder = nbytes % PAGE_SIZE;
int i;
dev_dbg(s->device->class_dev,
"mite: init ring buffer to %u bytes\n", nbytes);
if ((n_full_links + (remainder > 0 ? 1 : 0)) > ring->n_links) {
dev_err(s->device->class_dev,
"mite: ring buffer too small for requested init\n");
return -ENOMEM;
}
/* We set the descriptors for all full links. */
for (i = 0; i < n_full_links; ++i) {
desc = &ring->descs[i];
desc->count = cpu_to_le32(PAGE_SIZE);
desc->addr = cpu_to_le32(async->buf_map->page_list[i].dma_addr);
desc->next = cpu_to_le32(ring->dma_addr +
(i + 1) * sizeof(*desc));
}
/* the last link is either a remainder or was a full link. */
if (remainder > 0) {
desc = &ring->descs[i];
/* set the lesser count for the remainder link */
desc->count = cpu_to_le32(remainder);
desc->addr = cpu_to_le32(async->buf_map->page_list[i].dma_addr);
}
/* Assign the last link->next to point back to the head of the list. */
desc->next = cpu_to_le32(ring->dma_addr);
/*
* barrier is meant to ensure that all the writes to the dma descriptors
* have completed before the dma controller is commanded to read them
*/
smp_wmb();
return 0;
}
EXPORT_SYMBOL_GPL(mite_init_ring_descriptors);
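/*
 * Example layout (assuming PAGE_SIZE = 4096): nbytes = 10000 gives
 * n_full_links = 2 and remainder = 1808, so three descriptors are used:
 * two with count 4096, one with count 1808, and the last desc->next
 * points back to ring->dma_addr to close the ring.
 */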
static void mite_free_dma_descs(struct mite_ring *ring)
{
struct mite_dma_desc *descs = ring->descs;
if (descs) {
dma_free_coherent(ring->hw_dev,
ring->n_links * sizeof(*descs),
descs, ring->dma_addr);
ring->descs = NULL;
ring->dma_addr = 0;
ring->n_links = 0;
}
}
/**
* mite_buf_change() - COMEDI subdevice (*buf_change) for a MITE dma ring.
* @ring: MITE dma ring.
* @s: COMEDI subdevice.
*/
int mite_buf_change(struct mite_ring *ring, struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
struct mite_dma_desc *descs;
unsigned int n_links;
mite_free_dma_descs(ring);
if (async->prealloc_bufsz == 0)
return 0;
n_links = async->prealloc_bufsz >> PAGE_SHIFT;
descs = dma_alloc_coherent(ring->hw_dev,
n_links * sizeof(*descs),
&ring->dma_addr, GFP_KERNEL);
if (!descs) {
dev_err(s->device->class_dev,
"mite: ring buffer allocation failed\n");
return -ENOMEM;
}
ring->descs = descs;
ring->n_links = n_links;
return mite_init_ring_descriptors(ring, s, n_links << PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(mite_buf_change);
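/*
 * Example (assuming PAGE_SIZE = 4096): a 64 KiB preallocated buffer gives
 * n_links = 16, and the ring is then initialized to cover all
 * 16 << PAGE_SHIFT = 65536 bytes of the buffer.
 */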
/**
* mite_alloc_ring() - Allocate a MITE dma ring.
* @mite: MITE device.
*/
struct mite_ring *mite_alloc_ring(struct mite *mite)
{
struct mite_ring *ring;
ring = kmalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
return NULL;
ring->hw_dev = get_device(&mite->pcidev->dev);
if (!ring->hw_dev) {
kfree(ring);
return NULL;
}
ring->n_links = 0;
ring->descs = NULL;
ring->dma_addr = 0;
return ring;
}
EXPORT_SYMBOL_GPL(mite_alloc_ring);
/**
* mite_free_ring() - Free a MITE dma ring and its descriptors.
* @ring: MITE dma ring.
*/
void mite_free_ring(struct mite_ring *ring)
{
if (ring) {
mite_free_dma_descs(ring);
put_device(ring->hw_dev);
kfree(ring);
}
}
EXPORT_SYMBOL_GPL(mite_free_ring);
static int mite_setup(struct comedi_device *dev, struct mite *mite,
bool use_win1)
{
resource_size_t daq_phys_addr;
unsigned long length;
int i;
u32 csigr_bits;
unsigned int unknown_dma_burst_bits;
unsigned int wpdep;
pci_set_master(mite->pcidev);
mite->mmio = pci_ioremap_bar(mite->pcidev, 0);
if (!mite->mmio)
return -ENOMEM;
dev->mmio = pci_ioremap_bar(mite->pcidev, 1);
if (!dev->mmio)
return -ENOMEM;
daq_phys_addr = pci_resource_start(mite->pcidev, 1);
length = pci_resource_len(mite->pcidev, 1);
if (use_win1) {
writel(0, mite->mmio + MITE_IODWBSR);
dev_dbg(dev->class_dev,
"mite: using I/O Window Base Size register 1\n");
writel(daq_phys_addr | WENAB |
MITE_IODWBSR_1_WSIZE_bits(length),
mite->mmio + MITE_IODWBSR_1);
writel(0, mite->mmio + MITE_IODWCR_1);
} else {
writel(daq_phys_addr | WENAB, mite->mmio + MITE_IODWBSR);
}
/*
* Make sure dma bursts work. I got this from running a bus analyzer
* on a pxi-6281 and a pxi-6713. 6713 powered up with register value
* of 0x61f and bursts worked. 6281 powered up with register value of
* 0x1f and bursts didn't work. The NI windows driver reads the
* register, then does a bitwise-or of 0x600 with it and writes it back.
*
* The bits 0x90180700 in MITE_UNKNOWN_DMA_BURST_REG can be
* written and read back. The bits 0x1f always read as 1.
* The rest always read as zero.
*/
unknown_dma_burst_bits = readl(mite->mmio + MITE_UNKNOWN_DMA_BURST_REG);
unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS;
writel(unknown_dma_burst_bits, mite->mmio + MITE_UNKNOWN_DMA_BURST_REG);
csigr_bits = readl(mite->mmio + MITE_CSIGR);
mite->num_channels = CSIGR_TO_DMAC(csigr_bits);
if (mite->num_channels > MAX_MITE_DMA_CHANNELS) {
dev_warn(dev->class_dev,
"mite: bug? chip claims to have %i dma channels. Setting to %i.\n",
mite->num_channels, MAX_MITE_DMA_CHANNELS);
mite->num_channels = MAX_MITE_DMA_CHANNELS;
}
/* get the wpdep bits and convert it to the write port fifo depth */
wpdep = CSIGR_TO_WPDEP(csigr_bits);
if (wpdep)
wpdep = BIT(wpdep);
dev_dbg(dev->class_dev,
"mite: version = %i, type = %i, mite mode = %i, interface mode = %i\n",
CSIGR_TO_VER(csigr_bits), CSIGR_TO_TYPE(csigr_bits),
CSIGR_TO_MMODE(csigr_bits), CSIGR_TO_IMODE(csigr_bits));
dev_dbg(dev->class_dev,
"mite: num channels = %i, write post fifo depth = %i, wins = %i, iowins = %i\n",
CSIGR_TO_DMAC(csigr_bits), wpdep,
CSIGR_TO_WINS(csigr_bits), CSIGR_TO_IOWINS(csigr_bits));
for (i = 0; i < mite->num_channels; i++) {
writel(CHOR_DMARESET, mite->mmio + MITE_CHOR(i));
/* disable interrupts */
writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
mite->mmio + MITE_CHCR(i));
}
mite->fifo_size = mite_fifo_size(mite, 0);
dev_dbg(dev->class_dev, "mite: fifo size is %i.\n", mite->fifo_size);
return 0;
}
/**
* mite_attach() - Allocate and initialize a MITE device for a comedi driver.
* @dev: COMEDI device.
* @use_win1: flag to use I/O Window 1 instead of I/O Window 0.
*
* Called by a COMEDI drivers (*auto_attach).
*
* Returns a pointer to the MITE device on success, or NULL if the MITE cannot
* be allocated or remapped.
*/
struct mite *mite_attach(struct comedi_device *dev, bool use_win1)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct mite *mite;
unsigned int i;
int ret;
mite = kzalloc(sizeof(*mite), GFP_KERNEL);
if (!mite)
return NULL;
spin_lock_init(&mite->lock);
mite->pcidev = pcidev;
for (i = 0; i < MAX_MITE_DMA_CHANNELS; ++i) {
mite->channels[i].mite = mite;
mite->channels[i].channel = i;
mite->channels[i].done = 1;
}
ret = mite_setup(dev, mite, use_win1);
if (ret) {
if (mite->mmio)
iounmap(mite->mmio);
kfree(mite);
return NULL;
}
return mite;
}
EXPORT_SYMBOL_GPL(mite_attach);
/**
* mite_detach() - Unmap and free a MITE device for a comedi driver.
* @mite: MITE device.
*
* Called by a COMEDI drivers (*detach).
*/
void mite_detach(struct mite *mite)
{
if (!mite)
return;
if (mite->mmio)
iounmap(mite->mmio);
kfree(mite);
}
EXPORT_SYMBOL_GPL(mite_detach);
static int __init mite_module_init(void)
{
return 0;
}
module_init(mite_module_init);
static void __exit mite_module_exit(void)
{
}
module_exit(mite_module_exit);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi helper for NI Mite PCI interface chip");
MODULE_LICENSE("GPL");
/* linux-master: drivers/comedi/drivers/mite.c */
// SPDX-License-Identifier: GPL-2.0+
/*
* pcl711.c
* Comedi driver for PC-LabCard PCL-711 and AdSys ACL-8112 and compatibles
* Copyright (C) 1998 David A. Schleef <[email protected]>
* Janne Jalkanen <[email protected]>
* Eric Bunn <[email protected]>
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998 David A. Schleef <[email protected]>
*/
/*
* Driver: pcl711
* Description: Advantech PCL-711 and 711b, ADLink ACL-8112
* Devices: [Advantech] PCL-711 (pcl711), PCL-711B (pcl711b),
* [ADLink] ACL-8112HG (acl8112hg), ACL-8112DG (acl8112dg)
* Author: David A. Schleef <[email protected]>
* Janne Jalkanen <[email protected]>
* Eric Bunn <[email protected]>
* Updated:
* Status: mostly complete
*
* Configuration Options:
* [0] - I/O port base
* [1] - IRQ, optional
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/comedi/comedidev.h>
#include <linux/comedi/comedi_8254.h>
/*
* I/O port register map
*/
#define PCL711_TIMER_BASE 0x00
#define PCL711_AI_LSB_REG 0x04
#define PCL711_AI_MSB_REG 0x05
#define PCL711_AI_MSB_DRDY BIT(4)
#define PCL711_AO_LSB_REG(x) (0x04 + ((x) * 2))
#define PCL711_AO_MSB_REG(x) (0x05 + ((x) * 2))
#define PCL711_DI_LSB_REG 0x06
#define PCL711_DI_MSB_REG 0x07
#define PCL711_INT_STAT_REG 0x08
#define PCL711_INT_STAT_CLR (0 << 0) /* any value will work */
#define PCL711_AI_GAIN_REG 0x09
#define PCL711_AI_GAIN(x) (((x) & 0xf) << 0)
#define PCL711_MUX_REG 0x0a
#define PCL711_MUX_CHAN(x) (((x) & 0xf) << 0)
#define PCL711_MUX_CS0 BIT(4)
#define PCL711_MUX_CS1 BIT(5)
#define PCL711_MUX_DIFF (PCL711_MUX_CS0 | PCL711_MUX_CS1)
#define PCL711_MODE_REG 0x0b
#define PCL711_MODE(x) (((x) & 0x7) << 0)
#define PCL711_MODE_DEFAULT PCL711_MODE(0)
#define PCL711_MODE_SOFTTRIG PCL711_MODE(1)
#define PCL711_MODE_EXT PCL711_MODE(2)
#define PCL711_MODE_EXT_IRQ PCL711_MODE(3)
#define PCL711_MODE_PACER PCL711_MODE(4)
#define PCL711_MODE_PACER_IRQ PCL711_MODE(6)
#define PCL711_MODE_IRQ(x) (((x) & 0x7) << 4)
#define PCL711_SOFTTRIG_REG 0x0c
#define PCL711_SOFTTRIG (0 << 0) /* any value will work */
#define PCL711_DO_LSB_REG 0x0d
#define PCL711_DO_MSB_REG 0x0e
static const struct comedi_lrange range_pcl711b_ai = {
5, {
BIP_RANGE(5),
BIP_RANGE(2.5),
BIP_RANGE(1.25),
BIP_RANGE(0.625),
BIP_RANGE(0.3125)
}
};
static const struct comedi_lrange range_acl8112hg_ai = {
12, {
BIP_RANGE(5),
BIP_RANGE(0.5),
BIP_RANGE(0.05),
BIP_RANGE(0.005),
UNI_RANGE(10),
UNI_RANGE(1),
UNI_RANGE(0.1),
UNI_RANGE(0.01),
BIP_RANGE(10),
BIP_RANGE(1),
BIP_RANGE(0.1),
BIP_RANGE(0.01)
}
};
static const struct comedi_lrange range_acl8112dg_ai = {
9, {
BIP_RANGE(5),
BIP_RANGE(2.5),
BIP_RANGE(1.25),
BIP_RANGE(0.625),
UNI_RANGE(10),
UNI_RANGE(5),
UNI_RANGE(2.5),
UNI_RANGE(1.25),
BIP_RANGE(10)
}
};
struct pcl711_board {
const char *name;
int n_aichan;
int n_aochan;
int maxirq;
const struct comedi_lrange *ai_range_type;
};
static const struct pcl711_board boardtypes[] = {
{
.name = "pcl711",
.n_aichan = 8,
.n_aochan = 1,
.ai_range_type = &range_bipolar5,
}, {
.name = "pcl711b",
.n_aichan = 8,
.n_aochan = 1,
.maxirq = 7,
.ai_range_type = &range_pcl711b_ai,
}, {
.name = "acl8112hg",
.n_aichan = 16,
.n_aochan = 2,
.maxirq = 15,
.ai_range_type = &range_acl8112hg_ai,
}, {
.name = "acl8112dg",
.n_aichan = 16,
.n_aochan = 2,
.maxirq = 15,
.ai_range_type = &range_acl8112dg_ai,
},
};
static void pcl711_ai_set_mode(struct comedi_device *dev, unsigned int mode)
{
/*
* The pcl711b board uses bits in the mode register to select the
* interrupt. The other boards supported by this driver all use
* jumpers on the board.
*
* Enables the interrupt when needed on the pcl711b board. These
* bits do nothing on the other boards.
*/
if (mode == PCL711_MODE_EXT_IRQ || mode == PCL711_MODE_PACER_IRQ)
mode |= PCL711_MODE_IRQ(dev->irq);
outb(mode, dev->iobase + PCL711_MODE_REG);
}
static unsigned int pcl711_ai_get_sample(struct comedi_device *dev,
struct comedi_subdevice *s)
{
unsigned int val;
val = inb(dev->iobase + PCL711_AI_MSB_REG) << 8;
val |= inb(dev->iobase + PCL711_AI_LSB_REG);
return val & s->maxdata;
}
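/*
 * Example: with MSB reg = 0x0a and LSB reg = 0x5c, val = 0x0a5c; masking
 * with s->maxdata (0xfff for these boards) gives a sample of 0xa5c.
 */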
static int pcl711_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
outb(PCL711_INT_STAT_CLR, dev->iobase + PCL711_INT_STAT_REG);
pcl711_ai_set_mode(dev, PCL711_MODE_SOFTTRIG);
return 0;
}
static irqreturn_t pcl711_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_cmd *cmd = &s->async->cmd;
unsigned short data;
if (!dev->attached) {
dev_err(dev->class_dev, "spurious interrupt\n");
return IRQ_HANDLED;
}
data = pcl711_ai_get_sample(dev, s);
outb(PCL711_INT_STAT_CLR, dev->iobase + PCL711_INT_STAT_REG);
comedi_buf_write_samples(s, &data, 1);
if (cmd->stop_src == TRIG_COUNT &&
s->async->scans_done >= cmd->stop_arg)
s->async->events |= COMEDI_CB_EOA;
comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
static void pcl711_set_changain(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int chanspec)
{
unsigned int chan = CR_CHAN(chanspec);
unsigned int range = CR_RANGE(chanspec);
unsigned int aref = CR_AREF(chanspec);
unsigned int mux = 0;
outb(PCL711_AI_GAIN(range), dev->iobase + PCL711_AI_GAIN_REG);
if (s->n_chan > 8) {
/* Select the correct MPC508A chip */
if (aref == AREF_DIFF) {
chan &= 0x7;
mux |= PCL711_MUX_DIFF;
} else {
if (chan < 8)
mux |= PCL711_MUX_CS0;
else
mux |= PCL711_MUX_CS1;
}
}
outb(mux | PCL711_MUX_CHAN(chan), dev->iobase + PCL711_MUX_REG);
}
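/*
 * Example (ACL-8112, 16 channels): a single-ended read of channel 12
 * sets PCL711_MUX_CS1, so the mux register gets 0x20 | 0x0c = 0x2c;
 * a differential read of the same chanspec masks the channel to 4 and
 * writes PCL711_MUX_DIFF | 0x04 = 0x34.
 */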
static int pcl711_ai_eoc(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context)
{
unsigned int status;
status = inb(dev->iobase + PCL711_AI_MSB_REG);
if ((status & PCL711_AI_MSB_DRDY) == 0)
return 0;
return -EBUSY;
}
static int pcl711_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
int ret;
int i;
pcl711_set_changain(dev, s, insn->chanspec);
pcl711_ai_set_mode(dev, PCL711_MODE_SOFTTRIG);
for (i = 0; i < insn->n; i++) {
outb(PCL711_SOFTTRIG, dev->iobase + PCL711_SOFTTRIG_REG);
ret = comedi_timeout(dev, s, insn, pcl711_ai_eoc, 0);
if (ret)
return ret;
data[i] = pcl711_ai_get_sample(dev, s);
}
return insn->n;
}
static int pcl711_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
int err = 0;
/* Step 1 : check if triggers are trivially valid */
err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
err |= comedi_check_trigger_src(&cmd->scan_begin_src,
TRIG_TIMER | TRIG_EXT);
err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
if (err)
return 1;
/* Step 2a : make sure trigger sources are unique */
err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
err |= comedi_check_trigger_is_unique(cmd->stop_src);
/* Step 2b : and mutually compatible */
if (err)
return 2;
/* Step 3: check if arguments are trivially valid */
err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
if (cmd->scan_begin_src == TRIG_EXT) {
err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
} else {
#define MAX_SPEED 1000
err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
MAX_SPEED);
}
err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
if (cmd->stop_src == TRIG_COUNT)
err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
else /* TRIG_NONE */
err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
if (err)
return 3;
/* step 4 */
if (cmd->scan_begin_src == TRIG_TIMER) {
unsigned int arg = cmd->scan_begin_arg;
comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
}
if (err)
return 4;
return 0;
}
static int pcl711_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct comedi_cmd *cmd = &s->async->cmd;
pcl711_set_changain(dev, s, cmd->chanlist[0]);
if (cmd->scan_begin_src == TRIG_TIMER) {
comedi_8254_update_divisors(dev->pacer);
comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
outb(PCL711_INT_STAT_CLR, dev->iobase + PCL711_INT_STAT_REG);
pcl711_ai_set_mode(dev, PCL711_MODE_PACER_IRQ);
} else {
pcl711_ai_set_mode(dev, PCL711_MODE_EXT_IRQ);
}
return 0;
}
static void pcl711_ao_write(struct comedi_device *dev,
unsigned int chan, unsigned int val)
{
outb(val & 0xff, dev->iobase + PCL711_AO_LSB_REG(chan));
outb((val >> 8) & 0xff, dev->iobase + PCL711_AO_MSB_REG(chan));
}
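/*
 * Example: writing val = 0xabc to channel 0 sends 0xbc to
 * PCL711_AO_LSB_REG(0) and 0x0a to PCL711_AO_MSB_REG(0).
 */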
static int pcl711_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int val = s->readback[chan];
int i;
for (i = 0; i < insn->n; i++) {
val = data[i];
pcl711_ao_write(dev, chan, val);
}
s->readback[chan] = val;
return insn->n;
}
static int pcl711_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int val;
val = inb(dev->iobase + PCL711_DI_LSB_REG);
val |= (inb(dev->iobase + PCL711_DI_MSB_REG) << 8);
data[1] = val;
return insn->n;
}
static int pcl711_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int mask;
mask = comedi_dio_update_state(s, data);
if (mask) {
if (mask & 0x00ff)
outb(s->state & 0xff, dev->iobase + PCL711_DO_LSB_REG);
if (mask & 0xff00)
outb((s->state >> 8), dev->iobase + PCL711_DO_MSB_REG);
}
data[1] = s->state;
return insn->n;
}
static int pcl711_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct pcl711_board *board = dev->board_ptr;
struct comedi_subdevice *s;
int ret;
ret = comedi_request_region(dev, it->options[0], 0x10);
if (ret)
return ret;
if (it->options[1] && it->options[1] <= board->maxirq) {
ret = request_irq(it->options[1], pcl711_interrupt, 0,
dev->board_name, dev);
if (ret == 0)
dev->irq = it->options[1];
}
dev->pacer = comedi_8254_init(dev->iobase + PCL711_TIMER_BASE,
I8254_OSC_BASE_2MHZ, I8254_IO8, 0);
if (!dev->pacer)
return -ENOMEM;
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
/* Analog Input subdevice */
s = &dev->subdevices[0];
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_GROUND;
if (board->n_aichan > 8)
s->subdev_flags |= SDF_DIFF;
s->n_chan = board->n_aichan;
s->maxdata = 0xfff;
s->range_table = board->ai_range_type;
s->insn_read = pcl711_ai_insn_read;
if (dev->irq) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
s->len_chanlist = 1;
s->do_cmdtest = pcl711_ai_cmdtest;
s->do_cmd = pcl711_ai_cmd;
s->cancel = pcl711_ai_cancel;
}
/* Analog Output subdevice */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = board->n_aochan;
s->maxdata = 0xfff;
s->range_table = &range_bipolar5;
s->insn_write = pcl711_ao_insn_write;
ret = comedi_alloc_subdev_readback(s);
if (ret)
return ret;
/* Digital Input subdevice */
s = &dev->subdevices[2];
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = pcl711_di_insn_bits;
/* Digital Output subdevice */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = pcl711_do_insn_bits;
/* clear DAC */
pcl711_ao_write(dev, 0, 0x0);
pcl711_ao_write(dev, 1, 0x0);
return 0;
}
static struct comedi_driver pcl711_driver = {
.driver_name = "pcl711",
.module = THIS_MODULE,
.attach = pcl711_attach,
.detach = comedi_legacy_detach,
.board_name = &boardtypes[0].name,
.num_names = ARRAY_SIZE(boardtypes),
.offset = sizeof(struct pcl711_board),
};
module_comedi_driver(pcl711_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi driver for PCL-711 compatible boards");
MODULE_LICENSE("GPL");
/* linux-master: drivers/comedi/drivers/pcl711.c */
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/rti800.c
* Hardware driver for Analog Devices RTI-800/815 board
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998 David A. Schleef <[email protected]>
*/
/*
* Driver: rti800
* Description: Analog Devices RTI-800/815
* Devices: [Analog Devices] RTI-800 (rti800), RTI-815 (rti815)
* Author: David A. Schleef <[email protected]>
* Status: unknown
* Updated: Fri, 05 Sep 2008 14:50:44 +0100
*
* Configuration options:
* [0] - I/O port base address
* [1] - IRQ (not supported / unused)
* [2] - A/D mux/reference (number of channels)
* 0 = differential
* 1 = pseudodifferential (common)
* 2 = single-ended
* [3] - A/D range
* 0 = [-10,10]
* 1 = [-5,5]
* 2 = [0,10]
* [4] - A/D encoding
* 0 = two's complement
* 1 = straight binary
* [5] - DAC 0 range
* 0 = [-10,10]
* 1 = [0,10]
* [6] - DAC 0 encoding
* 0 = two's complement
* 1 = straight binary
* [7] - DAC 1 range (same as DAC 0)
* [8] - DAC 1 encoding (same as DAC 0)
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/comedi/comedidev.h>
/*
* Register map
*/
#define RTI800_CSR 0x00
#define RTI800_CSR_BUSY BIT(7)
#define RTI800_CSR_DONE BIT(6)
#define RTI800_CSR_OVERRUN BIT(5)
#define RTI800_CSR_TCR BIT(4)
#define RTI800_CSR_DMA_ENAB BIT(3)
#define RTI800_CSR_INTR_TC BIT(2)
#define RTI800_CSR_INTR_EC BIT(1)
#define RTI800_CSR_INTR_OVRN BIT(0)
#define RTI800_MUXGAIN 0x01
#define RTI800_CONVERT 0x02
#define RTI800_ADCLO 0x03
#define RTI800_ADCHI 0x04
#define RTI800_DAC0LO 0x05
#define RTI800_DAC0HI 0x06
#define RTI800_DAC1LO 0x07
#define RTI800_DAC1HI 0x08
#define RTI800_CLRFLAGS 0x09
#define RTI800_DI 0x0a
#define RTI800_DO 0x0b
#define RTI800_9513A_DATA 0x0c
#define RTI800_9513A_CNTRL 0x0d
#define RTI800_9513A_STATUS 0x0d
static const struct comedi_lrange range_rti800_ai_10_bipolar = {
4, {
BIP_RANGE(10),
BIP_RANGE(1),
BIP_RANGE(0.1),
BIP_RANGE(0.02)
}
};
static const struct comedi_lrange range_rti800_ai_5_bipolar = {
4, {
BIP_RANGE(5),
BIP_RANGE(0.5),
BIP_RANGE(0.05),
BIP_RANGE(0.01)
}
};
static const struct comedi_lrange range_rti800_ai_unipolar = {
4, {
UNI_RANGE(10),
UNI_RANGE(1),
UNI_RANGE(0.1),
UNI_RANGE(0.02)
}
};
static const struct comedi_lrange *const rti800_ai_ranges[] = {
&range_rti800_ai_10_bipolar,
&range_rti800_ai_5_bipolar,
&range_rti800_ai_unipolar,
};
static const struct comedi_lrange *const rti800_ao_ranges[] = {
&range_bipolar10,
&range_unipolar10,
};
struct rti800_board {
const char *name;
int has_ao;
};
static const struct rti800_board rti800_boardtypes[] = {
{
.name = "rti800",
}, {
.name = "rti815",
.has_ao = 1,
},
};
struct rti800_private {
bool adc_2comp;
bool dac_2comp[2];
const struct comedi_lrange *ao_range_type_list[2];
unsigned char muxgain_bits;
};
static int rti800_ai_eoc(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context)
{
unsigned char status;
status = inb(dev->iobase + RTI800_CSR);
if (status & RTI800_CSR_OVERRUN) {
outb(0, dev->iobase + RTI800_CLRFLAGS);
return -EOVERFLOW;
}
if (status & RTI800_CSR_DONE)
return 0;
return -EBUSY;
}
static int rti800_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct rti800_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int gain = CR_RANGE(insn->chanspec);
unsigned char muxgain_bits;
int ret;
int i;
inb(dev->iobase + RTI800_ADCHI);
outb(0, dev->iobase + RTI800_CLRFLAGS);
muxgain_bits = chan | (gain << 5);
if (muxgain_bits != devpriv->muxgain_bits) {
devpriv->muxgain_bits = muxgain_bits;
outb(devpriv->muxgain_bits, dev->iobase + RTI800_MUXGAIN);
/*
* Without a delay here, the RTI_CSR_OVERRUN bit
* gets set, and you will have an error.
*/
if (insn->n > 0) {
int delay = (gain == 0) ? 10 :
(gain == 1) ? 20 :
(gain == 2) ? 40 : 80;
udelay(delay);
}
}
for (i = 0; i < insn->n; i++) {
unsigned int val;
outb(0, dev->iobase + RTI800_CONVERT);
ret = comedi_timeout(dev, s, insn, rti800_ai_eoc, 0);
if (ret)
return ret;
val = inb(dev->iobase + RTI800_ADCLO);
val |= (inb(dev->iobase + RTI800_ADCHI) & 0xf) << 8;
if (devpriv->adc_2comp)
val = comedi_offset_munge(s, val);
data[i] = val;
}
return insn->n;
}
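/*
 * Example: reading channel 3 at gain index 2 sets muxgain_bits =
 * 3 | (2 << 5) = 0x43, and (on a muxgain change) waits 40 us for the
 * input to settle before converting.
 */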
static int rti800_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct rti800_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
int reg_lo = chan ? RTI800_DAC1LO : RTI800_DAC0LO;
int reg_hi = chan ? RTI800_DAC1HI : RTI800_DAC0HI;
int i;
for (i = 0; i < insn->n; i++) {
unsigned int val = data[i];
s->readback[chan] = val;
if (devpriv->dac_2comp[chan])
val = comedi_offset_munge(s, val);
outb(val & 0xff, dev->iobase + reg_lo);
outb((val >> 8) & 0xff, dev->iobase + reg_hi);
}
return insn->n;
}
static int rti800_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
data[1] = inb(dev->iobase + RTI800_DI);
return insn->n;
}
static int rti800_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
if (comedi_dio_update_state(s, data)) {
/* Outputs are inverted... */
outb(s->state ^ 0xff, dev->iobase + RTI800_DO);
}
data[1] = s->state;
return insn->n;
}
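/*
 * Example of the inversion above: setting only bit 0 (s->state = 0x01)
 * writes 0xfe to the RTI800_DO register.
 */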
static int rti800_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct rti800_board *board = dev->board_ptr;
struct rti800_private *devpriv;
struct comedi_subdevice *s;
int ret;
ret = comedi_request_region(dev, it->options[0], 0x10);
if (ret)
return ret;
outb(0, dev->iobase + RTI800_CSR);
inb(dev->iobase + RTI800_ADCHI);
outb(0, dev->iobase + RTI800_CLRFLAGS);
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
devpriv->adc_2comp = (it->options[4] == 0);
devpriv->dac_2comp[0] = (it->options[6] == 0);
devpriv->dac_2comp[1] = (it->options[8] == 0);
/* invalid, forces the MUXGAIN register to be set when first used */
devpriv->muxgain_bits = 0xff;
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
s = &dev->subdevices[0];
/* ai subdevice */
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_GROUND;
s->n_chan = (it->options[2] ? 16 : 8);
s->insn_read = rti800_ai_insn_read;
s->maxdata = 0x0fff;
s->range_table = (it->options[3] < ARRAY_SIZE(rti800_ai_ranges))
? rti800_ai_ranges[it->options[3]]
: &range_unknown;
s = &dev->subdevices[1];
if (board->has_ao) {
/* ao subdevice (only on rti815) */
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 2;
s->maxdata = 0x0fff;
s->range_table_list = devpriv->ao_range_type_list;
devpriv->ao_range_type_list[0] =
(it->options[5] < ARRAY_SIZE(rti800_ao_ranges))
? rti800_ao_ranges[it->options[5]]
: &range_unknown;
devpriv->ao_range_type_list[1] =
(it->options[7] < ARRAY_SIZE(rti800_ao_ranges))
? rti800_ao_ranges[it->options[7]]
: &range_unknown;
s->insn_write = rti800_ao_insn_write;
ret = comedi_alloc_subdev_readback(s);
if (ret)
return ret;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
s = &dev->subdevices[2];
/* di */
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = 8;
s->insn_bits = rti800_di_insn_bits;
s->maxdata = 1;
s->range_table = &range_digital;
s = &dev->subdevices[3];
/* do */
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 8;
s->insn_bits = rti800_do_insn_bits;
s->maxdata = 1;
s->range_table = &range_digital;
/*
* There is also an Am9513 timer on these boards. This subdevice
* is not currently supported.
*/
return 0;
}
static struct comedi_driver rti800_driver = {
.driver_name = "rti800",
.module = THIS_MODULE,
.attach = rti800_attach,
.detach = comedi_legacy_detach,
.num_names = ARRAY_SIZE(rti800_boardtypes),
.board_name = &rti800_boardtypes[0].name,
.offset = sizeof(struct rti800_board),
};
module_comedi_driver(rti800_driver);
MODULE_DESCRIPTION("Comedi: RTI-800 Multifunction Analog/Digital board");
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_LICENSE("GPL");
/* linux-master: drivers/comedi/drivers/rti800.c */
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/rtd520.c
* Comedi driver for Real Time Devices (RTD) PCI4520/DM7520
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2001 David A. Schleef <[email protected]>
*/
/*
* Driver: rtd520
* Description: Real Time Devices PCI4520/DM7520
* Devices: [Real Time Devices] DM7520HR-1 (DM7520), DM7520HR-8,
* PCI4520 (PCI4520), PCI4520-8
* Author: Dan Christian
* Status: Works. Only tested on DM7520-8. Not SMP safe.
*
* Configuration options: not applicable, uses PCI auto config
*/
/*
* Created by Dan Christian, NASA Ames Research Center.
*
* The PCI4520 is a PCI card. The DM7520 is a PC/104-plus card.
* Both have:
* 8/16 12 bit ADC with FIFO and channel gain table
* 8 bits high speed digital out (for external MUX) (or 8 in or 8 out)
* 8 bits high speed digital in with FIFO and interrupt on change (or 8 IO)
* 2 12 bit DACs with FIFOs
* 2 bits output
* 2 bits input
* bus mastering DMA
* timers: ADC sample, pacer, burst, about, delay, DA1, DA2
* sample counter
* 3 user timer/counters (8254)
* external interrupt
*
* The DM7520 has slightly fewer features (fewer gain steps).
*
* These boards can support external multiplexors and multi-board
* synchronization, but this driver doesn't support that.
*
* Board docs: http://www.rtdusa.com/PC104/DM/analog%20IO/dm7520.htm
* Data sheet: http://www.rtdusa.com/pdf/dm7520.pdf
* Example source: http://www.rtdusa.com/examples/dm/dm7520.zip
* Call them and ask for the register level manual.
* PCI chip: http://www.plxtech.com/products/io/pci9080
*
* Notes:
* This board is memory mapped. There is some IO stuff, but it isn't needed.
*
* I use a pretty loose naming style within the driver (rtd_blah).
* All externally visible names should be rtd520_blah.
* I use camelCase for structures (and inside them).
* I may also use upper CamelCase for function names (old habit).
*
* This board is somewhat related to the RTD PCI4400 board.
*
* I borrowed heavily from the ni_mio_common, ni_atmio16d, mite, and
* das1800, since they have the best documented code. Driver cb_pcidas64.c
* uses the same DMA controller.
*
* As far as I can tell, the About interrupt doesn't work if Sample is
* also enabled. It turns out that About really isn't needed, since
* we always count down samples read.
*/
/*
* driver status:
*
* Analog-In supports instruction and command mode.
*
* With DMA, you can sample at 1.15 MHz with 70% idle on a 400 MHz K6-2
* (single channel, 64K read buffer). I get random system lockups when
* using DMA with ALI-15xx based systems. I haven't been able to test
* any other chipsets. The lockups happen soon after the start of an
* acquisition, not in the middle of a long run.
*
* Without DMA, you can do 620 kHz sampling with 20% idle on a 400 MHz K6-2
* (with a 256K read buffer).
*
* Digital-IO and Analog-Out only support instruction mode.
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/comedi/comedi_pci.h>
#include <linux/comedi/comedi_8254.h>
#include "plx9080.h"
/*
* Local Address Space 0 Offsets
*/
#define LAS0_USER_IO 0x0008 /* User I/O */
#define LAS0_ADC 0x0010 /* FIFO Status/Software A/D Start */
#define FS_DAC1_NOT_EMPTY BIT(0) /* DAC1 FIFO not empty */
#define FS_DAC1_HEMPTY BIT(1) /* DAC1 FIFO half empty */
#define FS_DAC1_NOT_FULL BIT(2) /* DAC1 FIFO not full */
#define FS_DAC2_NOT_EMPTY BIT(4) /* DAC2 FIFO not empty */
#define FS_DAC2_HEMPTY BIT(5) /* DAC2 FIFO half empty */
#define FS_DAC2_NOT_FULL BIT(6) /* DAC2 FIFO not full */
#define FS_ADC_NOT_EMPTY BIT(8) /* ADC FIFO not empty */
#define FS_ADC_HEMPTY BIT(9) /* ADC FIFO half empty */
#define FS_ADC_NOT_FULL BIT(10) /* ADC FIFO not full */
#define FS_DIN_NOT_EMPTY BIT(12) /* DIN FIFO not empty */
#define FS_DIN_HEMPTY BIT(13) /* DIN FIFO half empty */
#define FS_DIN_NOT_FULL BIT(14) /* DIN FIFO not full */
#define LAS0_UPDATE_DAC(x) (0x0014 + ((x) * 0x4)) /* D/Ax Update (w) */
#define LAS0_DAC 0x0024 /* Software Simultaneous Update (w) */
#define LAS0_PACER 0x0028 /* Software Pacer Start/Stop */
#define LAS0_TIMER 0x002c /* Timer Status/HDIN Software Trig. */
#define LAS0_IT 0x0030 /* Interrupt Status/Enable */
#define IRQM_ADC_FIFO_WRITE BIT(0) /* ADC FIFO Write */
#define IRQM_CGT_RESET BIT(1) /* Reset CGT */
#define IRQM_CGT_PAUSE BIT(3) /* Pause CGT */
#define IRQM_ADC_ABOUT_CNT BIT(4) /* About Counter out */
#define IRQM_ADC_DELAY_CNT BIT(5) /* Delay Counter out */
#define IRQM_ADC_SAMPLE_CNT BIT(6) /* ADC Sample Counter */
#define IRQM_DAC1_UCNT BIT(7) /* DAC1 Update Counter */
#define IRQM_DAC2_UCNT BIT(8) /* DAC2 Update Counter */
#define IRQM_UTC1 BIT(9) /* User TC1 out */
#define IRQM_UTC1_INV BIT(10) /* User TC1 out, inverted */
#define IRQM_UTC2 BIT(11) /* User TC2 out */
#define IRQM_DIGITAL_IT BIT(12) /* Digital Interrupt */
#define IRQM_EXTERNAL_IT BIT(13) /* External Interrupt */
#define IRQM_ETRIG_RISING BIT(14) /* Ext Trigger rising-edge */
#define IRQM_ETRIG_FALLING BIT(15) /* Ext Trigger falling-edge */
#define LAS0_CLEAR 0x0034 /* Clear/Set Interrupt Clear Mask */
#define LAS0_OVERRUN 0x0038 /* Pending interrupts/Clear Overrun */
#define LAS0_PCLK 0x0040 /* Pacer Clock (24bit) */
#define LAS0_BCLK 0x0044 /* Burst Clock (10bit) */
#define LAS0_ADC_SCNT 0x0048 /* A/D Sample counter (10bit) */
#define LAS0_DAC1_UCNT 0x004c /* D/A1 Update counter (10 bit) */
#define LAS0_DAC2_UCNT 0x0050 /* D/A2 Update counter (10 bit) */
#define LAS0_DCNT 0x0054 /* Delay counter (16 bit) */
#define LAS0_ACNT 0x0058 /* About counter (16 bit) */
#define LAS0_DAC_CLK 0x005c /* DAC clock (16bit) */
#define LAS0_8254_TIMER_BASE 0x0060 /* 8254 timer/counter base */
#define LAS0_DIO0 0x0070 /* Digital I/O Port 0 */
#define LAS0_DIO1 0x0074 /* Digital I/O Port 1 */
#define LAS0_DIO0_CTRL 0x0078 /* Digital I/O Control */
#define LAS0_DIO_STATUS 0x007c /* Digital I/O Status */
#define LAS0_BOARD_RESET 0x0100 /* Board reset */
#define LAS0_DMA0_SRC 0x0104 /* DMA 0 Sources select */
#define LAS0_DMA1_SRC 0x0108 /* DMA 1 Sources select */
#define LAS0_ADC_CONVERSION 0x010c /* A/D Conversion Signal select */
#define LAS0_BURST_START 0x0110 /* Burst Clock Start Trigger select */
#define LAS0_PACER_START 0x0114 /* Pacer Clock Start Trigger select */
#define LAS0_PACER_STOP 0x0118 /* Pacer Clock Stop Trigger select */
#define LAS0_ACNT_STOP_ENABLE 0x011c /* About Counter Stop Enable */
#define LAS0_PACER_REPEAT 0x0120 /* Pacer Start Trigger Mode select */
#define LAS0_DIN_START 0x0124 /* HiSpd DI Sampling Signal select */
#define LAS0_DIN_FIFO_CLEAR 0x0128 /* Digital Input FIFO Clear */
#define LAS0_ADC_FIFO_CLEAR 0x012c /* A/D FIFO Clear */
#define LAS0_CGT_WRITE 0x0130 /* Channel Gain Table Write */
#define LAS0_CGL_WRITE 0x0134 /* Channel Gain Latch Write */
#define LAS0_CG_DATA 0x0138 /* Digital Table Write */
#define LAS0_CGT_ENABLE 0x013c /* Channel Gain Table Enable */
#define LAS0_CG_ENABLE 0x0140 /* Digital Table Enable */
#define LAS0_CGT_PAUSE 0x0144 /* Table Pause Enable */
#define LAS0_CGT_RESET 0x0148 /* Reset Channel Gain Table */
#define LAS0_CGT_CLEAR 0x014c /* Clear Channel Gain Table */
#define LAS0_DAC_CTRL(x) (0x0150 + ((x) * 0x14)) /* D/Ax type/range */
#define LAS0_DAC_SRC(x) (0x0154 + ((x) * 0x14)) /* D/Ax update source */
#define LAS0_DAC_CYCLE(x) (0x0158 + ((x) * 0x14)) /* D/Ax cycle mode */
#define LAS0_DAC_RESET(x) (0x015c + ((x) * 0x14)) /* D/Ax FIFO reset */
#define LAS0_DAC_FIFO_CLEAR(x) (0x0160 + ((x) * 0x14)) /* D/Ax FIFO clear */
#define LAS0_ADC_SCNT_SRC 0x0178 /* A/D Sample Counter Source select */
#define LAS0_PACER_SELECT 0x0180 /* Pacer Clock select */
#define LAS0_SBUS0_SRC 0x0184 /* SyncBus 0 Source select */
#define LAS0_SBUS0_ENABLE 0x0188 /* SyncBus 0 enable */
#define LAS0_SBUS1_SRC 0x018c /* SyncBus 1 Source select */
#define LAS0_SBUS1_ENABLE 0x0190 /* SyncBus 1 enable */
#define LAS0_SBUS2_SRC 0x0198 /* SyncBus 2 Source select */
#define LAS0_SBUS2_ENABLE 0x019c /* SyncBus 2 enable */
#define LAS0_ETRG_POLARITY 0x01a4 /* Ext. Trigger polarity select */
#define LAS0_EINT_POLARITY 0x01a8 /* Ext. Interrupt polarity select */
#define LAS0_8254_CLK_SEL(x) (0x01ac + ((x) * 0x8)) /* 8254 clock select */
#define LAS0_8254_GATE_SEL(x) (0x01b0 + ((x) * 0x8)) /* 8254 gate select */
#define LAS0_UOUT0_SELECT 0x01c4 /* User Output 0 source select */
#define LAS0_UOUT1_SELECT 0x01c8 /* User Output 1 source select */
#define LAS0_DMA0_RESET 0x01cc /* DMA0 Request state machine reset */
#define LAS0_DMA1_RESET 0x01d0 /* DMA1 Request state machine reset */
/*
* Local Address Space 1 Offsets
*/
#define LAS1_ADC_FIFO 0x0000 /* A/D FIFO (16bit) */
#define LAS1_HDIO_FIFO 0x0004 /* HiSpd DI FIFO (16bit) */
#define LAS1_DAC_FIFO(x) (0x0008 + ((x) * 0x4)) /* D/Ax FIFO (16bit) */
/*
* Driver specific stuff (tunable)
*/
/*
* We really only need 2 buffers. More than that means being much
* smarter about knowing which ones are full.
*/
#define DMA_CHAIN_COUNT 2 /* max DMA segments/buffers in a ring (min 2) */
/* Target period for periodic transfers. This sets the user read latency. */
/* Note: There are certain rates where we give this up and transfer 1/2 FIFO */
/* If this is too low, efficiency is poor */
#define TRANS_TARGET_PERIOD 10000000 /* 10 ms (in nanoseconds) */
/* Set a practical limit on how long a list to support (affects memory use) */
/* The board support a channel list up to the FIFO length (1K or 8K) */
#define RTD_MAX_CHANLIST 128 /* max channel list that we allow */
/*
* Board specific stuff
*/
#define RTD_CLOCK_RATE 8000000 /* 8 MHz onboard clock */
#define RTD_CLOCK_BASE 125 /* clock period in ns */
/* Note: these speeds are slower than the spec, but fit the counter resolution */
#define RTD_MAX_SPEED 1625 /* when sampling, in nanoseconds */
/* max speed if we don't have to wait for settling */
#define RTD_MAX_SPEED_1 875 /* if single channel, in nanoseconds */
#define RTD_MIN_SPEED 2097151875 /* (24bit counter) in nanoseconds */
/* min speed when only 1 channel (no burst counter) */
#define RTD_MIN_SPEED_1 5000000 /* 200Hz, in nanoseconds */
/* Setup continuous ring of 1/2 FIFO transfers. See RTD manual p91 */
#define DMA_MODE_BITS (\
PLX_LOCAL_BUS_16_WIDE_BITS \
| PLX_DMA_EN_READYIN_BIT \
| PLX_DMA_LOCAL_BURST_EN_BIT \
| PLX_EN_CHAIN_BIT \
| PLX_DMA_INTR_PCI_BIT \
| PLX_LOCAL_ADDR_CONST_BIT \
| PLX_DEMAND_MODE_BIT)
#define DMA_TRANSFER_BITS (\
/* descriptors in PCI memory*/ PLX_DESC_IN_PCI_BIT \
/* interrupt at end of block */ | PLX_INTR_TERM_COUNT \
/* from board to PCI */ | PLX_XFER_LOCAL_TO_PCI)
/*
* Comedi specific stuff
*/
/*
 * The board has 3 input ranges (+-5V, +-10V, +10V) with gain steps of
 * 1, 2, 4, ... 32 (plus 64 and 128 on the PCI4520)
 */
static const struct comedi_lrange rtd_ai_7520_range = {
18, {
/* +-5V input range gain steps */
BIP_RANGE(5.0),
BIP_RANGE(5.0 / 2),
BIP_RANGE(5.0 / 4),
BIP_RANGE(5.0 / 8),
BIP_RANGE(5.0 / 16),
BIP_RANGE(5.0 / 32),
/* +-10V input range gain steps */
BIP_RANGE(10.0),
BIP_RANGE(10.0 / 2),
BIP_RANGE(10.0 / 4),
BIP_RANGE(10.0 / 8),
BIP_RANGE(10.0 / 16),
BIP_RANGE(10.0 / 32),
/* +10V input range gain steps */
UNI_RANGE(10.0),
UNI_RANGE(10.0 / 2),
UNI_RANGE(10.0 / 4),
UNI_RANGE(10.0 / 8),
UNI_RANGE(10.0 / 16),
UNI_RANGE(10.0 / 32),
}
};
/* PCI4520 has two more gains (6 more entries) */
static const struct comedi_lrange rtd_ai_4520_range = {
24, {
/* +-5V input range gain steps */
BIP_RANGE(5.0),
BIP_RANGE(5.0 / 2),
BIP_RANGE(5.0 / 4),
BIP_RANGE(5.0 / 8),
BIP_RANGE(5.0 / 16),
BIP_RANGE(5.0 / 32),
BIP_RANGE(5.0 / 64),
BIP_RANGE(5.0 / 128),
/* +-10V input range gain steps */
BIP_RANGE(10.0),
BIP_RANGE(10.0 / 2),
BIP_RANGE(10.0 / 4),
BIP_RANGE(10.0 / 8),
BIP_RANGE(10.0 / 16),
BIP_RANGE(10.0 / 32),
BIP_RANGE(10.0 / 64),
BIP_RANGE(10.0 / 128),
/* +10V input range gain steps */
UNI_RANGE(10.0),
UNI_RANGE(10.0 / 2),
UNI_RANGE(10.0 / 4),
UNI_RANGE(10.0 / 8),
UNI_RANGE(10.0 / 16),
UNI_RANGE(10.0 / 32),
UNI_RANGE(10.0 / 64),
UNI_RANGE(10.0 / 128),
}
};
/* Table order matches range values */
static const struct comedi_lrange rtd_ao_range = {
4, {
UNI_RANGE(5),
UNI_RANGE(10),
BIP_RANGE(5),
BIP_RANGE(10),
}
};
enum rtd_boardid {
BOARD_DM7520,
BOARD_PCI4520,
};
struct rtd_boardinfo {
const char *name;
int range_bip10; /* start of +-10V range */
int range_uni10; /* start of +10V range */
const struct comedi_lrange *ai_range;
};
static const struct rtd_boardinfo rtd520_boards[] = {
[BOARD_DM7520] = {
.name = "DM7520",
.range_bip10 = 6,
.range_uni10 = 12,
.ai_range = &rtd_ai_7520_range,
},
[BOARD_PCI4520] = {
.name = "PCI4520",
.range_bip10 = 8,
.range_uni10 = 16,
.ai_range = &rtd_ai_4520_range,
},
};
struct rtd_private {
/* memory mapped board structures */
void __iomem *las1;
void __iomem *lcfg;
long ai_count; /* total transfer size (samples) */
int xfer_count; /* samples per transfer; 0 -> use 1/2 FIFO */
int flags; /* flag event modes */
unsigned int fifosz;
/* 8254 Timer/Counter gate and clock sources */
unsigned char timer_gate_src[3];
unsigned char timer_clk_src[3];
};
/* bit defines for "flags" */
#define SEND_EOS 0x01 /* send End Of Scan events */
#define DMA0_ACTIVE 0x02 /* DMA0 is active */
#define DMA1_ACTIVE 0x04 /* DMA1 is active */
/*
* Given a desired period and the clock period (both in ns), return the
* proper counter value (divider-1). Sets the original period to be the
* true value.
* Note: you have to check if the value is larger than the counter range!
*/
static int rtd_ns_to_timer_base(unsigned int *nanosec,
unsigned int flags, int base)
{
int divider;
switch (flags & CMDF_ROUND_MASK) {
case CMDF_ROUND_NEAREST:
default:
divider = DIV_ROUND_CLOSEST(*nanosec, base);
break;
case CMDF_ROUND_DOWN:
divider = (*nanosec) / base;
break;
case CMDF_ROUND_UP:
divider = DIV_ROUND_UP(*nanosec, base);
break;
}
if (divider < 2)
divider = 2; /* min is divide by 2 */
/*
* Note: we don't check for max, because different timers
* have different ranges
*/
*nanosec = base * divider;
return divider - 1; /* countdown is divisor+1 */
}
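/*
 * Worked example (illustrative, not from the original source): with
 * base = RTD_CLOCK_BASE (125 ns) and *nanosec = 1000, CMDF_ROUND_NEAREST
 * gives divider = DIV_ROUND_CLOSEST(1000, 125) = 8, *nanosec stays
 * 8 * 125 = 1000, and the function returns 7 (the count-down value).
 * A request of 300 ns rounds to divider = 2 (already the minimum), so
 * *nanosec becomes 250 and the return value is 1.
 */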
/*
* Given a desired period (in ns), return the proper counter value
* (divider-1) for the internal clock. Sets the original period to
* be the true value.
*/
static int rtd_ns_to_timer(unsigned int *ns, unsigned int flags)
{
return rtd_ns_to_timer_base(ns, flags, RTD_CLOCK_BASE);
}
/* Convert a single comedi channel-gain entry to a RTD520 table entry */
static unsigned short rtd_convert_chan_gain(struct comedi_device *dev,
unsigned int chanspec, int index)
{
const struct rtd_boardinfo *board = dev->board_ptr;
unsigned int chan = CR_CHAN(chanspec);
unsigned int range = CR_RANGE(chanspec);
unsigned int aref = CR_AREF(chanspec);
unsigned short r = 0;
r |= chan & 0xf;
/* encode the range group and gain step */
if (range < board->range_bip10) {
/* +-5 range */
r |= 0x000;
r |= (range & 0x7) << 4;
} else if (range < board->range_uni10) {
/* +-10 range */
r |= 0x100;
r |= ((range - board->range_bip10) & 0x7) << 4;
} else {
/* +10 range */
r |= 0x200;
r |= ((range - board->range_uni10) & 0x7) << 4;
}
switch (aref) {
case AREF_GROUND: /* on-board ground */
break;
case AREF_COMMON:
r |= 0x80; /* ref external analog common */
break;
case AREF_DIFF:
r |= 0x400; /* differential inputs */
break;
case AREF_OTHER: /* ??? */
break;
}
return r;
}
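/*
 * Example encoding (illustrative): on a DM7520 (range_bip10 = 6,
 * range_uni10 = 12), CR_PACK(3, 7, AREF_DIFF) produces
 * 0x003 (channel 3) | 0x100 (+-10V group) | 0x010 (gain step 7 - 6)
 * | 0x400 (differential) = 0x513.
 */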
/* Setup the channel-gain table from a comedi list */
static void rtd_load_channelgain_list(struct comedi_device *dev,
unsigned int n_chan, unsigned int *list)
{
if (n_chan > 1) { /* setup channel gain table */
int ii;
writel(0, dev->mmio + LAS0_CGT_CLEAR);
writel(1, dev->mmio + LAS0_CGT_ENABLE);
for (ii = 0; ii < n_chan; ii++) {
writel(rtd_convert_chan_gain(dev, list[ii], ii),
dev->mmio + LAS0_CGT_WRITE);
}
} else { /* just use the channel gain latch */
writel(0, dev->mmio + LAS0_CGT_ENABLE);
writel(rtd_convert_chan_gain(dev, list[0], 0),
dev->mmio + LAS0_CGL_WRITE);
}
}
/*
* Determine fifo size by doing adc conversions until the fifo half
* empty status flag clears.
*/
static int rtd520_probe_fifo_depth(struct comedi_device *dev)
{
unsigned int chanspec = CR_PACK(0, 0, AREF_GROUND);
unsigned int i;
static const unsigned int limit = 0x2000;
unsigned int fifo_size = 0;
writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR);
rtd_load_channelgain_list(dev, 1, &chanspec);
/* ADC conversion trigger source: SOFTWARE */
writel(0, dev->mmio + LAS0_ADC_CONVERSION);
/* convert samples */
for (i = 0; i < limit; ++i) {
unsigned int fifo_status;
/* trigger conversion */
writew(0, dev->mmio + LAS0_ADC);
usleep_range(1, 1000);
fifo_status = readl(dev->mmio + LAS0_ADC);
if ((fifo_status & FS_ADC_HEMPTY) == 0) {
fifo_size = 2 * i;
break;
}
}
if (i == limit) {
dev_info(dev->class_dev, "failed to probe fifo size.\n");
return -EIO;
}
writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR);
if (fifo_size != 0x400 && fifo_size != 0x2000) {
dev_info(dev->class_dev,
"unexpected fifo size of %i, expected 1024 or 8192.\n",
fifo_size);
return -EIO;
}
return fifo_size;
}
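/*
 * Note on the probe above (illustrative): FS_ADC_HEMPTY clears once
 * the FIFO is half full, so if the flag clears after i conversions the
 * depth is 2 * i. A 1K FIFO breaks out of the loop at i = 512
 * (fifo_size = 1024); an 8K FIFO at i = 4096 (fifo_size = 8192).
 */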
static int rtd_ai_eoc(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context)
{
unsigned int status;
status = readl(dev->mmio + LAS0_ADC);
if (status & FS_ADC_NOT_EMPTY)
return 0;
return -EBUSY;
}
static int rtd_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data)
{
struct rtd_private *devpriv = dev->private;
unsigned int range = CR_RANGE(insn->chanspec);
int ret;
int n;
/* clear any old fifo data */
writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR);
/* write channel to multiplexer and clear channel gain table */
rtd_load_channelgain_list(dev, 1, &insn->chanspec);
/* ADC conversion trigger source: SOFTWARE */
writel(0, dev->mmio + LAS0_ADC_CONVERSION);
/* convert n samples */
for (n = 0; n < insn->n; n++) {
unsigned short d;
/* trigger conversion */
writew(0, dev->mmio + LAS0_ADC);
ret = comedi_timeout(dev, s, insn, rtd_ai_eoc, 0);
if (ret)
return ret;
/* read data */
d = readw(devpriv->las1 + LAS1_ADC_FIFO);
d >>= 3; /* low 3 bits are marker lines */
/* convert bipolar data to comedi unsigned data */
if (comedi_range_is_bipolar(s, range))
d = comedi_offset_munge(s, d);
data[n] = d & s->maxdata;
}
/* return the number of samples read/written */
return n;
}
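/*
 * Sample conversion example (illustrative, assuming
 * comedi_offset_munge() toggles the sign bit, here 0x800): after the
 * 3-bit shift the 12-bit bipolar sample is 2's complement, so -2048
 * (0x800) munges to 0x000 and 0 munges to 0x800, matching comedi's
 * offset-binary convention of 0 = most negative, maxdata = most
 * positive.
 */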
static int ai_read_n(struct comedi_device *dev, struct comedi_subdevice *s,
int count)
{
struct rtd_private *devpriv = dev->private;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
int ii;
for (ii = 0; ii < count; ii++) {
unsigned int range = CR_RANGE(cmd->chanlist[async->cur_chan]);
unsigned short d;
if (devpriv->ai_count == 0) { /* done */
d = readw(devpriv->las1 + LAS1_ADC_FIFO);
continue;
}
d = readw(devpriv->las1 + LAS1_ADC_FIFO);
d >>= 3; /* low 3 bits are marker lines */
/* convert bipolar data to comedi unsigned data */
if (comedi_range_is_bipolar(s, range))
d = comedi_offset_munge(s, d);
d &= s->maxdata;
if (!comedi_buf_write_samples(s, &d, 1))
return -1;
if (devpriv->ai_count > 0) /* < 0, means read forever */
devpriv->ai_count--;
}
return 0;
}
static irqreturn_t rtd_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct comedi_subdevice *s = dev->read_subdev;
struct rtd_private *devpriv = dev->private;
u32 overrun;
u16 status;
u16 fifo_status;
if (!dev->attached)
return IRQ_NONE;
fifo_status = readl(dev->mmio + LAS0_ADC);
/* check for FIFO full, this automatically halts the ADC! */
if (!(fifo_status & FS_ADC_NOT_FULL)) /* 0 -> full */
goto xfer_abort;
status = readw(dev->mmio + LAS0_IT);
/* if interrupt was not caused by our board, or handled above */
if (status == 0)
return IRQ_HANDLED;
if (status & IRQM_ADC_ABOUT_CNT) { /* sample count -> read FIFO */
/*
* since the priority interrupt controller may have queued
* a sample counter interrupt, even though we have already
* finished, we must handle the possibility that there is
* no data here
*/
if (!(fifo_status & FS_ADC_HEMPTY)) {
/* FIFO half full */
if (ai_read_n(dev, s, devpriv->fifosz / 2) < 0)
goto xfer_abort;
if (devpriv->ai_count == 0)
goto xfer_done;
} else if (devpriv->xfer_count > 0) {
if (fifo_status & FS_ADC_NOT_EMPTY) {
/* FIFO not empty */
if (ai_read_n(dev, s, devpriv->xfer_count) < 0)
goto xfer_abort;
if (devpriv->ai_count == 0)
goto xfer_done;
}
}
}
overrun = readl(dev->mmio + LAS0_OVERRUN) & 0xffff;
if (overrun)
goto xfer_abort;
/* clear the interrupt */
writew(status, dev->mmio + LAS0_CLEAR);
readw(dev->mmio + LAS0_CLEAR);
comedi_handle_events(dev, s);
return IRQ_HANDLED;
xfer_abort:
s->async->events |= COMEDI_CB_ERROR;
xfer_done:
s->async->events |= COMEDI_CB_EOA;
/* clear the interrupt */
status = readw(dev->mmio + LAS0_IT);
writew(status, dev->mmio + LAS0_CLEAR);
readw(dev->mmio + LAS0_CLEAR);
fifo_status = readl(dev->mmio + LAS0_ADC);
overrun = readl(dev->mmio + LAS0_OVERRUN) & 0xffff;
comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
static int rtd_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
int err = 0;
unsigned int arg;
/* Step 1 : check if triggers are trivially valid */
err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
err |= comedi_check_trigger_src(&cmd->scan_begin_src,
TRIG_TIMER | TRIG_EXT);
err |= comedi_check_trigger_src(&cmd->convert_src,
TRIG_TIMER | TRIG_EXT);
err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
if (err)
return 1;
/* Step 2a : make sure trigger sources are unique */
err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
err |= comedi_check_trigger_is_unique(cmd->convert_src);
err |= comedi_check_trigger_is_unique(cmd->stop_src);
/* Step 2b : and mutually compatible */
if (err)
return 2;
/* Step 3: check if arguments are trivially valid */
err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
if (cmd->scan_begin_src == TRIG_TIMER) {
/* Note: these are time periods, not actual rates */
if (cmd->chanlist_len == 1) { /* no scanning */
if (comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
RTD_MAX_SPEED_1)) {
rtd_ns_to_timer(&cmd->scan_begin_arg,
CMDF_ROUND_UP);
err |= -EINVAL;
}
if (comedi_check_trigger_arg_max(&cmd->scan_begin_arg,
RTD_MIN_SPEED_1)) {
rtd_ns_to_timer(&cmd->scan_begin_arg,
CMDF_ROUND_DOWN);
err |= -EINVAL;
}
} else {
if (comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
RTD_MAX_SPEED)) {
rtd_ns_to_timer(&cmd->scan_begin_arg,
CMDF_ROUND_UP);
err |= -EINVAL;
}
if (comedi_check_trigger_arg_max(&cmd->scan_begin_arg,
RTD_MIN_SPEED)) {
rtd_ns_to_timer(&cmd->scan_begin_arg,
CMDF_ROUND_DOWN);
err |= -EINVAL;
}
}
} else {
/* external trigger */
/* should be level/edge, hi/lo specification here */
/* should specify multiple external triggers */
err |= comedi_check_trigger_arg_max(&cmd->scan_begin_arg, 9);
}
if (cmd->convert_src == TRIG_TIMER) {
if (cmd->chanlist_len == 1) { /* no scanning */
if (comedi_check_trigger_arg_min(&cmd->convert_arg,
RTD_MAX_SPEED_1)) {
rtd_ns_to_timer(&cmd->convert_arg,
CMDF_ROUND_UP);
err |= -EINVAL;
}
if (comedi_check_trigger_arg_max(&cmd->convert_arg,
RTD_MIN_SPEED_1)) {
rtd_ns_to_timer(&cmd->convert_arg,
CMDF_ROUND_DOWN);
err |= -EINVAL;
}
} else {
if (comedi_check_trigger_arg_min(&cmd->convert_arg,
RTD_MAX_SPEED)) {
rtd_ns_to_timer(&cmd->convert_arg,
CMDF_ROUND_UP);
err |= -EINVAL;
}
if (comedi_check_trigger_arg_max(&cmd->convert_arg,
RTD_MIN_SPEED)) {
rtd_ns_to_timer(&cmd->convert_arg,
CMDF_ROUND_DOWN);
err |= -EINVAL;
}
}
} else {
/* external trigger */
/* see above */
err |= comedi_check_trigger_arg_max(&cmd->convert_arg, 9);
}
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
if (cmd->stop_src == TRIG_COUNT)
err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
else /* TRIG_NONE */
err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
if (err)
return 3;
/* step 4: fix up any arguments */
if (cmd->scan_begin_src == TRIG_TIMER) {
arg = cmd->scan_begin_arg;
rtd_ns_to_timer(&arg, cmd->flags);
err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
}
if (cmd->convert_src == TRIG_TIMER) {
arg = cmd->convert_arg;
rtd_ns_to_timer(&arg, cmd->flags);
err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
if (cmd->scan_begin_src == TRIG_TIMER) {
arg = cmd->convert_arg * cmd->scan_end_arg;
err |= comedi_check_trigger_arg_min(
&cmd->scan_begin_arg, arg);
}
}
if (err)
return 4;
return 0;
}
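/*
 * Sketch of a command that passes the checks above (illustrative,
 * using the real struct comedi_cmd fields): start_src = TRIG_NOW
 * (start_arg = 0); scan_begin_src = TRIG_TIMER, scan_begin_arg =
 * 1000000 (1 ms per scan); convert_src = TRIG_TIMER, convert_arg =
 * 10000 (10 us per sample); scan_end_src = TRIG_COUNT, scan_end_arg =
 * chanlist_len (e.g. 8); stop_src = TRIG_COUNT, stop_arg = 100.
 * Step 4 snaps both periods to multiples of RTD_CLOCK_BASE (125 ns)
 * and enforces scan_begin_arg >= convert_arg * scan_end_arg
 * (1000000 >= 80000 here).
 */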
static int rtd_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct rtd_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
int timer;
/* stop anything currently running */
/* pacer stop source: SOFTWARE */
writel(0, dev->mmio + LAS0_PACER_STOP);
writel(0, dev->mmio + LAS0_PACER); /* stop pacer */
writel(0, dev->mmio + LAS0_ADC_CONVERSION);
writew(0, dev->mmio + LAS0_IT);
writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR);
writel(0, dev->mmio + LAS0_OVERRUN);
/* start configuration */
/* load channel list and reset CGT */
rtd_load_channelgain_list(dev, cmd->chanlist_len, cmd->chanlist);
/* setup the common case and override if needed */
if (cmd->chanlist_len > 1) {
/* pacer start source: SOFTWARE */
writel(0, dev->mmio + LAS0_PACER_START);
/* burst trigger source: PACER */
writel(1, dev->mmio + LAS0_BURST_START);
/* ADC conversion trigger source: BURST */
writel(2, dev->mmio + LAS0_ADC_CONVERSION);
} else { /* single channel */
/* pacer start source: SOFTWARE */
writel(0, dev->mmio + LAS0_PACER_START);
/* ADC conversion trigger source: PACER */
writel(1, dev->mmio + LAS0_ADC_CONVERSION);
}
writel((devpriv->fifosz / 2 - 1) & 0xffff, dev->mmio + LAS0_ACNT);
if (cmd->scan_begin_src == TRIG_TIMER) {
/* scan_begin_arg is in nanoseconds */
/* find out how many samples to wait before transferring */
if (cmd->flags & CMDF_WAKE_EOS) {
/*
* this may generate un-sustainable interrupt rates
* the application is responsible for doing the
* right thing
*/
devpriv->xfer_count = cmd->chanlist_len;
devpriv->flags |= SEND_EOS;
} else {
/* arrange to transfer data periodically */
devpriv->xfer_count =
(TRANS_TARGET_PERIOD * cmd->chanlist_len) /
cmd->scan_begin_arg;
if (devpriv->xfer_count < cmd->chanlist_len) {
/* transfer after each scan (and avoid 0) */
devpriv->xfer_count = cmd->chanlist_len;
} else { /* make a multiple of scan length */
devpriv->xfer_count =
DIV_ROUND_UP(devpriv->xfer_count,
cmd->chanlist_len);
devpriv->xfer_count *= cmd->chanlist_len;
}
devpriv->flags |= SEND_EOS;
}
if (devpriv->xfer_count >= (devpriv->fifosz / 2)) {
/* out of counter range, use 1/2 fifo instead */
devpriv->xfer_count = 0;
devpriv->flags &= ~SEND_EOS;
} else {
/* interrupt for each transfer */
writel((devpriv->xfer_count - 1) & 0xffff,
dev->mmio + LAS0_ACNT);
}
} else { /* unknown timing, just use 1/2 FIFO */
devpriv->xfer_count = 0;
devpriv->flags &= ~SEND_EOS;
}
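/*
 * Worked example of the sizing above (illustrative): 8 channels
 * scanned every 1 ms without CMDF_WAKE_EOS gives xfer_count =
 * (10000000 * 8) / 1000000 = 80 samples, already a multiple of the
 * scan length, so ACNT is set to 79 and the board interrupts once
 * per 10 ms, the TRANS_TARGET_PERIOD goal. With a 1024-sample FIFO,
 * 80 < 512, so the half-FIFO fallback is not triggered.
 */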
/* pacer clock source: INTERNAL 8MHz */
writel(1, dev->mmio + LAS0_PACER_SELECT);
/* just interrupt, don't stop */
writel(1, dev->mmio + LAS0_ACNT_STOP_ENABLE);
/* BUG??? these look like enumerated values, but they are bit fields */
/* First, setup when to stop */
switch (cmd->stop_src) {
case TRIG_COUNT: /* stop after N scans */
devpriv->ai_count = cmd->stop_arg * cmd->chanlist_len;
if ((devpriv->xfer_count > 0) &&
(devpriv->xfer_count > devpriv->ai_count)) {
devpriv->xfer_count = devpriv->ai_count;
}
break;
case TRIG_NONE: /* stop when cancel is called */
devpriv->ai_count = -1; /* read forever */
break;
}
/* Scan timing */
switch (cmd->scan_begin_src) {
case TRIG_TIMER: /* periodic scanning */
timer = rtd_ns_to_timer(&cmd->scan_begin_arg,
CMDF_ROUND_NEAREST);
/* set PACER clock */
writel(timer & 0xffffff, dev->mmio + LAS0_PCLK);
break;
case TRIG_EXT:
/* pacer start source: EXTERNAL */
writel(1, dev->mmio + LAS0_PACER_START);
break;
}
/* Sample timing within a scan */
switch (cmd->convert_src) {
case TRIG_TIMER: /* periodic */
if (cmd->chanlist_len > 1) {
/* only needed for multi-channel */
timer = rtd_ns_to_timer(&cmd->convert_arg,
CMDF_ROUND_NEAREST);
/* setup BURST clock */
writel(timer & 0x3ff, dev->mmio + LAS0_BCLK);
}
break;
case TRIG_EXT: /* external */
/* burst trigger source: EXTERNAL */
writel(2, dev->mmio + LAS0_BURST_START);
break;
}
/* end configuration */
/*
* This doesn't seem to work. There is no way to clear an interrupt
* that the priority controller has queued!
*/
writew(~0, dev->mmio + LAS0_CLEAR);
readw(dev->mmio + LAS0_CLEAR);
/* TODO: allow multiple interrupt sources */
/* transfer every N samples */
writew(IRQM_ADC_ABOUT_CNT, dev->mmio + LAS0_IT);
/* BUG: start_src is ASSUMED to be TRIG_NOW */
/* BUG? it seems like things are running before the "start" */
readl(dev->mmio + LAS0_PACER); /* start pacer */
return 0;
}
static int rtd_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct rtd_private *devpriv = dev->private;
/* pacer stop source: SOFTWARE */
writel(0, dev->mmio + LAS0_PACER_STOP);
writel(0, dev->mmio + LAS0_PACER); /* stop pacer */
writel(0, dev->mmio + LAS0_ADC_CONVERSION);
writew(0, dev->mmio + LAS0_IT);
devpriv->ai_count = 0; /* stop and don't transfer any more */
writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR);
return 0;
}
static int rtd_ao_eoc(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int bit = (chan == 0) ? FS_DAC1_NOT_EMPTY : FS_DAC2_NOT_EMPTY;
unsigned int status;
status = readl(dev->mmio + LAS0_ADC);
if (status & bit)
return 0;
return -EBUSY;
}
static int rtd_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct rtd_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int range = CR_RANGE(insn->chanspec);
int ret;
int i;
/* Configure the output range (table index matches the range values) */
writew(range & 7, dev->mmio + LAS0_DAC_CTRL(chan));
for (i = 0; i < insn->n; ++i) {
unsigned int val = data[i];
/* bipolar uses 2's complement values with an extended sign */
if (comedi_range_is_bipolar(s, range)) {
val = comedi_offset_munge(s, val);
val |= (val & ((s->maxdata + 1) >> 1)) << 1;
}
/* shift the 12-bit data (+ sign) to match the register */
val <<= 3;
writew(val, devpriv->las1 + LAS1_DAC_FIFO(chan));
writew(0, dev->mmio + LAS0_UPDATE_DAC(chan));
ret = comedi_timeout(dev, s, insn, rtd_ao_eoc, 0);
if (ret)
return ret;
s->readback[chan] = data[i];
}
return insn->n;
}
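/*
 * Conversion example (illustrative): for a bipolar range, comedi
 * data 0 (most negative) munges to 2's complement 0x800, the sign
 * is extended into bit 12 (0x1800), and the 3-bit shift yields
 * 0xc000 in the DAC FIFO register; mid-scale 0x800 munges to 0x000
 * and is written as 0x0000.
 */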
static int rtd_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
if (comedi_dio_update_state(s, data))
writew(s->state & 0xff, dev->mmio + LAS0_DIO0);
data[1] = readw(dev->mmio + LAS0_DIO0) & 0xff;
return insn->n;
}
static int rtd_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
int ret;
ret = comedi_dio_insn_config(dev, s, insn, data, 0);
if (ret)
return ret;
/* TODO support digital match interrupts and strobes */
/* set direction */
writew(0x01, dev->mmio + LAS0_DIO_STATUS);
writew(s->io_bits & 0xff, dev->mmio + LAS0_DIO0_CTRL);
/* clear interrupts */
writew(0x00, dev->mmio + LAS0_DIO_STATUS);
/* port1 can only be all input or all output */
/* there are also 2 user input lines and 2 user output lines */
return insn->n;
}
static int rtd_counter_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct rtd_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int max_src;
unsigned int src;
switch (data[0]) {
case INSN_CONFIG_SET_GATE_SRC:
/*
* 8254 Timer/Counter gate sources:
*
* 0 = Not gated, free running (reset state)
* 1 = Gated, off
* 2 = Ext. TC Gate 1
* 3 = Ext. TC Gate 2
* 4 = Previous TC out (chan 1 and 2 only)
*/
src = data[2];
max_src = (chan == 0) ? 3 : 4;
if (src > max_src)
return -EINVAL;
devpriv->timer_gate_src[chan] = src;
writeb(src, dev->mmio + LAS0_8254_GATE_SEL(chan));
break;
case INSN_CONFIG_GET_GATE_SRC:
data[2] = devpriv->timer_gate_src[chan];
break;
case INSN_CONFIG_SET_CLOCK_SRC:
/*
* 8254 Timer/Counter clock sources:
*
* 0 = 8 MHz (reset state)
* 1 = Ext. TC Clock 1
* 2 = Ext. TC Clock 2
* 3 = Ext. Pacer Clock
* 4 = Previous TC out (chan 1 and 2 only)
* 5 = High-Speed Digital Input Sampling signal (chan 1 only)
*/
src = data[1];
switch (chan) {
case 0:
max_src = 3;
break;
case 1:
max_src = 5;
break;
case 2:
max_src = 4;
break;
default:
return -EINVAL;
}
if (src > max_src)
return -EINVAL;
devpriv->timer_clk_src[chan] = src;
writeb(src, dev->mmio + LAS0_8254_CLK_SEL(chan));
break;
case INSN_CONFIG_GET_CLOCK_SRC:
src = devpriv->timer_clk_src[chan];
data[1] = devpriv->timer_clk_src[chan];
data[2] = (src == 0) ? RTD_CLOCK_BASE : 0;
break;
default:
return -EINVAL;
}
return insn->n;
}
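/*
 * Usage sketch (illustrative): an INSN_CONFIG instruction with
 * data[0] = INSN_CONFIG_SET_CLOCK_SRC and data[1] = 0 selects the
 * internal 8 MHz clock for the addressed counter; a subsequent
 * INSN_CONFIG_GET_CLOCK_SRC then reads back data[1] = 0 and
 * data[2] = RTD_CLOCK_BASE (the 125 ns clock period).
 */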
static void rtd_reset(struct comedi_device *dev)
{
struct rtd_private *devpriv = dev->private;
writel(0, dev->mmio + LAS0_BOARD_RESET);
usleep_range(100, 1000); /* needed? */
writel(0, devpriv->lcfg + PLX_REG_INTCSR);
writew(0, dev->mmio + LAS0_IT);
writew(~0, dev->mmio + LAS0_CLEAR);
readw(dev->mmio + LAS0_CLEAR);
}
/*
* initialize board, per RTD spec
* also, initialize shadow registers
*/
static void rtd_init_board(struct comedi_device *dev)
{
rtd_reset(dev);
writel(0, dev->mmio + LAS0_OVERRUN);
writel(0, dev->mmio + LAS0_CGT_CLEAR);
writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR);
writel(0, dev->mmio + LAS0_DAC_RESET(0));
writel(0, dev->mmio + LAS0_DAC_RESET(1));
/* clear digital IO fifo */
writew(0, dev->mmio + LAS0_DIO_STATUS);
/* TODO: set user out source ??? */
}
/* The RTD driver does this */
static void rtd_pci_latency_quirk(struct comedi_device *dev,
struct pci_dev *pcidev)
{
unsigned char pci_latency;
pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency);
if (pci_latency < 32) {
dev_info(dev->class_dev,
"PCI latency changed from %d to %d\n",
pci_latency, 32);
pci_write_config_byte(pcidev, PCI_LATENCY_TIMER, 32);
}
}
static int rtd_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct rtd_boardinfo *board = NULL;
struct rtd_private *devpriv;
struct comedi_subdevice *s;
int ret;
if (context < ARRAY_SIZE(rtd520_boards))
board = &rtd520_boards[context];
if (!board)
return -ENODEV;
dev->board_ptr = board;
dev->board_name = board->name;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
ret = comedi_pci_enable(dev);
if (ret)
return ret;
dev->mmio = pci_ioremap_bar(pcidev, 2);
devpriv->las1 = pci_ioremap_bar(pcidev, 3);
devpriv->lcfg = pci_ioremap_bar(pcidev, 0);
if (!dev->mmio || !devpriv->las1 || !devpriv->lcfg)
return -ENOMEM;
rtd_pci_latency_quirk(dev, pcidev);
if (pcidev->irq) {
ret = request_irq(pcidev->irq, rtd_interrupt, IRQF_SHARED,
dev->board_name, dev);
if (ret == 0)
dev->irq = pcidev->irq;
}
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
s = &dev->subdevices[0];
/* analog input subdevice */
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_COMMON | SDF_DIFF;
s->n_chan = 16;
s->maxdata = 0x0fff;
s->range_table = board->ai_range;
s->len_chanlist = RTD_MAX_CHANLIST;
s->insn_read = rtd_ai_rinsn;
if (dev->irq) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
s->do_cmd = rtd_ai_cmd;
s->do_cmdtest = rtd_ai_cmdtest;
s->cancel = rtd_ai_cancel;
}
s = &dev->subdevices[1];
/* analog output subdevice */
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 2;
s->maxdata = 0x0fff;
s->range_table = &rtd_ao_range;
s->insn_write = rtd_ao_insn_write;
ret = comedi_alloc_subdev_readback(s);
if (ret)
return ret;
s = &dev->subdevices[2];
/* digital i/o subdevice */
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
/* we only support port 0 right now. Ignoring port 1 and user IO */
s->n_chan = 8;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = rtd_dio_insn_bits;
s->insn_config = rtd_dio_insn_config;
/* 8254 Timer/Counter subdevice */
s = &dev->subdevices[3];
dev->pacer = comedi_8254_mm_init(dev->mmio + LAS0_8254_TIMER_BASE,
RTD_CLOCK_BASE, I8254_IO8, 2);
if (!dev->pacer)
return -ENOMEM;
comedi_8254_subdevice_init(s, dev->pacer);
dev->pacer->insn_config = rtd_counter_insn_config;
rtd_init_board(dev);
ret = rtd520_probe_fifo_depth(dev);
if (ret < 0)
return ret;
devpriv->fifosz = ret;
if (dev->irq)
writel(PLX_INTCSR_PIEN | PLX_INTCSR_PLIEN,
devpriv->lcfg + PLX_REG_INTCSR);
return 0;
}
static void rtd_detach(struct comedi_device *dev)
{
struct rtd_private *devpriv = dev->private;
if (devpriv) {
/* Shut down any board ops by resetting it */
if (dev->mmio && devpriv->lcfg)
rtd_reset(dev);
if (dev->irq)
free_irq(dev->irq, dev);
if (dev->mmio)
iounmap(dev->mmio);
if (devpriv->las1)
iounmap(devpriv->las1);
if (devpriv->lcfg)
iounmap(devpriv->lcfg);
}
comedi_pci_disable(dev);
}
static struct comedi_driver rtd520_driver = {
.driver_name = "rtd520",
.module = THIS_MODULE,
.auto_attach = rtd_auto_attach,
.detach = rtd_detach,
};
static int rtd520_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
return comedi_pci_auto_config(dev, &rtd520_driver, id->driver_data);
}
static const struct pci_device_id rtd520_pci_table[] = {
{ PCI_VDEVICE(RTD, 0x7520), BOARD_DM7520 },
{ PCI_VDEVICE(RTD, 0x4520), BOARD_PCI4520 },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, rtd520_pci_table);
static struct pci_driver rtd520_pci_driver = {
.name = "rtd520",
.id_table = rtd520_pci_table,
.probe = rtd520_pci_probe,
.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(rtd520_driver, rtd520_pci_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/rtd520.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Comedi driver for NI 670x devices
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2001 David A. Schleef <[email protected]>
*/
/*
* Driver: ni_670x
* Description: National Instruments 670x
* Author: Bart Joris <[email protected]>
* Updated: Wed, 11 Dec 2002 18:25:35 -0800
* Devices: [National Instruments] PCI-6703 (ni_670x), PXI-6704, PCI-6704
* Status: unknown
*
* Commands are not supported.
*
* Manuals:
* 322110a.pdf PCI/PXI-6704 User Manual
* 322110b.pdf PCI/PXI-6703/6704 User Manual
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/comedi/comedi_pci.h>
#define AO_VALUE_OFFSET 0x00
#define AO_CHAN_OFFSET 0x0c
#define AO_STATUS_OFFSET 0x10
#define AO_CONTROL_OFFSET 0x10
#define DIO_PORT0_DIR_OFFSET 0x20
#define DIO_PORT0_DATA_OFFSET 0x24
#define DIO_PORT1_DIR_OFFSET 0x28
#define DIO_PORT1_DATA_OFFSET 0x2c
#define MISC_STATUS_OFFSET 0x14
#define MISC_CONTROL_OFFSET 0x14
enum ni_670x_boardid {
BOARD_PCI6703,
BOARD_PXI6704,
BOARD_PCI6704,
};
struct ni_670x_board {
const char *name;
unsigned short ao_chans;
};
static const struct ni_670x_board ni_670x_boards[] = {
[BOARD_PCI6703] = {
.name = "PCI-6703",
.ao_chans = 16,
},
[BOARD_PXI6704] = {
.name = "PXI-6704",
.ao_chans = 32,
},
[BOARD_PCI6704] = {
.name = "PCI-6704",
.ao_chans = 32,
},
};
struct ni_670x_private {
int boardtype;
int dio;
};
static int ni_670x_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int val = s->readback[chan];
int i;
/*
* Channel number mapping:
*
* NI 6703/ NI 6704 | NI 6704 Only
* -------------------------------
* vch(0) : 0 | ich(16) : 1
* vch(1) : 2 | ich(17) : 3
* ... | ...
* vch(15) : 30 | ich(31) : 31
*/
for (i = 0; i < insn->n; i++) {
val = data[i];
/* First write in channel register which channel to use */
writel(((chan & 15) << 1) | ((chan & 16) >> 4),
dev->mmio + AO_CHAN_OFFSET);
/* write channel value */
writel(val, dev->mmio + AO_VALUE_OFFSET);
}
s->readback[chan] = val;
return insn->n;
}
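/*
 * Example of the mapping above (illustrative): chan = 17 (ich(17))
 * gives ((17 & 15) << 1) | ((17 & 16) >> 4) = 2 | 1 = 3, and
 * chan = 1 (vch(1)) gives (1 << 1) | 0 = 2, matching the table.
 */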
static int ni_670x_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
if (comedi_dio_update_state(s, data))
writel(s->state, dev->mmio + DIO_PORT0_DATA_OFFSET);
data[1] = readl(dev->mmio + DIO_PORT0_DATA_OFFSET);
return insn->n;
}
static int ni_670x_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
int ret;
ret = comedi_dio_insn_config(dev, s, insn, data, 0);
if (ret)
return ret;
writel(s->io_bits, dev->mmio + DIO_PORT0_DIR_OFFSET);
return insn->n;
}
/* ripped from mite.h and mite_setup2() to avoid mite dependency */
#define MITE_IODWBSR 0xc0 /* IO Device Window Base Size Register */
#define WENAB BIT(7) /* window enable */
static int ni_670x_mite_init(struct pci_dev *pcidev)
{
void __iomem *mite_base;
u32 main_phys_addr;
/* ioremap the MITE registers (BAR 0) temporarily */
mite_base = pci_ioremap_bar(pcidev, 0);
if (!mite_base)
return -ENOMEM;
/* set data window to main registers (BAR 1) */
main_phys_addr = pci_resource_start(pcidev, 1);
writel(main_phys_addr | WENAB, mite_base + MITE_IODWBSR);
/* finished with MITE registers */
iounmap(mite_base);
return 0;
}
static int ni_670x_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct ni_670x_board *board = NULL;
struct ni_670x_private *devpriv;
struct comedi_subdevice *s;
int ret;
int i;
if (context < ARRAY_SIZE(ni_670x_boards))
board = &ni_670x_boards[context];
if (!board)
return -ENODEV;
dev->board_ptr = board;
dev->board_name = board->name;
ret = comedi_pci_enable(dev);
if (ret)
return ret;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
ret = ni_670x_mite_init(pcidev);
if (ret)
return ret;
dev->mmio = pci_ioremap_bar(pcidev, 1);
if (!dev->mmio)
return -ENOMEM;
ret = comedi_alloc_subdevices(dev, 2);
if (ret)
return ret;
s = &dev->subdevices[0];
/* analog output subdevice */
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = board->ao_chans;
s->maxdata = 0xffff;
if (s->n_chan == 32) {
const struct comedi_lrange **range_table_list;
range_table_list = kmalloc_array(32,
sizeof(struct comedi_lrange *),
GFP_KERNEL);
if (!range_table_list)
return -ENOMEM;
s->range_table_list = range_table_list;
for (i = 0; i < 16; i++) {
range_table_list[i] = &range_bipolar10;
range_table_list[16 + i] = &range_0_20mA;
}
} else {
s->range_table = &range_bipolar10;
}
s->insn_write = ni_670x_ao_insn_write;
ret = comedi_alloc_subdev_readback(s);
if (ret)
return ret;
s = &dev->subdevices[1];
/* digital i/o subdevice */
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = 8;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = ni_670x_dio_insn_bits;
s->insn_config = ni_670x_dio_insn_config;
/* Config of misc registers */
writel(0x10, dev->mmio + MISC_CONTROL_OFFSET);
/* Config of ao registers */
writel(0x00, dev->mmio + AO_CONTROL_OFFSET);
return 0;
}
static void ni_670x_detach(struct comedi_device *dev)
{
struct comedi_subdevice *s;
comedi_pci_detach(dev);
if (dev->n_subdevices) {
s = &dev->subdevices[0];
if (s)
kfree(s->range_table_list);
}
}
static struct comedi_driver ni_670x_driver = {
.driver_name = "ni_670x",
.module = THIS_MODULE,
.auto_attach = ni_670x_auto_attach,
.detach = ni_670x_detach,
};
static int ni_670x_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
return comedi_pci_auto_config(dev, &ni_670x_driver, id->driver_data);
}
static const struct pci_device_id ni_670x_pci_table[] = {
{ PCI_VDEVICE(NI, 0x1290), BOARD_PCI6704 },
{ PCI_VDEVICE(NI, 0x1920), BOARD_PXI6704 },
{ PCI_VDEVICE(NI, 0x2c90), BOARD_PCI6703 },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, ni_670x_pci_table);
static struct pci_driver ni_670x_pci_driver = {
.name = "ni_670x",
.id_table = ni_670x_pci_table,
.probe = ni_670x_pci_probe,
.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(ni_670x_driver, ni_670x_pci_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/ni_670x.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Hardware driver for NI 660x devices
*/
/*
* Driver: ni_660x
* Description: National Instruments 660x counter/timer boards
* Devices: [National Instruments] PCI-6601 (ni_660x), PCI-6602, PXI-6602,
* PCI-6608, PXI-6608, PCI-6624, PXI-6624
* Author: J.P. Mellor <[email protected]>,
* [email protected],
* [email protected],
* [email protected],
* Frank Mori Hess <[email protected]>
* Updated: Mon, 16 Jan 2017 14:00:43 +0000
* Status: experimental
*
* Encoders work. PulseGeneration (both single pulse and pulse train)
* works. Buffered commands work for input but not output.
*
* References:
* DAQ 660x Register-Level Programmer Manual (NI 370505A-01)
* DAQ 6601/6602 User Manual (NI 322137B-01)
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/comedi/comedi_pci.h>
#include "mite.h"
#include "ni_tio.h"
#include "ni_routes.h"
/* See Register-Level Programmer Manual page 3.1 */
enum ni_660x_register {
/* see enum ni_gpct_register */
NI660X_STC_DIO_PARALLEL_INPUT = NITIO_NUM_REGS,
NI660X_STC_DIO_OUTPUT,
NI660X_STC_DIO_CONTROL,
NI660X_STC_DIO_SERIAL_INPUT,
NI660X_DIO32_INPUT,
NI660X_DIO32_OUTPUT,
NI660X_CLK_CFG,
NI660X_GLOBAL_INT_STATUS,
NI660X_DMA_CFG,
NI660X_GLOBAL_INT_CFG,
NI660X_IO_CFG_0_1,
NI660X_IO_CFG_2_3,
NI660X_IO_CFG_4_5,
NI660X_IO_CFG_6_7,
NI660X_IO_CFG_8_9,
NI660X_IO_CFG_10_11,
NI660X_IO_CFG_12_13,
NI660X_IO_CFG_14_15,
NI660X_IO_CFG_16_17,
NI660X_IO_CFG_18_19,
NI660X_IO_CFG_20_21,
NI660X_IO_CFG_22_23,
NI660X_IO_CFG_24_25,
NI660X_IO_CFG_26_27,
NI660X_IO_CFG_28_29,
NI660X_IO_CFG_30_31,
NI660X_IO_CFG_32_33,
NI660X_IO_CFG_34_35,
NI660X_IO_CFG_36_37,
NI660X_IO_CFG_38_39,
NI660X_NUM_REGS,
};
#define NI660X_CLK_CFG_COUNTER_SWAP BIT(21)
#define NI660X_GLOBAL_INT_COUNTER0 BIT(8)
#define NI660X_GLOBAL_INT_COUNTER1 BIT(9)
#define NI660X_GLOBAL_INT_COUNTER2 BIT(10)
#define NI660X_GLOBAL_INT_COUNTER3 BIT(11)
#define NI660X_GLOBAL_INT_CASCADE BIT(29)
#define NI660X_GLOBAL_INT_GLOBAL_POL BIT(30)
#define NI660X_GLOBAL_INT_GLOBAL BIT(31)
#define NI660X_DMA_CFG_SEL(_c, _s) (((_s) & 0x1f) << (8 * (_c)))
#define NI660X_DMA_CFG_SEL_MASK(_c) NI660X_DMA_CFG_SEL((_c), 0x1f)
#define NI660X_DMA_CFG_SEL_NONE(_c) NI660X_DMA_CFG_SEL((_c), 0x1f)
/* the reset flag lies outside the 5-bit select field, so set it directly */
#define NI660X_DMA_CFG_RESET(_c) (0x80 << (8 * (_c)))
#define NI660X_IO_CFG(x) (NI660X_IO_CFG_0_1 + ((x) / 2))
#define NI660X_IO_CFG_OUT_SEL(_c, _s) (((_s) & 0x3) << (((_c) % 2) ? 0 : 8))
#define NI660X_IO_CFG_OUT_SEL_MASK(_c) NI660X_IO_CFG_OUT_SEL((_c), 0x3)
#define NI660X_IO_CFG_IN_SEL(_c, _s) (((_s) & 0x7) << (((_c) % 2) ? 4 : 12))
#define NI660X_IO_CFG_IN_SEL_MASK(_c) NI660X_IO_CFG_IN_SEL((_c), 0x7)
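/*
 * Encoding example (illustrative): PFI channels are packed two per
 * 16-bit IO_CFG register, so NI660X_IO_CFG(5) selects
 * NI660X_IO_CFG_4_5. The odd channel uses the low byte and the even
 * channel the high byte: NI660X_IO_CFG_OUT_SEL(5, 2) == 0x0002 while
 * NI660X_IO_CFG_OUT_SEL(4, 2) == 0x0200.
 */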
struct ni_660x_register_data {
int offset; /* Offset from base address from GPCT chip */
char size; /* 2 or 4 bytes */
};
static const struct ni_660x_register_data ni_660x_reg_data[NI660X_NUM_REGS] = {
[NITIO_G0_INT_ACK] = { 0x004, 2 }, /* write */
[NITIO_G0_STATUS] = { 0x004, 2 }, /* read */
[NITIO_G1_INT_ACK] = { 0x006, 2 }, /* write */
[NITIO_G1_STATUS] = { 0x006, 2 }, /* read */
[NITIO_G01_STATUS] = { 0x008, 2 }, /* read */
[NITIO_G0_CMD] = { 0x00c, 2 }, /* write */
[NI660X_STC_DIO_PARALLEL_INPUT] = { 0x00e, 2 }, /* read */
[NITIO_G1_CMD] = { 0x00e, 2 }, /* write */
[NITIO_G0_HW_SAVE] = { 0x010, 4 }, /* read */
[NITIO_G1_HW_SAVE] = { 0x014, 4 }, /* read */
[NI660X_STC_DIO_OUTPUT] = { 0x014, 2 }, /* write */
[NI660X_STC_DIO_CONTROL] = { 0x016, 2 }, /* write */
[NITIO_G0_SW_SAVE] = { 0x018, 4 }, /* read */
[NITIO_G1_SW_SAVE] = { 0x01c, 4 }, /* read */
[NITIO_G0_MODE] = { 0x034, 2 }, /* write */
[NITIO_G01_STATUS1] = { 0x036, 2 }, /* read */
[NITIO_G1_MODE] = { 0x036, 2 }, /* write */
[NI660X_STC_DIO_SERIAL_INPUT] = { 0x038, 2 }, /* read */
[NITIO_G0_LOADA] = { 0x038, 4 }, /* write */
[NITIO_G01_STATUS2] = { 0x03a, 2 }, /* read */
[NITIO_G0_LOADB] = { 0x03c, 4 }, /* write */
[NITIO_G1_LOADA] = { 0x040, 4 }, /* write */
[NITIO_G1_LOADB] = { 0x044, 4 }, /* write */
[NITIO_G0_INPUT_SEL] = { 0x048, 2 }, /* write */
[NITIO_G1_INPUT_SEL] = { 0x04a, 2 }, /* write */
[NITIO_G0_AUTO_INC] = { 0x088, 2 }, /* write */
[NITIO_G1_AUTO_INC] = { 0x08a, 2 }, /* write */
[NITIO_G01_RESET] = { 0x090, 2 }, /* write */
[NITIO_G0_INT_ENA] = { 0x092, 2 }, /* write */
[NITIO_G1_INT_ENA] = { 0x096, 2 }, /* write */
[NITIO_G0_CNT_MODE] = { 0x0b0, 2 }, /* write */
[NITIO_G1_CNT_MODE] = { 0x0b2, 2 }, /* write */
[NITIO_G0_GATE2] = { 0x0b4, 2 }, /* write */
[NITIO_G1_GATE2] = { 0x0b6, 2 }, /* write */
[NITIO_G0_DMA_CFG] = { 0x0b8, 2 }, /* write */
[NITIO_G0_DMA_STATUS] = { 0x0b8, 2 }, /* read */
[NITIO_G1_DMA_CFG] = { 0x0ba, 2 }, /* write */
[NITIO_G1_DMA_STATUS] = { 0x0ba, 2 }, /* read */
[NITIO_G2_INT_ACK] = { 0x104, 2 }, /* write */
[NITIO_G2_STATUS] = { 0x104, 2 }, /* read */
[NITIO_G3_INT_ACK] = { 0x106, 2 }, /* write */
[NITIO_G3_STATUS] = { 0x106, 2 }, /* read */
[NITIO_G23_STATUS] = { 0x108, 2 }, /* read */
[NITIO_G2_CMD] = { 0x10c, 2 }, /* write */
[NITIO_G3_CMD] = { 0x10e, 2 }, /* write */
[NITIO_G2_HW_SAVE] = { 0x110, 4 }, /* read */
[NITIO_G3_HW_SAVE] = { 0x114, 4 }, /* read */
[NITIO_G2_SW_SAVE] = { 0x118, 4 }, /* read */
[NITIO_G3_SW_SAVE] = { 0x11c, 4 }, /* read */
[NITIO_G2_MODE] = { 0x134, 2 }, /* write */
[NITIO_G23_STATUS1] = { 0x136, 2 }, /* read */
[NITIO_G3_MODE] = { 0x136, 2 }, /* write */
[NITIO_G2_LOADA] = { 0x138, 4 }, /* write */
[NITIO_G23_STATUS2] = { 0x13a, 2 }, /* read */
[NITIO_G2_LOADB] = { 0x13c, 4 }, /* write */
[NITIO_G3_LOADA] = { 0x140, 4 }, /* write */
[NITIO_G3_LOADB] = { 0x144, 4 }, /* write */
[NITIO_G2_INPUT_SEL] = { 0x148, 2 }, /* write */
[NITIO_G3_INPUT_SEL] = { 0x14a, 2 }, /* write */
[NITIO_G2_AUTO_INC] = { 0x188, 2 }, /* write */
[NITIO_G3_AUTO_INC] = { 0x18a, 2 }, /* write */
[NITIO_G23_RESET] = { 0x190, 2 }, /* write */
[NITIO_G2_INT_ENA] = { 0x192, 2 }, /* write */
[NITIO_G3_INT_ENA] = { 0x196, 2 }, /* write */
[NITIO_G2_CNT_MODE] = { 0x1b0, 2 }, /* write */
[NITIO_G3_CNT_MODE] = { 0x1b2, 2 }, /* write */
[NITIO_G2_GATE2] = { 0x1b4, 2 }, /* write */
[NITIO_G3_GATE2] = { 0x1b6, 2 }, /* write */
[NITIO_G2_DMA_CFG] = { 0x1b8, 2 }, /* write */
[NITIO_G2_DMA_STATUS] = { 0x1b8, 2 }, /* read */
[NITIO_G3_DMA_CFG] = { 0x1ba, 2 }, /* write */
[NITIO_G3_DMA_STATUS] = { 0x1ba, 2 }, /* read */
[NI660X_DIO32_INPUT] = { 0x414, 4 }, /* read */
[NI660X_DIO32_OUTPUT] = { 0x510, 4 }, /* write */
[NI660X_CLK_CFG] = { 0x73c, 4 }, /* write */
[NI660X_GLOBAL_INT_STATUS] = { 0x754, 4 }, /* read */
[NI660X_DMA_CFG] = { 0x76c, 4 }, /* write */
[NI660X_GLOBAL_INT_CFG] = { 0x770, 4 }, /* write */
[NI660X_IO_CFG_0_1] = { 0x77c, 2 }, /* read/write */
[NI660X_IO_CFG_2_3] = { 0x77e, 2 }, /* read/write */
[NI660X_IO_CFG_4_5] = { 0x780, 2 }, /* read/write */
[NI660X_IO_CFG_6_7] = { 0x782, 2 }, /* read/write */
[NI660X_IO_CFG_8_9] = { 0x784, 2 }, /* read/write */
[NI660X_IO_CFG_10_11] = { 0x786, 2 }, /* read/write */
[NI660X_IO_CFG_12_13] = { 0x788, 2 }, /* read/write */
[NI660X_IO_CFG_14_15] = { 0x78a, 2 }, /* read/write */
[NI660X_IO_CFG_16_17] = { 0x78c, 2 }, /* read/write */
[NI660X_IO_CFG_18_19] = { 0x78e, 2 }, /* read/write */
[NI660X_IO_CFG_20_21] = { 0x790, 2 }, /* read/write */
[NI660X_IO_CFG_22_23] = { 0x792, 2 }, /* read/write */
[NI660X_IO_CFG_24_25] = { 0x794, 2 }, /* read/write */
[NI660X_IO_CFG_26_27] = { 0x796, 2 }, /* read/write */
[NI660X_IO_CFG_28_29] = { 0x798, 2 }, /* read/write */
[NI660X_IO_CFG_30_31] = { 0x79a, 2 }, /* read/write */
[NI660X_IO_CFG_32_33] = { 0x79c, 2 }, /* read/write */
[NI660X_IO_CFG_34_35] = { 0x79e, 2 }, /* read/write */
[NI660X_IO_CFG_36_37] = { 0x7a0, 2 }, /* read/write */
[NI660X_IO_CFG_38_39] = { 0x7a2, 2 } /* read/write */
};
#define NI660X_CHIP_OFFSET 0x800
enum ni_660x_boardid {
BOARD_PCI6601,
BOARD_PCI6602,
BOARD_PXI6602,
BOARD_PCI6608,
BOARD_PXI6608,
BOARD_PCI6624,
BOARD_PXI6624
};
struct ni_660x_board {
const char *name;
unsigned int n_chips; /* total number of TIO chips */
};
static const struct ni_660x_board ni_660x_boards[] = {
[BOARD_PCI6601] = {
.name = "PCI-6601",
.n_chips = 1,
},
[BOARD_PCI6602] = {
.name = "PCI-6602",
.n_chips = 2,
},
[BOARD_PXI6602] = {
.name = "PXI-6602",
.n_chips = 2,
},
[BOARD_PCI6608] = {
.name = "PCI-6608",
.n_chips = 2,
},
[BOARD_PXI6608] = {
.name = "PXI-6608",
.n_chips = 2,
},
[BOARD_PCI6624] = {
.name = "PCI-6624",
.n_chips = 2,
},
[BOARD_PXI6624] = {
.name = "PXI-6624",
.n_chips = 2,
},
};
#define NI660X_NUM_PFI_CHANNELS 40
/* there are only up to 3 dma channels, but the register layout allows for 4 */
#define NI660X_MAX_DMA_CHANNEL 4
#define NI660X_COUNTERS_PER_CHIP 4
#define NI660X_MAX_CHIPS 2
#define NI660X_MAX_COUNTERS (NI660X_MAX_CHIPS * \
NI660X_COUNTERS_PER_CHIP)
struct ni_660x_private {
struct mite *mite;
struct ni_gpct_device *counter_dev;
struct mite_ring *ring[NI660X_MAX_CHIPS][NI660X_COUNTERS_PER_CHIP];
/* protects mite channel request/release */
spinlock_t mite_channel_lock;
/* prevents races between interrupt and comedi_poll */
spinlock_t interrupt_lock;
unsigned int dma_cfg[NI660X_MAX_CHIPS];
unsigned int io_cfg[NI660X_NUM_PFI_CHANNELS];
u64 io_dir;
struct ni_route_tables routing_tables;
};
static void ni_660x_write(struct comedi_device *dev, unsigned int chip,
unsigned int bits, unsigned int reg)
{
unsigned int addr = (chip * NI660X_CHIP_OFFSET) +
ni_660x_reg_data[reg].offset;
if (ni_660x_reg_data[reg].size == 2)
writew(bits, dev->mmio + addr);
else
writel(bits, dev->mmio + addr);
}
static unsigned int ni_660x_read(struct comedi_device *dev,
unsigned int chip, unsigned int reg)
{
unsigned int addr = (chip * NI660X_CHIP_OFFSET) +
ni_660x_reg_data[reg].offset;
if (ni_660x_reg_data[reg].size == 2)
return readw(dev->mmio + addr);
return readl(dev->mmio + addr);
}
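/*
 * Address arithmetic example (illustrative): ni_660x_write(dev, 1,
 * bits, NITIO_G0_CMD) targets the second TIO chip, so addr =
 * 1 * NI660X_CHIP_OFFSET + 0x00c = 0x80c and writew() is used
 * (size 2); NI660X_CLK_CFG on chip 0 lands at 0x73c with writel()
 * (size 4).
 */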
static void ni_660x_gpct_write(struct ni_gpct *counter, unsigned int bits,
enum ni_gpct_register reg)
{
struct comedi_device *dev = counter->counter_dev->dev;
ni_660x_write(dev, counter->chip_index, bits, reg);
}
static unsigned int ni_660x_gpct_read(struct ni_gpct *counter,
enum ni_gpct_register reg)
{
struct comedi_device *dev = counter->counter_dev->dev;
return ni_660x_read(dev, counter->chip_index, reg);
}
static inline void ni_660x_set_dma_channel(struct comedi_device *dev,
unsigned int mite_channel,
struct ni_gpct *counter)
{
struct ni_660x_private *devpriv = dev->private;
unsigned int chip = counter->chip_index;
devpriv->dma_cfg[chip] &= ~NI660X_DMA_CFG_SEL_MASK(mite_channel);
devpriv->dma_cfg[chip] |= NI660X_DMA_CFG_SEL(mite_channel,
counter->counter_index);
ni_660x_write(dev, chip, devpriv->dma_cfg[chip] |
NI660X_DMA_CFG_RESET(mite_channel),
NI660X_DMA_CFG);
}
static inline void ni_660x_unset_dma_channel(struct comedi_device *dev,
unsigned int mite_channel,
struct ni_gpct *counter)
{
struct ni_660x_private *devpriv = dev->private;
unsigned int chip = counter->chip_index;
devpriv->dma_cfg[chip] &= ~NI660X_DMA_CFG_SEL_MASK(mite_channel);
devpriv->dma_cfg[chip] |= NI660X_DMA_CFG_SEL_NONE(mite_channel);
ni_660x_write(dev, chip, devpriv->dma_cfg[chip], NI660X_DMA_CFG);
}
static int ni_660x_request_mite_channel(struct comedi_device *dev,
struct ni_gpct *counter,
enum comedi_io_direction direction)
{
struct ni_660x_private *devpriv = dev->private;
struct mite_ring *ring;
struct mite_channel *mite_chan;
unsigned long flags;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
ring = devpriv->ring[counter->chip_index][counter->counter_index];
mite_chan = mite_request_channel(devpriv->mite, ring);
if (!mite_chan) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
dev_err(dev->class_dev,
"failed to reserve mite dma channel for counter\n");
return -EBUSY;
}
mite_chan->dir = direction;
ni_tio_set_mite_channel(counter, mite_chan);
ni_660x_set_dma_channel(dev, mite_chan->channel, counter);
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
return 0;
}
static void ni_660x_release_mite_channel(struct comedi_device *dev,
struct ni_gpct *counter)
{
struct ni_660x_private *devpriv = dev->private;
unsigned long flags;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (counter->mite_chan) {
struct mite_channel *mite_chan = counter->mite_chan;
ni_660x_unset_dma_channel(dev, mite_chan->channel, counter);
ni_tio_set_mite_channel(counter, NULL);
mite_release_channel(mite_chan);
}
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
}
static int ni_660x_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct ni_gpct *counter = s->private;
int retval;
retval = ni_660x_request_mite_channel(dev, counter, COMEDI_INPUT);
if (retval) {
dev_err(dev->class_dev,
"no dma channel available for use by counter\n");
return retval;
}
ni_tio_acknowledge(counter);
return ni_tio_cmd(dev, s);
}
static int ni_660x_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct ni_gpct *counter = s->private;
int retval;
retval = ni_tio_cancel(counter);
ni_660x_release_mite_channel(dev, counter);
return retval;
}
static void set_tio_counterswap(struct comedi_device *dev, int chip)
{
unsigned int bits = 0;
/*
* See P. 3.5 of the Register-Level Programming manual.
* The CounterSwap bit has to be set on the second chip,
* otherwise it will try to use the same pins as the
* first chip.
*/
if (chip)
bits = NI660X_CLK_CFG_COUNTER_SWAP;
ni_660x_write(dev, chip, bits, NI660X_CLK_CFG);
}
static void ni_660x_handle_gpct_interrupt(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct ni_gpct *counter = s->private;
ni_tio_handle_interrupt(counter, s);
comedi_handle_events(dev, s);
}
static irqreturn_t ni_660x_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct ni_660x_private *devpriv = dev->private;
struct comedi_subdevice *s;
unsigned int i;
unsigned long flags;
if (!dev->attached)
return IRQ_NONE;
/* make sure dev->attached is checked before doing anything else */
smp_mb();
/* lock to avoid race with comedi_poll */
spin_lock_irqsave(&devpriv->interrupt_lock, flags);
for (i = 0; i < dev->n_subdevices; ++i) {
s = &dev->subdevices[i];
if (s->type == COMEDI_SUBD_COUNTER)
ni_660x_handle_gpct_interrupt(dev, s);
}
spin_unlock_irqrestore(&devpriv->interrupt_lock, flags);
return IRQ_HANDLED;
}
static int ni_660x_input_poll(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct ni_660x_private *devpriv = dev->private;
struct ni_gpct *counter = s->private;
unsigned long flags;
/* lock to avoid race with comedi_poll */
spin_lock_irqsave(&devpriv->interrupt_lock, flags);
mite_sync_dma(counter->mite_chan, s);
spin_unlock_irqrestore(&devpriv->interrupt_lock, flags);
return comedi_buf_read_n_available(s);
}
static int ni_660x_buf_change(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct ni_660x_private *devpriv = dev->private;
struct ni_gpct *counter = s->private;
struct mite_ring *ring;
int ret;
ring = devpriv->ring[counter->chip_index][counter->counter_index];
ret = mite_buf_change(ring, s);
if (ret < 0)
return ret;
return 0;
}
static int ni_660x_allocate_private(struct comedi_device *dev)
{
struct ni_660x_private *devpriv;
unsigned int i;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
spin_lock_init(&devpriv->mite_channel_lock);
spin_lock_init(&devpriv->interrupt_lock);
for (i = 0; i < NI660X_NUM_PFI_CHANNELS; ++i)
devpriv->io_cfg[i] = NI_660X_PFI_OUTPUT_COUNTER;
return 0;
}
static int ni_660x_alloc_mite_rings(struct comedi_device *dev)
{
const struct ni_660x_board *board = dev->board_ptr;
struct ni_660x_private *devpriv = dev->private;
unsigned int i;
unsigned int j;
for (i = 0; i < board->n_chips; ++i) {
for (j = 0; j < NI660X_COUNTERS_PER_CHIP; ++j) {
devpriv->ring[i][j] = mite_alloc_ring(devpriv->mite);
if (!devpriv->ring[i][j])
return -ENOMEM;
}
}
return 0;
}
static void ni_660x_free_mite_rings(struct comedi_device *dev)
{
const struct ni_660x_board *board = dev->board_ptr;
struct ni_660x_private *devpriv = dev->private;
unsigned int i;
unsigned int j;
for (i = 0; i < board->n_chips; ++i) {
for (j = 0; j < NI660X_COUNTERS_PER_CHIP; ++j)
mite_free_ring(devpriv->ring[i][j]);
}
}
static int ni_660x_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int shift = CR_CHAN(insn->chanspec);
unsigned int mask = data[0] << shift;
unsigned int bits = data[1] << shift;
/*
* There are 40 channels in this subdevice but only 32 are usable
* as DIO. The shift adjusts the mask/bits to account for the base
* channel in insn->chanspec. The state update can then be handled
* normally for the 32 usable channels.
*/
if (mask) {
s->state &= ~mask;
s->state |= (bits & mask);
ni_660x_write(dev, 0, s->state, NI660X_DIO32_OUTPUT);
}
/*
* Return the input channels, shifted back to account for the base
* channel.
*/
data[1] = ni_660x_read(dev, 0, NI660X_DIO32_INPUT) >> shift;
return insn->n;
}
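/*
 * Example of the shift handling (illustrative): with a base channel
 * of 8 in insn->chanspec and data[0] = 0x0f, the effective mask is
 * 0x0f << 8 = 0xf00, so only DIO lines 8..11 are updated, and the
 * input word is shifted back down by 8 before being returned.
 */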
static void ni_660x_select_pfi_output(struct comedi_device *dev,
unsigned int chan, unsigned int out_sel)
{
const struct ni_660x_board *board = dev->board_ptr;
unsigned int active_chip = 0;
unsigned int idle_chip = 0;
unsigned int bits;
if (chan >= NI_PFI(0))
/* allow new and old names of pfi channels to work. */
chan -= NI_PFI(0);
if (board->n_chips > 1) {
if (out_sel == NI_660X_PFI_OUTPUT_COUNTER &&
chan >= 8 && chan <= 23) {
/* counters 4-7 pfi channels */
active_chip = 1;
idle_chip = 0;
} else {
/* counters 0-3 pfi channels */
active_chip = 0;
idle_chip = 1;
}
}
if (idle_chip != active_chip) {
/* set the pfi channel to high-z on the inactive chip */
bits = ni_660x_read(dev, idle_chip, NI660X_IO_CFG(chan));
bits &= ~NI660X_IO_CFG_OUT_SEL_MASK(chan);
bits |= NI660X_IO_CFG_OUT_SEL(chan, 0); /* high-z */
ni_660x_write(dev, idle_chip, bits, NI660X_IO_CFG(chan));
}
/* set the pfi channel output on the active chip */
bits = ni_660x_read(dev, active_chip, NI660X_IO_CFG(chan));
bits &= ~NI660X_IO_CFG_OUT_SEL_MASK(chan);
bits |= NI660X_IO_CFG_OUT_SEL(chan, out_sel);
ni_660x_write(dev, active_chip, bits, NI660X_IO_CFG(chan));
}
static void ni_660x_set_pfi_direction(struct comedi_device *dev,
unsigned int chan,
unsigned int direction)
{
struct ni_660x_private *devpriv = dev->private;
u64 bit;
if (chan >= NI_PFI(0))
/* allow new and old names of pfi channels to work. */
chan -= NI_PFI(0);
bit = 1ULL << chan;
if (direction == COMEDI_OUTPUT) {
devpriv->io_dir |= bit;
/* reset the output to currently assigned output value */
ni_660x_select_pfi_output(dev, chan, devpriv->io_cfg[chan]);
} else {
devpriv->io_dir &= ~bit;
/* set pin to high-z; do not change currently assigned route */
ni_660x_select_pfi_output(dev, chan, 0);
}
}
static unsigned int ni_660x_get_pfi_direction(struct comedi_device *dev,
unsigned int chan)
{
struct ni_660x_private *devpriv = dev->private;
u64 bit;
if (chan >= NI_PFI(0))
/* allow new and old names of pfi channels to work. */
chan -= NI_PFI(0);
bit = 1ULL << chan;
return (devpriv->io_dir & bit) ? COMEDI_OUTPUT : COMEDI_INPUT;
}
static int ni_660x_set_pfi_routing(struct comedi_device *dev,
unsigned int chan, unsigned int source)
{
struct ni_660x_private *devpriv = dev->private;
if (chan >= NI_PFI(0))
/* allow new and old names of pfi channels to work. */
chan -= NI_PFI(0);
switch (source) {
case NI_660X_PFI_OUTPUT_COUNTER:
if (chan < 8)
return -EINVAL;
break;
case NI_660X_PFI_OUTPUT_DIO:
if (chan > 31)
return -EINVAL;
break;
default:
return -EINVAL;
}
devpriv->io_cfg[chan] = source;
if (ni_660x_get_pfi_direction(dev, chan) == COMEDI_OUTPUT)
ni_660x_select_pfi_output(dev, chan, devpriv->io_cfg[chan]);
return 0;
}
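/*
 * Consequence of the checks above (illustrative): PFI 0..7 can only
 * be routed from the DIO block, PFI 32..39 only from a counter
 * output, and PFI 8..31 accept either source.
 */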
static int ni_660x_get_pfi_routing(struct comedi_device *dev, unsigned int chan)
{
struct ni_660x_private *devpriv = dev->private;
if (chan >= NI_PFI(0))
/* allow new and old names of pfi channels to work. */
chan -= NI_PFI(0);
return devpriv->io_cfg[chan];
}
static void ni_660x_set_pfi_filter(struct comedi_device *dev,
unsigned int chan, unsigned int value)
{
unsigned int val;
if (chan >= NI_PFI(0))
/* allow new and old names of pfi channels to work. */
chan -= NI_PFI(0);
val = ni_660x_read(dev, 0, NI660X_IO_CFG(chan));
val &= ~NI660X_IO_CFG_IN_SEL_MASK(chan);
val |= NI660X_IO_CFG_IN_SEL(chan, value);
ni_660x_write(dev, 0, val, NI660X_IO_CFG(chan));
}
static int ni_660x_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
int ret;
switch (data[0]) {
case INSN_CONFIG_DIO_OUTPUT:
ni_660x_set_pfi_direction(dev, chan, COMEDI_OUTPUT);
break;
case INSN_CONFIG_DIO_INPUT:
ni_660x_set_pfi_direction(dev, chan, COMEDI_INPUT);
break;
case INSN_CONFIG_DIO_QUERY:
data[1] = ni_660x_get_pfi_direction(dev, chan);
break;
case INSN_CONFIG_SET_ROUTING:
ret = ni_660x_set_pfi_routing(dev, chan, data[1]);
if (ret)
return ret;
break;
case INSN_CONFIG_GET_ROUTING:
data[1] = ni_660x_get_pfi_routing(dev, chan);
break;
case INSN_CONFIG_FILTER:
ni_660x_set_pfi_filter(dev, chan, data[1]);
break;
default:
return -EINVAL;
}
return insn->n;
}
static unsigned int _ni_get_valid_routes(struct comedi_device *dev,
unsigned int n_pairs,
unsigned int *pair_data)
{
struct ni_660x_private *devpriv = dev->private;
return ni_get_valid_routes(&devpriv->routing_tables, n_pairs,
pair_data);
}
/*
* Retrieves the current source of the output selector for the given
* destination. If the terminal for the destination is not already configured
* as an output, this function returns -EINVAL as error.
*
* Return: The register value of the destination output selector;
* -EINVAL if terminal is not configured for output.
*/
static inline int get_output_select_source(int dest, struct comedi_device *dev)
{
struct ni_660x_private *devpriv = dev->private;
int reg = -1;
if (channel_is_pfi(dest)) {
if (ni_660x_get_pfi_direction(dev, dest) == COMEDI_OUTPUT)
reg = ni_660x_get_pfi_routing(dev, dest);
} else if (channel_is_rtsi(dest)) {
dev_dbg(dev->class_dev,
"%s: unhandled rtsi destination (%d) queried\n",
__func__, dest);
/*
* The following can be enabled when RTSI routing info is
* determined (not currently documented):
* if (ni_get_rtsi_direction(dev, dest) == COMEDI_OUTPUT) {
* reg = ni_get_rtsi_routing(dev, dest);
* if (reg == NI_RTSI_OUTPUT_RGOUT0) {
* dest = NI_RGOUT0; ** prepare for lookup below **
* reg = get_rgout0_reg(dev);
* } else if (reg >= NI_RTSI_OUTPUT_RTSI_BRD(0) &&
* reg <= NI_RTSI_OUTPUT_RTSI_BRD(3)) {
* const int i = reg - NI_RTSI_OUTPUT_RTSI_BRD(0);
* dest = NI_RTSI_BRD(i); ** prepare for lookup **
* reg = get_ith_rtsi_brd_reg(i, dev);
* }
* }
*/
} else if (channel_is_ctr(dest)) {
reg = ni_tio_get_routing(devpriv->counter_dev, dest);
} else {
dev_dbg(dev->class_dev,
"%s: unhandled destination (%d) queried\n",
__func__, dest);
}
if (reg >= 0)
return ni_find_route_source(CR_CHAN(reg), dest,
&devpriv->routing_tables);
return -EINVAL;
}
/*
* Test a route:
*
* Return: -1 if not connectible;
* 0 if connectible and not connected;
* 1 if connectible and connected.
*/
static inline int test_route(unsigned int src, unsigned int dest,
struct comedi_device *dev)
{
struct ni_660x_private *devpriv = dev->private;
s8 reg = ni_route_to_register(CR_CHAN(src), dest,
&devpriv->routing_tables);
if (reg < 0)
return -1;
if (get_output_select_source(dest, dev) != CR_CHAN(src))
return 0;
return 1;
}
/* Connect the actual route. */
static inline int connect_route(unsigned int src, unsigned int dest,
struct comedi_device *dev)
{
struct ni_660x_private *devpriv = dev->private;
s8 reg = ni_route_to_register(CR_CHAN(src), dest,
&devpriv->routing_tables);
s8 current_src;
if (reg < 0)
/* route is not valid */
return -EINVAL;
current_src = get_output_select_source(dest, dev);
if (current_src == CR_CHAN(src))
return -EALREADY;
if (current_src >= 0)
/* destination mux is already busy. complain, don't overwrite */
return -EBUSY;
/* The route is valid and available. Now connect... */
if (channel_is_pfi(CR_CHAN(dest))) {
/*
* set routing and then direction so that the output does not
* first get generated with the wrong pin
*/
ni_660x_set_pfi_routing(dev, dest, reg);
ni_660x_set_pfi_direction(dev, dest, COMEDI_OUTPUT);
} else if (channel_is_rtsi(CR_CHAN(dest))) {
dev_dbg(dev->class_dev, "%s: unhandled rtsi destination (%d)\n",
__func__, dest);
return -EINVAL;
/*
* The following can be enabled when RTSI routing info is
* determined (not currently documented):
* if (reg == NI_RTSI_OUTPUT_RGOUT0) {
* int ret = incr_rgout0_src_use(src, dev);
* if (ret < 0)
* return ret;
* } else if (ni_rtsi_route_requires_mux(reg)) {
* ** Attempt to allocate and route (src->brd) **
* int brd = incr_rtsi_brd_src_use(src, dev);
* if (brd < 0)
* return brd;
* ** Now lookup the register value for (brd->dest) **
* reg = ni_lookup_route_register(brd, CR_CHAN(dest),
* &devpriv->routing_tables);
* }
* ni_set_rtsi_direction(dev, dest, COMEDI_OUTPUT);
* ni_set_rtsi_routing(dev, dest, reg);
*/
} else if (channel_is_ctr(CR_CHAN(dest))) {
/*
		 * add the channel modifier bits back in so that the
		 * invert/edge information passed by the user is applied
*/
ni_tio_set_routing(devpriv->counter_dev, dest,
reg | (src & ~CR_CHAN(-1)));
} else {
return -EINVAL;
}
return 0;
}
static inline int disconnect_route(unsigned int src, unsigned int dest,
struct comedi_device *dev)
{
struct ni_660x_private *devpriv = dev->private;
s8 reg = ni_route_to_register(CR_CHAN(src), CR_CHAN(dest),
&devpriv->routing_tables);
if (reg < 0)
/* route is not valid */
return -EINVAL;
if (get_output_select_source(dest, dev) != CR_CHAN(src))
/* cannot disconnect something not connected */
return -EINVAL;
/* The route is valid and is connected. Now disconnect... */
if (channel_is_pfi(CR_CHAN(dest))) {
unsigned int source = ((CR_CHAN(dest) - NI_PFI(0)) < 8)
? NI_660X_PFI_OUTPUT_DIO
: NI_660X_PFI_OUTPUT_COUNTER;
/* set the pfi to high impedance, and disconnect */
ni_660x_set_pfi_direction(dev, dest, COMEDI_INPUT);
ni_660x_set_pfi_routing(dev, dest, source);
} else if (channel_is_rtsi(CR_CHAN(dest))) {
dev_dbg(dev->class_dev, "%s: unhandled rtsi destination (%d)\n",
__func__, dest);
return -EINVAL;
/*
* The following can be enabled when RTSI routing info is
* determined (not currently documented):
* if (reg == NI_RTSI_OUTPUT_RGOUT0) {
* int ret = decr_rgout0_src_use(src, dev);
* if (ret < 0)
* return ret;
* } else if (ni_rtsi_route_requires_mux(reg)) {
* ** find which RTSI_BRD line is source for rtsi pin **
* int brd = ni_find_route_source(
* ni_get_rtsi_routing(dev, dest), CR_CHAN(dest),
* &devpriv->routing_tables);
* if (brd < 0)
* return brd;
* ** decrement/disconnect RTSI_BRD line from source **
* decr_rtsi_brd_src_use(src, brd, dev);
* }
* ** set rtsi output selector to default state **
* reg = default_rtsi_routing[CR_CHAN(dest) - TRIGGER_LINE(0)];
* ni_set_rtsi_direction(dev, dest, COMEDI_INPUT);
* ni_set_rtsi_routing(dev, dest, reg);
*/
} else if (channel_is_ctr(CR_CHAN(dest))) {
ni_tio_unset_routing(devpriv->counter_dev, dest);
} else {
return -EINVAL;
}
return 0;
}
static int ni_global_insn_config(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
switch (data[0]) {
case INSN_DEVICE_CONFIG_TEST_ROUTE:
data[0] = test_route(data[1], data[2], dev);
return 2;
case INSN_DEVICE_CONFIG_CONNECT_ROUTE:
return connect_route(data[1], data[2], dev);
case INSN_DEVICE_CONFIG_DISCONNECT_ROUTE:
return disconnect_route(data[1], data[2], dev);
/*
* This case is already handled one level up.
* case INSN_DEVICE_CONFIG_GET_ROUTES:
*/
default:
return -EINVAL;
}
return 1;
}
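/*
 * Editor's sketch (not compiled): the data[] layout consumed by
 * ni_global_insn_config() above, as user space would fill it in for a
 * route test. The terminal names are hypothetical examples.
 */
#if 0
unsigned int example_data[3] = {
	INSN_DEVICE_CONFIG_TEST_ROUTE,	/* data[0]: operation */
	NI_PFI(8),			/* data[1]: source */
	NI_CtrGate(0),			/* data[2]: destination */
};
#endif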
static void ni_660x_init_tio_chips(struct comedi_device *dev,
unsigned int n_chips)
{
struct ni_660x_private *devpriv = dev->private;
unsigned int chip;
unsigned int chan;
/*
* We use the ioconfig registers to control dio direction, so zero
* output enables in stc dio control reg.
*/
ni_660x_write(dev, 0, 0, NI660X_STC_DIO_CONTROL);
for (chip = 0; chip < n_chips; ++chip) {
/* init dma configuration register */
devpriv->dma_cfg[chip] = 0;
for (chan = 0; chan < NI660X_MAX_DMA_CHANNEL; ++chan)
devpriv->dma_cfg[chip] |= NI660X_DMA_CFG_SEL_NONE(chan);
ni_660x_write(dev, chip, devpriv->dma_cfg[chip],
NI660X_DMA_CFG);
/* init ioconfig registers */
for (chan = 0; chan < NI660X_NUM_PFI_CHANNELS; ++chan)
ni_660x_write(dev, chip, 0, NI660X_IO_CFG(chan));
}
}
static int ni_660x_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct ni_660x_board *board = NULL;
struct ni_660x_private *devpriv;
struct comedi_subdevice *s;
struct ni_gpct_device *gpct_dev;
unsigned int n_counters;
int subdev;
int ret;
unsigned int i;
unsigned int global_interrupt_config_bits;
if (context < ARRAY_SIZE(ni_660x_boards))
board = &ni_660x_boards[context];
if (!board)
return -ENODEV;
dev->board_ptr = board;
dev->board_name = board->name;
ret = comedi_pci_enable(dev);
if (ret)
return ret;
ret = ni_660x_allocate_private(dev);
if (ret < 0)
return ret;
devpriv = dev->private;
devpriv->mite = mite_attach(dev, true); /* use win1 */
if (!devpriv->mite)
return -ENOMEM;
ret = ni_660x_alloc_mite_rings(dev);
if (ret < 0)
return ret;
ni_660x_init_tio_chips(dev, board->n_chips);
/* prepare the device for globally-named routes. */
if (ni_assign_device_routes("ni_660x", board->name, NULL,
&devpriv->routing_tables) < 0) {
dev_warn(dev->class_dev, "%s: %s device has no signal routing table.\n",
__func__, board->name);
dev_warn(dev->class_dev, "%s: High level NI signal names will not be available for this %s board.\n",
__func__, board->name);
} else {
/*
* only(?) assign insn_device_config if we have global names for
* this device.
*/
dev->insn_device_config = ni_global_insn_config;
dev->get_valid_routes = _ni_get_valid_routes;
}
n_counters = board->n_chips * NI660X_COUNTERS_PER_CHIP;
gpct_dev = ni_gpct_device_construct(dev,
ni_660x_gpct_write,
ni_660x_gpct_read,
ni_gpct_variant_660x,
n_counters,
NI660X_COUNTERS_PER_CHIP,
&devpriv->routing_tables);
if (!gpct_dev)
return -ENOMEM;
devpriv->counter_dev = gpct_dev;
ret = comedi_alloc_subdevices(dev, 2 + NI660X_MAX_COUNTERS);
if (ret)
return ret;
subdev = 0;
s = &dev->subdevices[subdev++];
/* Old GENERAL-PURPOSE COUNTER/TIME (GPCT) subdevice, no longer used */
s->type = COMEDI_SUBD_UNUSED;
/*
* Digital I/O subdevice
*
* There are 40 channels but only the first 32 can be digital I/Os.
* The last 8 are dedicated to counters 0 and 1.
*
* Counter 0-3 signals are from the first TIO chip.
* Counter 4-7 signals are from the second TIO chip.
*
* Comedi External
* PFI Chan DIO Chan Counter Signal
* ------- -------- --------------
* 0 0
* 1 1
* 2 2
* 3 3
* 4 4
* 5 5
* 6 6
* 7 7
* 8 8 CTR 7 OUT
* 9 9 CTR 7 AUX
* 10 10 CTR 7 GATE
* 11 11 CTR 7 SOURCE
* 12 12 CTR 6 OUT
* 13 13 CTR 6 AUX
* 14 14 CTR 6 GATE
* 15 15 CTR 6 SOURCE
* 16 16 CTR 5 OUT
* 17 17 CTR 5 AUX
* 18 18 CTR 5 GATE
* 19 19 CTR 5 SOURCE
* 20 20 CTR 4 OUT
* 21 21 CTR 4 AUX
* 22 22 CTR 4 GATE
* 23 23 CTR 4 SOURCE
* 24 24 CTR 3 OUT
* 25 25 CTR 3 AUX
* 26 26 CTR 3 GATE
* 27 27 CTR 3 SOURCE
* 28 28 CTR 2 OUT
* 29 29 CTR 2 AUX
* 30 30 CTR 2 GATE
* 31 31 CTR 2 SOURCE
* 32 CTR 1 OUT
* 33 CTR 1 AUX
* 34 CTR 1 GATE
* 35 CTR 1 SOURCE
* 36 CTR 0 OUT
* 37 CTR 0 AUX
* 38 CTR 0 GATE
* 39 CTR 0 SOURCE
*/
s = &dev->subdevices[subdev++];
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = NI660X_NUM_PFI_CHANNELS;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = ni_660x_dio_insn_bits;
s->insn_config = ni_660x_dio_insn_config;
/*
* Default the DIO channels as:
* chan 0-7: DIO inputs
* chan 8-39: counter signal inputs
*/
for (i = 0; i < s->n_chan; ++i) {
unsigned int source = (i < 8) ? NI_660X_PFI_OUTPUT_DIO
: NI_660X_PFI_OUTPUT_COUNTER;
ni_660x_set_pfi_routing(dev, i, source);
ni_660x_set_pfi_direction(dev, i, COMEDI_INPUT);/* high-z */
}
/* Counter subdevices (4 NI TIO General Purpose Counters per chip) */
for (i = 0; i < NI660X_MAX_COUNTERS; ++i) {
s = &dev->subdevices[subdev++];
if (i < n_counters) {
struct ni_gpct *counter = &gpct_dev->counters[i];
s->type = COMEDI_SUBD_COUNTER;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE |
SDF_LSAMPL | SDF_CMD_READ;
s->n_chan = 3;
s->maxdata = 0xffffffff;
s->insn_read = ni_tio_insn_read;
s->insn_write = ni_tio_insn_write;
s->insn_config = ni_tio_insn_config;
s->len_chanlist = 1;
s->do_cmd = ni_660x_cmd;
s->do_cmdtest = ni_tio_cmdtest;
s->cancel = ni_660x_cancel;
s->poll = ni_660x_input_poll;
s->buf_change = ni_660x_buf_change;
s->async_dma_dir = DMA_BIDIRECTIONAL;
s->private = counter;
ni_tio_init_counter(counter);
} else {
s->type = COMEDI_SUBD_UNUSED;
}
}
/*
* To be safe, set counterswap bits on tio chips after all the counter
* outputs have been set to high impedance mode.
*/
for (i = 0; i < board->n_chips; ++i)
set_tio_counterswap(dev, i);
ret = request_irq(pcidev->irq, ni_660x_interrupt, IRQF_SHARED,
dev->board_name, dev);
if (ret < 0) {
dev_warn(dev->class_dev, " irq not available\n");
return ret;
}
dev->irq = pcidev->irq;
global_interrupt_config_bits = NI660X_GLOBAL_INT_GLOBAL;
if (board->n_chips > 1)
global_interrupt_config_bits |= NI660X_GLOBAL_INT_CASCADE;
ni_660x_write(dev, 0, global_interrupt_config_bits,
NI660X_GLOBAL_INT_CFG);
return 0;
}
static void ni_660x_detach(struct comedi_device *dev)
{
struct ni_660x_private *devpriv = dev->private;
if (dev->irq) {
ni_660x_write(dev, 0, 0, NI660X_GLOBAL_INT_CFG);
free_irq(dev->irq, dev);
}
if (devpriv) {
ni_gpct_device_destroy(devpriv->counter_dev);
ni_660x_free_mite_rings(dev);
mite_detach(devpriv->mite);
}
if (dev->mmio)
iounmap(dev->mmio);
comedi_pci_disable(dev);
}
static struct comedi_driver ni_660x_driver = {
.driver_name = "ni_660x",
.module = THIS_MODULE,
.auto_attach = ni_660x_auto_attach,
.detach = ni_660x_detach,
};
static int ni_660x_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
return comedi_pci_auto_config(dev, &ni_660x_driver, id->driver_data);
}
static const struct pci_device_id ni_660x_pci_table[] = {
{ PCI_VDEVICE(NI, 0x1310), BOARD_PCI6602 },
{ PCI_VDEVICE(NI, 0x1360), BOARD_PXI6602 },
{ PCI_VDEVICE(NI, 0x2c60), BOARD_PCI6601 },
{ PCI_VDEVICE(NI, 0x2db0), BOARD_PCI6608 },
{ PCI_VDEVICE(NI, 0x2cc0), BOARD_PXI6608 },
{ PCI_VDEVICE(NI, 0x1e30), BOARD_PCI6624 },
{ PCI_VDEVICE(NI, 0x1e40), BOARD_PXI6624 },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, ni_660x_pci_table);
static struct pci_driver ni_660x_pci_driver = {
.name = "ni_660x",
.id_table = ni_660x_pci_table,
.probe = ni_660x_pci_probe,
.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(ni_660x_driver, ni_660x_pci_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi driver for NI 660x counter/timer boards");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/ni_660x.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* aio_aio12_8.c
* Driver for Access I/O Products PC-104 AIO12-8 Analog I/O Board
* Copyright (C) 2006 C&C Technologies, Inc.
*/
/*
* Driver: aio_aio12_8
* Description: Access I/O Products PC-104 AIO12-8 Analog I/O Board
* Author: Pablo Mejia <[email protected]>
* Devices: [Access I/O] PC-104 AIO12-8 (aio_aio12_8),
* [Access I/O] PC-104 AI12-8 (aio_ai12_8),
* [Access I/O] PC-104 AO12-4 (aio_ao12_4)
* Status: experimental
*
* Configuration Options:
* [0] - I/O port base address
*
* Notes:
* Only synchronous operations are supported.
*/
#include <linux/module.h>
#include <linux/comedi/comedidev.h>
#include <linux/comedi/comedi_8255.h>
#include <linux/comedi/comedi_8254.h>
/*
* Register map
*/
#define AIO12_8_STATUS_REG 0x00
#define AIO12_8_STATUS_ADC_EOC BIT(7)
#define AIO12_8_STATUS_PORT_C_COS BIT(6)
#define AIO12_8_STATUS_IRQ_ENA BIT(2)
#define AIO12_8_INTERRUPT_REG 0x01
#define AIO12_8_INTERRUPT_ADC BIT(7)
#define AIO12_8_INTERRUPT_COS BIT(6)
#define AIO12_8_INTERRUPT_COUNTER1 BIT(5)
#define AIO12_8_INTERRUPT_PORT_C3 BIT(4)
#define AIO12_8_INTERRUPT_PORT_C0 BIT(3)
#define AIO12_8_INTERRUPT_ENA BIT(2)
#define AIO12_8_ADC_REG 0x02
#define AIO12_8_ADC_MODE(x) (((x) & 0x3) << 6)
#define AIO12_8_ADC_MODE_NORMAL AIO12_8_ADC_MODE(0)
#define AIO12_8_ADC_MODE_INT_CLK AIO12_8_ADC_MODE(1)
#define AIO12_8_ADC_MODE_STANDBY AIO12_8_ADC_MODE(2)
#define AIO12_8_ADC_MODE_POWERDOWN AIO12_8_ADC_MODE(3)
#define AIO12_8_ADC_ACQ(x) (((x) & 0x1) << 5)
#define AIO12_8_ADC_ACQ_3USEC AIO12_8_ADC_ACQ(0)
#define AIO12_8_ADC_ACQ_PROGRAM AIO12_8_ADC_ACQ(1)
#define AIO12_8_ADC_RANGE(x) ((x) << 3)
#define AIO12_8_ADC_CHAN(x) ((x) << 0)
#define AIO12_8_DAC_REG(x) (0x04 + (x) * 2)
#define AIO12_8_8254_BASE_REG 0x0c
#define AIO12_8_8255_BASE_REG 0x10
#define AIO12_8_DIO_CONTROL_REG 0x14
#define AIO12_8_DIO_CONTROL_TST BIT(0)
#define AIO12_8_ADC_TRIGGER_REG 0x15
#define AIO12_8_ADC_TRIGGER_RANGE(x) ((x) << 3)
#define AIO12_8_ADC_TRIGGER_CHAN(x) ((x) << 0)
#define AIO12_8_TRIGGER_REG 0x16
#define AIO12_8_TRIGGER_ADTRIG BIT(1)
#define AIO12_8_TRIGGER_DACTRIG BIT(0)
#define AIO12_8_COS_REG 0x17
#define AIO12_8_DAC_ENABLE_REG 0x18
#define AIO12_8_DAC_ENABLE_REF_ENA BIT(0)
static const struct comedi_lrange aio_aio12_8_range = {
4, {
UNI_RANGE(5),
BIP_RANGE(5),
UNI_RANGE(10),
BIP_RANGE(10)
}
};
struct aio12_8_boardtype {
const char *name;
unsigned int has_ai:1;
unsigned int has_ao:1;
};
static const struct aio12_8_boardtype board_types[] = {
{
.name = "aio_aio12_8",
.has_ai = 1,
.has_ao = 1,
}, {
.name = "aio_ai12_8",
.has_ai = 1,
}, {
.name = "aio_ao12_4",
.has_ao = 1,
},
};
static int aio_aio12_8_ai_eoc(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context)
{
unsigned int status;
status = inb(dev->iobase + AIO12_8_STATUS_REG);
if (status & AIO12_8_STATUS_ADC_EOC)
return 0;
return -EBUSY;
}
static int aio_aio12_8_ai_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int range = CR_RANGE(insn->chanspec);
unsigned int val;
unsigned char control;
int ret;
int i;
/*
	 * Set up the control byte for the internal 2 MHz clock, 3 us
	 * conversion time, and the desired range of the requested channel.
*/
control = AIO12_8_ADC_MODE_NORMAL | AIO12_8_ADC_ACQ_3USEC |
AIO12_8_ADC_RANGE(range) | AIO12_8_ADC_CHAN(chan);
/* Read status to clear EOC latch */
inb(dev->iobase + AIO12_8_STATUS_REG);
for (i = 0; i < insn->n; i++) {
/* Setup and start conversion */
outb(control, dev->iobase + AIO12_8_ADC_REG);
/* Wait for conversion to complete */
ret = comedi_timeout(dev, s, insn, aio_aio12_8_ai_eoc, 0);
if (ret)
return ret;
val = inw(dev->iobase + AIO12_8_ADC_REG) & s->maxdata;
/* munge bipolar 2's complement data to offset binary */
if (comedi_range_is_bipolar(s, range))
val = comedi_offset_munge(s, val);
data[i] = val;
}
return insn->n;
}
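/*
 * Editor's sketch: the control byte composed in aio_aio12_8_ai_read()
 * for, say, channel 3 with range index 1 (+/-5 V in aio_aio12_8_range):
 *   AIO12_8_ADC_MODE_NORMAL (0x00) | AIO12_8_ADC_ACQ_3USEC (0x00) |
 *   AIO12_8_ADC_RANGE(1)    (0x08) | AIO12_8_ADC_CHAN(3)    (0x03)
 * giving 0x0b.
 */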
static int aio_aio12_8_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int val = s->readback[chan];
int i;
/* enable DACs */
outb(AIO12_8_DAC_ENABLE_REF_ENA, dev->iobase + AIO12_8_DAC_ENABLE_REG);
for (i = 0; i < insn->n; i++) {
val = data[i];
outw(val, dev->iobase + AIO12_8_DAC_REG(chan));
}
s->readback[chan] = val;
return insn->n;
}
static int aio_aio12_8_counter_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
switch (data[0]) {
case INSN_CONFIG_GET_CLOCK_SRC:
/*
* Channels 0 and 2 have external clock sources.
* Channel 1 has a fixed 1 MHz clock source.
*/
data[0] = 0;
data[1] = (chan == 1) ? I8254_OSC_BASE_1MHZ : 0;
break;
default:
return -EINVAL;
}
return insn->n;
}
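/*
 * Editor's sketch: a query of channel 1 via the config handler above
 * returns data[0] = 0 (clock source id) and data[1] =
 * I8254_OSC_BASE_1MHZ, i.e. a 1000 ns period for the fixed 1 MHz
 * source; channels 0 and 2 report a period of 0 (external clock).
 */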
static int aio_aio12_8_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
const struct aio12_8_boardtype *board = dev->board_ptr;
struct comedi_subdevice *s;
int ret;
ret = comedi_request_region(dev, it->options[0], 32);
if (ret)
return ret;
dev->pacer = comedi_8254_init(dev->iobase + AIO12_8_8254_BASE_REG,
0, I8254_IO8, 0);
if (!dev->pacer)
return -ENOMEM;
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
/* Analog Input subdevice */
s = &dev->subdevices[0];
if (board->has_ai) {
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
s->n_chan = 8;
s->maxdata = 0x0fff;
s->range_table = &aio_aio12_8_range;
s->insn_read = aio_aio12_8_ai_read;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
/* Analog Output subdevice */
s = &dev->subdevices[1];
if (board->has_ao) {
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
s->n_chan = 4;
s->maxdata = 0x0fff;
s->range_table = &aio_aio12_8_range;
s->insn_write = aio_aio12_8_ao_insn_write;
ret = comedi_alloc_subdev_readback(s);
if (ret)
return ret;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
/* Digital I/O subdevice (8255) */
s = &dev->subdevices[2];
ret = subdev_8255_init(dev, s, NULL, AIO12_8_8255_BASE_REG);
if (ret)
return ret;
/* Counter subdevice (8254) */
s = &dev->subdevices[3];
comedi_8254_subdevice_init(s, dev->pacer);
dev->pacer->insn_config = aio_aio12_8_counter_insn_config;
return 0;
}
static struct comedi_driver aio_aio12_8_driver = {
.driver_name = "aio_aio12_8",
.module = THIS_MODULE,
.attach = aio_aio12_8_attach,
.detach = comedi_legacy_detach,
.board_name = &board_types[0].name,
.num_names = ARRAY_SIZE(board_types),
.offset = sizeof(struct aio12_8_boardtype),
};
module_comedi_driver(aio_aio12_8_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi driver for Access I/O AIO12-8 Analog I/O Board");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/aio_aio12_8.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* s526.c
* Sensoray s526 Comedi driver
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <[email protected]>
*/
/*
* Driver: s526
* Description: Sensoray 526 driver
* Devices: [Sensoray] 526 (s526)
* Author: Richie
* Everett Wang <[email protected]>
* Updated: Thu, 14 Sep. 2006
* Status: experimental
*
* Encoder works
* Analog input works
* Analog output works
* PWM output works
* Commands are not supported yet.
*
* Configuration Options:
* [0] - I/O port base address
*/
#include <linux/module.h>
#include <linux/comedi/comedidev.h>
/*
* Register I/O map
*/
#define S526_TIMER_REG 0x00
#define S526_TIMER_LOAD(x) (((x) & 0xff) << 8)
#define S526_TIMER_MODE(x) ((x) << 1)
#define S526_TIMER_MANUAL S526_TIMER_MODE(0)
#define S526_TIMER_AUTO S526_TIMER_MODE(1)
#define S526_TIMER_RESTART BIT(0)
#define S526_WDOG_REG 0x02
#define S526_WDOG_INVERTED BIT(4)
#define S526_WDOG_ENA BIT(3)
#define S526_WDOG_INTERVAL(x) (((x) & 0x7) << 0)
#define S526_AO_CTRL_REG 0x04
#define S526_AO_CTRL_RESET BIT(3)
#define S526_AO_CTRL_CHAN(x) (((x) & 0x3) << 1)
#define S526_AO_CTRL_START BIT(0)
#define S526_AI_CTRL_REG 0x06
#define S526_AI_CTRL_DELAY BIT(15)
#define S526_AI_CTRL_CONV(x) (1 << (5 + ((x) & 0xf)))
#define S526_AI_CTRL_READ(x) (((x) & 0xf) << 1)
#define S526_AI_CTRL_START BIT(0)
#define S526_AO_REG 0x08
#define S526_AI_REG 0x08
#define S526_DIO_CTRL_REG 0x0a
#define S526_DIO_CTRL_DIO3_NEG BIT(15) /* irq on DIO3 neg/pos edge */
#define S526_DIO_CTRL_DIO2_NEG BIT(14) /* irq on DIO2 neg/pos edge */
#define S526_DIO_CTRL_DIO1_NEG BIT(13) /* irq on DIO1 neg/pos edge */
#define S526_DIO_CTRL_DIO0_NEG BIT(12) /* irq on DIO0 neg/pos edge */
#define S526_DIO_CTRL_GRP2_OUT BIT(11)
#define S526_DIO_CTRL_GRP1_OUT BIT(10)
#define S526_DIO_CTRL_GRP2_NEG BIT(8) /* irq on DIO[4-7] neg/pos edge */
#define S526_INT_ENA_REG 0x0c
#define S526_INT_STATUS_REG 0x0e
#define S526_INT_DIO(x) BIT(8 + ((x) & 0x7))
#define S526_INT_EEPROM BIT(7) /* status only */
#define S526_INT_CNTR(x) BIT(3 + (3 - ((x) & 0x3)))
#define S526_INT_AI BIT(2)
#define S526_INT_AO BIT(1)
#define S526_INT_TIMER BIT(0)
#define S526_MISC_REG 0x10
#define S526_MISC_LED_OFF BIT(0)
#define S526_GPCT_LSB_REG(x) (0x12 + ((x) * 8))
#define S526_GPCT_MSB_REG(x) (0x14 + ((x) * 8))
#define S526_GPCT_MODE_REG(x) (0x16 + ((x) * 8))
#define S526_GPCT_MODE_COUT_SRC(x) ((x) << 0)
#define S526_GPCT_MODE_COUT_SRC_MASK S526_GPCT_MODE_COUT_SRC(0x1)
#define S526_GPCT_MODE_COUT_SRC_RCAP S526_GPCT_MODE_COUT_SRC(0)
#define S526_GPCT_MODE_COUT_SRC_RTGL S526_GPCT_MODE_COUT_SRC(1)
#define S526_GPCT_MODE_COUT_POL(x) ((x) << 1)
#define S526_GPCT_MODE_COUT_POL_MASK S526_GPCT_MODE_COUT_POL(0x1)
#define S526_GPCT_MODE_COUT_POL_NORM S526_GPCT_MODE_COUT_POL(0)
#define S526_GPCT_MODE_COUT_POL_INV S526_GPCT_MODE_COUT_POL(1)
#define S526_GPCT_MODE_AUTOLOAD(x) ((x) << 2)
#define S526_GPCT_MODE_AUTOLOAD_MASK S526_GPCT_MODE_AUTOLOAD(0x7)
#define S526_GPCT_MODE_AUTOLOAD_NONE S526_GPCT_MODE_AUTOLOAD(0)
/* these 3 bits can be OR'ed */
#define S526_GPCT_MODE_AUTOLOAD_RO S526_GPCT_MODE_AUTOLOAD(0x1)
#define S526_GPCT_MODE_AUTOLOAD_IXFALL S526_GPCT_MODE_AUTOLOAD(0x2)
#define S526_GPCT_MODE_AUTOLOAD_IXRISE S526_GPCT_MODE_AUTOLOAD(0x4)
#define S526_GPCT_MODE_HWCTEN_SRC(x) ((x) << 5)
#define S526_GPCT_MODE_HWCTEN_SRC_MASK S526_GPCT_MODE_HWCTEN_SRC(0x3)
#define S526_GPCT_MODE_HWCTEN_SRC_CEN S526_GPCT_MODE_HWCTEN_SRC(0)
#define S526_GPCT_MODE_HWCTEN_SRC_IX S526_GPCT_MODE_HWCTEN_SRC(1)
#define S526_GPCT_MODE_HWCTEN_SRC_IXRF S526_GPCT_MODE_HWCTEN_SRC(2)
#define S526_GPCT_MODE_HWCTEN_SRC_NRCAP S526_GPCT_MODE_HWCTEN_SRC(3)
#define S526_GPCT_MODE_CTEN_CTRL(x) ((x) << 7)
#define S526_GPCT_MODE_CTEN_CTRL_MASK S526_GPCT_MODE_CTEN_CTRL(0x3)
#define S526_GPCT_MODE_CTEN_CTRL_DIS S526_GPCT_MODE_CTEN_CTRL(0)
#define S526_GPCT_MODE_CTEN_CTRL_ENA S526_GPCT_MODE_CTEN_CTRL(1)
#define S526_GPCT_MODE_CTEN_CTRL_HW S526_GPCT_MODE_CTEN_CTRL(2)
#define S526_GPCT_MODE_CTEN_CTRL_INVHW S526_GPCT_MODE_CTEN_CTRL(3)
#define S526_GPCT_MODE_CLK_SRC(x) ((x) << 9)
#define S526_GPCT_MODE_CLK_SRC_MASK S526_GPCT_MODE_CLK_SRC(0x3)
/* if count direction control set to quadrature */
#define S526_GPCT_MODE_CLK_SRC_QUADX1 S526_GPCT_MODE_CLK_SRC(0)
#define S526_GPCT_MODE_CLK_SRC_QUADX2 S526_GPCT_MODE_CLK_SRC(1)
#define S526_GPCT_MODE_CLK_SRC_QUADX4 S526_GPCT_MODE_CLK_SRC(2)
#define S526_GPCT_MODE_CLK_SRC_QUADX4_ S526_GPCT_MODE_CLK_SRC(3)
/* if count direction control set to software control */
#define S526_GPCT_MODE_CLK_SRC_ARISE S526_GPCT_MODE_CLK_SRC(0)
#define S526_GPCT_MODE_CLK_SRC_AFALL S526_GPCT_MODE_CLK_SRC(1)
#define S526_GPCT_MODE_CLK_SRC_INT S526_GPCT_MODE_CLK_SRC(2)
#define S526_GPCT_MODE_CLK_SRC_INTHALF S526_GPCT_MODE_CLK_SRC(3)
#define S526_GPCT_MODE_CT_DIR(x) ((x) << 11)
#define S526_GPCT_MODE_CT_DIR_MASK S526_GPCT_MODE_CT_DIR(0x1)
/* if count direction control set to software control */
#define S526_GPCT_MODE_CT_DIR_UP S526_GPCT_MODE_CT_DIR(0)
#define S526_GPCT_MODE_CT_DIR_DOWN S526_GPCT_MODE_CT_DIR(1)
#define S526_GPCT_MODE_CTDIR_CTRL(x) ((x) << 12)
#define S526_GPCT_MODE_CTDIR_CTRL_MASK S526_GPCT_MODE_CTDIR_CTRL(0x1)
#define S526_GPCT_MODE_CTDIR_CTRL_QUAD S526_GPCT_MODE_CTDIR_CTRL(0)
#define S526_GPCT_MODE_CTDIR_CTRL_SOFT S526_GPCT_MODE_CTDIR_CTRL(1)
#define S526_GPCT_MODE_LATCH_CTRL(x) ((x) << 13)
#define S526_GPCT_MODE_LATCH_CTRL_MASK S526_GPCT_MODE_LATCH_CTRL(0x1)
#define S526_GPCT_MODE_LATCH_CTRL_READ S526_GPCT_MODE_LATCH_CTRL(0)
#define S526_GPCT_MODE_LATCH_CTRL_EVENT S526_GPCT_MODE_LATCH_CTRL(1)
#define S526_GPCT_MODE_PR_SELECT(x) ((x) << 14)
#define S526_GPCT_MODE_PR_SELECT_MASK S526_GPCT_MODE_PR_SELECT(0x1)
#define S526_GPCT_MODE_PR_SELECT_PR0 S526_GPCT_MODE_PR_SELECT(0)
#define S526_GPCT_MODE_PR_SELECT_PR1 S526_GPCT_MODE_PR_SELECT(1)
/* Control/Status - R = readable, W = writeable, C = write 1 to clear */
#define S526_GPCT_CTRL_REG(x) (0x18 + ((x) * 8))
#define S526_GPCT_CTRL_EV_STATUS(x) ((x) << 0) /* RC */
#define S526_GPCT_CTRL_EV_STATUS_MASK S526_GPCT_CTRL_EV_STATUS(0xf)
#define S526_GPCT_CTRL_EV_STATUS_NONE S526_GPCT_CTRL_EV_STATUS(0)
/* these 4 bits can be OR'ed */
#define S526_GPCT_CTRL_EV_STATUS_ECAP S526_GPCT_CTRL_EV_STATUS(0x1)
#define S526_GPCT_CTRL_EV_STATUS_ICAPN S526_GPCT_CTRL_EV_STATUS(0x2)
#define S526_GPCT_CTRL_EV_STATUS_ICAPP S526_GPCT_CTRL_EV_STATUS(0x4)
#define S526_GPCT_CTRL_EV_STATUS_RCAP S526_GPCT_CTRL_EV_STATUS(0x8)
#define S526_GPCT_CTRL_COUT_STATUS BIT(4) /* R */
#define S526_GPCT_CTRL_INDEX_STATUS BIT(5) /* R */
#define S526_GPCT_CTRL_INTEN(x) ((x) << 6) /* W */
#define S526_GPCT_CTRL_INTEN_MASK S526_GPCT_CTRL_INTEN(0xf)
#define S526_GPCT_CTRL_INTEN_NONE S526_GPCT_CTRL_INTEN(0)
/* these 4 bits can be OR'ed */
#define S526_GPCT_CTRL_INTEN_ERROR S526_GPCT_CTRL_INTEN(0x1)
#define S526_GPCT_CTRL_INTEN_IXFALL S526_GPCT_CTRL_INTEN(0x2)
#define S526_GPCT_CTRL_INTEN_IXRISE S526_GPCT_CTRL_INTEN(0x4)
#define S526_GPCT_CTRL_INTEN_RO S526_GPCT_CTRL_INTEN(0x8)
#define S526_GPCT_CTRL_LATCH_SEL(x) ((x) << 10) /* W */
#define S526_GPCT_CTRL_LATCH_SEL_MASK S526_GPCT_CTRL_LATCH_SEL(0x7)
#define S526_GPCT_CTRL_LATCH_SEL_NONE S526_GPCT_CTRL_LATCH_SEL(0)
/* these 3 bits can be OR'ed */
#define S526_GPCT_CTRL_LATCH_SEL_IXFALL S526_GPCT_CTRL_LATCH_SEL(0x1)
#define S526_GPCT_CTRL_LATCH_SEL_IXRISE S526_GPCT_CTRL_LATCH_SEL(0x2)
#define S526_GPCT_CTRL_LATCH_SEL_ITIMER S526_GPCT_CTRL_LATCH_SEL(0x4)
#define S526_GPCT_CTRL_CT_ARM BIT(13) /* W */
#define S526_GPCT_CTRL_CT_LOAD BIT(14) /* W */
#define S526_GPCT_CTRL_CT_RESET BIT(15) /* W */
#define S526_EEPROM_DATA_REG 0x32
#define S526_EEPROM_CTRL_REG 0x34
#define S526_EEPROM_CTRL_ADDR(x) (((x) & 0x3f) << 3)
#define S526_EEPROM_CTRL(x) (((x) & 0x3) << 1)
#define S526_EEPROM_CTRL_READ S526_EEPROM_CTRL(2)
#define S526_EEPROM_CTRL_START BIT(0)
struct s526_private {
unsigned int gpct_config[4];
unsigned short ai_ctrl;
};
static void s526_gpct_write(struct comedi_device *dev,
unsigned int chan, unsigned int val)
{
/* write high word then low word */
outw((val >> 16) & 0xffff, dev->iobase + S526_GPCT_MSB_REG(chan));
outw(val & 0xffff, dev->iobase + S526_GPCT_LSB_REG(chan));
}
static unsigned int s526_gpct_read(struct comedi_device *dev,
unsigned int chan)
{
unsigned int val;
/* read the low word then high word */
val = inw(dev->iobase + S526_GPCT_LSB_REG(chan)) & 0xffff;
val |= (inw(dev->iobase + S526_GPCT_MSB_REG(chan)) & 0xff) << 16;
return val;
}
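/*
 * Editor's sketch: loading a hypothetical 24-bit preload value of
 * 0x123456 through the helper above writes the MSB word 0x0012 first,
 * then the LSB word 0x3456; s526_gpct_read() reassembles the same
 * 24-bit value from the two registers.
 */
#if 0
s526_gpct_write(dev, 0, 0x123456);
#endif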
static int s526_gpct_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
int i;
for (i = 0; i < insn->n; i++)
data[i] = s526_gpct_read(dev, chan);
return insn->n;
}
static int s526_gpct_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct s526_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int val;
/*
* Check what type of Counter the user requested
* data[0] contains the Application type
*/
switch (data[0]) {
case INSN_CONFIG_GPCT_QUADRATURE_ENCODER:
/*
* data[0]: Application Type
* data[1]: Counter Mode Register Value
* data[2]: Pre-load Register Value
		 * data[3]: Counter Control Register
*/
devpriv->gpct_config[chan] = data[0];
#if 1
/* Set Counter Mode Register */
val = data[1] & 0xffff;
outw(val, dev->iobase + S526_GPCT_MODE_REG(chan));
/* Reset the counter if it is software preload */
if ((val & S526_GPCT_MODE_AUTOLOAD_MASK) ==
S526_GPCT_MODE_AUTOLOAD_NONE) {
/* Reset the counter */
outw(S526_GPCT_CTRL_CT_RESET,
dev->iobase + S526_GPCT_CTRL_REG(chan));
/*
* Load the counter from PR0
* outw(S526_GPCT_CTRL_CT_LOAD,
* dev->iobase + S526_GPCT_CTRL_REG(chan));
*/
}
#else
val = S526_GPCT_MODE_CTDIR_CTRL_QUAD;
/* data[1] contains GPCT_X1, GPCT_X2 or GPCT_X4 */
if (data[1] == GPCT_X2)
val |= S526_GPCT_MODE_CLK_SRC_QUADX2;
else if (data[1] == GPCT_X4)
val |= S526_GPCT_MODE_CLK_SRC_QUADX4;
else
val |= S526_GPCT_MODE_CLK_SRC_QUADX1;
/* When to take into account the indexpulse: */
/*
* if (data[2] == GPCT_IndexPhaseLowLow) {
* } else if (data[2] == GPCT_IndexPhaseLowHigh) {
* } else if (data[2] == GPCT_IndexPhaseHighLow) {
* } else if (data[2] == GPCT_IndexPhaseHighHigh) {
* }
*/
/* Take into account the index pulse? */
if (data[3] == GPCT_RESET_COUNTER_ON_INDEX) {
/* Auto load with INDEX^ */
val |= S526_GPCT_MODE_AUTOLOAD_IXRISE;
}
/* Set Counter Mode Register */
val = data[1] & 0xffff;
outw(val, dev->iobase + S526_GPCT_MODE_REG(chan));
/* Load the pre-load register */
s526_gpct_write(dev, chan, data[2]);
/* Write the Counter Control Register */
if (data[3])
outw(data[3] & 0xffff,
dev->iobase + S526_GPCT_CTRL_REG(chan));
/* Reset the counter if it is software preload */
if ((val & S526_GPCT_MODE_AUTOLOAD_MASK) ==
S526_GPCT_MODE_AUTOLOAD_NONE) {
/* Reset the counter */
outw(S526_GPCT_CTRL_CT_RESET,
dev->iobase + S526_GPCT_CTRL_REG(chan));
/* Load the counter from PR0 */
outw(S526_GPCT_CTRL_CT_LOAD,
dev->iobase + S526_GPCT_CTRL_REG(chan));
}
#endif
break;
case INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR:
/*
* data[0]: Application Type
* data[1]: Counter Mode Register Value
* data[2]: Pre-load Register 0 Value
* data[3]: Pre-load Register 1 Value
		 * data[4]: Counter Control Register
*/
devpriv->gpct_config[chan] = data[0];
/* Set Counter Mode Register */
val = data[1] & 0xffff;
/* Select PR0 */
val &= ~S526_GPCT_MODE_PR_SELECT_MASK;
val |= S526_GPCT_MODE_PR_SELECT_PR0;
outw(val, dev->iobase + S526_GPCT_MODE_REG(chan));
/* Load the pre-load register 0 */
s526_gpct_write(dev, chan, data[2]);
/* Set Counter Mode Register */
val = data[1] & 0xffff;
/* Select PR1 */
val &= ~S526_GPCT_MODE_PR_SELECT_MASK;
val |= S526_GPCT_MODE_PR_SELECT_PR1;
outw(val, dev->iobase + S526_GPCT_MODE_REG(chan));
/* Load the pre-load register 1 */
s526_gpct_write(dev, chan, data[3]);
/* Write the Counter Control Register */
if (data[4]) {
val = data[4] & 0xffff;
outw(val, dev->iobase + S526_GPCT_CTRL_REG(chan));
}
break;
case INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR:
/*
* data[0]: Application Type
* data[1]: Counter Mode Register Value
* data[2]: Pre-load Register 0 Value
* data[3]: Pre-load Register 1 Value
		 * data[4]: Counter Control Register
*/
devpriv->gpct_config[chan] = data[0];
/* Set Counter Mode Register */
val = data[1] & 0xffff;
/* Select PR0 */
val &= ~S526_GPCT_MODE_PR_SELECT_MASK;
val |= S526_GPCT_MODE_PR_SELECT_PR0;
outw(val, dev->iobase + S526_GPCT_MODE_REG(chan));
/* Load the pre-load register 0 */
s526_gpct_write(dev, chan, data[2]);
/* Set Counter Mode Register */
val = data[1] & 0xffff;
/* Select PR1 */
val &= ~S526_GPCT_MODE_PR_SELECT_MASK;
val |= S526_GPCT_MODE_PR_SELECT_PR1;
outw(val, dev->iobase + S526_GPCT_MODE_REG(chan));
/* Load the pre-load register 1 */
s526_gpct_write(dev, chan, data[3]);
/* Write the Counter Control Register */
if (data[4]) {
val = data[4] & 0xffff;
outw(val, dev->iobase + S526_GPCT_CTRL_REG(chan));
}
break;
default:
return -EINVAL;
}
return insn->n;
}
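/*
 * Editor's sketch (not compiled): a data[] block for the quadrature
 * case handled above. The mode register value shown is only an
 * illustrative combination of the S526_GPCT_MODE_* bits.
 */
#if 0
unsigned int example_data[4] = {
	INSN_CONFIG_GPCT_QUADRATURE_ENCODER,	/* application type */
	S526_GPCT_MODE_CTDIR_CTRL_QUAD | S526_GPCT_MODE_CLK_SRC_QUADX4,
	0,					/* pre-load register value */
	0,					/* counter control register */
};
#endif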
static int s526_gpct_winsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct s526_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
inw(dev->iobase + S526_GPCT_MODE_REG(chan)); /* Is this required? */
/* Check what Application of Counter this channel is configured for */
switch (devpriv->gpct_config[chan]) {
case INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR:
/*
* data[0] contains the PULSE_WIDTH
* data[1] contains the PULSE_PERIOD
* @pre PULSE_PERIOD > PULSE_WIDTH > 0
* The above periods must be expressed as a multiple of the
* pulse frequency on the selected source
*/
if ((data[1] <= data[0]) || !data[0])
return -EINVAL;
/* to write the PULSE_WIDTH */
fallthrough;
case INSN_CONFIG_GPCT_QUADRATURE_ENCODER:
case INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR:
s526_gpct_write(dev, chan, data[0]);
break;
default:
return -EINVAL;
}
return insn->n;
}
static int s526_eoc(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context)
{
unsigned int status;
status = inw(dev->iobase + S526_INT_STATUS_REG);
if (status & context) {
/* we got our eoc event, clear it */
outw(context, dev->iobase + S526_INT_STATUS_REG);
return 0;
}
return -EBUSY;
}
static int s526_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct s526_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int ctrl;
unsigned int val;
int ret;
int i;
ctrl = S526_AI_CTRL_CONV(chan) | S526_AI_CTRL_READ(chan) |
S526_AI_CTRL_START;
if (ctrl != devpriv->ai_ctrl) {
/*
* The multiplexor needs to change, enable the 15us
* delay for the first sample.
*/
devpriv->ai_ctrl = ctrl;
ctrl |= S526_AI_CTRL_DELAY;
}
for (i = 0; i < insn->n; i++) {
/* trigger conversion */
outw(ctrl, dev->iobase + S526_AI_CTRL_REG);
ctrl &= ~S526_AI_CTRL_DELAY;
/* wait for conversion to end */
ret = comedi_timeout(dev, s, insn, s526_eoc, S526_INT_AI);
if (ret)
return ret;
val = inw(dev->iobase + S526_AI_REG);
data[i] = comedi_offset_munge(s, val);
}
return insn->n;
}
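/*
 * Editor's sketch: the control word built above for channel 2:
 *   S526_AI_CTRL_CONV(2) = 1 << (5 + 2) = 0x0080
 *   S526_AI_CTRL_READ(2) = 2 << 1       = 0x0004
 *   S526_AI_CTRL_START                  = 0x0001
 * giving 0x0085, plus S526_AI_CTRL_DELAY for the first conversion
 * after a multiplexer change.
 */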
static int s526_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int ctrl = S526_AO_CTRL_CHAN(chan);
unsigned int val = s->readback[chan];
int ret;
int i;
outw(ctrl, dev->iobase + S526_AO_CTRL_REG);
ctrl |= S526_AO_CTRL_START;
for (i = 0; i < insn->n; i++) {
val = data[i];
outw(val, dev->iobase + S526_AO_REG);
outw(ctrl, dev->iobase + S526_AO_CTRL_REG);
/* wait for conversion to end */
ret = comedi_timeout(dev, s, insn, s526_eoc, S526_INT_AO);
if (ret)
return ret;
}
s->readback[chan] = val;
return insn->n;
}
static int s526_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
if (comedi_dio_update_state(s, data))
outw(s->state, dev->iobase + S526_DIO_CTRL_REG);
data[1] = inw(dev->iobase + S526_DIO_CTRL_REG) & 0xff;
return insn->n;
}
static int s526_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int mask;
int ret;
/*
* Digital I/O can be configured as inputs or outputs in
* groups of 4; DIO group 1 (DIO0-3) and DIO group 2 (DIO4-7).
*/
if (chan < 4)
mask = 0x0f;
else
mask = 0xf0;
ret = comedi_dio_insn_config(dev, s, insn, data, mask);
if (ret)
return ret;
if (s->io_bits & 0x0f)
s->state |= S526_DIO_CTRL_GRP1_OUT;
else
s->state &= ~S526_DIO_CTRL_GRP1_OUT;
if (s->io_bits & 0xf0)
s->state |= S526_DIO_CTRL_GRP2_OUT;
else
s->state &= ~S526_DIO_CTRL_GRP2_OUT;
outw(s->state, dev->iobase + S526_DIO_CTRL_REG);
return insn->n;
}
static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct s526_private *devpriv;
struct comedi_subdevice *s;
int ret;
ret = comedi_request_region(dev, it->options[0], 0x40);
if (ret)
return ret;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
/* General-Purpose Counter/Timer (GPCT) */
s = &dev->subdevices[0];
s->type = COMEDI_SUBD_COUNTER;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL;
s->n_chan = 4;
s->maxdata = 0x00ffffff;
s->insn_read = s526_gpct_rinsn;
s->insn_config = s526_gpct_insn_config;
s->insn_write = s526_gpct_winsn;
/*
* Analog Input subdevice
* channels 0 to 7 are the regular differential inputs
* channel 8 is "reference 0" (+10V)
* channel 9 is "reference 1" (0V)
*/
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_DIFF;
s->n_chan = 10;
s->maxdata = 0xffff;
s->range_table = &range_bipolar10;
s->len_chanlist = 16;
s->insn_read = s526_ai_insn_read;
/* Analog Output subdevice */
s = &dev->subdevices[2];
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 4;
s->maxdata = 0xffff;
s->range_table = &range_bipolar10;
s->insn_write = s526_ao_insn_write;
ret = comedi_alloc_subdev_readback(s);
if (ret)
return ret;
/* Digital I/O subdevice */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = 8;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = s526_dio_insn_bits;
s->insn_config = s526_dio_insn_config;
return 0;
}
static struct comedi_driver s526_driver = {
.driver_name = "s526",
.module = THIS_MODULE,
.attach = s526_attach,
.detach = comedi_legacy_detach,
};
module_comedi_driver(s526_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/s526.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* ssv_dnp.c
* generic comedi driver for SSV Embedded Systems' DIL/Net-PCs
* Copyright (C) 2001 Robert Schwebel <[email protected]>
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <[email protected]>
*/
/*
* Driver: ssv_dnp
* Description: SSV Embedded Systems DIL/Net-PC
* Author: Robert Schwebel <[email protected]>
* Devices: [SSV Embedded Systems] DIL/Net-PC 1486 (dnp-1486)
* Status: unknown
*/
/* include files ----------------------------------------------------------- */
#include <linux/module.h>
#include <linux/comedi/comedidev.h>
/* Some global definitions: the registers of the DNP ----------------------- */
/* */
/* For port A and B the mode register has bits corresponding to the output */
/* pins, where Bit-N = 0 -> input, Bit-N = 1 -> output. Note that bits    */
/* 4 to 7 of the port C data register correspond to pins 0..3; ensure     */
/* that bits 0..3 remain unchanged! For details about the Port C Mode     */
/* Register, see the remarks in dnp_dio_insn_config() below.              */
#define CSCIR 0x22 /* Chip Setup and Control Index Register */
#define CSCDR 0x23 /* Chip Setup and Control Data Register */
#define PAMR 0xa5 /* Port A Mode Register */
#define PADR 0xa9 /* Port A Data Register */
#define PBMR 0xa4 /* Port B Mode Register */
#define PBDR 0xa8 /* Port B Data Register */
#define PCMR 0xa3 /* Port C Mode Register */
#define PCDR 0xa7 /* Port C Data Register */
static int dnp_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int mask;
unsigned int val;
/*
	 * Ports A and B are straightforward: each bit corresponds to an
* output pin with the same order. Port C is different: bits 0...3
* correspond to bits 4...7 of the output register (PCDR).
*/
mask = comedi_dio_update_state(s, data);
if (mask) {
outb(PADR, CSCIR);
outb(s->state & 0xff, CSCDR);
outb(PBDR, CSCIR);
outb((s->state >> 8) & 0xff, CSCDR);
outb(PCDR, CSCIR);
val = inb(CSCDR) & 0x0f;
outb(((s->state >> 12) & 0xf0) | val, CSCDR);
}
outb(PADR, CSCIR);
val = inb(CSCDR);
outb(PBDR, CSCIR);
val |= (inb(CSCDR) << 8);
outb(PCDR, CSCIR);
val |= ((inb(CSCDR) & 0xf0) << 12);
data[1] = val;
return insn->n;
}
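/*
 * Editor's sketch: the port C packing above. Subdevice state bits
 * 16..19 (port C pins 0..3) land in PCDR bits 4..7, hence
 * (s->state >> 12) & 0xf0; e.g. state bit 16 becomes register bit 4.
 * The read path reverses this with ((inb(CSCDR) & 0xf0) << 12).
 */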
static int dnp_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int mask;
unsigned int val;
int ret;
ret = comedi_dio_insn_config(dev, s, insn, data, 0);
if (ret)
return ret;
if (chan < 8) { /* Port A */
mask = 1 << chan;
outb(PAMR, CSCIR);
} else if (chan < 16) { /* Port B */
mask = 1 << (chan - 8);
outb(PBMR, CSCIR);
} else { /* Port C */
/*
* We have to pay attention with port C.
* This is the meaning of PCMR:
* Bit in PCMR: 7 6 5 4 3 2 1 0
* Corresponding port C pin: d 3 d 2 d 1 d 0 d= don't touch
*
* Multiplication by 2 brings bits into correct position
* for PCMR!
*/
mask = 1 << ((chan - 16) * 2);
outb(PCMR, CSCIR);
}
val = inb(CSCDR);
if (data[0] == COMEDI_OUTPUT)
val |= mask;
else
val &= ~mask;
outb(val, CSCDR);
return insn->n;
}
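/*
 * Editor's sketch: worked example of the port C mode mapping above.
 * For chan = 18 (port C pin 2), mask = 1 << ((18 - 16) * 2) = 0x10,
 * i.e. PCMR bit 4, which per the table controls pin 2 while leaving
 * the "don't touch" bits alone.
 */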
static int dnp_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct comedi_subdevice *s;
int ret;
/*
* We use I/O ports 0x22, 0x23 and 0xa3-0xa9, which are always
* allocated for the primary 8259, so we don't need to allocate
* them ourselves.
*/
ret = comedi_alloc_subdevices(dev, 1);
if (ret)
return ret;
s = &dev->subdevices[0];
/* digital i/o subdevice */
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = 20;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = dnp_dio_insn_bits;
s->insn_config = dnp_dio_insn_config;
/* configure all ports as input (default) */
outb(PAMR, CSCIR);
outb(0x00, CSCDR);
outb(PBMR, CSCIR);
outb(0x00, CSCDR);
outb(PCMR, CSCIR);
outb((inb(CSCDR) & 0xAA), CSCDR);
return 0;
}
static void dnp_detach(struct comedi_device *dev)
{
outb(PAMR, CSCIR);
outb(0x00, CSCDR);
outb(PBMR, CSCIR);
outb(0x00, CSCDR);
outb(PCMR, CSCIR);
outb((inb(CSCDR) & 0xAA), CSCDR);
}
static struct comedi_driver dnp_driver = {
.driver_name = "dnp-1486",
.module = THIS_MODULE,
.attach = dnp_attach,
.detach = dnp_detach,
};
module_comedi_driver(dnp_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/ssv_dnp.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* COMEDI ISA DMA support functions
* Copyright (c) 2014 H Hartley Sweeten <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/isa-dma.h>
#include <linux/comedi/comedidev.h>
#include <linux/comedi/comedi_isadma.h>
/**
* comedi_isadma_program - program and enable an ISA DMA transfer
* @desc: the ISA DMA cookie to program and enable
*/
void comedi_isadma_program(struct comedi_isadma_desc *desc)
{
unsigned long flags;
flags = claim_dma_lock();
clear_dma_ff(desc->chan);
set_dma_mode(desc->chan, desc->mode);
set_dma_addr(desc->chan, desc->hw_addr);
set_dma_count(desc->chan, desc->size);
enable_dma(desc->chan);
release_dma_lock(flags);
}
EXPORT_SYMBOL_GPL(comedi_isadma_program);
/**
* comedi_isadma_disable - disable the ISA DMA channel
* @dma_chan: the DMA channel to disable
*
* Returns the residue (remaining bytes) left in the DMA transfer.
*/
unsigned int comedi_isadma_disable(unsigned int dma_chan)
{
unsigned long flags;
unsigned int residue;
flags = claim_dma_lock();
disable_dma(dma_chan);
residue = get_dma_residue(dma_chan);
release_dma_lock(flags);
return residue;
}
EXPORT_SYMBOL_GPL(comedi_isadma_disable);
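/*
 * Editor's sketch (not compiled): the per-transfer sequence a driver
 * typically builds from the two helpers above. The descriptor setup
 * is hypothetical.
 */
#if 0
static void example_run(struct comedi_isadma_desc *desc)
{
	unsigned int residue, transferred;
	desc->size = desc->maxsize;
	comedi_isadma_program(desc);
	/* ... wait for the device to signal completion ... */
	residue = comedi_isadma_disable(desc->chan);
	transferred = desc->size - residue;
}
#endif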
/**
* comedi_isadma_disable_on_sample - disable the ISA DMA channel
* @dma_chan: the DMA channel to disable
* @size: the sample size (in bytes)
*
* Returns the residue (remaining bytes) left in the DMA transfer.
*/
unsigned int comedi_isadma_disable_on_sample(unsigned int dma_chan,
unsigned int size)
{
int stalled = 0;
unsigned long flags;
unsigned int residue;
unsigned int new_residue;
residue = comedi_isadma_disable(dma_chan);
while (residue % size) {
/* residue is a partial sample, enable DMA to allow more data */
flags = claim_dma_lock();
enable_dma(dma_chan);
release_dma_lock(flags);
udelay(2);
new_residue = comedi_isadma_disable(dma_chan);
/* is DMA stalled? */
if (new_residue == residue) {
stalled++;
if (stalled > 10)
break;
} else {
residue = new_residue;
stalled = 0;
}
}
return residue;
}
EXPORT_SYMBOL_GPL(comedi_isadma_disable_on_sample);
/**
* comedi_isadma_poll - poll the current DMA transfer
* @dma: the ISA DMA to poll
*
* Returns the position (in bytes) of the current DMA transfer.
*/
unsigned int comedi_isadma_poll(struct comedi_isadma *dma)
{
struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
unsigned long flags;
unsigned int result;
unsigned int result1;
flags = claim_dma_lock();
clear_dma_ff(desc->chan);
if (!isa_dma_bridge_buggy)
disable_dma(desc->chan);
result = get_dma_residue(desc->chan);
/*
	 * Read the counter again and pick the higher value, to avoid
	 * sampling the counter while its low byte rolls over when
	 * isa_dma_bridge_buggy is set.
*/
result1 = get_dma_residue(desc->chan);
if (!isa_dma_bridge_buggy)
enable_dma(desc->chan);
release_dma_lock(flags);
if (result < result1)
result = result1;
if (result >= desc->size || result == 0)
return 0;
return desc->size - result;
}
EXPORT_SYMBOL_GPL(comedi_isadma_poll);
/**
* comedi_isadma_set_mode - set the ISA DMA transfer direction
* @desc: the ISA DMA cookie to set
* @dma_dir: the DMA direction
*/
void comedi_isadma_set_mode(struct comedi_isadma_desc *desc, char dma_dir)
{
desc->mode = (dma_dir == COMEDI_ISADMA_READ) ? DMA_MODE_READ
: DMA_MODE_WRITE;
}
EXPORT_SYMBOL_GPL(comedi_isadma_set_mode);
/**
* comedi_isadma_alloc - allocate and initialize the ISA DMA
* @dev: comedi_device struct
* @n_desc: the number of cookies to allocate
* @dma_chan1: DMA channel for the first cookie
* @dma_chan2: DMA channel for the second cookie
* @maxsize: the size of the buffer to allocate for each cookie
* @dma_dir: the DMA direction
*
* Returns the allocated and initialized ISA DMA or NULL if anything fails.
*/
struct comedi_isadma *comedi_isadma_alloc(struct comedi_device *dev,
int n_desc, unsigned int dma_chan1,
unsigned int dma_chan2,
unsigned int maxsize, char dma_dir)
{
struct comedi_isadma *dma = NULL;
struct comedi_isadma_desc *desc;
unsigned int dma_chans[2];
int i;
if (n_desc < 1 || n_desc > 2)
goto no_dma;
dma = kzalloc(sizeof(*dma), GFP_KERNEL);
if (!dma)
goto no_dma;
desc = kcalloc(n_desc, sizeof(*desc), GFP_KERNEL);
if (!desc)
goto no_dma;
dma->desc = desc;
dma->n_desc = n_desc;
if (dev->hw_dev) {
dma->dev = dev->hw_dev;
} else {
/* Fall back to using the "class" device. */
if (!dev->class_dev)
goto no_dma;
/* Need 24-bit mask for ISA DMA. */
if (dma_coerce_mask_and_coherent(dev->class_dev,
DMA_BIT_MASK(24))) {
goto no_dma;
}
dma->dev = dev->class_dev;
}
dma_chans[0] = dma_chan1;
if (dma_chan2 == 0 || dma_chan2 == dma_chan1)
dma_chans[1] = dma_chan1;
else
dma_chans[1] = dma_chan2;
if (request_dma(dma_chans[0], dev->board_name))
goto no_dma;
dma->chan = dma_chans[0];
if (dma_chans[1] != dma_chans[0]) {
if (request_dma(dma_chans[1], dev->board_name))
goto no_dma;
}
dma->chan2 = dma_chans[1];
for (i = 0; i < n_desc; i++) {
desc = &dma->desc[i];
desc->chan = dma_chans[i];
desc->maxsize = maxsize;
desc->virt_addr = dma_alloc_coherent(dma->dev, desc->maxsize,
&desc->hw_addr,
GFP_KERNEL);
if (!desc->virt_addr)
goto no_dma;
comedi_isadma_set_mode(desc, dma_dir);
}
return dma;
no_dma:
comedi_isadma_free(dma);
return NULL;
}
EXPORT_SYMBOL_GPL(comedi_isadma_alloc);
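/*
 * Editor's sketch (not compiled): a driver allocating two cookies of
 * 16 KiB each on DMA channel 1 for input. The channel number and
 * buffer size are hypothetical.
 */
#if 0
static int example_alloc(struct comedi_device *dev)
{
	struct comedi_isadma *dma;
	dma = comedi_isadma_alloc(dev, 2, 1, 1, 0x4000,
				  COMEDI_ISADMA_READ);
	if (!dma)
		return -ENOMEM;
	/* ... store dma in the driver's private data ... */
	return 0;
}
#endif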
/**
* comedi_isadma_free - free the ISA DMA
* @dma: the ISA DMA to free
*/
void comedi_isadma_free(struct comedi_isadma *dma)
{
struct comedi_isadma_desc *desc;
int i;
if (!dma)
return;
if (dma->desc) {
for (i = 0; i < dma->n_desc; i++) {
desc = &dma->desc[i];
if (desc->virt_addr)
dma_free_coherent(dma->dev, desc->maxsize,
desc->virt_addr,
desc->hw_addr);
}
kfree(dma->desc);
}
if (dma->chan2 && dma->chan2 != dma->chan)
free_dma(dma->chan2);
if (dma->chan)
free_dma(dma->chan);
kfree(dma);
}
EXPORT_SYMBOL_GPL(comedi_isadma_free);
static int __init comedi_isadma_init(void)
{
return 0;
}
module_init(comedi_isadma_init);
static void __exit comedi_isadma_exit(void)
{
}
module_exit(comedi_isadma_exit);
MODULE_AUTHOR("H Hartley Sweeten <[email protected]>");
MODULE_DESCRIPTION("Comedi ISA DMA support");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/comedi_isadma.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* das08_isa.c
* comedi driver for DAS08 ISA/PC-104 boards
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <[email protected]>
* Copyright (C) 2001,2002,2003 Frank Mori Hess <[email protected]>
* Copyright (C) 2004 Salvador E. Tropea <[email protected]> <[email protected]>
*/
/*
* Driver: das08_isa
* Description: DAS-08 ISA/PC-104 compatible boards
* Devices: [Keithley Metrabyte] DAS08 (isa-das08),
* [ComputerBoards] DAS08 (isa-das08), DAS08-PGM (das08-pgm),
* DAS08-PGH (das08-pgh), DAS08-PGL (das08-pgl), DAS08-AOH (das08-aoh),
* DAS08-AOL (das08-aol), DAS08-AOM (das08-aom), DAS08/JR-AO (das08/jr-ao),
* DAS08/JR-16-AO (das08jr-16-ao), PC104-DAS08 (pc104-das08),
* DAS08/JR/16 (das08jr/16)
* Author: Warren Jasper, ds, Frank Hess
* Updated: Fri, 31 Aug 2012 19:19:06 +0100
* Status: works
*
* This is the ISA/PC-104-specific support split off from the das08 driver.
*
* Configuration Options:
* [0] - base io address
*/
#include <linux/module.h>
#include <linux/comedi/comedidev.h>
#include "das08.h"
static const struct das08_board_struct das08_isa_boards[] = {
{
/* cio-das08.pdf */
.name = "isa-das08",
.ai_nbits = 12,
.ai_pg = das08_pg_none,
.ai_encoding = das08_encode12,
.di_nchan = 3,
.do_nchan = 4,
.i8255_offset = 8,
.i8254_offset = 4,
.iosize = 16, /* unchecked */
}, {
/* cio-das08pgx.pdf */
.name = "das08-pgm",
.ai_nbits = 12,
.ai_pg = das08_pgm,
.ai_encoding = das08_encode12,
.di_nchan = 3,
.do_nchan = 4,
.i8255_offset = 0,
.i8254_offset = 0x04,
.iosize = 16, /* unchecked */
}, {
/* cio-das08pgx.pdf */
.name = "das08-pgh",
.ai_nbits = 12,
.ai_pg = das08_pgh,
.ai_encoding = das08_encode12,
.di_nchan = 3,
.do_nchan = 4,
.i8254_offset = 0x04,
.iosize = 16, /* unchecked */
}, {
/* cio-das08pgx.pdf */
.name = "das08-pgl",
.ai_nbits = 12,
.ai_pg = das08_pgl,
.ai_encoding = das08_encode12,
.di_nchan = 3,
.do_nchan = 4,
.i8254_offset = 0x04,
.iosize = 16, /* unchecked */
}, {
/* cio-das08_aox.pdf */
.name = "das08-aoh",
.ai_nbits = 12,
.ai_pg = das08_pgh,
.ai_encoding = das08_encode12,
.ao_nbits = 12,
.di_nchan = 3,
.do_nchan = 4,
.i8255_offset = 0x0c,
.i8254_offset = 0x04,
.iosize = 16, /* unchecked */
}, {
/* cio-das08_aox.pdf */
.name = "das08-aol",
.ai_nbits = 12,
.ai_pg = das08_pgl,
.ai_encoding = das08_encode12,
.ao_nbits = 12,
.di_nchan = 3,
.do_nchan = 4,
.i8255_offset = 0x0c,
.i8254_offset = 0x04,
.iosize = 16, /* unchecked */
}, {
/* cio-das08_aox.pdf */
.name = "das08-aom",
.ai_nbits = 12,
.ai_pg = das08_pgm,
.ai_encoding = das08_encode12,
.ao_nbits = 12,
.di_nchan = 3,
.do_nchan = 4,
.i8255_offset = 0x0c,
.i8254_offset = 0x04,
.iosize = 16, /* unchecked */
}, {
/* cio-das08-jr-ao.pdf */
.name = "das08/jr-ao",
.is_jr = true,
.ai_nbits = 12,
.ai_pg = das08_pg_none,
.ai_encoding = das08_encode12,
.ao_nbits = 12,
.di_nchan = 8,
.do_nchan = 8,
.iosize = 16, /* unchecked */
}, {
/* cio-das08jr-16-ao.pdf */
.name = "das08jr-16-ao",
.is_jr = true,
.ai_nbits = 16,
.ai_pg = das08_pg_none,
.ai_encoding = das08_encode16,
.ao_nbits = 16,
.di_nchan = 8,
.do_nchan = 8,
.i8254_offset = 0x04,
.iosize = 16, /* unchecked */
}, {
.name = "pc104-das08",
.ai_nbits = 12,
.ai_pg = das08_pg_none,
.ai_encoding = das08_encode12,
.di_nchan = 3,
.do_nchan = 4,
.i8254_offset = 4,
.iosize = 16, /* unchecked */
}, {
.name = "das08jr/16",
.is_jr = true,
.ai_nbits = 16,
.ai_pg = das08_pg_none,
.ai_encoding = das08_encode16,
.di_nchan = 8,
.do_nchan = 8,
.iosize = 16, /* unchecked */
},
};
static int das08_isa_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
const struct das08_board_struct *board = dev->board_ptr;
struct das08_private_struct *devpriv;
int ret;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
ret = comedi_request_region(dev, it->options[0], board->iosize);
if (ret)
return ret;
return das08_common_attach(dev, dev->iobase);
}
static struct comedi_driver das08_isa_driver = {
.driver_name = "isa-das08",
.module = THIS_MODULE,
.attach = das08_isa_attach,
.detach = comedi_legacy_detach,
.board_name = &das08_isa_boards[0].name,
.num_names = ARRAY_SIZE(das08_isa_boards),
.offset = sizeof(das08_isa_boards[0]),
};
module_comedi_driver(das08_isa_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/das08_isa.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* COMEDI driver for generic PCI based 8255 digital i/o boards
* Copyright (C) 2012 H Hartley Sweeten <[email protected]>
*
* Based on the tested adl_pci7296 driver written by:
* Jon Grierson <[email protected]>
* and the experimental cb_pcidio driver written by:
* Yoshiya Matsuzaka
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <[email protected]>
*/
/*
* Driver: 8255_pci
* Description: Generic PCI based 8255 Digital I/O boards
* Devices: [ADLink] PCI-7224 (adl_pci-7224), PCI-7248 (adl_pci-7248),
* PCI-7296 (adl_pci-7296),
* [Measurement Computing] PCI-DIO24 (cb_pci-dio24),
* PCI-DIO24H (cb_pci-dio24h), PCI-DIO48H (cb_pci-dio48h),
* PCI-DIO96H (cb_pci-dio96h),
* [National Instruments] PCI-DIO-96 (ni_pci-dio-96),
* PCI-DIO-96B (ni_pci-dio-96b), PXI-6508 (ni_pxi-6508),
* PCI-6503 (ni_pci-6503), PCI-6503B (ni_pci-6503b),
* PCI-6503X (ni_pci-6503x), PXI-6503 (ni_pxi-6503)
* Author: H Hartley Sweeten <[email protected]>
* Updated: Wed, 12 Sep 2012 11:52:01 -0700
* Status: untested
*
* These boards have one or more 8255 digital I/O chips, each of which
* is supported as a separate 24-channel DIO subdevice.
*
* Boards with 24 DIO channels (1 DIO subdevice):
*
* PCI-7224, PCI-DIO24, PCI-DIO24H, PCI-6503, PCI-6503B, PCI-6503X,
* PXI-6503
*
* Boards with 48 DIO channels (2 DIO subdevices):
*
* PCI-7248, PCI-DIO48H
*
* Boards with 96 DIO channels (4 DIO subdevices):
*
* PCI-7296, PCI-DIO96H, PCI-DIO-96, PCI-DIO-96B, PXI-6508
*
* Some of these boards also have an 8254 programmable timer/counter
* chip. This chip is not currently supported by this driver.
*
* Interrupt support for these boards is also not currently supported.
*
* Configuration Options: not applicable, uses PCI auto config.
*/
#include <linux/module.h>
#include <linux/comedi/comedi_pci.h>
#include <linux/comedi/comedi_8255.h>
enum pci_8255_boardid {
BOARD_ADLINK_PCI7224,
BOARD_ADLINK_PCI7248,
BOARD_ADLINK_PCI7296,
BOARD_CB_PCIDIO24,
BOARD_CB_PCIDIO24H,
BOARD_CB_PCIDIO48H_OLD,
BOARD_CB_PCIDIO48H_NEW,
BOARD_CB_PCIDIO96H,
BOARD_NI_PCIDIO96,
BOARD_NI_PCIDIO96B,
BOARD_NI_PXI6508,
BOARD_NI_PCI6503,
BOARD_NI_PCI6503B,
BOARD_NI_PCI6503X,
BOARD_NI_PXI_6503,
};
struct pci_8255_boardinfo {
const char *name;
int dio_badr;
int n_8255;
unsigned int has_mite:1;
};
static const struct pci_8255_boardinfo pci_8255_boards[] = {
[BOARD_ADLINK_PCI7224] = {
.name = "adl_pci-7224",
.dio_badr = 2,
.n_8255 = 1,
},
[BOARD_ADLINK_PCI7248] = {
.name = "adl_pci-7248",
.dio_badr = 2,
.n_8255 = 2,
},
[BOARD_ADLINK_PCI7296] = {
.name = "adl_pci-7296",
.dio_badr = 2,
.n_8255 = 4,
},
[BOARD_CB_PCIDIO24] = {
.name = "cb_pci-dio24",
.dio_badr = 2,
.n_8255 = 1,
},
[BOARD_CB_PCIDIO24H] = {
.name = "cb_pci-dio24h",
.dio_badr = 2,
.n_8255 = 1,
},
[BOARD_CB_PCIDIO48H_OLD] = {
.name = "cb_pci-dio48h",
.dio_badr = 1,
.n_8255 = 2,
},
[BOARD_CB_PCIDIO48H_NEW] = {
.name = "cb_pci-dio48h",
.dio_badr = 2,
.n_8255 = 2,
},
[BOARD_CB_PCIDIO96H] = {
.name = "cb_pci-dio96h",
.dio_badr = 2,
.n_8255 = 4,
},
[BOARD_NI_PCIDIO96] = {
.name = "ni_pci-dio-96",
.dio_badr = 1,
.n_8255 = 4,
.has_mite = 1,
},
[BOARD_NI_PCIDIO96B] = {
.name = "ni_pci-dio-96b",
.dio_badr = 1,
.n_8255 = 4,
.has_mite = 1,
},
[BOARD_NI_PXI6508] = {
.name = "ni_pxi-6508",
.dio_badr = 1,
.n_8255 = 4,
.has_mite = 1,
},
[BOARD_NI_PCI6503] = {
.name = "ni_pci-6503",
.dio_badr = 1,
.n_8255 = 1,
.has_mite = 1,
},
[BOARD_NI_PCI6503B] = {
.name = "ni_pci-6503b",
.dio_badr = 1,
.n_8255 = 1,
.has_mite = 1,
},
[BOARD_NI_PCI6503X] = {
.name = "ni_pci-6503x",
.dio_badr = 1,
.n_8255 = 1,
.has_mite = 1,
},
[BOARD_NI_PXI_6503] = {
.name = "ni_pxi-6503",
.dio_badr = 1,
.n_8255 = 1,
.has_mite = 1,
},
};
/* ripped from mite.h and mite_setup2() to avoid mite dependency */
#define MITE_IODWBSR 0xc0 /* IO Device Window Base Size Register */
#define WENAB BIT(7) /* window enable */
static int pci_8255_mite_init(struct pci_dev *pcidev)
{
void __iomem *mite_base;
u32 main_phys_addr;
/* ioremap the MITE registers (BAR 0) temporarily */
mite_base = pci_ioremap_bar(pcidev, 0);
if (!mite_base)
return -ENOMEM;
/* set data window to main registers (BAR 1) */
main_phys_addr = pci_resource_start(pcidev, 1);
writel(main_phys_addr | WENAB, mite_base + MITE_IODWBSR);
/* finished with MITE registers */
iounmap(mite_base);
return 0;
}
static int pci_8255_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct pci_8255_boardinfo *board = NULL;
struct comedi_subdevice *s;
int ret;
int i;
if (context < ARRAY_SIZE(pci_8255_boards))
board = &pci_8255_boards[context];
if (!board)
return -ENODEV;
dev->board_ptr = board;
dev->board_name = board->name;
ret = comedi_pci_enable(dev);
if (ret)
return ret;
if (board->has_mite) {
ret = pci_8255_mite_init(pcidev);
if (ret)
return ret;
}
if ((pci_resource_flags(pcidev, board->dio_badr) & IORESOURCE_MEM)) {
dev->mmio = pci_ioremap_bar(pcidev, board->dio_badr);
if (!dev->mmio)
return -ENOMEM;
} else {
dev->iobase = pci_resource_start(pcidev, board->dio_badr);
}
/*
* One, two, or four subdevices are setup by this driver depending
* on the number of channels provided by the board. Each subdevice
* has 24 channels supported by the 8255 module.
*/
ret = comedi_alloc_subdevices(dev, board->n_8255);
if (ret)
return ret;
for (i = 0; i < board->n_8255; i++) {
s = &dev->subdevices[i];
if (dev->mmio)
ret = subdev_8255_mm_init(dev, s, NULL, i * I8255_SIZE);
else
ret = subdev_8255_init(dev, s, NULL, i * I8255_SIZE);
if (ret)
return ret;
}
return 0;
}
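/*
 * Editor's sketch: on a 96-channel board (n_8255 == 4) the loop above
 * creates four 24-channel DIO subdevices whose 8255 register blocks
 * start at offsets 0x0, 0x4, 0x8 and 0xc (assuming I8255_SIZE is 4,
 * one register block per chip).
 */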
static struct comedi_driver pci_8255_driver = {
.driver_name = "8255_pci",
.module = THIS_MODULE,
.auto_attach = pci_8255_auto_attach,
.detach = comedi_pci_detach,
};
static int pci_8255_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
return comedi_pci_auto_config(dev, &pci_8255_driver, id->driver_data);
}
static const struct pci_device_id pci_8255_pci_table[] = {
{ PCI_VDEVICE(ADLINK, 0x7224), BOARD_ADLINK_PCI7224 },
{ PCI_VDEVICE(ADLINK, 0x7248), BOARD_ADLINK_PCI7248 },
{ PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 },
{ PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 },
{ PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, 0x0000, 0x0000),
.driver_data = BOARD_CB_PCIDIO48H_OLD },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, PCI_VENDOR_ID_CB, 0x000b),
.driver_data = BOARD_CB_PCIDIO48H_NEW },
{ PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H },
{ PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 },
{ PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B },
{ PCI_VDEVICE(NI, 0x13c0), BOARD_NI_PXI6508 },
{ PCI_VDEVICE(NI, 0x0400), BOARD_NI_PCI6503 },
{ PCI_VDEVICE(NI, 0x1250), BOARD_NI_PCI6503B },
{ PCI_VDEVICE(NI, 0x17d0), BOARD_NI_PCI6503X },
{ PCI_VDEVICE(NI, 0x1800), BOARD_NI_PXI_6503 },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, pci_8255_pci_table);
static struct pci_driver pci_8255_pci_driver = {
.name = "8255_pci",
.id_table = pci_8255_pci_table,
.probe = pci_8255_pci_probe,
.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(pci_8255_driver, pci_8255_pci_driver);
MODULE_DESCRIPTION("COMEDI - Generic PCI based 8255 Digital I/O boards");
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/8255_pci.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/dt2814.c
* Hardware driver for Data Translation DT2814
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998 David A. Schleef <[email protected]>
*/
/*
* Driver: dt2814
* Description: Data Translation DT2814
* Author: ds
* Status: complete
* Devices: [Data Translation] DT2814 (dt2814)
*
* Configuration options:
* [0] - I/O port base address
* [1] - IRQ
*
* This card has 16 analog inputs multiplexed onto a 12 bit ADC. There
* is a minimally useful onboard clock. The base frequency for the
* clock is selected by jumpers, and the clock divider can be selected
* via programmed I/O. Unfortunately, the clock divider can only be
* a power of 10, from 1 to 10^7, of which only 3 or 4 are useful. In
* addition, the clock does not seem to be very accurate.
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/comedi/comedidev.h>
#include <linux/delay.h>
#define DT2814_CSR 0
#define DT2814_DATA 1
/*
* flags
*/
#define DT2814_FINISH 0x80
#define DT2814_ERR 0x40
#define DT2814_BUSY 0x20
#define DT2814_ENB 0x10
#define DT2814_CHANMASK 0x0f
#define DT2814_TIMEOUT 10
#define DT2814_MAX_SPEED 100000 /* Arbitrary 10 kHz limit */
static int dt2814_ai_notbusy(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context)
{
unsigned int status;
status = inb(dev->iobase + DT2814_CSR);
if (context)
*(unsigned int *)context = status;
if (status & DT2814_BUSY)
return -EBUSY;
return 0;
}
static int dt2814_ai_clear(struct comedi_device *dev)
{
unsigned int status = 0;
int ret;
/* Wait until not busy and get status register value. */
ret = comedi_timeout(dev, NULL, NULL, dt2814_ai_notbusy,
(unsigned long)&status);
if (ret)
return ret;
if (status & (DT2814_FINISH | DT2814_ERR)) {
/*
* There is unread data, or the error flag is set.
* Read the data register twice to clear the condition.
*/
inb(dev->iobase + DT2814_DATA);
inb(dev->iobase + DT2814_DATA);
}
return 0;
}
static int dt2814_ai_eoc(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context)
{
unsigned int status;
status = inb(dev->iobase + DT2814_CSR);
if (status & DT2814_FINISH)
return 0;
return -EBUSY;
}
static int dt2814_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
int n, hi, lo;
int chan;
int ret;
dt2814_ai_clear(dev); /* clear stale data or error */
for (n = 0; n < insn->n; n++) {
chan = CR_CHAN(insn->chanspec);
outb(chan, dev->iobase + DT2814_CSR);
ret = comedi_timeout(dev, s, insn, dt2814_ai_eoc, 0);
if (ret)
return ret;
hi = inb(dev->iobase + DT2814_DATA);
lo = inb(dev->iobase + DT2814_DATA);
data[n] = (hi << 4) | (lo >> 4);
}
return n;
}
static int dt2814_ns_to_timer(unsigned int *ns, unsigned int flags)
{
int i;
unsigned int f;
/* XXX ignores flags */
f = 10000; /* ns */
for (i = 0; i < 8; i++) {
if ((2 * (*ns)) < (f * 11))
break;
f *= 10;
}
*ns = f;
return i;
}
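/*
 * Worked examples for the rounding above: the break condition
 * 2 * (*ns) < 11 * f rounds the requested period to a power-of-ten
 * period, with the cut-off at 5.5 * f.  Requesting 60000 ns rounds up
 * to 100000 ns and returns divider code 1; requesting 1000000 ns is
 * exact and returns divider code 2 with the period unchanged.
 */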
static int dt2814_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
int err = 0;
unsigned int arg;
/* Step 1 : check if triggers are trivially valid */
err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER);
err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
if (err)
return 1;
/* Step 2a : make sure trigger sources are unique */
err |= comedi_check_trigger_is_unique(cmd->stop_src);
/* Step 2b : and mutually compatible */
if (err)
return 2;
/* Step 3: check if arguments are trivially valid */
err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
err |= comedi_check_trigger_arg_max(&cmd->scan_begin_arg, 1000000000);
err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
DT2814_MAX_SPEED);
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
if (cmd->stop_src == TRIG_COUNT)
err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 2);
else /* TRIG_NONE */
err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
if (err)
return 3;
/* step 4: fix up any arguments */
arg = cmd->scan_begin_arg;
dt2814_ns_to_timer(&arg, cmd->flags);
err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
if (err)
return 4;
return 0;
}
static int dt2814_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct comedi_cmd *cmd = &s->async->cmd;
int chan;
int trigvar;
dt2814_ai_clear(dev); /* clear stale data or error */
trigvar = dt2814_ns_to_timer(&cmd->scan_begin_arg, cmd->flags);
chan = CR_CHAN(cmd->chanlist[0]);
outb(chan | DT2814_ENB | (trigvar << 5), dev->iobase + DT2814_CSR);
return 0;
}
static int dt2814_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
unsigned int status;
unsigned long flags;
spin_lock_irqsave(&dev->spinlock, flags);
status = inb(dev->iobase + DT2814_CSR);
if (status & DT2814_ENB) {
/*
* Clear the timed trigger enable bit.
*
* Note: turning off timed mode triggers another
* sample. This will be mopped up by the calls to
* dt2814_ai_clear().
*/
outb(status & DT2814_CHANMASK, dev->iobase + DT2814_CSR);
}
spin_unlock_irqrestore(&dev->spinlock, flags);
return 0;
}
static irqreturn_t dt2814_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async;
unsigned int lo, hi;
unsigned short data;
unsigned int status;
if (!dev->attached) {
dev_err(dev->class_dev, "spurious interrupt\n");
return IRQ_HANDLED;
}
async = s->async;
spin_lock(&dev->spinlock);
status = inb(dev->iobase + DT2814_CSR);
if (!(status & DT2814_ENB)) {
/* Timed acquisition not enabled. Nothing to do. */
spin_unlock(&dev->spinlock);
return IRQ_HANDLED;
}
if (!(status & (DT2814_FINISH | DT2814_ERR))) {
/* Spurious interrupt? */
spin_unlock(&dev->spinlock);
return IRQ_HANDLED;
}
/* Read data or clear error. */
hi = inb(dev->iobase + DT2814_DATA);
lo = inb(dev->iobase + DT2814_DATA);
data = (hi << 4) | (lo >> 4);
if (status & DT2814_ERR) {
async->events |= COMEDI_CB_ERROR;
} else {
comedi_buf_write_samples(s, &data, 1);
if (async->cmd.stop_src == TRIG_COUNT &&
async->scans_done >= async->cmd.stop_arg) {
async->events |= COMEDI_CB_EOA;
}
}
if (async->events & COMEDI_CB_CANCEL_MASK) {
/*
* Disable timed mode.
*
* Note: turning off timed mode triggers another
* sample. This will be mopped up by the calls to
* dt2814_ai_clear().
*/
outb(status & DT2814_CHANMASK, dev->iobase + DT2814_CSR);
}
spin_unlock(&dev->spinlock);
comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
static int dt2814_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct comedi_subdevice *s;
int ret;
ret = comedi_request_region(dev, it->options[0], 0x2);
if (ret)
return ret;
outb(0, dev->iobase + DT2814_CSR);
if (dt2814_ai_clear(dev)) {
dev_err(dev->class_dev, "reset error (fatal)\n");
return -EIO;
}
if (it->options[1]) {
ret = request_irq(it->options[1], dt2814_interrupt, 0,
dev->board_name, dev);
if (ret == 0)
dev->irq = it->options[1];
}
ret = comedi_alloc_subdevices(dev, 1);
if (ret)
return ret;
s = &dev->subdevices[0];
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_GROUND;
s->n_chan = 16; /* XXX */
s->insn_read = dt2814_ai_insn_read;
s->maxdata = 0xfff;
s->range_table = &range_unknown; /* XXX */
if (dev->irq) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
s->len_chanlist = 1;
s->do_cmd = dt2814_ai_cmd;
s->do_cmdtest = dt2814_ai_cmdtest;
s->cancel = dt2814_ai_cancel;
}
return 0;
}
static void dt2814_detach(struct comedi_device *dev)
{
if (dev->irq) {
/*
* An extra conversion triggered on termination of an
* asynchronous command may still be in progress. Wait for
* it to finish and clear the data or error status.
*/
dt2814_ai_clear(dev);
}
comedi_legacy_detach(dev);
}
static struct comedi_driver dt2814_driver = {
.driver_name = "dt2814",
.module = THIS_MODULE,
.attach = dt2814_attach,
.detach = dt2814_detach,
};
module_comedi_driver(dt2814_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/dt2814.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* addi_apci_3501.c
* Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
* Project manager: Eric Stolz
*
* ADDI-DATA GmbH
* Dieselstrasse 3
* D-77833 Ottersweier
* Tel: +49(0)7223/9493-0
* Fax: +49(0)7223/9493-92
* http://www.addi-data.com
* [email protected]
*/
/*
* Driver: addi_apci_3501
* Description: ADDI-DATA APCI-3501 Analog output board
* Devices: [ADDI-DATA] APCI-3501 (addi_apci_3501)
* Author: H Hartley Sweeten <[email protected]>
* Updated: Mon, 20 Jun 2016 10:57:01 -0700
* Status: untested
*
* Configuration Options: not applicable, uses comedi PCI auto config
*
* This board has the following features:
* - 4 or 8 analog output channels
* - 2 optically isolated digital inputs
* - 2 optically isolated digital outputs
* - 1 12-bit watchdog/timer
*
* There are 2 versions of the APCI-3501:
* - APCI-3501-4 4 analog output channels
* - APCI-3501-8 8 analog output channels
*
* These boards use the same PCI Vendor/Device IDs. The number of output
* channels used by this driver is determined by reading the EEPROM on
* the board.
*
* The watchdog/timer subdevice is not currently supported.
*/
#include <linux/module.h>
#include <linux/comedi/comedi_pci.h>
#include "amcc_s5933.h"
/*
* PCI bar 1 register I/O map
*/
#define APCI3501_AO_CTRL_STATUS_REG 0x00
#define APCI3501_AO_CTRL_BIPOLAR BIT(0)
#define APCI3501_AO_STATUS_READY BIT(8)
#define APCI3501_AO_DATA_REG 0x04
#define APCI3501_AO_DATA_CHAN(x) ((x) << 0)
#define APCI3501_AO_DATA_VAL(x) ((x) << 8)
#define APCI3501_AO_DATA_BIPOLAR BIT(31)
#define APCI3501_AO_TRIG_SCS_REG 0x08
#define APCI3501_TIMER_BASE 0x20
#define APCI3501_DO_REG 0x40
#define APCI3501_DI_REG 0x50
/*
* AMCC S5933 NVRAM
*/
#define NVRAM_USER_DATA_START 0x100
#define NVCMD_BEGIN_READ (0x7 << 5)
#define NVCMD_LOAD_LOW (0x4 << 5)
#define NVCMD_LOAD_HIGH (0x5 << 5)
/*
* Function types stored in the eeprom
*/
#define EEPROM_DIGITALINPUT 0
#define EEPROM_DIGITALOUTPUT 1
#define EEPROM_ANALOGINPUT 2
#define EEPROM_ANALOGOUTPUT 3
#define EEPROM_TIMER 4
#define EEPROM_WATCHDOG 5
#define EEPROM_TIMER_WATCHDOG_COUNTER 10
struct apci3501_private {
unsigned long amcc;
unsigned char timer_mode;
};
static const struct comedi_lrange apci3501_ao_range = {
2, {
BIP_RANGE(10),
UNI_RANGE(10)
}
};
static int apci3501_wait_for_dac(struct comedi_device *dev)
{
unsigned int status;
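/*
 * Note: this polls the status register without a timeout; it will
 * spin indefinitely if the DAC never reports ready.
 */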
do {
status = inl(dev->iobase + APCI3501_AO_CTRL_STATUS_REG);
} while (!(status & APCI3501_AO_STATUS_READY));
return 0;
}
static int apci3501_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int range = CR_RANGE(insn->chanspec);
unsigned int cfg = APCI3501_AO_DATA_CHAN(chan);
int ret;
int i;
/*
* All analog output channels have the same output range.
* 14-bit bipolar: +/-10V
* 13-bit unipolar: 0-10V
* Changing the range of one channel changes all of them!
*/
if (range) {
outl(0, dev->iobase + APCI3501_AO_CTRL_STATUS_REG);
} else {
cfg |= APCI3501_AO_DATA_BIPOLAR;
outl(APCI3501_AO_CTRL_BIPOLAR,
dev->iobase + APCI3501_AO_CTRL_STATUS_REG);
}
for (i = 0; i < insn->n; i++) {
unsigned int val = data[i];
if (range == 1) {
if (data[i] > 0x1fff) {
dev_err(dev->class_dev,
"Unipolar resolution is only 13-bits\n");
return -EINVAL;
}
}
ret = apci3501_wait_for_dac(dev);
if (ret)
return ret;
outl(cfg | APCI3501_AO_DATA_VAL(val),
dev->iobase + APCI3501_AO_DATA_REG);
s->readback[chan] = val;
}
return insn->n;
}
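/*
 * Worked example of the data word written above: writing the mid-scale
 * value 0x2000 to channel 2 on the bipolar range (range index 0) gives
 * APCI3501_AO_DATA_CHAN(2) | APCI3501_AO_DATA_VAL(0x2000) |
 * APCI3501_AO_DATA_BIPOLAR = 0x80200002.
 */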
static int apci3501_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
data[1] = inl(dev->iobase + APCI3501_DI_REG) & 0x3;
return insn->n;
}
static int apci3501_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
s->state = inl(dev->iobase + APCI3501_DO_REG);
if (comedi_dio_update_state(s, data))
outl(s->state, dev->iobase + APCI3501_DO_REG);
data[1] = s->state;
return insn->n;
}
static void apci3501_eeprom_wait(unsigned long iobase)
{
unsigned char val;
do {
val = inb(iobase + AMCC_OP_REG_MCSR_NVCMD);
} while (val & 0x80);
}
static unsigned short apci3501_eeprom_readw(unsigned long iobase,
unsigned short addr)
{
unsigned short val = 0;
unsigned char tmp;
unsigned char i;
/* Add the offset to the start of the user data */
addr += NVRAM_USER_DATA_START;
for (i = 0; i < 2; i++) {
/* Load the low 8 bit address */
outb(NVCMD_LOAD_LOW, iobase + AMCC_OP_REG_MCSR_NVCMD);
apci3501_eeprom_wait(iobase);
outb((addr + i) & 0xff, iobase + AMCC_OP_REG_MCSR_NVDATA);
apci3501_eeprom_wait(iobase);
/* Load the high 8 bit address */
outb(NVCMD_LOAD_HIGH, iobase + AMCC_OP_REG_MCSR_NVCMD);
apci3501_eeprom_wait(iobase);
outb(((addr + i) >> 8) & 0xff,
iobase + AMCC_OP_REG_MCSR_NVDATA);
apci3501_eeprom_wait(iobase);
/* Read the eeprom data byte */
outb(NVCMD_BEGIN_READ, iobase + AMCC_OP_REG_MCSR_NVCMD);
apci3501_eeprom_wait(iobase);
tmp = inb(iobase + AMCC_OP_REG_MCSR_NVDATA);
apci3501_eeprom_wait(iobase);
if (i == 0)
val |= tmp;
else
val |= (tmp << 8);
}
return val;
}
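/*
 * Addressing example: addr is a byte offset into the user data area,
 * so apci3501_eeprom_readw(iobase, 10) reads NVRAM bytes 0x10a (low
 * byte) and 0x10b (high byte) and combines them little-endian.
 */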
static int apci3501_eeprom_get_ao_n_chan(struct comedi_device *dev)
{
struct apci3501_private *devpriv = dev->private;
unsigned char nfuncs;
int i;
nfuncs = apci3501_eeprom_readw(devpriv->amcc, 10) & 0xff;
/* Read functionality details */
for (i = 0; i < nfuncs; i++) {
unsigned short offset = i * 4;
unsigned short addr;
unsigned char func;
unsigned short val;
func = apci3501_eeprom_readw(devpriv->amcc, 12 + offset) & 0x3f;
addr = apci3501_eeprom_readw(devpriv->amcc, 14 + offset);
if (func == EEPROM_ANALOGOUTPUT) {
val = apci3501_eeprom_readw(devpriv->amcc, addr + 10);
return (val >> 4) & 0x3ff;
}
}
return 0;
}
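/*
 * EEPROM layout as assumed by the parser above: the low byte of the
 * word at user-data offset 10 holds the number of function headers;
 * each header is four bytes, with the function code in the low six
 * bits of the word at offset 12 + 4 * i and the parameter-block
 * address at 14 + 4 * i.  For an analog-output function, bits 4..13
 * of the word at (addr + 10) give the channel count.
 */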
static int apci3501_eeprom_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct apci3501_private *devpriv = dev->private;
unsigned short addr = CR_CHAN(insn->chanspec);
unsigned int val;
unsigned int i;
if (insn->n) {
/* No point reading the same EEPROM location more than once. */
val = apci3501_eeprom_readw(devpriv->amcc, 2 * addr);
for (i = 0; i < insn->n; i++)
data[i] = val;
}
return insn->n;
}
static int apci3501_reset(struct comedi_device *dev)
{
unsigned int val;
int chan;
int ret;
/* Reset all digital outputs to "0" */
outl(0x0, dev->iobase + APCI3501_DO_REG);
/* Default all analog outputs to 0V (bipolar) */
outl(APCI3501_AO_CTRL_BIPOLAR,
dev->iobase + APCI3501_AO_CTRL_STATUS_REG);
val = APCI3501_AO_DATA_BIPOLAR | APCI3501_AO_DATA_VAL(0);
/* Set all analog output channels */
for (chan = 0; chan < 8; chan++) {
ret = apci3501_wait_for_dac(dev);
if (ret) {
dev_warn(dev->class_dev,
"%s: DAC not-ready for channel %i\n",
__func__, chan);
} else {
outl(val | APCI3501_AO_DATA_CHAN(chan),
dev->iobase + APCI3501_AO_DATA_REG);
}
}
return 0;
}
static int apci3501_auto_attach(struct comedi_device *dev,
unsigned long context_unused)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct apci3501_private *devpriv;
struct comedi_subdevice *s;
int ao_n_chan;
int ret;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
ret = comedi_pci_enable(dev);
if (ret)
return ret;
devpriv->amcc = pci_resource_start(pcidev, 0);
dev->iobase = pci_resource_start(pcidev, 1);
ao_n_chan = apci3501_eeprom_get_ao_n_chan(dev);
ret = comedi_alloc_subdevices(dev, 5);
if (ret)
return ret;
/* Initialize the analog output subdevice */
s = &dev->subdevices[0];
if (ao_n_chan) {
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
s->n_chan = ao_n_chan;
s->maxdata = 0x3fff;
s->range_table = &apci3501_ao_range;
s->insn_write = apci3501_ao_insn_write;
ret = comedi_alloc_subdev_readback(s);
if (ret)
return ret;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
/* Initialize the digital input subdevice */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = 2;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = apci3501_di_insn_bits;
/* Initialize the digital output subdevice */
s = &dev->subdevices[2];
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 2;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = apci3501_do_insn_bits;
/* Timer/Watchdog subdevice */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_UNUSED;
/* Initialize the eeprom subdevice */
s = &dev->subdevices[4];
s->type = COMEDI_SUBD_MEMORY;
s->subdev_flags = SDF_READABLE | SDF_INTERNAL;
s->n_chan = 256;
s->maxdata = 0xffff;
s->insn_read = apci3501_eeprom_insn_read;
apci3501_reset(dev);
return 0;
}
static void apci3501_detach(struct comedi_device *dev)
{
if (dev->iobase)
apci3501_reset(dev);
comedi_pci_detach(dev);
}
static struct comedi_driver apci3501_driver = {
.driver_name = "addi_apci_3501",
.module = THIS_MODULE,
.auto_attach = apci3501_auto_attach,
.detach = apci3501_detach,
};
static int apci3501_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
return comedi_pci_auto_config(dev, &apci3501_driver, id->driver_data);
}
static const struct pci_device_id apci3501_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3001) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, apci3501_pci_table);
static struct pci_driver apci3501_pci_driver = {
.name = "addi_apci_3501",
.id_table = apci3501_pci_table,
.probe = apci3501_pci_probe,
.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(apci3501_driver, apci3501_pci_driver);
MODULE_DESCRIPTION("ADDI-DATA APCI-3501 Analog output board");
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/addi_apci_3501.c |
// SPDX-License-Identifier: GPL-2.0
/*
* comedi/drivers/adv_pci_dio.c
*
* Author: Michal Dobes <[email protected]>
*
* Hardware driver for Advantech PCI DIO cards.
*/
/*
* Driver: adv_pci_dio
* Description: Advantech Digital I/O Cards
* Devices: [Advantech] PCI-1730 (adv_pci_dio), PCI-1733,
* PCI-1734, PCI-1735U, PCI-1736UP, PCI-1739U, PCI-1750,
* PCI-1751, PCI-1752, PCI-1753, PCI-1753+PCI-1753E,
* PCI-1754, PCI-1756, PCI-1761, PCI-1762
* Author: Michal Dobes <[email protected]>
* Updated: Fri, 25 Aug 2017 07:23:06 +0300
* Status: untested
*
* Configuration Options: not applicable, uses PCI auto config
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/comedi/comedi_pci.h>
#include <linux/comedi/comedi_8255.h>
#include <linux/comedi/comedi_8254.h>
/*
* Register offset definitions
*/
/* PCI-1730, PCI-1733, PCI-1736 interrupt control registers */
#define PCI173X_INT_EN_REG 0x0008 /* R/W: enable/disable */
#define PCI173X_INT_RF_REG 0x000c /* R/W: falling/rising edge */
#define PCI173X_INT_FLAG_REG 0x0010 /* R: status */
#define PCI173X_INT_CLR_REG 0x0010 /* W: clear */
#define PCI173X_INT_IDI0 0x01 /* IDI0 edge occurred */
#define PCI173X_INT_IDI1 0x02 /* IDI1 edge occurred */
#define PCI173X_INT_DI0 0x04 /* DI0 edge occurred */
#define PCI173X_INT_DI1 0x08 /* DI1 edge occurred */
/* PCI-1739U, PCI-1750, PCI1751 interrupt control registers */
#define PCI1750_INT_REG 0x20 /* R/W: status/control */
/* PCI-1753, PCI-1753E interrupt control registers */
#define PCI1753_INT_REG(x) (0x10 + (x)) /* R/W: control group 0 to 3 */
#define PCI1753E_INT_REG(x) (0x30 + (x)) /* R/W: control group 0 to 3 */
/* PCI-1754, PCI-1756 interrupt control registers */
#define PCI1754_INT_REG(x) (0x08 + (x) * 2) /* R/W: control group 0 to 3 */
/* PCI-1752, PCI-1756 special registers */
#define PCI1752_CFC_REG 0x12 /* R/W: channel freeze function */
/* PCI-1761 interrupt control registers */
#define PCI1761_INT_EN_REG 0x03 /* R/W: enable/disable interrupts */
#define PCI1761_INT_RF_REG 0x04 /* R/W: falling/rising edge */
#define PCI1761_INT_CLR_REG 0x05 /* R/W: clear interrupts */
/* PCI-1762 interrupt control registers */
#define PCI1762_INT_REG 0x06 /* R/W: status/control */
/* maximum number of subdevice descriptions in the boardinfo */
#define PCI_DIO_MAX_DI_SUBDEVS 2 /* 2 x 8/16/32 input channels max */
#define PCI_DIO_MAX_DO_SUBDEVS 2 /* 2 x 8/16/32 output channels max */
#define PCI_DIO_MAX_DIO_SUBDEVG 2 /* 2 x any number of 8255 devices max */
#define PCI_DIO_MAX_IRQ_SUBDEVS 4 /* 4 x 1 input IRQ channels max */
enum pci_dio_boardid {
TYPE_PCI1730,
TYPE_PCI1733,
TYPE_PCI1734,
TYPE_PCI1735,
TYPE_PCI1736,
TYPE_PCI1739,
TYPE_PCI1750,
TYPE_PCI1751,
TYPE_PCI1752,
TYPE_PCI1753,
TYPE_PCI1753E,
TYPE_PCI1754,
TYPE_PCI1756,
TYPE_PCI1761,
TYPE_PCI1762
};
struct diosubd_data {
int chans; /* num of chans or 8255 devices */
unsigned long addr; /* PCI address offset */
};
struct dio_irq_subd_data {
unsigned short int_en; /* interrupt enable/status bit */
unsigned long addr; /* PCI address offset */
};
struct dio_boardtype {
const char *name; /* board name */
int nsubdevs;
struct diosubd_data sdi[PCI_DIO_MAX_DI_SUBDEVS];
struct diosubd_data sdo[PCI_DIO_MAX_DO_SUBDEVS];
struct diosubd_data sdio[PCI_DIO_MAX_DIO_SUBDEVG];
struct dio_irq_subd_data sdirq[PCI_DIO_MAX_IRQ_SUBDEVS];
unsigned long id_reg;
unsigned long timer_regbase;
unsigned int is_16bit:1;
};
static const struct dio_boardtype boardtypes[] = {
[TYPE_PCI1730] = {
.name = "pci1730",
/* DI, IDI, DO, IDO, ID, IRQ_DI0, IRQ_DI1, IRQ_IDI0, IRQ_IDI1 */
.nsubdevs = 9,
.sdi[0] = { 16, 0x02, }, /* DI 0-15 */
.sdi[1] = { 16, 0x00, }, /* ISO DI 0-15 */
.sdo[0] = { 16, 0x02, }, /* DO 0-15 */
.sdo[1] = { 16, 0x00, }, /* ISO DO 0-15 */
.id_reg = 0x04,
.sdirq[0] = { PCI173X_INT_DI0, 0x02, }, /* DI 0 */
.sdirq[1] = { PCI173X_INT_DI1, 0x02, }, /* DI 1 */
.sdirq[2] = { PCI173X_INT_IDI0, 0x00, }, /* ISO DI 0 */
.sdirq[3] = { PCI173X_INT_IDI1, 0x00, }, /* ISO DI 1 */
},
[TYPE_PCI1733] = {
.name = "pci1733",
.nsubdevs = 2,
.sdi[1] = { 32, 0x00, }, /* ISO DI 0-31 */
.id_reg = 0x04,
},
[TYPE_PCI1734] = {
.name = "pci1734",
.nsubdevs = 2,
.sdo[1] = { 32, 0x00, }, /* ISO DO 0-31 */
.id_reg = 0x04,
},
[TYPE_PCI1735] = {
.name = "pci1735",
.nsubdevs = 4,
.sdi[0] = { 32, 0x00, }, /* DI 0-31 */
.sdo[0] = { 32, 0x00, }, /* DO 0-31 */
.id_reg = 0x08,
.timer_regbase = 0x04,
},
[TYPE_PCI1736] = {
.name = "pci1736",
.nsubdevs = 3,
.sdi[1] = { 16, 0x00, }, /* ISO DI 0-15 */
.sdo[1] = { 16, 0x00, }, /* ISO DO 0-15 */
.id_reg = 0x04,
},
[TYPE_PCI1739] = {
.name = "pci1739",
.nsubdevs = 3,
.sdio[0] = { 2, 0x00, }, /* 8255 DIO */
.id_reg = 0x08,
},
[TYPE_PCI1750] = {
.name = "pci1750",
.nsubdevs = 2,
.sdi[1] = { 16, 0x00, }, /* ISO DI 0-15 */
.sdo[1] = { 16, 0x00, }, /* ISO DO 0-15 */
},
[TYPE_PCI1751] = {
.name = "pci1751",
.nsubdevs = 3,
.sdio[0] = { 2, 0x00, }, /* 8255 DIO */
.timer_regbase = 0x18,
},
[TYPE_PCI1752] = {
.name = "pci1752",
.nsubdevs = 3,
.sdo[0] = { 32, 0x00, }, /* DO 0-31 */
.sdo[1] = { 32, 0x04, }, /* DO 32-63 */
.id_reg = 0x10,
.is_16bit = 1,
},
[TYPE_PCI1753] = {
.name = "pci1753",
.nsubdevs = 4,
.sdio[0] = { 4, 0x00, }, /* 8255 DIO */
},
[TYPE_PCI1753E] = {
.name = "pci1753e",
.nsubdevs = 8,
.sdio[0] = { 4, 0x00, }, /* 8255 DIO */
.sdio[1] = { 4, 0x20, }, /* 8255 DIO */
},
[TYPE_PCI1754] = {
.name = "pci1754",
.nsubdevs = 3,
.sdi[0] = { 32, 0x00, }, /* DI 0-31 */
.sdi[1] = { 32, 0x04, }, /* DI 32-63 */
.id_reg = 0x10,
.is_16bit = 1,
},
[TYPE_PCI1756] = {
.name = "pci1756",
.nsubdevs = 3,
.sdi[1] = { 32, 0x00, }, /* DI 0-31 */
.sdo[1] = { 32, 0x04, }, /* DO 0-31 */
.id_reg = 0x10,
.is_16bit = 1,
},
[TYPE_PCI1761] = {
.name = "pci1761",
.nsubdevs = 3,
.sdi[1] = { 8, 0x01 }, /* ISO DI 0-7 */
.sdo[1] = { 8, 0x00 }, /* RELAY DO 0-7 */
.id_reg = 0x02,
},
[TYPE_PCI1762] = {
.name = "pci1762",
.nsubdevs = 3,
.sdi[1] = { 16, 0x02, }, /* ISO DI 0-15 */
.sdo[1] = { 16, 0x00, }, /* ISO DO 0-15 */
.id_reg = 0x04,
.is_16bit = 1,
},
};
struct pci_dio_dev_private_data {
int boardtype;
int irq_subd;
unsigned short int_ctrl;
unsigned short int_rf;
};
struct pci_dio_sd_private_data {
spinlock_t subd_slock; /* spin-lock for cmd_running */
unsigned long port_offset;
short int cmd_running;
};
static void process_irq(struct comedi_device *dev, unsigned int subdev,
unsigned char irqflags)
{
struct comedi_subdevice *s = &dev->subdevices[subdev];
struct pci_dio_sd_private_data *sd_priv = s->private;
unsigned long reg = sd_priv->port_offset;
struct comedi_async *async_p = s->async;
if (async_p) {
unsigned short val = inw(dev->iobase + reg);
spin_lock(&sd_priv->subd_slock);
if (sd_priv->cmd_running)
comedi_buf_write_samples(s, &val, 1);
spin_unlock(&sd_priv->subd_slock);
comedi_handle_events(dev, s);
}
}
static irqreturn_t pci_dio_interrupt(int irq, void *p_device)
{
struct comedi_device *dev = p_device;
struct pci_dio_dev_private_data *dev_private = dev->private;
const struct dio_boardtype *board = dev->board_ptr;
unsigned long cpu_flags;
unsigned char irqflags;
int i;
if (!dev->attached) {
/* Ignore interrupt before device fully attached. */
/* Might not even have allocated subdevices yet! */
return IRQ_NONE;
}
/* Check if we are source of interrupt */
spin_lock_irqsave(&dev->spinlock, cpu_flags);
irqflags = inb(dev->iobase + PCI173X_INT_FLAG_REG);
if (!(irqflags & 0x0F)) {
spin_unlock_irqrestore(&dev->spinlock, cpu_flags);
return IRQ_NONE;
}
/* clear all current interrupt flags */
outb(irqflags, dev->iobase + PCI173X_INT_CLR_REG);
spin_unlock_irqrestore(&dev->spinlock, cpu_flags);
/* check irq subdevice triggers */
for (i = 0; i < PCI_DIO_MAX_IRQ_SUBDEVS; i++) {
if (irqflags & board->sdirq[i].int_en)
process_irq(dev, dev_private->irq_subd + i, irqflags);
}
return IRQ_HANDLED;
}
static int pci_dio_asy_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
int err = 0;
/* Step 1 : check if triggers are trivially valid */
err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW);
err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_NONE);
if (err)
return 1;
/* Step 2a : make sure trigger sources are unique */
/* Step 2b : and mutually compatible */
/* Step 3: check if arguments are trivially valid */
err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
/*
* For scan_begin_arg, the trigger number must be 0 and the only
* allowed flags are CR_EDGE and CR_INVERT. CR_EDGE is ignored,
* CR_INVERT sets the trigger to falling edge.
*/
if (cmd->scan_begin_arg & ~(CR_EDGE | CR_INVERT)) {
cmd->scan_begin_arg &= (CR_EDGE | CR_INVERT);
err |= -EINVAL;
}
err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
if (err)
return 3;
/* Step 4: fix up any arguments */
/* Step 5: check channel list if it exists */
return 0;
}
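/*
 * A minimal sketch of a command accepted by the test above (assuming a
 * board with IRQ subdevices): start_src = TRIG_NOW with start_arg = 0,
 * scan_begin_src = TRIG_EXT with scan_begin_arg = CR_EDGE | CR_INVERT
 * for a falling edge (or 0 for a rising edge), convert_src =
 * TRIG_FOLLOW with convert_arg = 0, scan_end_src = TRIG_COUNT with
 * scan_end_arg = chanlist_len, and stop_src = TRIG_NONE with
 * stop_arg = 0.
 */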
static int pci_dio_asy_cmd(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct pci_dio_dev_private_data *dev_private = dev->private;
struct pci_dio_sd_private_data *sd_priv = s->private;
const struct dio_boardtype *board = dev->board_ptr;
struct comedi_cmd *cmd = &s->async->cmd;
unsigned long cpu_flags;
unsigned short int_en;
int_en = board->sdirq[s->index - dev_private->irq_subd].int_en;
spin_lock_irqsave(&dev->spinlock, cpu_flags);
if (cmd->scan_begin_arg & CR_INVERT)
dev_private->int_rf |= int_en; /* falling edge */
else
dev_private->int_rf &= ~int_en; /* rising edge */
outb(dev_private->int_rf, dev->iobase + PCI173X_INT_RF_REG);
dev_private->int_ctrl |= int_en; /* enable interrupt source */
outb(dev_private->int_ctrl, dev->iobase + PCI173X_INT_EN_REG);
spin_unlock_irqrestore(&dev->spinlock, cpu_flags);
spin_lock_irqsave(&sd_priv->subd_slock, cpu_flags);
sd_priv->cmd_running = 1;
spin_unlock_irqrestore(&sd_priv->subd_slock, cpu_flags);
return 0;
}
static int pci_dio_asy_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct pci_dio_dev_private_data *dev_private = dev->private;
struct pci_dio_sd_private_data *sd_priv = s->private;
const struct dio_boardtype *board = dev->board_ptr;
unsigned long cpu_flags;
unsigned short int_en;
spin_lock_irqsave(&sd_priv->subd_slock, cpu_flags);
sd_priv->cmd_running = 0;
spin_unlock_irqrestore(&sd_priv->subd_slock, cpu_flags);
int_en = board->sdirq[s->index - dev_private->irq_subd].int_en;
spin_lock_irqsave(&dev->spinlock, cpu_flags);
dev_private->int_ctrl &= ~int_en;
outb(dev_private->int_ctrl, dev->iobase + PCI173X_INT_EN_REG);
spin_unlock_irqrestore(&dev->spinlock, cpu_flags);
return 0;
}
/* same as _insn_bits_di_ because the IRQ-pins are the DI-ports */
static int pci_dio_insn_bits_dirq_b(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct pci_dio_sd_private_data *sd_priv = s->private;
unsigned long reg = (unsigned long)sd_priv->port_offset;
unsigned long iobase = dev->iobase + reg;
data[1] = inb(iobase);
return insn->n;
}
static int pci_dio_insn_bits_di_b(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned long reg = (unsigned long)s->private;
unsigned long iobase = dev->iobase + reg;
data[1] = inb(iobase);
if (s->n_chan > 8)
data[1] |= (inb(iobase + 1) << 8);
if (s->n_chan > 16)
data[1] |= (inb(iobase + 2) << 16);
if (s->n_chan > 24)
data[1] |= (inb(iobase + 3) << 24);
return insn->n;
}
static int pci_dio_insn_bits_di_w(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned long reg = (unsigned long)s->private;
unsigned long iobase = dev->iobase + reg;
data[1] = inw(iobase);
if (s->n_chan > 16)
data[1] |= (inw(iobase + 2) << 16);
return insn->n;
}
static int pci_dio_insn_bits_do_b(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned long reg = (unsigned long)s->private;
unsigned long iobase = dev->iobase + reg;
if (comedi_dio_update_state(s, data)) {
outb(s->state & 0xff, iobase);
if (s->n_chan > 8)
outb((s->state >> 8) & 0xff, iobase + 1);
if (s->n_chan > 16)
outb((s->state >> 16) & 0xff, iobase + 2);
if (s->n_chan > 24)
outb((s->state >> 24) & 0xff, iobase + 3);
}
data[1] = s->state;
return insn->n;
}
static int pci_dio_insn_bits_do_w(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned long reg = (unsigned long)s->private;
unsigned long iobase = dev->iobase + reg;
if (comedi_dio_update_state(s, data)) {
outw(s->state & 0xffff, iobase);
if (s->n_chan > 16)
outw((s->state >> 16) & 0xffff, iobase + 2);
}
data[1] = s->state;
return insn->n;
}
static int pci_dio_reset(struct comedi_device *dev, unsigned long cardtype)
{
struct pci_dio_dev_private_data *dev_private = dev->private;
/* disable channel freeze function on the PCI-1752/1756 boards */
if (cardtype == TYPE_PCI1752 || cardtype == TYPE_PCI1756)
outw(0, dev->iobase + PCI1752_CFC_REG);
/* disable and clear interrupts */
switch (cardtype) {
case TYPE_PCI1730:
case TYPE_PCI1733:
case TYPE_PCI1736:
dev_private->int_ctrl = 0x00;
outb(dev_private->int_ctrl, dev->iobase + PCI173X_INT_EN_REG);
/* Reset all 4 Int Flags */
outb(0x0f, dev->iobase + PCI173X_INT_CLR_REG);
/* Rising edge => IRQ, on all 4 pins */
dev_private->int_rf = 0x00;
outb(dev_private->int_rf, dev->iobase + PCI173X_INT_RF_REG);
break;
case TYPE_PCI1739:
case TYPE_PCI1750:
case TYPE_PCI1751:
outb(0x88, dev->iobase + PCI1750_INT_REG);
break;
case TYPE_PCI1753:
case TYPE_PCI1753E:
outb(0x88, dev->iobase + PCI1753_INT_REG(0));
outb(0x80, dev->iobase + PCI1753_INT_REG(1));
outb(0x80, dev->iobase + PCI1753_INT_REG(2));
outb(0x80, dev->iobase + PCI1753_INT_REG(3));
if (cardtype == TYPE_PCI1753E) {
outb(0x88, dev->iobase + PCI1753E_INT_REG(0));
outb(0x80, dev->iobase + PCI1753E_INT_REG(1));
outb(0x80, dev->iobase + PCI1753E_INT_REG(2));
outb(0x80, dev->iobase + PCI1753E_INT_REG(3));
}
break;
case TYPE_PCI1754:
case TYPE_PCI1756:
outw(0x08, dev->iobase + PCI1754_INT_REG(0));
outw(0x08, dev->iobase + PCI1754_INT_REG(1));
if (cardtype == TYPE_PCI1754) {
outw(0x08, dev->iobase + PCI1754_INT_REG(2));
outw(0x08, dev->iobase + PCI1754_INT_REG(3));
}
break;
case TYPE_PCI1761:
/* disable interrupts */
outb(0, dev->iobase + PCI1761_INT_EN_REG);
/* clear interrupts */
outb(0xff, dev->iobase + PCI1761_INT_CLR_REG);
/* set rising edge trigger */
outb(0, dev->iobase + PCI1761_INT_RF_REG);
break;
case TYPE_PCI1762:
outw(0x0101, dev->iobase + PCI1762_INT_REG);
break;
default:
break;
}
return 0;
}
static int pci_dio_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct dio_boardtype *board = NULL;
struct comedi_subdevice *s;
struct pci_dio_dev_private_data *dev_private;
int ret, subdev, i, j;
if (context < ARRAY_SIZE(boardtypes))
board = &boardtypes[context];
if (!board)
return -ENODEV;
dev->board_ptr = board;
dev->board_name = board->name;
dev_private = comedi_alloc_devpriv(dev, sizeof(*dev_private));
if (!dev_private)
return -ENOMEM;
ret = comedi_pci_enable(dev);
if (ret)
return ret;
if (context == TYPE_PCI1736)
dev->iobase = pci_resource_start(pcidev, 0);
else
dev->iobase = pci_resource_start(pcidev, 2);
dev_private->boardtype = context;
pci_dio_reset(dev, context);
/* request IRQ if device has irq subdevices */
if (board->sdirq[0].int_en && pcidev->irq) {
ret = request_irq(pcidev->irq, pci_dio_interrupt, IRQF_SHARED,
dev->board_name, dev);
if (ret == 0)
dev->irq = pcidev->irq;
}
ret = comedi_alloc_subdevices(dev, board->nsubdevs);
if (ret)
return ret;
subdev = 0;
for (i = 0; i < PCI_DIO_MAX_DI_SUBDEVS; i++) {
const struct diosubd_data *d = &board->sdi[i];
if (d->chans) {
s = &dev->subdevices[subdev++];
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = d->chans;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = board->is_16bit
? pci_dio_insn_bits_di_w
: pci_dio_insn_bits_di_b;
s->private = (void *)d->addr;
}
}
for (i = 0; i < PCI_DIO_MAX_DO_SUBDEVS; i++) {
const struct diosubd_data *d = &board->sdo[i];
if (d->chans) {
s = &dev->subdevices[subdev++];
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = d->chans;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = board->is_16bit
? pci_dio_insn_bits_do_w
: pci_dio_insn_bits_do_b;
s->private = (void *)d->addr;
/* reset all outputs to 0 */
if (board->is_16bit) {
outw(0, dev->iobase + d->addr);
if (s->n_chan > 16)
outw(0, dev->iobase + d->addr + 2);
} else {
outb(0, dev->iobase + d->addr);
if (s->n_chan > 8)
outb(0, dev->iobase + d->addr + 1);
if (s->n_chan > 16)
outb(0, dev->iobase + d->addr + 2);
if (s->n_chan > 24)
outb(0, dev->iobase + d->addr + 3);
}
}
}
for (i = 0; i < PCI_DIO_MAX_DIO_SUBDEVG; i++) {
const struct diosubd_data *d = &board->sdio[i];
for (j = 0; j < d->chans; j++) {
s = &dev->subdevices[subdev++];
ret = subdev_8255_init(dev, s, NULL,
d->addr + j * I8255_SIZE);
if (ret)
return ret;
}
}
if (board->id_reg) {
s = &dev->subdevices[subdev++];
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE | SDF_INTERNAL;
s->n_chan = 4;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = board->is_16bit ? pci_dio_insn_bits_di_w
: pci_dio_insn_bits_di_b;
s->private = (void *)board->id_reg;
}
if (board->timer_regbase) {
s = &dev->subdevices[subdev++];
dev->pacer = comedi_8254_init(dev->iobase +
board->timer_regbase,
0, I8254_IO8, 0);
if (!dev->pacer)
return -ENOMEM;
comedi_8254_subdevice_init(s, dev->pacer);
}
dev_private->irq_subd = subdev; /* first interrupt subdevice index */
for (i = 0; i < PCI_DIO_MAX_IRQ_SUBDEVS; ++i) {
struct pci_dio_sd_private_data *sd_priv = NULL;
const struct dio_irq_subd_data *d = &board->sdirq[i];
if (d->int_en) {
s = &dev->subdevices[subdev++];
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = 1;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = pci_dio_insn_bits_dirq_b;
sd_priv = comedi_alloc_spriv(s, sizeof(*sd_priv));
if (!sd_priv)
return -ENOMEM;
spin_lock_init(&sd_priv->subd_slock);
sd_priv->port_offset = d->addr;
sd_priv->cmd_running = 0;
if (dev->irq) {
dev->read_subdev = s;
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
s->len_chanlist = 1;
s->do_cmdtest = pci_dio_asy_cmdtest;
s->do_cmd = pci_dio_asy_cmd;
s->cancel = pci_dio_asy_cancel;
}
}
}
return 0;
}
static void pci_dio_detach(struct comedi_device *dev)
{
struct pci_dio_dev_private_data *dev_private = dev->private;
int boardtype = dev_private->boardtype;
if (dev->iobase)
pci_dio_reset(dev, boardtype);
comedi_pci_detach(dev);
}
static struct comedi_driver adv_pci_dio_driver = {
.driver_name = "adv_pci_dio",
.module = THIS_MODULE,
.auto_attach = pci_dio_auto_attach,
.detach = pci_dio_detach,
};
static unsigned long pci_dio_override_cardtype(struct pci_dev *pcidev,
unsigned long cardtype)
{
/*
* Change cardtype from TYPE_PCI1753 to TYPE_PCI1753E if expansion
* board available. Need to enable PCI device and request the main
* registers PCI BAR temporarily to perform the test.
*/
if (cardtype != TYPE_PCI1753)
return cardtype;
if (pci_enable_device(pcidev) < 0)
return cardtype;
if (pci_request_region(pcidev, 2, "adv_pci_dio") == 0) {
/*
* This test is based on Advantech's "advdaq" driver source
* (which declares its module licence as "GPL" although the
* driver source does not include a "COPYING" file).
*/
unsigned long reg = pci_resource_start(pcidev, 2) + 53;
outb(0x05, reg);
if ((inb(reg) & 0x07) == 0x02) {
outb(0x02, reg);
if ((inb(reg) & 0x07) == 0x05)
cardtype = TYPE_PCI1753E;
}
pci_release_region(pcidev, 2);
}
pci_disable_device(pcidev);
return cardtype;
}
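/*
 * Summary of the probe above: write 0x05 and expect 0x02 back in the
 * low three bits, then write 0x02 and expect 0x05 back.  Only if both
 * exchanges succeed is the expansion board considered present and the
 * cardtype promoted to TYPE_PCI1753E.
 */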
static int adv_pci_dio_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
unsigned long cardtype;
cardtype = pci_dio_override_cardtype(dev, id->driver_data);
return comedi_pci_auto_config(dev, &adv_pci_dio_driver, cardtype);
}
static const struct pci_device_id adv_pci_dio_pci_table[] = {
{ PCI_VDEVICE(ADVANTECH, 0x1730), TYPE_PCI1730 },
{ PCI_VDEVICE(ADVANTECH, 0x1733), TYPE_PCI1733 },
{ PCI_VDEVICE(ADVANTECH, 0x1734), TYPE_PCI1734 },
{ PCI_VDEVICE(ADVANTECH, 0x1735), TYPE_PCI1735 },
{ PCI_VDEVICE(ADVANTECH, 0x1736), TYPE_PCI1736 },
{ PCI_VDEVICE(ADVANTECH, 0x1739), TYPE_PCI1739 },
{ PCI_VDEVICE(ADVANTECH, 0x1750), TYPE_PCI1750 },
{ PCI_VDEVICE(ADVANTECH, 0x1751), TYPE_PCI1751 },
{ PCI_VDEVICE(ADVANTECH, 0x1752), TYPE_PCI1752 },
{ PCI_VDEVICE(ADVANTECH, 0x1753), TYPE_PCI1753 },
{ PCI_VDEVICE(ADVANTECH, 0x1754), TYPE_PCI1754 },
{ PCI_VDEVICE(ADVANTECH, 0x1756), TYPE_PCI1756 },
{ PCI_VDEVICE(ADVANTECH, 0x1761), TYPE_PCI1761 },
{ PCI_VDEVICE(ADVANTECH, 0x1762), TYPE_PCI1762 },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, adv_pci_dio_pci_table);
static struct pci_driver adv_pci_dio_pci_driver = {
.name = "adv_pci_dio",
.id_table = adv_pci_dio_pci_table,
.probe = adv_pci_dio_pci_probe,
.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(adv_pci_dio_driver, adv_pci_dio_pci_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi driver for Advantech Digital I/O Cards");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/adv_pci_dio.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_labpc_pci.c
* Driver for National Instruments Lab-PC PCI-1200
* Copyright (C) 2001, 2002, 2003 Frank Mori Hess <[email protected]>
*/
/*
* Driver: ni_labpc_pci
* Description: National Instruments Lab-PC PCI-1200
* Devices: [National Instruments] PCI-1200 (ni_pci-1200)
* Author: Frank Mori Hess <[email protected]>
* Status: works
*
* This is the PCI-specific support split off from the ni_labpc driver.
*
* Configuration Options: not applicable, uses PCI auto config
*
* NI manuals:
* 340914a (pci-1200)
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/comedi/comedi_pci.h>
#include "ni_labpc.h"
enum labpc_pci_boardid {
BOARD_NI_PCI1200,
};
static const struct labpc_boardinfo labpc_pci_boards[] = {
[BOARD_NI_PCI1200] = {
.name = "ni_pci-1200",
.ai_speed = 10000,
.ai_scan_up = 1,
.has_ao = 1,
.is_labpc1200 = 1,
},
};
/* ripped from mite.h and mite_setup2() to avoid mite dependency */
#define MITE_IODWBSR 0xc0 /* IO Device Window Base Size Register */
#define WENAB BIT(7) /* window enable */
static int labpc_pci_mite_init(struct pci_dev *pcidev)
{
void __iomem *mite_base;
u32 main_phys_addr;
/* ioremap the MITE registers (BAR 0) temporarily */
mite_base = pci_ioremap_bar(pcidev, 0);
if (!mite_base)
return -ENOMEM;
/* set data window to main registers (BAR 1) */
main_phys_addr = pci_resource_start(pcidev, 1);
writel(main_phys_addr | WENAB, mite_base + MITE_IODWBSR);
/* finished with MITE registers */
iounmap(mite_base);
return 0;
}
static int labpc_pci_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct labpc_boardinfo *board = NULL;
int ret;
if (context < ARRAY_SIZE(labpc_pci_boards))
board = &labpc_pci_boards[context];
if (!board)
return -ENODEV;
dev->board_ptr = board;
dev->board_name = board->name;
ret = comedi_pci_enable(dev);
if (ret)
return ret;
ret = labpc_pci_mite_init(pcidev);
if (ret)
return ret;
dev->mmio = pci_ioremap_bar(pcidev, 1);
if (!dev->mmio)
return -ENOMEM;
return labpc_common_attach(dev, pcidev->irq, IRQF_SHARED);
}
static void labpc_pci_detach(struct comedi_device *dev)
{
labpc_common_detach(dev);
comedi_pci_detach(dev);
}
static struct comedi_driver labpc_pci_comedi_driver = {
.driver_name = "labpc_pci",
.module = THIS_MODULE,
.auto_attach = labpc_pci_auto_attach,
.detach = labpc_pci_detach,
};
static const struct pci_device_id labpc_pci_table[] = {
{ PCI_VDEVICE(NI, 0x161), BOARD_NI_PCI1200 },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, labpc_pci_table);
static int labpc_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
return comedi_pci_auto_config(dev, &labpc_pci_comedi_driver,
id->driver_data);
}
static struct pci_driver labpc_pci_driver = {
.name = "labpc_pci",
.id_table = labpc_pci_table,
.probe = labpc_pci_probe,
.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(labpc_pci_comedi_driver, labpc_pci_driver);
MODULE_DESCRIPTION("Comedi: National Instruments Lab-PC PCI-1200 driver");
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/ni_labpc_pci.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Support for NI general purpose counters
*
* Copyright (C) 2006 Frank Mori Hess <[email protected]>
*/
/*
* Module: ni_tio
* Description: National Instruments general purpose counters
* Author: J.P. Mellor <[email protected]>,
* [email protected],
* [email protected],
* [email protected],
* Frank Mori Hess <[email protected]>
* Updated: Thu Nov 16 09:50:32 EST 2006
* Status: works
*
* This module is not used directly by end-users. Rather, it
* is used by other drivers (for example ni_660x and ni_pcimio)
* to provide support for NI's general purpose counters. It was
* originally based on the counter code from ni_660x.c and
* ni_mio_common.c.
*
* References:
* DAQ 660x Register-Level Programmer Manual (NI 370505A-01)
* DAQ 6601/6602 User Manual (NI 322137B-01)
* 340934b.pdf DAQ-STC reference manual
*
* TODO: Support use of both banks X and Y
*/
#include <linux/module.h>
#include <linux/slab.h>
#include "ni_tio_internal.h"
/*
* clock sources for ni e and m series boards,
* get bits with GI_SRC_SEL()
*/
#define NI_M_TIMEBASE_1_CLK 0x0 /* 20MHz */
#define NI_M_PFI_CLK(x) (((x) < 10) ? (1 + (x)) : (0xb + (x)))
#define NI_M_RTSI_CLK(x) (((x) == 7) ? 0x1b : (0xb + (x)))
#define NI_M_TIMEBASE_2_CLK 0x12 /* 100KHz */
#define NI_M_NEXT_TC_CLK 0x13
#define NI_M_NEXT_GATE_CLK 0x14 /* Gi_Src_SubSelect=0 */
#define NI_M_PXI_STAR_TRIGGER_CLK 0x14 /* Gi_Src_SubSelect=1 */
#define NI_M_PXI10_CLK 0x1d
#define NI_M_TIMEBASE_3_CLK 0x1e /* 80MHz, Gi_Src_SubSelect=0 */
#define NI_M_ANALOG_TRIGGER_OUT_CLK 0x1e /* Gi_Src_SubSelect=1 */
#define NI_M_LOGIC_LOW_CLK 0x1f
#define NI_M_MAX_PFI_CHAN 15
#define NI_M_MAX_RTSI_CHAN 7
/*
* clock sources for ni_660x boards,
* get bits with GI_SRC_SEL()
*/
#define NI_660X_TIMEBASE_1_CLK 0x0 /* 20MHz */
#define NI_660X_SRC_PIN_I_CLK 0x1
#define NI_660X_SRC_PIN_CLK(x) (0x2 + (x))
#define NI_660X_NEXT_GATE_CLK 0xa
#define NI_660X_RTSI_CLK(x) (0xb + (x))
#define NI_660X_TIMEBASE_2_CLK 0x12 /* 100KHz */
#define NI_660X_NEXT_TC_CLK 0x13
#define NI_660X_TIMEBASE_3_CLK 0x1e /* 80MHz */
#define NI_660X_LOGIC_LOW_CLK 0x1f
#define NI_660X_MAX_SRC_PIN 7
#define NI_660X_MAX_RTSI_CHAN 6
/* ni m series gate_select */
#define NI_M_TIMESTAMP_MUX_GATE_SEL 0x0
#define NI_M_PFI_GATE_SEL(x) (((x) < 10) ? (1 + (x)) : (0xb + (x)))
#define NI_M_RTSI_GATE_SEL(x) (((x) == 7) ? 0x1b : (0xb + (x)))
#define NI_M_AI_START2_GATE_SEL 0x12
#define NI_M_PXI_STAR_TRIGGER_GATE_SEL 0x13
#define NI_M_NEXT_OUT_GATE_SEL 0x14
#define NI_M_AI_START1_GATE_SEL 0x1c
#define NI_M_NEXT_SRC_GATE_SEL 0x1d
#define NI_M_ANALOG_TRIG_OUT_GATE_SEL 0x1e
#define NI_M_LOGIC_LOW_GATE_SEL 0x1f
/* ni_660x gate select */
#define NI_660X_SRC_PIN_I_GATE_SEL 0x0
#define NI_660X_GATE_PIN_I_GATE_SEL 0x1
#define NI_660X_PIN_GATE_SEL(x) (0x2 + (x))
#define NI_660X_NEXT_SRC_GATE_SEL 0xa
#define NI_660X_RTSI_GATE_SEL(x) (0xb + (x))
#define NI_660X_NEXT_OUT_GATE_SEL 0x14
#define NI_660X_LOGIC_LOW_GATE_SEL 0x1f
#define NI_660X_MAX_GATE_PIN 7
/* ni_660x second gate select */
#define NI_660X_SRC_PIN_I_GATE2_SEL 0x0
#define NI_660X_UD_PIN_I_GATE2_SEL 0x1
#define NI_660X_UD_PIN_GATE2_SEL(x) (0x2 + (x))
#define NI_660X_NEXT_SRC_GATE2_SEL 0xa
#define NI_660X_RTSI_GATE2_SEL(x) (0xb + (x))
#define NI_660X_NEXT_OUT_GATE2_SEL 0x14
#define NI_660X_SELECTED_GATE2_SEL 0x1e
#define NI_660X_LOGIC_LOW_GATE2_SEL 0x1f
#define NI_660X_MAX_UP_DOWN_PIN 7
static inline unsigned int GI_PRESCALE_X2(enum ni_gpct_variant variant)
{
switch (variant) {
case ni_gpct_variant_e_series:
default:
return 0;
case ni_gpct_variant_m_series:
return GI_M_PRESCALE_X2;
case ni_gpct_variant_660x:
return GI_660X_PRESCALE_X2;
}
}
static inline unsigned int GI_PRESCALE_X8(enum ni_gpct_variant variant)
{
switch (variant) {
case ni_gpct_variant_e_series:
default:
return 0;
case ni_gpct_variant_m_series:
return GI_M_PRESCALE_X8;
case ni_gpct_variant_660x:
return GI_660X_PRESCALE_X8;
}
}
static bool ni_tio_has_gate2_registers(const struct ni_gpct_device *counter_dev)
{
switch (counter_dev->variant) {
case ni_gpct_variant_e_series:
default:
return false;
case ni_gpct_variant_m_series:
case ni_gpct_variant_660x:
return true;
}
}
/**
* ni_tio_write() - Write a TIO register using the driver provided callback.
* @counter: struct ni_gpct counter.
* @value: the value to write
* @reg: the register to write.
*/
void ni_tio_write(struct ni_gpct *counter, unsigned int value,
enum ni_gpct_register reg)
{
if (reg < NITIO_NUM_REGS)
counter->counter_dev->write(counter, value, reg);
}
EXPORT_SYMBOL_GPL(ni_tio_write);
/**
* ni_tio_read() - Read a TIO register using the driver provided callback.
* @counter: struct ni_gpct counter.
* @reg: the register to read.
*/
unsigned int ni_tio_read(struct ni_gpct *counter, enum ni_gpct_register reg)
{
if (reg < NITIO_NUM_REGS)
return counter->counter_dev->read(counter, reg);
return 0;
}
EXPORT_SYMBOL_GPL(ni_tio_read);
static void ni_tio_reset_count_and_disarm(struct ni_gpct *counter)
{
unsigned int cidx = counter->counter_index;
ni_tio_write(counter, GI_RESET(cidx), NITIO_RESET_REG(cidx));
}
static int ni_tio_clock_period_ps(const struct ni_gpct *counter,
unsigned int generic_clock_source,
u64 *period_ps)
{
u64 clock_period_ps;
switch (generic_clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK) {
case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
clock_period_ps = 50000;
break;
case NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS:
clock_period_ps = 10000000;
break;
case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS:
clock_period_ps = 12500;
break;
case NI_GPCT_PXI10_CLOCK_SRC_BITS:
clock_period_ps = 100000;
break;
default:
/*
* clock period is specified by user with prescaling
* already taken into account.
*/
*period_ps = counter->clock_period_ps;
return 0;
}
switch (generic_clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK) {
case NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS:
break;
case NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS:
clock_period_ps *= 2;
break;
case NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS:
clock_period_ps *= 8;
break;
default:
return -EINVAL;
}
*period_ps = clock_period_ps;
return 0;
}
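/*
 * Worked example for the mapping above: TIMEBASE_1 (20 MHz) has a base
 * period of 50000 ps; selecting the x8 prescaler multiplies that to
 * 400000 ps, i.e. an effective count rate of 2.5 MHz.
 */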
static void ni_tio_set_bits_transient(struct ni_gpct *counter,
enum ni_gpct_register reg,
unsigned int mask, unsigned int value,
unsigned int transient)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int chip = counter->chip_index;
unsigned long flags;
if (reg < NITIO_NUM_REGS && chip < counter_dev->num_chips) {
unsigned int *regs = counter_dev->regs[chip];
spin_lock_irqsave(&counter_dev->regs_lock, flags);
regs[reg] &= ~mask;
regs[reg] |= (value & mask);
ni_tio_write(counter, regs[reg] | transient, reg);
spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
}
}
/**
* ni_tio_set_bits() - Safely write a counter register.
* @counter: struct ni_gpct counter.
* @reg: the register to write.
* @mask: the bits to change.
* @value: the new bits value.
*
* Used to write to, and update the software copy, a register whose bits may
* be twiddled in interrupt context, or whose software copy may be read in
* interrupt context.
*/
void ni_tio_set_bits(struct ni_gpct *counter, enum ni_gpct_register reg,
unsigned int mask, unsigned int value)
{
ni_tio_set_bits_transient(counter, reg, mask, value, 0x0);
}
EXPORT_SYMBOL_GPL(ni_tio_set_bits);
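/*
 * A minimal usage sketch (cf. ni_tio_set_counter_mode() below), where
 * cidx is the counter index: switch the counter to level gating by
 * changing only the gating-mode field of the mode register:
 *
 *	ni_tio_set_bits(counter, NITIO_MODE_REG(cidx),
 *			GI_GATING_MODE_MASK, GI_LEVEL_GATING);
 */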
/**
* ni_tio_get_soft_copy() - Safely read the software copy of a counter register.
* @counter: struct ni_gpct counter.
* @reg: the register to read.
*
* Used to get the software copy of a register whose bits might be modified
* in interrupt context, or whose software copy might need to be read in
* interrupt context.
*/
unsigned int ni_tio_get_soft_copy(const struct ni_gpct *counter,
enum ni_gpct_register reg)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int chip = counter->chip_index;
unsigned int value = 0;
unsigned long flags;
if (reg < NITIO_NUM_REGS && chip < counter_dev->num_chips) {
spin_lock_irqsave(&counter_dev->regs_lock, flags);
value = counter_dev->regs[chip][reg];
spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
}
return value;
}
EXPORT_SYMBOL_GPL(ni_tio_get_soft_copy);
static unsigned int ni_tio_clock_src_modifiers(const struct ni_gpct *counter)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int cidx = counter->counter_index;
unsigned int counting_mode_bits =
ni_tio_get_soft_copy(counter, NITIO_CNT_MODE_REG(cidx));
unsigned int bits = 0;
if (ni_tio_get_soft_copy(counter, NITIO_INPUT_SEL_REG(cidx)) &
GI_SRC_POL_INVERT)
bits |= NI_GPCT_INVERT_CLOCK_SRC_BIT;
if (counting_mode_bits & GI_PRESCALE_X2(counter_dev->variant))
bits |= NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS;
if (counting_mode_bits & GI_PRESCALE_X8(counter_dev->variant))
bits |= NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS;
return bits;
}
static int ni_m_series_clock_src_select(const struct ni_gpct *counter,
unsigned int *clk_src)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int cidx = counter->counter_index;
unsigned int chip = counter->chip_index;
unsigned int second_gate_reg = NITIO_GATE2_REG(cidx);
unsigned int clock_source = 0;
unsigned int src;
unsigned int i;
src = GI_BITS_TO_SRC(ni_tio_get_soft_copy(counter,
NITIO_INPUT_SEL_REG(cidx)));
switch (src) {
case NI_M_TIMEBASE_1_CLK:
clock_source = NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS;
break;
case NI_M_TIMEBASE_2_CLK:
clock_source = NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS;
break;
case NI_M_TIMEBASE_3_CLK:
if (counter_dev->regs[chip][second_gate_reg] & GI_SRC_SUBSEL)
clock_source =
NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS;
else
clock_source = NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS;
break;
case NI_M_LOGIC_LOW_CLK:
clock_source = NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS;
break;
case NI_M_NEXT_GATE_CLK:
if (counter_dev->regs[chip][second_gate_reg] & GI_SRC_SUBSEL)
clock_source = NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS;
else
clock_source = NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS;
break;
case NI_M_PXI10_CLK:
clock_source = NI_GPCT_PXI10_CLOCK_SRC_BITS;
break;
case NI_M_NEXT_TC_CLK:
clock_source = NI_GPCT_NEXT_TC_CLOCK_SRC_BITS;
break;
default:
for (i = 0; i <= NI_M_MAX_RTSI_CHAN; ++i) {
if (src == NI_M_RTSI_CLK(i)) {
clock_source = NI_GPCT_RTSI_CLOCK_SRC_BITS(i);
break;
}
}
if (i <= NI_M_MAX_RTSI_CHAN)
break;
for (i = 0; i <= NI_M_MAX_PFI_CHAN; ++i) {
if (src == NI_M_PFI_CLK(i)) {
clock_source = NI_GPCT_PFI_CLOCK_SRC_BITS(i);
break;
}
}
if (i <= NI_M_MAX_PFI_CHAN)
break;
return -EINVAL;
}
clock_source |= ni_tio_clock_src_modifiers(counter);
*clk_src = clock_source;
return 0;
}
static int ni_660x_clock_src_select(const struct ni_gpct *counter,
unsigned int *clk_src)
{
unsigned int clock_source = 0;
unsigned int cidx = counter->counter_index;
unsigned int src;
unsigned int i;
src = GI_BITS_TO_SRC(ni_tio_get_soft_copy(counter,
NITIO_INPUT_SEL_REG(cidx)));
switch (src) {
case NI_660X_TIMEBASE_1_CLK:
clock_source = NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS;
break;
case NI_660X_TIMEBASE_2_CLK:
clock_source = NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS;
break;
case NI_660X_TIMEBASE_3_CLK:
clock_source = NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS;
break;
case NI_660X_LOGIC_LOW_CLK:
clock_source = NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS;
break;
case NI_660X_SRC_PIN_I_CLK:
clock_source = NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS;
break;
case NI_660X_NEXT_GATE_CLK:
clock_source = NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS;
break;
case NI_660X_NEXT_TC_CLK:
clock_source = NI_GPCT_NEXT_TC_CLOCK_SRC_BITS;
break;
default:
for (i = 0; i <= NI_660X_MAX_RTSI_CHAN; ++i) {
if (src == NI_660X_RTSI_CLK(i)) {
clock_source = NI_GPCT_RTSI_CLOCK_SRC_BITS(i);
break;
}
}
if (i <= NI_660X_MAX_RTSI_CHAN)
break;
for (i = 0; i <= NI_660X_MAX_SRC_PIN; ++i) {
if (src == NI_660X_SRC_PIN_CLK(i)) {
clock_source =
NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(i);
break;
}
}
if (i <= NI_660X_MAX_SRC_PIN)
break;
return -EINVAL;
}
clock_source |= ni_tio_clock_src_modifiers(counter);
*clk_src = clock_source;
return 0;
}
static int ni_tio_generic_clock_src_select(const struct ni_gpct *counter,
unsigned int *clk_src)
{
switch (counter->counter_dev->variant) {
case ni_gpct_variant_e_series:
case ni_gpct_variant_m_series:
default:
return ni_m_series_clock_src_select(counter, clk_src);
case ni_gpct_variant_660x:
return ni_660x_clock_src_select(counter, clk_src);
}
}
static void ni_tio_set_sync_mode(struct ni_gpct *counter)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int cidx = counter->counter_index;
static const u64 min_normal_sync_period_ps = 25000;
unsigned int mask = 0;
unsigned int bits = 0;
unsigned int reg;
unsigned int mode;
unsigned int clk_src = 0;
u64 ps = 0;
int ret;
bool force_alt_sync;
/* only m series and 660x variants have counting mode registers */
switch (counter_dev->variant) {
case ni_gpct_variant_e_series:
default:
return;
case ni_gpct_variant_m_series:
mask = GI_M_ALT_SYNC;
break;
case ni_gpct_variant_660x:
mask = GI_660X_ALT_SYNC;
break;
}
reg = NITIO_CNT_MODE_REG(cidx);
mode = ni_tio_get_soft_copy(counter, reg);
switch (mode & GI_CNT_MODE_MASK) {
case GI_CNT_MODE_QUADX1:
case GI_CNT_MODE_QUADX2:
case GI_CNT_MODE_QUADX4:
case GI_CNT_MODE_SYNC_SRC:
force_alt_sync = true;
break;
default:
force_alt_sync = false;
break;
}
ret = ni_tio_generic_clock_src_select(counter, &clk_src);
if (ret)
return;
ret = ni_tio_clock_period_ps(counter, clk_src, &ps);
if (ret)
return;
/*
* It's not clear what we should do if clock_period is unknown, so we
* are not using the alt sync bit in that case.
*/
if (force_alt_sync || (ps && ps < min_normal_sync_period_ps))
bits = mask;
ni_tio_set_bits(counter, reg, mask, bits);
}
static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned int mode)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int cidx = counter->counter_index;
unsigned int mode_reg_mask;
unsigned int mode_reg_values;
unsigned int input_select_bits = 0;
/* these bits map directly on to the mode register */
static const unsigned int mode_reg_direct_mask =
NI_GPCT_GATE_ON_BOTH_EDGES_BIT | NI_GPCT_EDGE_GATE_MODE_MASK |
NI_GPCT_STOP_MODE_MASK | NI_GPCT_OUTPUT_MODE_MASK |
NI_GPCT_HARDWARE_DISARM_MASK | NI_GPCT_LOADING_ON_TC_BIT |
NI_GPCT_LOADING_ON_GATE_BIT | NI_GPCT_LOAD_B_SELECT_BIT;
mode_reg_mask = mode_reg_direct_mask | GI_RELOAD_SRC_SWITCHING;
mode_reg_values = mode & mode_reg_direct_mask;
switch (mode & NI_GPCT_RELOAD_SOURCE_MASK) {
case NI_GPCT_RELOAD_SOURCE_FIXED_BITS:
break;
case NI_GPCT_RELOAD_SOURCE_SWITCHING_BITS:
mode_reg_values |= GI_RELOAD_SRC_SWITCHING;
break;
case NI_GPCT_RELOAD_SOURCE_GATE_SELECT_BITS:
input_select_bits |= GI_GATE_SEL_LOAD_SRC;
mode_reg_mask |= GI_GATING_MODE_MASK;
mode_reg_values |= GI_LEVEL_GATING;
break;
default:
break;
}
ni_tio_set_bits(counter, NITIO_MODE_REG(cidx),
mode_reg_mask, mode_reg_values);
if (ni_tio_counting_mode_registers_present(counter_dev)) {
unsigned int bits = 0;
bits |= GI_CNT_MODE(mode >> NI_GPCT_COUNTING_MODE_SHIFT);
bits |= GI_INDEX_PHASE((mode >> NI_GPCT_INDEX_PHASE_BITSHIFT));
if (mode & NI_GPCT_INDEX_ENABLE_BIT)
bits |= GI_INDEX_MODE;
ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx),
GI_CNT_MODE_MASK | GI_INDEX_PHASE_MASK |
GI_INDEX_MODE, bits);
ni_tio_set_sync_mode(counter);
}
ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), GI_CNT_DIR_MASK,
GI_CNT_DIR(mode >> NI_GPCT_COUNTING_DIRECTION_SHIFT));
if (mode & NI_GPCT_OR_GATE_BIT)
input_select_bits |= GI_OR_GATE;
if (mode & NI_GPCT_INVERT_OUTPUT_BIT)
input_select_bits |= GI_OUTPUT_POL_INVERT;
ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx),
GI_GATE_SEL_LOAD_SRC | GI_OR_GATE |
GI_OUTPUT_POL_INVERT, input_select_bits);
return 0;
}
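/*
 * ni_tio_arm() - Arm or disarm a counter.
 * @counter: the counter to arm or disarm.
 * @arm: true to arm, false to disarm.
 * @start_trigger: NI_GPCT_ARM_IMMEDIATE or NI_GPCT_ARM_PAIRED_IMMEDIATE to
 *	strobe the arm bit(s) in software; values with NI_GPCT_ARM_UNKNOWN
 *	set select a hardware arm trigger on m-series/660x (the variants
 *	with counting mode registers); anything else fails with -EINVAL.
 *
 * For example, a caller arming immediately would use:
 *	ni_tio_arm(counter, true, NI_GPCT_ARM_IMMEDIATE);
 */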
int ni_tio_arm(struct ni_gpct *counter, bool arm, unsigned int start_trigger)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int cidx = counter->counter_index;
unsigned int transient_bits = 0;
if (arm) {
unsigned int mask = 0;
unsigned int bits = 0;
/* only m series and 660x have counting mode registers */
switch (counter_dev->variant) {
case ni_gpct_variant_e_series:
default:
break;
case ni_gpct_variant_m_series:
mask = GI_M_HW_ARM_SEL_MASK;
break;
case ni_gpct_variant_660x:
mask = GI_660X_HW_ARM_SEL_MASK;
break;
}
switch (start_trigger) {
case NI_GPCT_ARM_IMMEDIATE:
transient_bits |= GI_ARM;
break;
case NI_GPCT_ARM_PAIRED_IMMEDIATE:
transient_bits |= GI_ARM | GI_ARM_COPY;
break;
default:
			/*
			 * For m-series and 660x, pass the least significant
			 * bits of start_trigger through so the hardware arm
			 * select can be decoded from them later.
			 */
if (mask && (start_trigger & NI_GPCT_ARM_UNKNOWN)) {
bits |= GI_HW_ARM_ENA |
(GI_HW_ARM_SEL(start_trigger) & mask);
} else {
return -EINVAL;
}
break;
}
if (mask)
ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx),
GI_HW_ARM_ENA | mask, bits);
} else {
transient_bits |= GI_DISARM;
}
ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx),
0, 0, transient_bits);
return 0;
}
EXPORT_SYMBOL_GPL(ni_tio_arm);
static int ni_660x_clk_src(unsigned int clock_source, unsigned int *bits)
{
unsigned int clk_src = clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
unsigned int ni_660x_clock;
unsigned int i;
switch (clk_src) {
case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
ni_660x_clock = NI_660X_TIMEBASE_1_CLK;
break;
case NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS:
ni_660x_clock = NI_660X_TIMEBASE_2_CLK;
break;
case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS:
ni_660x_clock = NI_660X_TIMEBASE_3_CLK;
break;
case NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS:
ni_660x_clock = NI_660X_LOGIC_LOW_CLK;
break;
case NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS:
ni_660x_clock = NI_660X_SRC_PIN_I_CLK;
break;
case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS:
ni_660x_clock = NI_660X_NEXT_GATE_CLK;
break;
case NI_GPCT_NEXT_TC_CLOCK_SRC_BITS:
ni_660x_clock = NI_660X_NEXT_TC_CLK;
break;
default:
for (i = 0; i <= NI_660X_MAX_RTSI_CHAN; ++i) {
if (clk_src == NI_GPCT_RTSI_CLOCK_SRC_BITS(i)) {
ni_660x_clock = NI_660X_RTSI_CLK(i);
break;
}
}
if (i <= NI_660X_MAX_RTSI_CHAN)
break;
for (i = 0; i <= NI_660X_MAX_SRC_PIN; ++i) {
if (clk_src == NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(i)) {
ni_660x_clock = NI_660X_SRC_PIN_CLK(i);
break;
}
}
if (i <= NI_660X_MAX_SRC_PIN)
break;
return -EINVAL;
}
*bits = GI_SRC_SEL(ni_660x_clock);
return 0;
}
static int ni_m_clk_src(unsigned int clock_source, unsigned int *bits)
{
unsigned int clk_src = clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
unsigned int ni_m_series_clock;
unsigned int i;
switch (clk_src) {
case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
ni_m_series_clock = NI_M_TIMEBASE_1_CLK;
break;
case NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS:
ni_m_series_clock = NI_M_TIMEBASE_2_CLK;
break;
case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS:
ni_m_series_clock = NI_M_TIMEBASE_3_CLK;
break;
case NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS:
ni_m_series_clock = NI_M_LOGIC_LOW_CLK;
break;
case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS:
ni_m_series_clock = NI_M_NEXT_GATE_CLK;
break;
case NI_GPCT_NEXT_TC_CLOCK_SRC_BITS:
ni_m_series_clock = NI_M_NEXT_TC_CLK;
break;
case NI_GPCT_PXI10_CLOCK_SRC_BITS:
ni_m_series_clock = NI_M_PXI10_CLK;
break;
case NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS:
ni_m_series_clock = NI_M_PXI_STAR_TRIGGER_CLK;
break;
case NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS:
ni_m_series_clock = NI_M_ANALOG_TRIGGER_OUT_CLK;
break;
default:
for (i = 0; i <= NI_M_MAX_RTSI_CHAN; ++i) {
if (clk_src == NI_GPCT_RTSI_CLOCK_SRC_BITS(i)) {
ni_m_series_clock = NI_M_RTSI_CLK(i);
break;
}
}
if (i <= NI_M_MAX_RTSI_CHAN)
break;
for (i = 0; i <= NI_M_MAX_PFI_CHAN; ++i) {
if (clk_src == NI_GPCT_PFI_CLOCK_SRC_BITS(i)) {
ni_m_series_clock = NI_M_PFI_CLK(i);
break;
}
}
if (i <= NI_M_MAX_PFI_CHAN)
break;
return -EINVAL;
}
*bits = GI_SRC_SEL(ni_m_series_clock);
return 0;
}
static void ni_tio_set_source_subselect(struct ni_gpct *counter,
unsigned int clock_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int cidx = counter->counter_index;
unsigned int chip = counter->chip_index;
unsigned int second_gate_reg = NITIO_GATE2_REG(cidx);
if (counter_dev->variant != ni_gpct_variant_m_series)
return;
switch (clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK) {
/* Gi_Source_Subselect is zero */
case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS:
case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS:
counter_dev->regs[chip][second_gate_reg] &= ~GI_SRC_SUBSEL;
break;
/* Gi_Source_Subselect is one */
case NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS:
case NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS:
counter_dev->regs[chip][second_gate_reg] |= GI_SRC_SUBSEL;
break;
/* Gi_Source_Subselect doesn't matter */
default:
return;
}
ni_tio_write(counter, counter_dev->regs[chip][second_gate_reg],
second_gate_reg);
}
static int ni_tio_set_clock_src(struct ni_gpct *counter,
unsigned int clock_source,
unsigned int period_ns)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int cidx = counter->counter_index;
unsigned int bits = 0;
int ret;
switch (counter_dev->variant) {
case ni_gpct_variant_660x:
ret = ni_660x_clk_src(clock_source, &bits);
break;
case ni_gpct_variant_e_series:
case ni_gpct_variant_m_series:
default:
ret = ni_m_clk_src(clock_source, &bits);
break;
}
if (ret) {
struct comedi_device *dev = counter_dev->dev;
dev_err(dev->class_dev, "invalid clock source 0x%x\n",
clock_source);
return ret;
}
if (clock_source & NI_GPCT_INVERT_CLOCK_SRC_BIT)
bits |= GI_SRC_POL_INVERT;
ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx),
GI_SRC_SEL_MASK | GI_SRC_POL_INVERT, bits);
ni_tio_set_source_subselect(counter, clock_source);
if (ni_tio_counting_mode_registers_present(counter_dev)) {
bits = 0;
switch (clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK) {
case NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS:
break;
case NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS:
bits |= GI_PRESCALE_X2(counter_dev->variant);
break;
case NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS:
bits |= GI_PRESCALE_X8(counter_dev->variant);
break;
default:
return -EINVAL;
}
ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx),
GI_PRESCALE_X2(counter_dev->variant) |
GI_PRESCALE_X8(counter_dev->variant), bits);
}
counter->clock_period_ps = period_ns * 1000;
ni_tio_set_sync_mode(counter);
return 0;
}
static int ni_tio_get_clock_src(struct ni_gpct *counter,
unsigned int *clock_source,
unsigned int *period_ns)
{
u64 temp64 = 0;
int ret;
ret = ni_tio_generic_clock_src_select(counter, clock_source);
if (ret)
return ret;
ret = ni_tio_clock_period_ps(counter, *clock_source, &temp64);
if (ret)
return ret;
do_div(temp64, 1000); /* ps to ns */
*period_ns = temp64;
return 0;
}
static inline void ni_tio_set_gate_raw(struct ni_gpct *counter,
unsigned int gate_source)
{
ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(counter->counter_index),
GI_GATE_SEL_MASK, GI_GATE_SEL(gate_source));
}
static inline void ni_tio_set_gate2_raw(struct ni_gpct *counter,
unsigned int gate_source)
{
ni_tio_set_bits(counter, NITIO_GATE2_REG(counter->counter_index),
GI_GATE2_SEL_MASK, GI_GATE2_SEL(gate_source));
}
/* Set the mode bits for gate. */
static inline void ni_tio_set_gate_mode(struct ni_gpct *counter,
unsigned int src)
{
unsigned int mode_bits = 0;
if (CR_CHAN(src) & NI_GPCT_DISABLED_GATE_SELECT) {
		/*
		 * Use a bitwise test (rather than equality) so a non-zero
		 * raw register value can still be used for the channel when
		 * disabling.
		 */
mode_bits = GI_GATING_DISABLED;
} else {
if (src & CR_INVERT)
mode_bits |= GI_GATE_POL_INVERT;
if (src & CR_EDGE)
mode_bits |= GI_RISING_EDGE_GATING;
else
mode_bits |= GI_LEVEL_GATING;
}
ni_tio_set_bits(counter, NITIO_MODE_REG(counter->counter_index),
GI_GATE_POL_INVERT | GI_GATING_MODE_MASK,
mode_bits);
}
/*
 * Set the mode bits for gate2.
 *
 * Previously, the code this function represents did not actually write
 * anything to the register. Rather, writing to this register was reserved
 * for the code in ni_tio_set_gate2_raw.
 */
static inline void ni_tio_set_gate2_mode(struct ni_gpct *counter,
unsigned int src)
{
/*
* The GI_GATE2_MODE bit was previously set in the code that also sets
* the gate2 source.
* We'll set mode bits _after_ source bits now, and thus, this function
* will effectively enable the second gate after all bits are set.
*/
unsigned int mode_bits = GI_GATE2_MODE;
if (CR_CHAN(src) & NI_GPCT_DISABLED_GATE_SELECT)
		/*
		 * Use a bitwise test (rather than equality) so a non-zero
		 * raw register value can still be used for the channel when
		 * disabling.
		 */
mode_bits = GI_GATING_DISABLED;
if (src & CR_INVERT)
mode_bits |= GI_GATE2_POL_INVERT;
ni_tio_set_bits(counter, NITIO_GATE2_REG(counter->counter_index),
GI_GATE2_POL_INVERT | GI_GATE2_MODE, mode_bits);
}
static int ni_660x_set_gate(struct ni_gpct *counter, unsigned int gate_source)
{
unsigned int chan = CR_CHAN(gate_source);
unsigned int gate_sel;
unsigned int i;
switch (chan) {
case NI_GPCT_NEXT_SOURCE_GATE_SELECT:
gate_sel = NI_660X_NEXT_SRC_GATE_SEL;
break;
case NI_GPCT_NEXT_OUT_GATE_SELECT:
case NI_GPCT_LOGIC_LOW_GATE_SELECT:
case NI_GPCT_SOURCE_PIN_i_GATE_SELECT:
case NI_GPCT_GATE_PIN_i_GATE_SELECT:
gate_sel = chan & 0x1f;
break;
default:
for (i = 0; i <= NI_660X_MAX_RTSI_CHAN; ++i) {
if (chan == NI_GPCT_RTSI_GATE_SELECT(i)) {
gate_sel = chan & 0x1f;
break;
}
}
if (i <= NI_660X_MAX_RTSI_CHAN)
break;
for (i = 0; i <= NI_660X_MAX_GATE_PIN; ++i) {
if (chan == NI_GPCT_GATE_PIN_GATE_SELECT(i)) {
gate_sel = chan & 0x1f;
break;
}
}
if (i <= NI_660X_MAX_GATE_PIN)
break;
return -EINVAL;
}
ni_tio_set_gate_raw(counter, gate_sel);
return 0;
}
static int ni_m_set_gate(struct ni_gpct *counter, unsigned int gate_source)
{
unsigned int chan = CR_CHAN(gate_source);
unsigned int gate_sel;
unsigned int i;
switch (chan) {
case NI_GPCT_TIMESTAMP_MUX_GATE_SELECT:
case NI_GPCT_AI_START2_GATE_SELECT:
case NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT:
case NI_GPCT_NEXT_OUT_GATE_SELECT:
case NI_GPCT_AI_START1_GATE_SELECT:
case NI_GPCT_NEXT_SOURCE_GATE_SELECT:
case NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT:
case NI_GPCT_LOGIC_LOW_GATE_SELECT:
gate_sel = chan & 0x1f;
break;
default:
for (i = 0; i <= NI_M_MAX_RTSI_CHAN; ++i) {
if (chan == NI_GPCT_RTSI_GATE_SELECT(i)) {
gate_sel = chan & 0x1f;
break;
}
}
if (i <= NI_M_MAX_RTSI_CHAN)
break;
for (i = 0; i <= NI_M_MAX_PFI_CHAN; ++i) {
if (chan == NI_GPCT_PFI_GATE_SELECT(i)) {
gate_sel = chan & 0x1f;
break;
}
}
if (i <= NI_M_MAX_PFI_CHAN)
break;
return -EINVAL;
}
ni_tio_set_gate_raw(counter, gate_sel);
return 0;
}
static int ni_660x_set_gate2(struct ni_gpct *counter, unsigned int gate_source)
{
unsigned int chan = CR_CHAN(gate_source);
unsigned int gate2_sel;
unsigned int i;
switch (chan) {
case NI_GPCT_SOURCE_PIN_i_GATE_SELECT:
case NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT:
case NI_GPCT_SELECTED_GATE_GATE_SELECT:
case NI_GPCT_NEXT_OUT_GATE_SELECT:
case NI_GPCT_LOGIC_LOW_GATE_SELECT:
gate2_sel = chan & 0x1f;
break;
case NI_GPCT_NEXT_SOURCE_GATE_SELECT:
gate2_sel = NI_660X_NEXT_SRC_GATE2_SEL;
break;
default:
for (i = 0; i <= NI_660X_MAX_RTSI_CHAN; ++i) {
if (chan == NI_GPCT_RTSI_GATE_SELECT(i)) {
gate2_sel = chan & 0x1f;
break;
}
}
if (i <= NI_660X_MAX_RTSI_CHAN)
break;
for (i = 0; i <= NI_660X_MAX_UP_DOWN_PIN; ++i) {
if (chan == NI_GPCT_UP_DOWN_PIN_GATE_SELECT(i)) {
gate2_sel = chan & 0x1f;
break;
}
}
if (i <= NI_660X_MAX_UP_DOWN_PIN)
break;
return -EINVAL;
}
ni_tio_set_gate2_raw(counter, gate2_sel);
return 0;
}
static int ni_m_set_gate2(struct ni_gpct *counter, unsigned int gate_source)
{
/*
* FIXME: We don't know what the m-series second gate codes are,
* so we'll just pass the bits through for now.
*/
ni_tio_set_gate2_raw(counter, gate_source);
return 0;
}
int ni_tio_set_gate_src_raw(struct ni_gpct *counter,
unsigned int gate, unsigned int src)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
switch (gate) {
case 0:
/* 1. start by disabling gate */
ni_tio_set_gate_mode(counter, NI_GPCT_DISABLED_GATE_SELECT);
/* 2. set the requested gate source */
ni_tio_set_gate_raw(counter, src);
		/* 3. reenable & set mode to start things back up */
ni_tio_set_gate_mode(counter, src);
break;
case 1:
if (!ni_tio_has_gate2_registers(counter_dev))
return -EINVAL;
/* 1. start by disabling gate */
ni_tio_set_gate2_mode(counter, NI_GPCT_DISABLED_GATE_SELECT);
/* 2. set the requested gate source */
ni_tio_set_gate2_raw(counter, src);
		/* 3. reenable & set mode to start things back up */
ni_tio_set_gate2_mode(counter, src);
break;
default:
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL_GPL(ni_tio_set_gate_src_raw);
int ni_tio_set_gate_src(struct ni_gpct *counter,
unsigned int gate, unsigned int src)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
	/*
	 * Mask off the disable flag; this high bit still passes CR_CHAN.
	 * Doing this allows one to both set the gate as disabled and change
	 * the route value of the gate.
	 */
int chan = CR_CHAN(src) & (~NI_GPCT_DISABLED_GATE_SELECT);
int ret;
switch (gate) {
case 0:
/* 1. start by disabling gate */
ni_tio_set_gate_mode(counter, NI_GPCT_DISABLED_GATE_SELECT);
/* 2. set the requested gate source */
switch (counter_dev->variant) {
case ni_gpct_variant_e_series:
case ni_gpct_variant_m_series:
ret = ni_m_set_gate(counter, chan);
break;
case ni_gpct_variant_660x:
ret = ni_660x_set_gate(counter, chan);
break;
default:
return -EINVAL;
}
if (ret)
return ret;
		/* 3. reenable & set mode to start things back up */
ni_tio_set_gate_mode(counter, src);
break;
case 1:
if (!ni_tio_has_gate2_registers(counter_dev))
return -EINVAL;
/* 1. start by disabling gate */
ni_tio_set_gate2_mode(counter, NI_GPCT_DISABLED_GATE_SELECT);
/* 2. set the requested gate source */
switch (counter_dev->variant) {
case ni_gpct_variant_m_series:
ret = ni_m_set_gate2(counter, chan);
break;
case ni_gpct_variant_660x:
ret = ni_660x_set_gate2(counter, chan);
break;
default:
return -EINVAL;
}
if (ret)
return ret;
		/* 3. reenable & set mode to start things back up */
ni_tio_set_gate2_mode(counter, src);
break;
default:
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL_GPL(ni_tio_set_gate_src);
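/*
 * The m-series ABZ register packs three 5-bit source selects: encoder A at
 * bits 14..10, B at bits 9..5 and Z at bits 4..0 (see the shift/mask logic
 * below); a value of 0x1f disables the corresponding input.
 */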
static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned int index,
unsigned int source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int cidx = counter->counter_index;
unsigned int chip = counter->chip_index;
unsigned int abz_reg, shift, mask;
if (counter_dev->variant != ni_gpct_variant_m_series)
return -EINVAL;
abz_reg = NITIO_ABZ_REG(cidx);
/* allow for new device-global names */
if (index == NI_GPCT_SOURCE_ENCODER_A ||
(index >= NI_CtrA(0) && index <= NI_CtrA(-1))) {
shift = 10;
} else if (index == NI_GPCT_SOURCE_ENCODER_B ||
(index >= NI_CtrB(0) && index <= NI_CtrB(-1))) {
shift = 5;
} else if (index == NI_GPCT_SOURCE_ENCODER_Z ||
(index >= NI_CtrZ(0) && index <= NI_CtrZ(-1))) {
shift = 0;
} else {
return -EINVAL;
}
mask = 0x1f << shift;
if (source > 0x1f)
source = 0x1f; /* Disable gate */
counter_dev->regs[chip][abz_reg] &= ~mask;
counter_dev->regs[chip][abz_reg] |= (source << shift) & mask;
ni_tio_write(counter, counter_dev->regs[chip][abz_reg], abz_reg);
return 0;
}
static int ni_tio_get_other_src(struct ni_gpct *counter, unsigned int index,
unsigned int *source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int cidx = counter->counter_index;
unsigned int abz_reg, shift, mask;
if (counter_dev->variant != ni_gpct_variant_m_series)
/* A,B,Z only valid for m-series */
return -EINVAL;
abz_reg = NITIO_ABZ_REG(cidx);
/* allow for new device-global names */
if (index == NI_GPCT_SOURCE_ENCODER_A ||
(index >= NI_CtrA(0) && index <= NI_CtrA(-1))) {
shift = 10;
} else if (index == NI_GPCT_SOURCE_ENCODER_B ||
(index >= NI_CtrB(0) && index <= NI_CtrB(-1))) {
shift = 5;
} else if (index == NI_GPCT_SOURCE_ENCODER_Z ||
(index >= NI_CtrZ(0) && index <= NI_CtrZ(-1))) {
shift = 0;
} else {
return -EINVAL;
}
mask = 0x1f;
*source = (ni_tio_get_soft_copy(counter, abz_reg) >> shift) & mask;
return 0;
}
static int ni_660x_gate_to_generic_gate(unsigned int gate, unsigned int *src)
{
unsigned int source;
unsigned int i;
switch (gate) {
case NI_660X_SRC_PIN_I_GATE_SEL:
source = NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
break;
case NI_660X_GATE_PIN_I_GATE_SEL:
source = NI_GPCT_GATE_PIN_i_GATE_SELECT;
break;
case NI_660X_NEXT_SRC_GATE_SEL:
source = NI_GPCT_NEXT_SOURCE_GATE_SELECT;
break;
case NI_660X_NEXT_OUT_GATE_SEL:
source = NI_GPCT_NEXT_OUT_GATE_SELECT;
break;
case NI_660X_LOGIC_LOW_GATE_SEL:
source = NI_GPCT_LOGIC_LOW_GATE_SELECT;
break;
default:
for (i = 0; i <= NI_660X_MAX_RTSI_CHAN; ++i) {
if (gate == NI_660X_RTSI_GATE_SEL(i)) {
source = NI_GPCT_RTSI_GATE_SELECT(i);
break;
}
}
if (i <= NI_660X_MAX_RTSI_CHAN)
break;
for (i = 0; i <= NI_660X_MAX_GATE_PIN; ++i) {
if (gate == NI_660X_PIN_GATE_SEL(i)) {
source = NI_GPCT_GATE_PIN_GATE_SELECT(i);
break;
}
}
if (i <= NI_660X_MAX_GATE_PIN)
break;
return -EINVAL;
}
*src = source;
return 0;
}
static int ni_m_gate_to_generic_gate(unsigned int gate, unsigned int *src)
{
unsigned int source;
unsigned int i;
switch (gate) {
case NI_M_TIMESTAMP_MUX_GATE_SEL:
source = NI_GPCT_TIMESTAMP_MUX_GATE_SELECT;
break;
case NI_M_AI_START2_GATE_SEL:
source = NI_GPCT_AI_START2_GATE_SELECT;
break;
case NI_M_PXI_STAR_TRIGGER_GATE_SEL:
source = NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT;
break;
case NI_M_NEXT_OUT_GATE_SEL:
source = NI_GPCT_NEXT_OUT_GATE_SELECT;
break;
case NI_M_AI_START1_GATE_SEL:
source = NI_GPCT_AI_START1_GATE_SELECT;
break;
case NI_M_NEXT_SRC_GATE_SEL:
source = NI_GPCT_NEXT_SOURCE_GATE_SELECT;
break;
case NI_M_ANALOG_TRIG_OUT_GATE_SEL:
source = NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT;
break;
case NI_M_LOGIC_LOW_GATE_SEL:
source = NI_GPCT_LOGIC_LOW_GATE_SELECT;
break;
default:
for (i = 0; i <= NI_M_MAX_RTSI_CHAN; ++i) {
if (gate == NI_M_RTSI_GATE_SEL(i)) {
source = NI_GPCT_RTSI_GATE_SELECT(i);
break;
}
}
if (i <= NI_M_MAX_RTSI_CHAN)
break;
for (i = 0; i <= NI_M_MAX_PFI_CHAN; ++i) {
if (gate == NI_M_PFI_GATE_SEL(i)) {
source = NI_GPCT_PFI_GATE_SELECT(i);
break;
}
}
if (i <= NI_M_MAX_PFI_CHAN)
break;
return -EINVAL;
}
*src = source;
return 0;
}
static int ni_660x_gate2_to_generic_gate(unsigned int gate, unsigned int *src)
{
unsigned int source;
unsigned int i;
switch (gate) {
case NI_660X_SRC_PIN_I_GATE2_SEL:
source = NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
break;
case NI_660X_UD_PIN_I_GATE2_SEL:
source = NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT;
break;
case NI_660X_NEXT_SRC_GATE2_SEL:
source = NI_GPCT_NEXT_SOURCE_GATE_SELECT;
break;
case NI_660X_NEXT_OUT_GATE2_SEL:
source = NI_GPCT_NEXT_OUT_GATE_SELECT;
break;
case NI_660X_SELECTED_GATE2_SEL:
source = NI_GPCT_SELECTED_GATE_GATE_SELECT;
break;
case NI_660X_LOGIC_LOW_GATE2_SEL:
source = NI_GPCT_LOGIC_LOW_GATE_SELECT;
break;
default:
for (i = 0; i <= NI_660X_MAX_RTSI_CHAN; ++i) {
if (gate == NI_660X_RTSI_GATE2_SEL(i)) {
source = NI_GPCT_RTSI_GATE_SELECT(i);
break;
}
}
if (i <= NI_660X_MAX_RTSI_CHAN)
break;
for (i = 0; i <= NI_660X_MAX_UP_DOWN_PIN; ++i) {
if (gate == NI_660X_UD_PIN_GATE2_SEL(i)) {
source = NI_GPCT_UP_DOWN_PIN_GATE_SELECT(i);
break;
}
}
if (i <= NI_660X_MAX_UP_DOWN_PIN)
break;
return -EINVAL;
}
*src = source;
return 0;
}
static int ni_m_gate2_to_generic_gate(unsigned int gate, unsigned int *src)
{
/*
* FIXME: the second gate sources for the m series are undocumented,
* so we just return the raw bits for now.
*/
*src = gate;
return 0;
}
static inline unsigned int ni_tio_get_gate_mode(struct ni_gpct *counter)
{
unsigned int mode = ni_tio_get_soft_copy(counter,
NITIO_MODE_REG(counter->counter_index));
unsigned int ret = 0;
if ((mode & GI_GATING_MODE_MASK) == GI_GATING_DISABLED)
ret |= NI_GPCT_DISABLED_GATE_SELECT;
if (mode & GI_GATE_POL_INVERT)
ret |= CR_INVERT;
if ((mode & GI_GATING_MODE_MASK) != GI_LEVEL_GATING)
ret |= CR_EDGE;
return ret;
}
static inline unsigned int ni_tio_get_gate2_mode(struct ni_gpct *counter)
{
unsigned int mode = ni_tio_get_soft_copy(counter,
NITIO_GATE2_REG(counter->counter_index));
unsigned int ret = 0;
if (!(mode & GI_GATE2_MODE))
ret |= NI_GPCT_DISABLED_GATE_SELECT;
if (mode & GI_GATE2_POL_INVERT)
ret |= CR_INVERT;
return ret;
}
static inline unsigned int ni_tio_get_gate_val(struct ni_gpct *counter)
{
return GI_BITS_TO_GATE(ni_tio_get_soft_copy(counter,
NITIO_INPUT_SEL_REG(counter->counter_index)));
}
static inline unsigned int ni_tio_get_gate2_val(struct ni_gpct *counter)
{
return GI_BITS_TO_GATE2(ni_tio_get_soft_copy(counter,
NITIO_GATE2_REG(counter->counter_index)));
}
static int ni_tio_get_gate_src(struct ni_gpct *counter, unsigned int gate_index,
unsigned int *gate_source)
{
unsigned int gate;
int ret;
switch (gate_index) {
case 0:
gate = ni_tio_get_gate_val(counter);
switch (counter->counter_dev->variant) {
case ni_gpct_variant_e_series:
case ni_gpct_variant_m_series:
default:
ret = ni_m_gate_to_generic_gate(gate, gate_source);
break;
case ni_gpct_variant_660x:
ret = ni_660x_gate_to_generic_gate(gate, gate_source);
break;
}
if (ret)
return ret;
*gate_source |= ni_tio_get_gate_mode(counter);
break;
case 1:
gate = ni_tio_get_gate2_val(counter);
switch (counter->counter_dev->variant) {
case ni_gpct_variant_e_series:
case ni_gpct_variant_m_series:
default:
ret = ni_m_gate2_to_generic_gate(gate, gate_source);
break;
case ni_gpct_variant_660x:
ret = ni_660x_gate2_to_generic_gate(gate, gate_source);
break;
}
if (ret)
return ret;
*gate_source |= ni_tio_get_gate2_mode(counter);
break;
default:
return -EINVAL;
}
return 0;
}
static int ni_tio_get_gate_src_raw(struct ni_gpct *counter,
unsigned int gate_index,
unsigned int *gate_source)
{
switch (gate_index) {
case 0:
*gate_source = ni_tio_get_gate_mode(counter)
| ni_tio_get_gate_val(counter);
break;
case 1:
*gate_source = ni_tio_get_gate2_mode(counter)
| ni_tio_get_gate2_val(counter);
break;
default:
return -EINVAL;
}
return 0;
}
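/*
 * Configuration instruction handler for counter subdevices. Handles
 * INSN_CONFIG_SET_COUNTER_MODE, INSN_CONFIG_ARM, INSN_CONFIG_DISARM,
 * INSN_CONFIG_GET_COUNTER_STATUS, INSN_CONFIG_SET/GET_CLOCK_SRC,
 * INSN_CONFIG_SET/GET_GATE_SRC, INSN_CONFIG_SET_OTHER_SRC and
 * INSN_CONFIG_RESET; anything else returns -EINVAL.
 */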
int ni_tio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct ni_gpct *counter = s->private;
unsigned int cidx = counter->counter_index;
unsigned int status;
int ret = 0;
switch (data[0]) {
case INSN_CONFIG_SET_COUNTER_MODE:
ret = ni_tio_set_counter_mode(counter, data[1]);
break;
case INSN_CONFIG_ARM:
ret = ni_tio_arm(counter, true, data[1]);
break;
case INSN_CONFIG_DISARM:
ret = ni_tio_arm(counter, false, 0);
break;
case INSN_CONFIG_GET_COUNTER_STATUS:
data[1] = 0;
status = ni_tio_read(counter, NITIO_SHARED_STATUS_REG(cidx));
if (status & GI_ARMED(cidx)) {
data[1] |= COMEDI_COUNTER_ARMED;
if (status & GI_COUNTING(cidx))
data[1] |= COMEDI_COUNTER_COUNTING;
}
data[2] = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING;
break;
case INSN_CONFIG_SET_CLOCK_SRC:
ret = ni_tio_set_clock_src(counter, data[1], data[2]);
break;
case INSN_CONFIG_GET_CLOCK_SRC:
ret = ni_tio_get_clock_src(counter, &data[1], &data[2]);
break;
case INSN_CONFIG_SET_GATE_SRC:
ret = ni_tio_set_gate_src(counter, data[1], data[2]);
break;
case INSN_CONFIG_GET_GATE_SRC:
ret = ni_tio_get_gate_src(counter, data[1], &data[2]);
break;
case INSN_CONFIG_SET_OTHER_SRC:
ret = ni_tio_set_other_src(counter, data[1], data[2]);
break;
case INSN_CONFIG_RESET:
ni_tio_reset_count_and_disarm(counter);
break;
default:
return -EINVAL;
}
return ret ? ret : insn->n;
}
EXPORT_SYMBOL_GPL(ni_tio_insn_config);
/*
 * Retrieves the register value of the current source of the output selector
 * for the given destination.
 *
 * If the terminal for the destination is not already configured as an output,
 * this function returns -EINVAL.
 *
 * Return: the register value of the destination output selector;
 *	   -EINVAL if the terminal is not configured for output.
 */
int ni_tio_get_routing(struct ni_gpct_device *counter_dev, unsigned int dest)
{
/* we need to know the actual counter below... */
int ctr_index = (dest - NI_COUNTER_NAMES_BASE) % NI_MAX_COUNTERS;
struct ni_gpct *counter = &counter_dev->counters[ctr_index];
int ret = 1;
unsigned int reg;
	if (dest >= NI_CtrA(0) && dest <= NI_CtrZ(-1)) {
		ret = ni_tio_get_other_src(counter, dest, &reg);
	} else if (dest >= NI_CtrGate(0) && dest <= NI_CtrGate(-1)) {
		ret = ni_tio_get_gate_src_raw(counter, 0, &reg);
	} else if (dest >= NI_CtrAux(0) && dest <= NI_CtrAux(-1)) {
		ret = ni_tio_get_gate_src_raw(counter, 1, &reg);
	/*
	 * This case is not possible through this interface. A user must use
	 * INSN_CONFIG_SET_CLOCK_SRC instead.
	 * } else if (dest >= NI_CtrSource(0) && dest <= NI_CtrSource(-1)) {
	 *	ret = ni_tio_set_clock_src(counter, &reg, &period_ns);
	 */
	}
if (ret)
return -EINVAL;
return reg;
}
EXPORT_SYMBOL_GPL(ni_tio_get_routing);
/**
* ni_tio_set_routing() - Sets the register value of the selector MUX for the given destination.
* @counter_dev: Pointer to general counter device.
* @dest: Device-global identifier of route destination.
 * @reg:
 *	The first several bits of this value should store the desired
 *	value to write to the register. All other bits are for
 *	transmitting information that modifies the mode of the particular
 *	destination/gate. These mode bits might include a bitwise or of
 *	CR_INVERT and CR_EDGE. Note that the calling function should
 *	have already validated the correctness of this value.
*/
int ni_tio_set_routing(struct ni_gpct_device *counter_dev, unsigned int dest,
unsigned int reg)
{
/* we need to know the actual counter below... */
int ctr_index = (dest - NI_COUNTER_NAMES_BASE) % NI_MAX_COUNTERS;
struct ni_gpct *counter = &counter_dev->counters[ctr_index];
int ret;
if (dest >= NI_CtrA(0) && dest <= NI_CtrZ(-1)) {
ret = ni_tio_set_other_src(counter, dest, reg);
} else if (dest >= NI_CtrGate(0) && dest <= NI_CtrGate(-1)) {
ret = ni_tio_set_gate_src_raw(counter, 0, reg);
} else if (dest >= NI_CtrAux(0) && dest <= NI_CtrAux(-1)) {
ret = ni_tio_set_gate_src_raw(counter, 1, reg);
/*
* This case is not possible through this interface. A user must use
* INSN_CONFIG_SET_CLOCK_SRC instead.
* } else if (dest >= NI_CtrSource(0) && dest <= NI_CtrSource(-1)) {
* ret = ni_tio_set_clock_src(counter, reg, period_ns);
*/
} else {
return -EINVAL;
}
return ret;
}
EXPORT_SYMBOL_GPL(ni_tio_set_routing);
/*
* Sets the given destination MUX to its default value or disable it.
*
* Return: 0 if successful; -EINVAL if terminal is unknown.
*/
int ni_tio_unset_routing(struct ni_gpct_device *counter_dev, unsigned int dest)
{
if (dest >= NI_GATES_NAMES_BASE && dest <= NI_GATES_NAMES_MAX)
/* Disable gate (via mode bits) and set to default 0-value */
return ni_tio_set_routing(counter_dev, dest,
NI_GPCT_DISABLED_GATE_SELECT);
/*
* This case is not possible through this interface. A user must use
* INSN_CONFIG_SET_CLOCK_SRC instead.
* if (dest >= NI_CtrSource(0) && dest <= NI_CtrSource(-1))
* return ni_tio_set_clock_src(counter, reg, period_ns);
*/
return -EINVAL;
}
EXPORT_SYMBOL_GPL(ni_tio_unset_routing);
static unsigned int ni_tio_read_sw_save_reg(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct ni_gpct *counter = s->private;
unsigned int cidx = counter->counter_index;
unsigned int val;
ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), GI_SAVE_TRACE, 0);
ni_tio_set_bits(counter, NITIO_CMD_REG(cidx),
GI_SAVE_TRACE, GI_SAVE_TRACE);
/*
* The count doesn't get latched until the next clock edge, so it is
* possible the count may change (once) while we are reading. Since
* the read of the SW_Save_Reg isn't atomic (apparently even when it's
* a 32 bit register according to 660x docs), we need to read twice
* and make sure the reading hasn't changed. If it has, a third read
* will be correct since the count value will definitely have latched
* by then.
*/
val = ni_tio_read(counter, NITIO_SW_SAVE_REG(cidx));
if (val != ni_tio_read(counter, NITIO_SW_SAVE_REG(cidx)))
val = ni_tio_read(counter, NITIO_SW_SAVE_REG(cidx));
return val;
}
int ni_tio_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct ni_gpct *counter = s->private;
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int channel = CR_CHAN(insn->chanspec);
unsigned int cidx = counter->counter_index;
unsigned int chip = counter->chip_index;
int i;
for (i = 0; i < insn->n; i++) {
switch (channel) {
case 0:
data[i] = ni_tio_read_sw_save_reg(dev, s);
break;
case 1:
data[i] =
counter_dev->regs[chip][NITIO_LOADA_REG(cidx)];
break;
case 2:
data[i] =
counter_dev->regs[chip][NITIO_LOADB_REG(cidx)];
break;
}
}
return insn->n;
}
EXPORT_SYMBOL_GPL(ni_tio_insn_read);
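/*
 * Returns the load register (A or B) that the hardware will use as the
 * counter's next load source, as reported by the shared status register.
 */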
static unsigned int ni_tio_next_load_register(struct ni_gpct *counter)
{
unsigned int cidx = counter->counter_index;
unsigned int bits = ni_tio_read(counter, NITIO_SHARED_STATUS_REG(cidx));
return (bits & GI_NEXT_LOAD_SRC(cidx))
? NITIO_LOADB_REG(cidx)
: NITIO_LOADA_REG(cidx);
}
int ni_tio_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct ni_gpct *counter = s->private;
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int channel = CR_CHAN(insn->chanspec);
unsigned int cidx = counter->counter_index;
unsigned int chip = counter->chip_index;
unsigned int load_reg;
unsigned int load_val;
if (insn->n < 1)
return 0;
load_val = data[insn->n - 1];
switch (channel) {
case 0:
/*
* Unsafe if counter is armed.
* Should probably check status and return -EBUSY if armed.
*/
/*
* Don't disturb load source select, just use whichever
* load register is already selected.
*/
load_reg = ni_tio_next_load_register(counter);
ni_tio_write(counter, load_val, load_reg);
ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx),
0, 0, GI_LOAD);
/* restore load reg */
ni_tio_write(counter, counter_dev->regs[chip][load_reg],
load_reg);
break;
case 1:
counter_dev->regs[chip][NITIO_LOADA_REG(cidx)] = load_val;
ni_tio_write(counter, load_val, NITIO_LOADA_REG(cidx));
break;
case 2:
counter_dev->regs[chip][NITIO_LOADB_REG(cidx)] = load_val;
ni_tio_write(counter, load_val, NITIO_LOADB_REG(cidx));
break;
default:
return -EINVAL;
}
return insn->n;
}
EXPORT_SYMBOL_GPL(ni_tio_insn_write);
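/*
 * Puts a counter into a known startup state: disarmed, with the
 * auto-increment, mode, load A/B, input select, counting mode (if present),
 * gate2 (if present), DMA config and interrupt enable registers cleared,
 * and GI_SYNC_GATE set in the command register.
 */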
void ni_tio_init_counter(struct ni_gpct *counter)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int cidx = counter->counter_index;
unsigned int chip = counter->chip_index;
ni_tio_reset_count_and_disarm(counter);
/* initialize counter registers */
counter_dev->regs[chip][NITIO_AUTO_INC_REG(cidx)] = 0x0;
ni_tio_write(counter, 0x0, NITIO_AUTO_INC_REG(cidx));
ni_tio_set_bits(counter, NITIO_CMD_REG(cidx),
~0, GI_SYNC_GATE);
ni_tio_set_bits(counter, NITIO_MODE_REG(cidx), ~0, 0);
counter_dev->regs[chip][NITIO_LOADA_REG(cidx)] = 0x0;
ni_tio_write(counter, 0x0, NITIO_LOADA_REG(cidx));
counter_dev->regs[chip][NITIO_LOADB_REG(cidx)] = 0x0;
ni_tio_write(counter, 0x0, NITIO_LOADB_REG(cidx));
ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx), ~0, 0);
if (ni_tio_counting_mode_registers_present(counter_dev))
ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx), ~0, 0);
if (ni_tio_has_gate2_registers(counter_dev)) {
counter_dev->regs[chip][NITIO_GATE2_REG(cidx)] = 0x0;
ni_tio_write(counter, 0x0, NITIO_GATE2_REG(cidx));
}
ni_tio_set_bits(counter, NITIO_DMA_CFG_REG(cidx), ~0, 0x0);
ni_tio_set_bits(counter, NITIO_INT_ENA_REG(cidx), ~0, 0x0);
}
EXPORT_SYMBOL_GPL(ni_tio_init_counter);
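/*
 * ni_gpct_device_construct() - Allocate and initialize a counter device.
 *
 * @write and @read are the register access callbacks supplied by the board
 * driver, @variant selects the e-series/m-series/660x register-level
 * behaviour, and the @num_counters counters are grouped @counters_per_chip
 * at a time for the per-chip register soft copies.
 *
 * Returns NULL on allocation failure or if @num_counters or
 * @counters_per_chip is zero. Free with ni_gpct_device_destroy().
 */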
struct ni_gpct_device *
ni_gpct_device_construct(struct comedi_device *dev,
void (*write)(struct ni_gpct *counter,
unsigned int value,
enum ni_gpct_register reg),
unsigned int (*read)(struct ni_gpct *counter,
enum ni_gpct_register reg),
enum ni_gpct_variant variant,
unsigned int num_counters,
unsigned int counters_per_chip,
const struct ni_route_tables *routing_tables)
{
struct ni_gpct_device *counter_dev;
struct ni_gpct *counter;
unsigned int i;
if (num_counters == 0 || counters_per_chip == 0)
return NULL;
counter_dev = kzalloc(sizeof(*counter_dev), GFP_KERNEL);
if (!counter_dev)
return NULL;
counter_dev->dev = dev;
counter_dev->write = write;
counter_dev->read = read;
counter_dev->variant = variant;
counter_dev->routing_tables = routing_tables;
spin_lock_init(&counter_dev->regs_lock);
counter_dev->num_counters = num_counters;
counter_dev->num_chips = DIV_ROUND_UP(num_counters, counters_per_chip);
counter_dev->counters = kcalloc(num_counters, sizeof(*counter),
GFP_KERNEL);
counter_dev->regs = kcalloc(counter_dev->num_chips,
sizeof(*counter_dev->regs), GFP_KERNEL);
if (!counter_dev->regs || !counter_dev->counters) {
kfree(counter_dev->regs);
kfree(counter_dev->counters);
kfree(counter_dev);
return NULL;
}
for (i = 0; i < num_counters; ++i) {
counter = &counter_dev->counters[i];
counter->counter_dev = counter_dev;
counter->chip_index = i / counters_per_chip;
counter->counter_index = i % counters_per_chip;
spin_lock_init(&counter->lock);
}
return counter_dev;
}
EXPORT_SYMBOL_GPL(ni_gpct_device_construct);
void ni_gpct_device_destroy(struct ni_gpct_device *counter_dev)
{
if (!counter_dev)
return;
kfree(counter_dev->regs);
kfree(counter_dev->counters);
kfree(counter_dev);
}
EXPORT_SYMBOL_GPL(ni_gpct_device_destroy);
static int __init ni_tio_init_module(void)
{
return 0;
}
module_init(ni_tio_init_module);
static void __exit ni_tio_cleanup_module(void)
{
}
module_exit(ni_tio_cleanup_module);
MODULE_AUTHOR("Comedi <[email protected]>");
MODULE_DESCRIPTION("Comedi support for NI general-purpose counters");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/ni_tio.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* aio_iiro_16.c
* Comedi driver for Access I/O Products 104-IIRO-16 board
* Copyright (C) 2006 C&C Technologies, Inc.
*/
/*
* Driver: aio_iiro_16
* Description: Access I/O Products PC/104 Isolated Input/Relay Output Board
* Author: Zachary Ware <[email protected]>
* Devices: [Access I/O] 104-IIRO-16 (aio_iiro_16)
* Status: experimental
*
* Configuration Options:
* [0] - I/O port base address
* [1] - IRQ (optional)
*
* The board supports interrupts on change of state of the digital inputs.
* The sample data returned by the async command indicates which inputs
* changed state and the current state of the inputs:
*
* Bit 23 - IRQ Enable (1) / Disable (0)
* Bit 17 - Input 8-15 Changed State (1 = Changed, 0 = No Change)
* Bit 16 - Input 0-7 Changed State (1 = Changed, 0 = No Change)
* Bit 15 - Digital input 15
* ...
* Bit 0 - Digital input 0
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/comedi/comedidev.h>
#define AIO_IIRO_16_RELAY_0_7 0x00
#define AIO_IIRO_16_INPUT_0_7 0x01
#define AIO_IIRO_16_IRQ 0x02
#define AIO_IIRO_16_RELAY_8_15 0x04
#define AIO_IIRO_16_INPUT_8_15 0x05
#define AIO_IIRO_16_STATUS 0x07
#define AIO_IIRO_16_STATUS_IRQE BIT(7)
#define AIO_IIRO_16_STATUS_INPUT_8_15 BIT(1)
#define AIO_IIRO_16_STATUS_INPUT_0_7 BIT(0)
static unsigned int aio_iiro_16_read_inputs(struct comedi_device *dev)
{
unsigned int val;
val = inb(dev->iobase + AIO_IIRO_16_INPUT_0_7);
val |= inb(dev->iobase + AIO_IIRO_16_INPUT_8_15) << 8;
return val;
}
static irqreturn_t aio_iiro_16_cos(int irq, void *d)
{
struct comedi_device *dev = d;
struct comedi_subdevice *s = dev->read_subdev;
unsigned int status;
unsigned int val;
status = inb(dev->iobase + AIO_IIRO_16_STATUS);
if (!(status & AIO_IIRO_16_STATUS_IRQE))
return IRQ_NONE;
val = aio_iiro_16_read_inputs(dev);
val |= (status << 16);
comedi_buf_write_samples(s, &val, 1);
comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
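/*
 * A read of the IRQ register enables the change-of-state interrupt;
 * writing any value to it disables the interrupt.
 */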
static void aio_iiro_enable_irq(struct comedi_device *dev, bool enable)
{
if (enable)
inb(dev->iobase + AIO_IIRO_16_IRQ);
else
outb(0, dev->iobase + AIO_IIRO_16_IRQ);
}
static int aio_iiro_16_cos_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
aio_iiro_enable_irq(dev, false);
return 0;
}
static int aio_iiro_16_cos_cmd(struct comedi_device *dev,
struct comedi_subdevice *s)
{
aio_iiro_enable_irq(dev, true);
return 0;
}
static int aio_iiro_16_cos_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
int err = 0;
/* Step 1 : check if triggers are trivially valid */
err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW);
err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_NONE);
if (err)
return 1;
/* Step 2a : make sure trigger sources are unique */
/* Step 2b : and mutually compatible */
/* Step 3: check if arguments are trivially valid */
err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
if (err)
return 3;
/* Step 4: fix up any arguments */
/* Step 5: check channel list if it exists */
return 0;
}
static int aio_iiro_16_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
if (comedi_dio_update_state(s, data)) {
outb(s->state & 0xff, dev->iobase + AIO_IIRO_16_RELAY_0_7);
outb((s->state >> 8) & 0xff,
dev->iobase + AIO_IIRO_16_RELAY_8_15);
}
data[1] = s->state;
return insn->n;
}
static int aio_iiro_16_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
data[1] = aio_iiro_16_read_inputs(dev);
return insn->n;
}
static int aio_iiro_16_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
struct comedi_subdevice *s;
int ret;
ret = comedi_request_region(dev, it->options[0], 0x8);
if (ret)
return ret;
aio_iiro_enable_irq(dev, false);
	/*
	 * Digital input change of state interrupts are optionally supported
	 * using IRQ 2-7, 10-12, 14, or 15; the mask 0xdcfc below has exactly
	 * those bits set.
	 */
if ((1 << it->options[1]) & 0xdcfc) {
ret = request_irq(it->options[1], aio_iiro_16_cos, 0,
dev->board_name, dev);
if (ret == 0)
dev->irq = it->options[1];
}
ret = comedi_alloc_subdevices(dev, 2);
if (ret)
return ret;
/* Digital Output subdevice */
s = &dev->subdevices[0];
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = aio_iiro_16_do_insn_bits;
/* get the initial state of the relays */
s->state = inb(dev->iobase + AIO_IIRO_16_RELAY_0_7) |
(inb(dev->iobase + AIO_IIRO_16_RELAY_8_15) << 8);
/* Digital Input subdevice */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = aio_iiro_16_di_insn_bits;
if (dev->irq) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ | SDF_LSAMPL;
s->len_chanlist = 1;
s->do_cmdtest = aio_iiro_16_cos_cmdtest;
s->do_cmd = aio_iiro_16_cos_cmd;
s->cancel = aio_iiro_16_cos_cancel;
}
return 0;
}
static struct comedi_driver aio_iiro_16_driver = {
.driver_name = "aio_iiro_16",
.module = THIS_MODULE,
.attach = aio_iiro_16_attach,
.detach = comedi_legacy_detach,
};
module_comedi_driver(aio_iiro_16_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi driver for Access I/O Products 104-IIRO-16 board");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/aio_iiro_16.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/s626.c
* Sensoray s626 Comedi driver
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <[email protected]>
*
* Based on Sensoray Model 626 Linux driver Version 0.2
* Copyright (C) 2002-2004 Sensoray Co., Inc.
*/
/*
* Driver: s626
* Description: Sensoray 626 driver
* Devices: [Sensoray] 626 (s626)
* Authors: Gianluca Palli <[email protected]>,
* Updated: Fri, 15 Feb 2008 10:28:42 +0000
* Status: experimental
* Configuration options: not applicable, uses PCI auto config
* INSN_CONFIG instructions:
* analog input:
* none
*
* analog output:
* none
*
* digital channel:
* s626 has 3 dio subdevices (2,3 and 4) each with 16 i/o channels
* supported configuration options:
* INSN_CONFIG_DIO_QUERY
* COMEDI_INPUT
* COMEDI_OUTPUT
*
* encoder:
* Every channel must be configured before reading.
*
* Example code
*
* insn.insn=INSN_CONFIG; //configuration instruction
* insn.n=1; //number of operation (must be 1)
* insn.data=&initialvalue; //initial value loaded into encoder
* //during configuration
* insn.subdev=5; //encoder subdevice
* insn.chanspec=CR_PACK(encoder_channel,0,AREF_OTHER); //encoder_channel
* //to configure
*
* comedi_do_insn(cf,&insn); //executing configuration
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/comedi/comedi_pci.h>
#include "s626.h"
struct s626_buffer_dma {
dma_addr_t physical_base;
void *logical_base;
};
/**
* struct s626_private - Working data for s626 driver.
* @ai_cmd_running: non-zero if ai_cmd is running.
* @ai_sample_timer: time between samples in units of the timer.
* @ai_convert_count: conversion counter.
* @ai_convert_timer: time between conversion in units of the timer.
* @counter_int_enabs: counter interrupt enable mask for MISC2 register.
* @adc_items: number of items in ADC poll list.
* @rps_buf: DMA buffer used to hold ADC (RPS1) program.
* @ana_buf: DMA buffer used to receive ADC data and hold DAC data.
* @dac_wbuf: pointer to logical adrs of DMA buffer used to hold DAC data.
* @dacpol: image of DAC polarity register.
* @trim_setpoint: images of TrimDAC setpoints.
* @i2c_adrs: I2C device address for onboard EEPROM (board rev dependent)
*/
struct s626_private {
u8 ai_cmd_running;
unsigned int ai_sample_timer;
int ai_convert_count;
unsigned int ai_convert_timer;
u16 counter_int_enabs;
u8 adc_items;
struct s626_buffer_dma rps_buf;
struct s626_buffer_dma ana_buf;
u32 *dac_wbuf;
u16 dacpol;
u8 trim_setpoint[12];
u32 i2c_adrs;
};
/* Counter overflow/index event flag masks for RDMISC2. */
#define S626_INDXMASK(C) (1 << (((C) > 2) ? ((C) * 2 - 1) : ((C) * 2 + 4)))
#define S626_OVERMASK(C) (1 << (((C) > 2) ? ((C) * 2 + 5) : ((C) * 2 + 10)))
/*
 * Enable/disable a function or test status bit(s) that are accessed
 * through Main Control Registers 1 or 2. The high halfword of the value
 * written acts as a write-enable mask for the corresponding low halfword
 * bits (cf. s626_mc_enable()/s626_mc_disable() below), so individual bits
 * can be set or cleared without a read-modify-write.
 */
static void s626_mc_enable(struct comedi_device *dev,
unsigned int cmd, unsigned int reg)
{
unsigned int val = (cmd << 16) | cmd;
writel(val, dev->mmio + reg);
}
static void s626_mc_disable(struct comedi_device *dev,
unsigned int cmd, unsigned int reg)
{
writel(cmd << 16, dev->mmio + reg);
}
static bool s626_mc_test(struct comedi_device *dev,
unsigned int cmd, unsigned int reg)
{
unsigned int val;
val = readl(dev->mmio + reg);
return (val & cmd) ? true : false;
}
#define S626_BUGFIX_STREG(REGADRS) ((REGADRS) - 4)
/* Write a time slot control record to TSL2. */
#define S626_VECTPORT(VECTNUM) (S626_P_TSL2 + ((VECTNUM) << 2))
static const struct comedi_lrange s626_range_table = {
2, {
BIP_RANGE(5),
BIP_RANGE(10)
}
};
/*
* Execute a DEBI transfer. This must be called from within a critical section.
*/
static void s626_debi_transfer(struct comedi_device *dev)
{
static const int timeout = 10000;
int i;
/* Initiate upload of shadow RAM to DEBI control register */
s626_mc_enable(dev, S626_MC2_UPLD_DEBI, S626_P_MC2);
/*
* Wait for completion of upload from shadow RAM to
* DEBI control register.
*/
for (i = 0; i < timeout; i++) {
if (s626_mc_test(dev, S626_MC2_UPLD_DEBI, S626_P_MC2))
break;
udelay(1);
}
if (i == timeout)
dev_err(dev->class_dev,
"Timeout while uploading to DEBI control register\n");
/* Wait until DEBI transfer is done */
for (i = 0; i < timeout; i++) {
if (!(readl(dev->mmio + S626_P_PSR) & S626_PSR_DEBI_S))
break;
udelay(1);
}
if (i == timeout)
dev_err(dev->class_dev, "DEBI transfer timeout\n");
}
/*
* Read a value from a gate array register.
*/
static u16 s626_debi_read(struct comedi_device *dev, u16 addr)
{
/* Set up DEBI control register value in shadow RAM */
writel(S626_DEBI_CMD_RDWORD | addr, dev->mmio + S626_P_DEBICMD);
/* Execute the DEBI transfer. */
s626_debi_transfer(dev);
return readl(dev->mmio + S626_P_DEBIAD);
}
/*
* Write a value to a gate array register.
*/
static void s626_debi_write(struct comedi_device *dev, u16 addr,
u16 wdata)
{
/* Set up DEBI control register value in shadow RAM */
writel(S626_DEBI_CMD_WRWORD | addr, dev->mmio + S626_P_DEBICMD);
writel(wdata, dev->mmio + S626_P_DEBIAD);
/* Execute the DEBI transfer. */
s626_debi_transfer(dev);
}
/*
* Replace the specified bits in a gate array register. Imports: mask
* specifies bits that are to be preserved, wdata is new value to be
* or'd with the masked original.
*/
static void s626_debi_replace(struct comedi_device *dev, unsigned int addr,
unsigned int mask, unsigned int wdata)
{
unsigned int val;
addr &= 0xffff;
writel(S626_DEBI_CMD_RDWORD | addr, dev->mmio + S626_P_DEBICMD);
s626_debi_transfer(dev);
writel(S626_DEBI_CMD_WRWORD | addr, dev->mmio + S626_P_DEBICMD);
val = readl(dev->mmio + S626_P_DEBIAD);
val &= mask;
val |= wdata;
writel(val & 0xffff, dev->mmio + S626_P_DEBIAD);
s626_debi_transfer(dev);
}
/* ************** EEPROM ACCESS FUNCTIONS ************** */
static int s626_i2c_handshake_eoc(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context)
{
bool status;
status = s626_mc_test(dev, S626_MC2_UPLD_IIC, S626_P_MC2);
if (status)
return 0;
return -EBUSY;
}
static int s626_i2c_handshake(struct comedi_device *dev, u32 val)
{
unsigned int ctrl;
int ret;
/* Write I2C command to I2C Transfer Control shadow register */
writel(val, dev->mmio + S626_P_I2CCTRL);
/*
* Upload I2C shadow registers into working registers and
* wait for upload confirmation.
*/
s626_mc_enable(dev, S626_MC2_UPLD_IIC, S626_P_MC2);
ret = comedi_timeout(dev, NULL, NULL, s626_i2c_handshake_eoc, 0);
if (ret)
return ret;
/* Wait until I2C bus transfer is finished or an error occurs */
do {
ctrl = readl(dev->mmio + S626_P_I2CCTRL);
} while ((ctrl & (S626_I2C_BUSY | S626_I2C_ERR)) == S626_I2C_BUSY);
/* Return non-zero if I2C error occurred */
return ctrl & S626_I2C_ERR;
}
/* Read u8 from EEPROM. */
static u8 s626_i2c_read(struct comedi_device *dev, u8 addr)
{
struct s626_private *devpriv = dev->private;
/*
* Send EEPROM target address:
* Byte2 = I2C command: write to I2C EEPROM device.
* Byte1 = EEPROM internal target address.
* Byte0 = Not sent.
*/
if (s626_i2c_handshake(dev, S626_I2C_B2(S626_I2C_ATTRSTART,
devpriv->i2c_adrs) |
S626_I2C_B1(S626_I2C_ATTRSTOP, addr) |
S626_I2C_B0(S626_I2C_ATTRNOP, 0)))
/* Abort function and declare error if handshake failed. */
return 0;
/*
* Execute EEPROM read:
* Byte2 = I2C command: read from I2C EEPROM device.
* Byte1 receives uint8_t from EEPROM.
* Byte0 = Not sent.
*/
if (s626_i2c_handshake(dev, S626_I2C_B2(S626_I2C_ATTRSTART,
(devpriv->i2c_adrs | 1)) |
S626_I2C_B1(S626_I2C_ATTRSTOP, 0) |
S626_I2C_B0(S626_I2C_ATTRNOP, 0)))
/* Abort function and declare error if handshake failed. */
return 0;
return (readl(dev->mmio + S626_P_I2CCTRL) >> 16) & 0xff;
}
/* *********** DAC FUNCTIONS *********** */
/* TrimDac LogicalChan-to-PhysicalChan mapping table. */
static const u8 s626_trimchan[] = { 10, 9, 8, 3, 2, 7, 6, 1, 0, 5, 4 };
/* TrimDac LogicalChan-to-EepromAdrs mapping table. */
static const u8 s626_trimadrs[] = {
0x40, 0x41, 0x42, 0x50, 0x51, 0x52, 0x53, 0x60, 0x61, 0x62, 0x63
};
enum {
s626_send_dac_wait_not_mc1_a2out,
s626_send_dac_wait_ssr_af2_out,
s626_send_dac_wait_fb_buffer2_msb_00,
s626_send_dac_wait_fb_buffer2_msb_ff
};
static int s626_send_dac_eoc(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context)
{
unsigned int status;
switch (context) {
case s626_send_dac_wait_not_mc1_a2out:
status = readl(dev->mmio + S626_P_MC1);
if (!(status & S626_MC1_A2OUT))
return 0;
break;
case s626_send_dac_wait_ssr_af2_out:
status = readl(dev->mmio + S626_P_SSR);
if (status & S626_SSR_AF2_OUT)
return 0;
break;
case s626_send_dac_wait_fb_buffer2_msb_00:
status = readl(dev->mmio + S626_P_FB_BUFFER2);
if (!(status & 0xff000000))
return 0;
break;
case s626_send_dac_wait_fb_buffer2_msb_ff:
status = readl(dev->mmio + S626_P_FB_BUFFER2);
if (status & 0xff000000)
return 0;
break;
default:
return -EINVAL;
}
return -EBUSY;
}
/*
* Private helper function: Transmit serial data to DAC via Audio
* channel 2. Assumes: (1) TSL2 slot records initialized, and (2)
* dacpol contains valid target image.
*/
static int s626_send_dac(struct comedi_device *dev, u32 val)
{
struct s626_private *devpriv = dev->private;
int ret;
/* START THE SERIAL CLOCK RUNNING ------------- */
/*
* Assert DAC polarity control and enable gating of DAC serial clock
* and audio bit stream signals. At this point in time we must be
* assured of being in time slot 0. If we are not in slot 0, the
* serial clock and audio stream signals will be disabled; this is
* because the following s626_debi_write statement (which enables
* signals to be passed through the gate array) would execute before
* the trailing edge of WS1/WS3 (which turns off the signals), thus
* causing the signals to be inactive during the DAC write.
*/
s626_debi_write(dev, S626_LP_DACPOL, devpriv->dacpol);
/* TRANSFER OUTPUT DWORD VALUE INTO A2'S OUTPUT FIFO ---------------- */
/* Copy DAC setpoint value to DAC's output DMA buffer. */
/* writel(val, dev->mmio + (uint32_t)devpriv->dac_wbuf); */
*devpriv->dac_wbuf = val;
/*
* Enable the output DMA transfer. This will cause the DMAC to copy
* the DAC's data value to A2's output FIFO. The DMA transfer will
* then immediately terminate because the protection address is
* reached upon transfer of the first DWORD value.
*/
s626_mc_enable(dev, S626_MC1_A2OUT, S626_P_MC1);
/* While the DMA transfer is executing ... */
/*
* Reset Audio2 output FIFO's underflow flag (along with any
* other FIFO underflow/overflow flags). When set, this flag
* will indicate that we have emerged from slot 0.
*/
writel(S626_ISR_AFOU, dev->mmio + S626_P_ISR);
/*
* Wait for the DMA transfer to finish so that there will be data
* available in the FIFO when time slot 1 tries to transfer a DWORD
* from the FIFO to the output buffer register. We test for DMA
* Done by polling the DMAC enable flag; this flag is automatically
* cleared when the transfer has finished.
*/
ret = comedi_timeout(dev, NULL, NULL, s626_send_dac_eoc,
s626_send_dac_wait_not_mc1_a2out);
if (ret) {
dev_err(dev->class_dev, "DMA transfer timeout\n");
return ret;
}
/* START THE OUTPUT STREAM TO THE TARGET DAC -------------------- */
/*
* FIFO data is now available, so we enable execution of time slots
* 1 and higher by clearing the EOS flag in slot 0. Note that SD3
* will be shifted in and stored in FB_BUFFER2 for end-of-slot-list
* detection.
*/
writel(S626_XSD2 | S626_RSD3 | S626_SIB_A2,
dev->mmio + S626_VECTPORT(0));
/*
* Wait for slot 1 to execute to ensure that the Packet will be
* transmitted. This is detected by polling the Audio2 output FIFO
* underflow flag, which will be set when slot 1 execution has
* finished transferring the DAC's data DWORD from the output FIFO
* to the output buffer register.
*/
ret = comedi_timeout(dev, NULL, NULL, s626_send_dac_eoc,
s626_send_dac_wait_ssr_af2_out);
if (ret) {
dev_err(dev->class_dev,
"TSL timeout waiting for slot 1 to execute\n");
return ret;
}
/*
* Set up to trap execution at slot 0 when the TSL sequencer cycles
* back to slot 0 after executing the EOS in slot 5. Also,
* simultaneously shift out and in the 0x00 that is ALWAYS the value
* stored in the last byte to be shifted out of the FIFO's DWORD
* buffer register.
*/
writel(S626_XSD2 | S626_XFIFO_2 | S626_RSD2 | S626_SIB_A2 | S626_EOS,
dev->mmio + S626_VECTPORT(0));
/* WAIT FOR THE TRANSACTION TO FINISH ----------------------- */
/*
* Wait for the TSL to finish executing all time slots before
* exiting this function. We must do this so that the next DAC
* write doesn't start, thereby enabling clock/chip select signals:
*
	 * 1. Before the TSL sequence cycles back to slot 0, which disables
	 *    the clock/cs signal gating and traps slot list execution. If we
	 *    have not yet finished slot 5, then the clock/cs signals are
	 *    still gated and we have not finished transmitting the stream.
*
* 2. While slots 2-5 are executing due to a late slot 0 trap. In
* this case, the slot sequence is currently repeating, but with
* clock/cs signals disabled. We must wait for slot 0 to trap
* execution before setting up the next DAC setpoint DMA transfer
* and enabling the clock/cs signals. To detect the end of slot 5,
* we test for the FB_BUFFER2 MSB contents to be equal to 0xFF. If
* the TSL has not yet finished executing slot 5 ...
*/
if (readl(dev->mmio + S626_P_FB_BUFFER2) & 0xff000000) {
/*
* The trap was set on time and we are still executing somewhere
* in slots 2-5, so we now wait for slot 0 to execute and trap
* TSL execution. This is detected when FB_BUFFER2 MSB changes
* from 0xFF to 0x00, which slot 0 causes to happen by shifting
* out/in on SD2 the 0x00 that is always referenced by slot 5.
*/
ret = comedi_timeout(dev, NULL, NULL, s626_send_dac_eoc,
s626_send_dac_wait_fb_buffer2_msb_00);
if (ret) {
dev_err(dev->class_dev,
"TSL timeout waiting for slot 0 to execute\n");
return ret;
}
}
/*
* Either (1) we were too late setting the slot 0 trap; the TSL
* sequencer restarted slot 0 before we could set the EOS trap flag,
* or (2) we were not late and execution is now trapped at slot 0.
* In either case, we must now change slot 0 so that it will store
* value 0xFF (instead of 0x00) to FB_BUFFER2 next time it executes.
* In order to do this, we reprogram slot 0 so that it will shift in
* SD3, which is driven only by a pull-up resistor.
*/
writel(S626_RSD3 | S626_SIB_A2 | S626_EOS,
dev->mmio + S626_VECTPORT(0));
/*
* Wait for slot 0 to execute, at which time the TSL is setup for
* the next DAC write. This is detected when FB_BUFFER2 MSB changes
* from 0x00 to 0xFF.
*/
ret = comedi_timeout(dev, NULL, NULL, s626_send_dac_eoc,
s626_send_dac_wait_fb_buffer2_msb_ff);
if (ret) {
dev_err(dev->class_dev,
"TSL timeout waiting for slot 0 to execute\n");
return ret;
}
return 0;
}
/*
* Private helper function: Write setpoint to an application DAC channel.
*/
static int s626_set_dac(struct comedi_device *dev,
u16 chan, int16_t dacdata)
{
struct s626_private *devpriv = dev->private;
u16 signmask;
u32 ws_image;
u32 val;
/*
* Adjust DAC data polarity and set up Polarity Control Register image.
*/
signmask = 1 << chan;
if (dacdata < 0) {
dacdata = -dacdata;
devpriv->dacpol |= signmask;
} else {
devpriv->dacpol &= ~signmask;
}
/* Limit DAC setpoint value to valid range. */
if ((u16)dacdata > 0x1FFF)
dacdata = 0x1FFF;
/*
* Set up TSL2 records (aka "vectors") for DAC update. Vectors V2
* and V3 transmit the setpoint to the target DAC. V4 and V5 send
* data to a non-existent TrimDac channel just to keep the clock
* running after sending data to the target DAC. This is necessary
* to eliminate the clock glitch that would otherwise occur at the
* end of the target DAC's serial data stream. When the sequence
* restarts at V0 (after executing V5), the gate array automatically
* disables gating for the DAC clock and all DAC chip selects.
*/
/* Choose DAC chip select to be asserted */
ws_image = (chan & 2) ? S626_WS1 : S626_WS2;
/* Slot 2: Transmit high data byte to target DAC */
writel(S626_XSD2 | S626_XFIFO_1 | ws_image,
dev->mmio + S626_VECTPORT(2));
/* Slot 3: Transmit low data byte to target DAC */
writel(S626_XSD2 | S626_XFIFO_0 | ws_image,
dev->mmio + S626_VECTPORT(3));
/* Slot 4: Transmit to non-existent TrimDac channel to keep clock */
writel(S626_XSD2 | S626_XFIFO_3 | S626_WS3,
dev->mmio + S626_VECTPORT(4));
/* Slot 5: Keep clock running after writing target DAC's low data byte */
writel(S626_XSD2 | S626_XFIFO_2 | S626_WS3 | S626_EOS,
dev->mmio + S626_VECTPORT(5));
/*
* Construct and transmit target DAC's serial packet:
* (A10D DDDD), (DDDD DDDD), (0x0F), (0x00) where A is chan<0>,
* and D<12:0> is the DAC setpoint. Append a WORD value (that writes
* to a non-existent TrimDac channel) that serves to keep the clock
* running after the packet has been sent to the target DAC.
*/
val = 0x0F000000; /* Continue clock after target DAC data
* (write to non-existent trimdac).
*/
val |= 0x00004000; /* Address the two main dual-DAC devices
* (TSL's chip select enables target device).
*/
val |= ((u32)(chan & 1) << 15); /* Address the DAC channel
* within the device.
*/
val |= (u32)dacdata; /* Include DAC setpoint data. */
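/*
 * Illustrative example: chan = 1, dacdata = 0x1000 gives
 * val = 0x0F000000 | 0x00004000 | 0x00008000 | 0x1000 = 0x0F00D000.
 */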
return s626_send_dac(dev, val);
}
static int s626_write_trim_dac(struct comedi_device *dev,
u8 logical_chan, u8 dac_data)
{
struct s626_private *devpriv = dev->private;
u32 chan;
/*
* Save the new setpoint in case the application needs to read it back
* later.
*/
devpriv->trim_setpoint[logical_chan] = dac_data;
/* Map logical channel number to physical channel number. */
chan = s626_trimchan[logical_chan];
/*
* Set up TSL2 records for TrimDac write operation. All slots shift
* 0xFF in from pulled-up SD3 so that the end of the slot sequence
* can be detected.
*/
/* Slot 2: Send high byte to target TrimDac */
writel(S626_XSD2 | S626_XFIFO_1 | S626_WS3,
dev->mmio + S626_VECTPORT(2));
/* Slot 3: Send low byte to target TrimDac */
writel(S626_XSD2 | S626_XFIFO_0 | S626_WS3,
dev->mmio + S626_VECTPORT(3));
/* Slot 4: Send NOP high byte to DAC0 to keep clock running */
writel(S626_XSD2 | S626_XFIFO_3 | S626_WS1,
dev->mmio + S626_VECTPORT(4));
/* Slot 5: Send NOP low byte to DAC0 */
writel(S626_XSD2 | S626_XFIFO_2 | S626_WS1 | S626_EOS,
dev->mmio + S626_VECTPORT(5));
/*
* Construct and transmit target DAC's serial packet:
* (0000 AAAA), (DDDD DDDD), (0x00), (0x00) where A<3:0> is the
* DAC channel's address, and D<7:0> is the DAC setpoint. Append a
* WORD value (that writes a channel 0 NOP command to a non-existent
* main DAC channel) that serves to keep the clock running after the
* packet has been sent to the target DAC.
*/
/*
* Address the DAC channel within the trimdac device.
* Include DAC setpoint data.
*/
return s626_send_dac(dev, (chan << 8) | dac_data);
}
static int s626_load_trim_dacs(struct comedi_device *dev)
{
u8 i;
int ret;
/* Copy TrimDac setpoint values from EEPROM to TrimDacs. */
for (i = 0; i < ARRAY_SIZE(s626_trimchan); i++) {
ret = s626_write_trim_dac(dev, i,
s626_i2c_read(dev, s626_trimadrs[i]));
if (ret)
return ret;
}
return 0;
}
/* ****** COUNTER FUNCTIONS ******* */
/*
* All counter functions address a specific counter by means of the
* "Counter" argument, which is a logical counter number. The Counter
* argument may have any of the following legal values: 0=0A, 1=1A,
* 2=2A, 3=0B, 4=1B, 5=2B.
*/
/*
* Return/set a counter pair's latch trigger source. 0: On read
* access, 1: A index latches A, 2: B index latches B, 3: A overflow
* latches B.
*/
static void s626_set_latch_source(struct comedi_device *dev,
unsigned int chan, u16 value)
{
s626_debi_replace(dev, S626_LP_CRB(chan),
~(S626_CRBMSK_INTCTRL | S626_CRBMSK_LATCHSRC),
S626_SET_CRB_LATCHSRC(value));
}
/*
* Write value into counter preload register.
*/
static void s626_preload(struct comedi_device *dev,
unsigned int chan, u32 value)
{
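/*
 * The 24-bit preload value is written as two 16-bit DEBI words:
 * bits 15:0 to the counter's preload register and bits 23:16 to
 * the register two bytes above it.
 */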
s626_debi_write(dev, S626_LP_CNTR(chan), value);
s626_debi_write(dev, S626_LP_CNTR(chan) + 2, value >> 16);
}
/* ****** PRIVATE COUNTER FUNCTIONS ****** */
/*
* Reset a counter's index and overflow event capture flags.
*/
static void s626_reset_cap_flags(struct comedi_device *dev,
unsigned int chan)
{
u16 set;
set = S626_SET_CRB_INTRESETCMD(1);
if (chan < 3)
set |= S626_SET_CRB_INTRESET_A(1);
else
set |= S626_SET_CRB_INTRESET_B(1);
s626_debi_replace(dev, S626_LP_CRB(chan), ~S626_CRBMSK_INTCTRL, set);
}
/*
* Set the operating mode for the specified counter. The setup
* parameter is treated as a COUNTER_SETUP data type. The following
* parameters are programmable (all other parms are ignored): ClkMult,
* ClkPol, ClkEnab, IndexSrc, IndexPol, LoadSrc.
*/
static void s626_set_mode_a(struct comedi_device *dev,
unsigned int chan, u16 setup,
u16 disable_int_src)
{
struct s626_private *devpriv = dev->private;
u16 cra;
u16 crb;
unsigned int cntsrc, clkmult, clkpol;
/* Initialize CRA and CRB images. */
/* Preload trigger is passed through. */
cra = S626_SET_CRA_LOADSRC_A(S626_GET_STD_LOADSRC(setup));
/* IndexSrc is passed through. */
cra |= S626_SET_CRA_INDXSRC_A(S626_GET_STD_INDXSRC(setup));
/* Reset any pending CounterA event captures. */
crb = S626_SET_CRB_INTRESETCMD(1) | S626_SET_CRB_INTRESET_A(1);
/* Clock enable is passed through. */
crb |= S626_SET_CRB_CLKENAB_A(S626_GET_STD_CLKENAB(setup));
/* Force IntSrc to Disabled if disable_int_src is asserted. */
if (!disable_int_src)
cra |= S626_SET_CRA_INTSRC_A(S626_GET_STD_INTSRC(setup));
/* Populate all mode-dependent attributes of CRA & CRB images. */
clkpol = S626_GET_STD_CLKPOL(setup);
switch (S626_GET_STD_ENCMODE(setup)) {
case S626_ENCMODE_EXTENDER: /* Extender Mode: */
/* Force to Timer mode (Extender valid only for B counters). */
/* Fall through to case S626_ENCMODE_TIMER: */
case S626_ENCMODE_TIMER: /* Timer Mode: */
/* CntSrcA<1> selects system clock */
cntsrc = S626_CNTSRC_SYSCLK;
/* Count direction (CntSrcA<0>) obtained from ClkPol. */
cntsrc |= clkpol;
/* ClkPolA behaves as always-on clock enable. */
clkpol = 1;
/* ClkMult must be 1x. */
clkmult = S626_CLKMULT_1X;
break;
default: /* Counter Mode: */
/* Select ENC_C and ENC_D as clock/direction inputs. */
cntsrc = S626_CNTSRC_ENCODER;
/* Clock polarity is passed through. */
/* Force multiplier to x1 if not legal, else pass through. */
clkmult = S626_GET_STD_CLKMULT(setup);
if (clkmult == S626_CLKMULT_SPECIAL)
clkmult = S626_CLKMULT_1X;
break;
}
cra |= S626_SET_CRA_CNTSRC_A(cntsrc) | S626_SET_CRA_CLKPOL_A(clkpol) |
S626_SET_CRA_CLKMULT_A(clkmult);
/*
* Force positive index polarity if IndxSrc is software-driven only,
* otherwise pass it through.
*/
if (S626_GET_STD_INDXSRC(setup) != S626_INDXSRC_SOFT)
cra |= S626_SET_CRA_INDXPOL_A(S626_GET_STD_INDXPOL(setup));
/*
* If IntSrc has been forced to Disabled, update the MISC2 interrupt
* enable mask to indicate the counter interrupt is disabled.
*/
if (disable_int_src)
devpriv->counter_int_enabs &= ~(S626_OVERMASK(chan) |
S626_INDXMASK(chan));
/*
* While retaining CounterB and LatchSrc configurations, program the
* new counter operating mode.
*/
s626_debi_replace(dev, S626_LP_CRA(chan),
S626_CRAMSK_INDXSRC_B | S626_CRAMSK_CNTSRC_B, cra);
s626_debi_replace(dev, S626_LP_CRB(chan),
~(S626_CRBMSK_INTCTRL | S626_CRBMSK_CLKENAB_A), crb);
}
static void s626_set_mode_b(struct comedi_device *dev,
unsigned int chan, u16 setup,
u16 disable_int_src)
{
struct s626_private *devpriv = dev->private;
u16 cra;
u16 crb;
unsigned int cntsrc, clkmult, clkpol;
/* Initialize CRA and CRB images. */
/* IndexSrc is passed through. */
cra = S626_SET_CRA_INDXSRC_B(S626_GET_STD_INDXSRC(setup));
/* Reset event captures and disable interrupts. */
crb = S626_SET_CRB_INTRESETCMD(1) | S626_SET_CRB_INTRESET_B(1);
/* Clock enable is passed through. */
crb |= S626_SET_CRB_CLKENAB_B(S626_GET_STD_CLKENAB(setup));
/* Preload trigger source is passed through. */
crb |= S626_SET_CRB_LOADSRC_B(S626_GET_STD_LOADSRC(setup));
/* Force IntSrc to Disabled if disable_int_src is asserted. */
if (!disable_int_src)
crb |= S626_SET_CRB_INTSRC_B(S626_GET_STD_INTSRC(setup));
/* Populate all mode-dependent attributes of CRA & CRB images. */
clkpol = S626_GET_STD_CLKPOL(setup);
switch (S626_GET_STD_ENCMODE(setup)) {
case S626_ENCMODE_TIMER: /* Timer Mode: */
/* CntSrcB<1> selects system clock */
cntsrc = S626_CNTSRC_SYSCLK;
/* with direction (CntSrcB<0>) obtained from ClkPol. */
cntsrc |= clkpol;
/* ClkPolB behaves as always-on clock enable. */
clkpol = 1;
/* ClkMultB must be 1x. */
clkmult = S626_CLKMULT_1X;
break;
case S626_ENCMODE_EXTENDER: /* Extender Mode: */
/* CntSrcB source is OverflowA (same as "timer") */
cntsrc = S626_CNTSRC_SYSCLK;
/* with direction obtained from ClkPol. */
cntsrc |= clkpol;
/* ClkPolB controls IndexB -- always set to active. */
clkpol = 1;
/* ClkMultB selects OverflowA as the clock source. */
clkmult = S626_CLKMULT_SPECIAL;
break;
default: /* Counter Mode: */
/* Select ENC_C and ENC_D as clock/direction inputs. */
cntsrc = S626_CNTSRC_ENCODER;
/* ClkPol is passed through. */
/* Force ClkMult to x1 if not legal, otherwise pass through. */
clkmult = S626_GET_STD_CLKMULT(setup);
if (clkmult == S626_CLKMULT_SPECIAL)
clkmult = S626_CLKMULT_1X;
break;
}
cra |= S626_SET_CRA_CNTSRC_B(cntsrc);
crb |= S626_SET_CRB_CLKPOL_B(clkpol) | S626_SET_CRB_CLKMULT_B(clkmult);
/*
* Force positive index polarity if IndxSrc is software-driven only,
* otherwise pass it through.
*/
if (S626_GET_STD_INDXSRC(setup) != S626_INDXSRC_SOFT)
crb |= S626_SET_CRB_INDXPOL_B(S626_GET_STD_INDXPOL(setup));
/*
* If IntSrc has been forced to Disabled, update the MISC2 interrupt
* enable mask to indicate the counter interrupt is disabled.
*/
if (disable_int_src)
devpriv->counter_int_enabs &= ~(S626_OVERMASK(chan) |
S626_INDXMASK(chan));
/*
* While retaining CounterA and LatchSrc configurations, program the
* new counter operating mode.
*/
s626_debi_replace(dev, S626_LP_CRA(chan),
~(S626_CRAMSK_INDXSRC_B | S626_CRAMSK_CNTSRC_B), cra);
s626_debi_replace(dev, S626_LP_CRB(chan),
S626_CRBMSK_CLKENAB_A | S626_CRBMSK_LATCHSRC, crb);
}
static void s626_set_mode(struct comedi_device *dev,
unsigned int chan,
u16 setup, u16 disable_int_src)
{
if (chan < 3)
s626_set_mode_a(dev, chan, setup, disable_int_src);
else
s626_set_mode_b(dev, chan, setup, disable_int_src);
}
/*
* Return/set a counter's enable. enab: 0=always enabled, 1=enabled by index.
*/
static void s626_set_enable(struct comedi_device *dev,
unsigned int chan, u16 enab)
{
unsigned int mask = S626_CRBMSK_INTCTRL;
unsigned int set;
if (chan < 3) {
mask |= S626_CRBMSK_CLKENAB_A;
set = S626_SET_CRB_CLKENAB_A(enab);
} else {
mask |= S626_CRBMSK_CLKENAB_B;
set = S626_SET_CRB_CLKENAB_B(enab);
}
s626_debi_replace(dev, S626_LP_CRB(chan), ~mask, set);
}
/*
* Return/set the event that will trigger transfer of the preload
* register into the counter. 0=ThisCntr_Index, 1=ThisCntr_Overflow,
* 2=OverflowA (B counters only), 3=disabled.
*/
static void s626_set_load_trig(struct comedi_device *dev,
unsigned int chan, u16 trig)
{
u16 reg;
u16 mask;
u16 set;
if (chan < 3) {
reg = S626_LP_CRA(chan);
mask = S626_CRAMSK_LOADSRC_A;
set = S626_SET_CRA_LOADSRC_A(trig);
} else {
reg = S626_LP_CRB(chan);
mask = S626_CRBMSK_LOADSRC_B | S626_CRBMSK_INTCTRL;
set = S626_SET_CRB_LOADSRC_B(trig);
}
s626_debi_replace(dev, reg, ~mask, set);
}
/*
* Return/set counter interrupt source and clear any captured
* index/overflow events. int_source: 0=Disabled, 1=OverflowOnly,
* 2=IndexOnly, 3=IndexAndOverflow.
*/
static void s626_set_int_src(struct comedi_device *dev,
unsigned int chan, u16 int_source)
{
struct s626_private *devpriv = dev->private;
u16 cra_reg = S626_LP_CRA(chan);
u16 crb_reg = S626_LP_CRB(chan);
if (chan < 3) {
/* Reset any pending counter overflow or index captures */
s626_debi_replace(dev, crb_reg, ~S626_CRBMSK_INTCTRL,
S626_SET_CRB_INTRESETCMD(1) |
S626_SET_CRB_INTRESET_A(1));
/* Program counter interrupt source */
s626_debi_replace(dev, cra_reg, ~S626_CRAMSK_INTSRC_A,
S626_SET_CRA_INTSRC_A(int_source));
} else {
u16 crb;
/* Cache writeable CRB register image */
crb = s626_debi_read(dev, crb_reg);
crb &= ~S626_CRBMSK_INTCTRL;
/* Reset any pending counter overflow or index captures */
s626_debi_write(dev, crb_reg,
crb | S626_SET_CRB_INTRESETCMD(1) |
S626_SET_CRB_INTRESET_B(1));
/* Program counter interrupt source */
s626_debi_write(dev, crb_reg,
(crb & ~S626_CRBMSK_INTSRC_B) |
S626_SET_CRB_INTSRC_B(int_source));
}
/* Update MISC2 interrupt enable mask. */
devpriv->counter_int_enabs &= ~(S626_OVERMASK(chan) |
S626_INDXMASK(chan));
switch (int_source) {
case 0:
default:
break;
case 1:
devpriv->counter_int_enabs |= S626_OVERMASK(chan);
break;
case 2:
devpriv->counter_int_enabs |= S626_INDXMASK(chan);
break;
case 3:
devpriv->counter_int_enabs |= (S626_OVERMASK(chan) |
S626_INDXMASK(chan));
break;
}
}
/*
* Generate an index pulse.
*/
static void s626_pulse_index(struct comedi_device *dev,
unsigned int chan)
{
if (chan < 3) {
u16 cra;
cra = s626_debi_read(dev, S626_LP_CRA(chan));
/* Pulse index */
s626_debi_write(dev, S626_LP_CRA(chan),
(cra ^ S626_CRAMSK_INDXPOL_A));
s626_debi_write(dev, S626_LP_CRA(chan), cra);
} else {
u16 crb;
crb = s626_debi_read(dev, S626_LP_CRB(chan));
crb &= ~S626_CRBMSK_INTCTRL;
/* Pulse index */
s626_debi_write(dev, S626_LP_CRB(chan),
(crb ^ S626_CRBMSK_INDXPOL_B));
s626_debi_write(dev, S626_LP_CRB(chan), crb);
}
}
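/*
 * Convert a raw FB_BUFFER1 value to a straight-binary sample: the
 * 14-bit ADC result occupies bits 31:18, and XOR-ing the sign bit
 * (0x2000) maps the two's complement reading onto 0x0000..0x3fff.
 */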
static unsigned int s626_ai_reg_to_uint(unsigned int data)
{
return ((data >> 18) & 0x3fff) ^ 0x2000;
}
static int s626_dio_set_irq(struct comedi_device *dev, unsigned int chan)
{
unsigned int group = chan / 16;
unsigned int mask = 1 << (chan - (16 * group));
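/* e.g. chan 21 maps to group 1 (port B), mask 0x0020 (bit 5) */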
unsigned int status;
/* set channel to capture positive edge */
status = s626_debi_read(dev, S626_LP_RDEDGSEL(group));
s626_debi_write(dev, S626_LP_WREDGSEL(group), mask | status);
/* enable interrupt on selected channel */
status = s626_debi_read(dev, S626_LP_RDINTSEL(group));
s626_debi_write(dev, S626_LP_WRINTSEL(group), mask | status);
/* enable edge capture write command */
s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_EDCAP);
/* enable edge capture on selected channel */
status = s626_debi_read(dev, S626_LP_RDCAPSEL(group));
s626_debi_write(dev, S626_LP_WRCAPSEL(group), mask | status);
return 0;
}
static int s626_dio_reset_irq(struct comedi_device *dev, unsigned int group,
unsigned int mask)
{
/* disable edge capture write command */
s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_NOEDCAP);
/* disable edge capture on selected channel */
s626_debi_write(dev, S626_LP_WRCAPSEL(group), mask);
return 0;
}
static int s626_dio_clear_irq(struct comedi_device *dev)
{
unsigned int group;
/* disable edge capture write command */
s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_NOEDCAP);
/* clear all dio pending events and interrupt */
for (group = 0; group < S626_DIO_BANKS; group++)
s626_debi_write(dev, S626_LP_WRCAPSEL(group), 0xffff);
return 0;
}
static void s626_handle_dio_interrupt(struct comedi_device *dev,
u16 irqbit, u8 group)
{
struct s626_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_cmd *cmd = &s->async->cmd;
s626_dio_reset_irq(dev, group, irqbit);
if (devpriv->ai_cmd_running) {
/* check if interrupt is an ai acquisition start trigger */
if ((irqbit >> (cmd->start_arg - (16 * group))) == 1 &&
cmd->start_src == TRIG_EXT) {
/* Start executing the RPS program */
s626_mc_enable(dev, S626_MC1_ERPS1, S626_P_MC1);
if (cmd->scan_begin_src == TRIG_EXT)
s626_dio_set_irq(dev, cmd->scan_begin_arg);
}
if ((irqbit >> (cmd->scan_begin_arg - (16 * group))) == 1 &&
cmd->scan_begin_src == TRIG_EXT) {
/* Trigger ADC scan loop start */
s626_mc_enable(dev, S626_MC2_ADC_RPS, S626_P_MC2);
if (cmd->convert_src == TRIG_EXT) {
devpriv->ai_convert_count = cmd->chanlist_len;
s626_dio_set_irq(dev, cmd->convert_arg);
}
if (cmd->convert_src == TRIG_TIMER) {
devpriv->ai_convert_count = cmd->chanlist_len;
s626_set_enable(dev, 5, S626_CLKENAB_ALWAYS);
}
}
if ((irqbit >> (cmd->convert_arg - (16 * group))) == 1 &&
cmd->convert_src == TRIG_EXT) {
/* Trigger ADC scan loop start */
s626_mc_enable(dev, S626_MC2_ADC_RPS, S626_P_MC2);
devpriv->ai_convert_count--;
if (devpriv->ai_convert_count > 0)
s626_dio_set_irq(dev, cmd->convert_arg);
}
}
}
static void s626_check_dio_interrupts(struct comedi_device *dev)
{
u16 irqbit;
u8 group;
for (group = 0; group < S626_DIO_BANKS; group++) {
/* read interrupt type */
irqbit = s626_debi_read(dev, S626_LP_RDCAPFLG(group));
/* check if interrupt is generated from dio channels */
if (irqbit) {
s626_handle_dio_interrupt(dev, irqbit, group);
return;
}
}
}
static void s626_check_counter_interrupts(struct comedi_device *dev)
{
struct s626_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
u16 irqbit;
/* read interrupt type */
irqbit = s626_debi_read(dev, S626_LP_RDMISC2);
/* check interrupt on counters */
if (irqbit & S626_IRQ_COINT1A) {
/* clear interrupt capture flag */
s626_reset_cap_flags(dev, 0);
}
if (irqbit & S626_IRQ_COINT2A) {
/* clear interrupt capture flag */
s626_reset_cap_flags(dev, 1);
}
if (irqbit & S626_IRQ_COINT3A) {
/* clear interrupt capture flag */
s626_reset_cap_flags(dev, 2);
}
if (irqbit & S626_IRQ_COINT1B) {
/* clear interrupt capture flag */
s626_reset_cap_flags(dev, 3);
}
if (irqbit & S626_IRQ_COINT2B) {
/* clear interrupt capture flag */
s626_reset_cap_flags(dev, 4);
if (devpriv->ai_convert_count > 0) {
devpriv->ai_convert_count--;
if (devpriv->ai_convert_count == 0)
s626_set_enable(dev, 4, S626_CLKENAB_INDEX);
if (cmd->convert_src == TRIG_TIMER) {
/* Trigger ADC scan loop start */
s626_mc_enable(dev, S626_MC2_ADC_RPS,
S626_P_MC2);
}
}
}
if (irqbit & S626_IRQ_COINT3B) {
/* clear interrupt capture flag */
s626_reset_cap_flags(dev, 5);
if (cmd->scan_begin_src == TRIG_TIMER) {
/* Trigger ADC scan loop start */
s626_mc_enable(dev, S626_MC2_ADC_RPS, S626_P_MC2);
}
if (cmd->convert_src == TRIG_TIMER) {
devpriv->ai_convert_count = cmd->chanlist_len;
s626_set_enable(dev, 4, S626_CLKENAB_ALWAYS);
}
}
}
static bool s626_handle_eos_interrupt(struct comedi_device *dev)
{
struct s626_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
/*
* Init ptr to DMA buffer that holds new ADC data. We skip the
* first 32-bit entry in the buffer because it contains junk data
* from the final ADC of the previous poll list scan.
*/
u32 *readaddr = (u32 *)devpriv->ana_buf.logical_base + 1;
int i;
/* get the data and hand it over to comedi */
for (i = 0; i < cmd->chanlist_len; i++) {
unsigned short tempdata;
/*
* Convert ADC data to 16-bit integer values and copy
* to application buffer.
*/
tempdata = s626_ai_reg_to_uint(*readaddr);
readaddr++;
comedi_buf_write_samples(s, &tempdata, 1);
}
if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
async->events |= COMEDI_CB_EOA;
if (async->events & COMEDI_CB_CANCEL_MASK)
devpriv->ai_cmd_running = 0;
if (devpriv->ai_cmd_running && cmd->scan_begin_src == TRIG_EXT)
s626_dio_set_irq(dev, cmd->scan_begin_arg);
comedi_handle_events(dev, s);
return !devpriv->ai_cmd_running;
}
static irqreturn_t s626_irq_handler(int irq, void *d)
{
struct comedi_device *dev = d;
unsigned long flags;
u32 irqtype, irqstatus;
if (!dev->attached)
return IRQ_NONE;
/* lock to avoid race with comedi_poll */
spin_lock_irqsave(&dev->spinlock, flags);
/* save interrupt enable register state */
irqstatus = readl(dev->mmio + S626_P_IER);
/* read interrupt type */
irqtype = readl(dev->mmio + S626_P_ISR);
/* disable master interrupt */
writel(0, dev->mmio + S626_P_IER);
/* clear interrupt */
writel(irqtype, dev->mmio + S626_P_ISR);
switch (irqtype) {
case S626_IRQ_RPS1: /* end_of_scan occurs */
if (s626_handle_eos_interrupt(dev))
irqstatus = 0;
break;
case S626_IRQ_GPIO3: /* check dio and counter interrupt */
/* s626_dio_clear_irq(dev); */
s626_check_dio_interrupts(dev);
s626_check_counter_interrupts(dev);
break;
}
/* enable interrupt */
writel(irqstatus, dev->mmio + S626_P_IER);
spin_unlock_irqrestore(&dev->spinlock, flags);
return IRQ_HANDLED;
}
/*
* This function builds the RPS program for hardware driven acquisition.
*/
static void s626_reset_adc(struct comedi_device *dev, u8 *ppl)
{
struct s626_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_cmd *cmd = &s->async->cmd;
u32 *rps;
u32 jmp_adrs;
u16 i;
u16 n;
u32 local_ppl;
/* Stop RPS program in case it is currently running */
s626_mc_disable(dev, S626_MC1_ERPS1, S626_P_MC1);
/* Set starting logical address to write RPS commands. */
rps = (u32 *)devpriv->rps_buf.logical_base;
/* Initialize RPS instruction pointer */
writel((u32)devpriv->rps_buf.physical_base,
dev->mmio + S626_P_RPSADDR1);
/* Construct RPS program in rps_buf DMA buffer */
if (cmd->scan_begin_src != TRIG_FOLLOW) {
/* Wait for Start trigger. */
*rps++ = S626_RPS_PAUSE | S626_RPS_SIGADC;
*rps++ = S626_RPS_CLRSIGNAL | S626_RPS_SIGADC;
}
/*
* SAA7146 BUG WORKAROUND Do a dummy DEBI Write. This is necessary
* because the first RPS DEBI Write following a non-RPS DEBI write
* seems to always fail. If we don't do this dummy write, the ADC
* gain might not be set to the value required for the first slot in
* the poll list; the ADC gain would instead remain unchanged from
* the previously programmed value.
*/
/* Write DEBI Write command and address to shadow RAM. */
*rps++ = S626_RPS_LDREG | (S626_P_DEBICMD >> 2);
*rps++ = S626_DEBI_CMD_WRWORD | S626_LP_GSEL;
*rps++ = S626_RPS_LDREG | (S626_P_DEBIAD >> 2);
/* Write DEBI immediate data to shadow RAM: */
*rps++ = S626_GSEL_BIPOLAR5V; /* arbitrary immediate data value. */
*rps++ = S626_RPS_CLRSIGNAL | S626_RPS_DEBI;
/* Reset "shadow RAM uploaded" flag. */
/* Invoke shadow RAM upload. */
*rps++ = S626_RPS_UPLOAD | S626_RPS_DEBI;
/* Wait for shadow upload to finish. */
*rps++ = S626_RPS_PAUSE | S626_RPS_DEBI;
/*
* Digitize all slots in the poll list. This is implemented as a
* for loop to limit the slot count to 16 in case the application
* forgot to set the S626_EOPL flag in the final slot.
*/
for (devpriv->adc_items = 0; devpriv->adc_items < 16;
devpriv->adc_items++) {
/*
* Convert application's poll list item to private board class
* format. Each app poll list item is a byte with the form
* (EOPL,x,x,RANGE,CHAN<3:0>), where RANGE code indicates 0 =
* +-10V, 1 = +-5V, and EOPL = End of Poll List marker.
*/
local_ppl = (*ppl << 8) | (*ppl & 0x10 ? S626_GSEL_BIPOLAR5V :
S626_GSEL_BIPOLAR10V);
/* Switch ADC analog gain. */
/* Write DEBI command and address to shadow RAM. */
*rps++ = S626_RPS_LDREG | (S626_P_DEBICMD >> 2);
*rps++ = S626_DEBI_CMD_WRWORD | S626_LP_GSEL;
/* Write DEBI immediate data to shadow RAM. */
*rps++ = S626_RPS_LDREG | (S626_P_DEBIAD >> 2);
*rps++ = local_ppl;
/* Reset "shadow RAM uploaded" flag. */
*rps++ = S626_RPS_CLRSIGNAL | S626_RPS_DEBI;
/* Invoke shadow RAM upload. */
*rps++ = S626_RPS_UPLOAD | S626_RPS_DEBI;
/* Wait for shadow upload to finish. */
*rps++ = S626_RPS_PAUSE | S626_RPS_DEBI;
/* Select ADC analog input channel. */
*rps++ = S626_RPS_LDREG | (S626_P_DEBICMD >> 2);
/* Write DEBI command and address to shadow RAM. */
*rps++ = S626_DEBI_CMD_WRWORD | S626_LP_ISEL;
*rps++ = S626_RPS_LDREG | (S626_P_DEBIAD >> 2);
/* Write DEBI immediate data to shadow RAM. */
*rps++ = local_ppl;
/* Reset "shadow RAM uploaded" flag. */
*rps++ = S626_RPS_CLRSIGNAL | S626_RPS_DEBI;
/* Invoke shadow RAM upload. */
*rps++ = S626_RPS_UPLOAD | S626_RPS_DEBI;
/* Wait for shadow upload to finish. */
*rps++ = S626_RPS_PAUSE | S626_RPS_DEBI;
/*
* Delay at least 10 microseconds for analog input settling.
* Instead of padding with NOPs, we use S626_RPS_JUMP
* instructions here; this allows us to produce a longer delay
* than is possible with NOPs because each S626_RPS_JUMP
* flushes the RPS' instruction prefetch pipeline.
*/
jmp_adrs =
(u32)devpriv->rps_buf.physical_base +
(u32)((unsigned long)rps -
(unsigned long)devpriv->rps_buf.logical_base);
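		/*
		 * Each iteration emits one two-word JUMP; the divide-by-2
		 * presumably reflects that each JUMP costs two RPS clocks,
		 * giving roughly the required 10 us total delay.
		 */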
for (i = 0; i < (10 * S626_RPSCLK_PER_US / 2); i++) {
jmp_adrs += 8; /* Repeat to implement time delay: */
/* Jump to next RPS instruction. */
*rps++ = S626_RPS_JUMP;
*rps++ = jmp_adrs;
}
if (cmd->convert_src != TRIG_NOW) {
/* Wait for Start trigger. */
*rps++ = S626_RPS_PAUSE | S626_RPS_SIGADC;
*rps++ = S626_RPS_CLRSIGNAL | S626_RPS_SIGADC;
}
/* Start ADC by pulsing GPIO1. */
/* Begin ADC Start pulse. */
*rps++ = S626_RPS_LDREG | (S626_P_GPIO >> 2);
*rps++ = S626_GPIO_BASE | S626_GPIO1_LO;
*rps++ = S626_RPS_NOP;
/* VERSION 2.03 CHANGE: STRETCH OUT ADC START PULSE. */
/* End ADC Start pulse. */
*rps++ = S626_RPS_LDREG | (S626_P_GPIO >> 2);
*rps++ = S626_GPIO_BASE | S626_GPIO1_HI;
/*
* Wait for ADC to complete (GPIO2 is asserted high when ADC not
* busy) and for data from previous conversion to shift into FB
* BUFFER 1 register.
*/
/* Wait for ADC done. */
*rps++ = S626_RPS_PAUSE | S626_RPS_GPIO2;
/* Transfer ADC data from FB BUFFER 1 register to DMA buffer. */
*rps++ = S626_RPS_STREG |
(S626_BUGFIX_STREG(S626_P_FB_BUFFER1) >> 2);
*rps++ = (u32)devpriv->ana_buf.physical_base +
(devpriv->adc_items << 2);
/*
* If this slot's EndOfPollList flag is set, all channels have
* now been processed.
*/
if (*ppl++ & S626_EOPL) {
devpriv->adc_items++; /* Adjust poll list item count. */
break; /* Exit poll list processing loop. */
}
}
/*
* VERSION 2.01 CHANGE: DELAY CHANGED FROM 250NS to 2US. Allow the
* ADC to stabilize for 2 microseconds before starting the final
* (dummy) conversion. This delay is necessary to allow sufficient
* time between last conversion finished and the start of the dummy
* conversion. Without this delay, the last conversion's data value
* is sometimes set to the previous conversion's data value.
*/
for (n = 0; n < (2 * S626_RPSCLK_PER_US); n++)
*rps++ = S626_RPS_NOP;
/*
* Start a dummy conversion to cause the data from the last
* conversion of interest to be shifted in.
*/
/* Begin ADC Start pulse. */
*rps++ = S626_RPS_LDREG | (S626_P_GPIO >> 2);
*rps++ = S626_GPIO_BASE | S626_GPIO1_LO;
*rps++ = S626_RPS_NOP;
/* VERSION 2.03 CHANGE: STRETCH OUT ADC START PULSE. */
*rps++ = S626_RPS_LDREG | (S626_P_GPIO >> 2); /* End ADC Start pulse. */
*rps++ = S626_GPIO_BASE | S626_GPIO1_HI;
/*
* Wait for the data from the last conversion of interest to arrive
* in FB BUFFER 1 register.
*/
*rps++ = S626_RPS_PAUSE | S626_RPS_GPIO2; /* Wait for ADC done. */
/* Transfer final ADC data from FB BUFFER 1 register to DMA buffer. */
*rps++ = S626_RPS_STREG | (S626_BUGFIX_STREG(S626_P_FB_BUFFER1) >> 2);
*rps++ = (u32)devpriv->ana_buf.physical_base +
(devpriv->adc_items << 2);
/* Indicate ADC scan loop is finished. */
/* Signal ReadADC() that scan is done. */
/* *rps++= S626_RPS_CLRSIGNAL | S626_RPS_SIGADC; */
/* invoke interrupt */
if (devpriv->ai_cmd_running == 1)
*rps++ = S626_RPS_IRQ;
/* Restart RPS program at its beginning. */
*rps++ = S626_RPS_JUMP; /* Branch to start of RPS program. */
*rps++ = (u32)devpriv->rps_buf.physical_base;
/* End of RPS program build */
}
static int s626_ai_eoc(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context)
{
unsigned int status;
status = readl(dev->mmio + S626_P_PSR);
if (status & S626_PSR_GPIO2)
return 0;
return -EBUSY;
}
static int s626_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
u16 chan = CR_CHAN(insn->chanspec);
u16 range = CR_RANGE(insn->chanspec);
u16 adc_spec = 0;
u32 gpio_image;
u32 tmp;
int ret;
int n;
/*
* Convert application's ADC specification into form
* appropriate for register programming.
*/
if (range == 0)
adc_spec = (chan << 8) | (S626_GSEL_BIPOLAR5V);
else
adc_spec = (chan << 8) | (S626_GSEL_BIPOLAR10V);
/* Switch ADC analog gain. */
s626_debi_write(dev, S626_LP_GSEL, adc_spec); /* Set gain. */
/* Select ADC analog input channel. */
s626_debi_write(dev, S626_LP_ISEL, adc_spec); /* Select channel. */
for (n = 0; n < insn->n; n++) {
/* Delay 10 microseconds for analog input settling. */
usleep_range(10, 20);
/* Start ADC by pulsing GPIO1 low */
gpio_image = readl(dev->mmio + S626_P_GPIO);
/* Assert ADC Start command */
writel(gpio_image & ~S626_GPIO1_HI, dev->mmio + S626_P_GPIO);
/* and stretch it out */
writel(gpio_image & ~S626_GPIO1_HI, dev->mmio + S626_P_GPIO);
writel(gpio_image & ~S626_GPIO1_HI, dev->mmio + S626_P_GPIO);
/* Negate ADC Start command */
writel(gpio_image | S626_GPIO1_HI, dev->mmio + S626_P_GPIO);
/*
* Wait for ADC to complete (GPIO2 is asserted high when
* ADC not busy) and for data from previous conversion to
* shift into FB BUFFER 1 register.
*/
/* Wait for ADC done */
ret = comedi_timeout(dev, s, insn, s626_ai_eoc, 0);
if (ret)
return ret;
/* Fetch ADC data */
if (n != 0) {
tmp = readl(dev->mmio + S626_P_FB_BUFFER1);
data[n - 1] = s626_ai_reg_to_uint(tmp);
}
/*
* Allow the ADC to stabilize for 4 microseconds before
* starting the next (final) conversion. This delay is
* necessary to allow sufficient time between last
* conversion finished and the start of the next
* conversion. Without this delay, the last conversion's
* data value is sometimes set to the previous
* conversion's data value.
*/
udelay(4);
}
/*
* Start a dummy conversion to cause the data from the
* previous conversion to be shifted in.
*/
gpio_image = readl(dev->mmio + S626_P_GPIO);
/* Assert ADC Start command */
writel(gpio_image & ~S626_GPIO1_HI, dev->mmio + S626_P_GPIO);
/* and stretch it out */
writel(gpio_image & ~S626_GPIO1_HI, dev->mmio + S626_P_GPIO);
writel(gpio_image & ~S626_GPIO1_HI, dev->mmio + S626_P_GPIO);
/* Negate ADC Start command */
writel(gpio_image | S626_GPIO1_HI, dev->mmio + S626_P_GPIO);
/* Wait for ADC done and for the data to arrive in FB BUFFER 1 register. */
ret = comedi_timeout(dev, s, insn, s626_ai_eoc, 0);
if (ret)
return ret;
/* Fetch ADC data from audio interface's input shift register. */
if (n != 0) {
tmp = readl(dev->mmio + S626_P_FB_BUFFER1);
data[n - 1] = s626_ai_reg_to_uint(tmp);
}
return n;
}
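/*
 * Build the ADC poll list from the comedi channel list. Each entry
 * has the form (EOPL,x,x,RANGE,CHAN<3:0>) described in
 * s626_reset_adc(); the final entry is tagged with S626_EOPL to
 * mark the end of the list.
 */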
static int s626_ai_load_polllist(u8 *ppl, struct comedi_cmd *cmd)
{
int n;
for (n = 0; n < cmd->chanlist_len; n++) {
if (CR_RANGE(cmd->chanlist[n]) == 0)
ppl[n] = CR_CHAN(cmd->chanlist[n]) | S626_RANGE_5V;
else
ppl[n] = CR_CHAN(cmd->chanlist[n]) | S626_RANGE_10V;
}
if (n != 0)
ppl[n - 1] |= S626_EOPL;
return n;
}
static int s626_ai_inttrig(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int trig_num)
{
struct comedi_cmd *cmd = &s->async->cmd;
if (trig_num != cmd->start_arg)
return -EINVAL;
/* Start executing the RPS program */
s626_mc_enable(dev, S626_MC1_ERPS1, S626_P_MC1);
s->async->inttrig = NULL;
return 1;
}
/*
* This function doesn't require a particular form; this is just what
* happens to be used in some of the drivers. It should convert ns
* nanoseconds to a counter value suitable for programming the device.
* Also, it should adjust ns so that it corresponds to the actual time
* that the device will use.
*/
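/*
 * Illustrative example: with CMDF_ROUND_NEAREST and *nanosec = 100000,
 * the 500 ns base gives divider = 200, *nanosec stays at 100000, and
 * the returned counter preload value is 199.
 */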
static int s626_ns_to_timer(unsigned int *nanosec, unsigned int flags)
{
int divider, base;
base = 500; /* 2MHz internal clock */
switch (flags & CMDF_ROUND_MASK) {
case CMDF_ROUND_NEAREST:
default:
divider = DIV_ROUND_CLOSEST(*nanosec, base);
break;
case CMDF_ROUND_DOWN:
divider = (*nanosec) / base;
break;
case CMDF_ROUND_UP:
divider = DIV_ROUND_UP(*nanosec, base);
break;
}
*nanosec = base * divider;
return divider - 1;
}
static void s626_timer_load(struct comedi_device *dev,
unsigned int chan, int tick)
{
u16 setup =
/* Preload upon index. */
S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) |
/* Disable hardware index. */
S626_SET_STD_INDXSRC(S626_INDXSRC_SOFT) |
/* Operating mode is Timer. */
S626_SET_STD_ENCMODE(S626_ENCMODE_TIMER) |
/* Count direction is Down. */
S626_SET_STD_CLKPOL(S626_CNTDIR_DOWN) |
/* Clock multiplier is 1x. */
S626_SET_STD_CLKMULT(S626_CLKMULT_1X) |
/* Enabled by index */
S626_SET_STD_CLKENAB(S626_CLKENAB_INDEX);
u16 value_latchsrc = S626_LATCHSRC_A_INDXA;
/* uint16_t enab = S626_CLKENAB_ALWAYS; */
s626_set_mode(dev, chan, setup, false);
/* Set the preload register */
s626_preload(dev, chan, tick);
/*
* Software index pulse forces the preload register to load
* into the counter
*/
s626_set_load_trig(dev, chan, 0);
s626_pulse_index(dev, chan);
/* set reload on counter overflow */
s626_set_load_trig(dev, chan, 1);
/* set interrupt on overflow */
s626_set_int_src(dev, chan, S626_INTSRC_OVER);
s626_set_latch_source(dev, chan, value_latchsrc);
/* s626_set_enable(dev, chan, (uint16_t)(enab != 0)); */
}
/* TO COMPLETE */
static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct s626_private *devpriv = dev->private;
u8 ppl[16];
struct comedi_cmd *cmd = &s->async->cmd;
int tick;
if (devpriv->ai_cmd_running) {
dev_err(dev->class_dev,
"%s: Another ai_cmd is running\n", __func__);
return -EBUSY;
}
/* disable interrupt */
writel(0, dev->mmio + S626_P_IER);
/* clear interrupt request */
writel(S626_IRQ_RPS1 | S626_IRQ_GPIO3, dev->mmio + S626_P_ISR);
/* clear any pending interrupt */
s626_dio_clear_irq(dev);
/* s626_enc_clear_irq(dev); */
/* reset ai_cmd_running flag */
devpriv->ai_cmd_running = 0;
s626_ai_load_polllist(ppl, cmd);
devpriv->ai_cmd_running = 1;
devpriv->ai_convert_count = 0;
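	/*
	 * With TRIG_TIMER, counter channel 5 (2B) paces scan_begin and
	 * channel 4 (1B) paces convert; their overflow interrupts are
	 * serviced in s626_check_counter_interrupts().
	 */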
switch (cmd->scan_begin_src) {
case TRIG_FOLLOW:
break;
case TRIG_TIMER:
/*
* set a counter to generate adc trigger at scan_begin_arg
* interval
*/
tick = s626_ns_to_timer(&cmd->scan_begin_arg, cmd->flags);
/* load timer value and enable interrupt */
s626_timer_load(dev, 5, tick);
s626_set_enable(dev, 5, S626_CLKENAB_ALWAYS);
break;
case TRIG_EXT:
/* set the digital line and interrupt for scan trigger */
if (cmd->start_src != TRIG_EXT)
s626_dio_set_irq(dev, cmd->scan_begin_arg);
break;
}
switch (cmd->convert_src) {
case TRIG_NOW:
break;
case TRIG_TIMER:
/*
* set a counter to generate adc trigger at convert_arg
* interval
*/
tick = s626_ns_to_timer(&cmd->convert_arg, cmd->flags);
/* load timer value and enable interrupt */
s626_timer_load(dev, 4, tick);
s626_set_enable(dev, 4, S626_CLKENAB_INDEX);
break;
case TRIG_EXT:
/* set the digital line and interrupt for convert trigger */
if (cmd->scan_begin_src != TRIG_EXT &&
cmd->start_src == TRIG_EXT)
s626_dio_set_irq(dev, cmd->convert_arg);
break;
}
s626_reset_adc(dev, ppl);
switch (cmd->start_src) {
case TRIG_NOW:
/* Trigger ADC scan loop start */
/* s626_mc_enable(dev, S626_MC2_ADC_RPS, S626_P_MC2); */
/* Start executing the RPS program */
s626_mc_enable(dev, S626_MC1_ERPS1, S626_P_MC1);
s->async->inttrig = NULL;
break;
case TRIG_EXT:
/* configure DIO channel for acquisition trigger */
s626_dio_set_irq(dev, cmd->start_arg);
s->async->inttrig = NULL;
break;
case TRIG_INT:
s->async->inttrig = s626_ai_inttrig;
break;
}
/* enable interrupt */
writel(S626_IRQ_GPIO3 | S626_IRQ_RPS1, dev->mmio + S626_P_IER);
return 0;
}
static int s626_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
int err = 0;
unsigned int arg;
/* Step 1 : check if triggers are trivially valid */
err |= comedi_check_trigger_src(&cmd->start_src,
TRIG_NOW | TRIG_INT | TRIG_EXT);
err |= comedi_check_trigger_src(&cmd->scan_begin_src,
TRIG_TIMER | TRIG_EXT | TRIG_FOLLOW);
err |= comedi_check_trigger_src(&cmd->convert_src,
TRIG_TIMER | TRIG_EXT | TRIG_NOW);
err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
if (err)
return 1;
/* Step 2a : make sure trigger sources are unique */
err |= comedi_check_trigger_is_unique(cmd->start_src);
err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
err |= comedi_check_trigger_is_unique(cmd->convert_src);
err |= comedi_check_trigger_is_unique(cmd->stop_src);
/* Step 2b : and mutually compatible */
if (err)
return 2;
/* Step 3: check if arguments are trivially valid */
switch (cmd->start_src) {
case TRIG_NOW:
case TRIG_INT:
err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
break;
case TRIG_EXT:
err |= comedi_check_trigger_arg_max(&cmd->start_arg, 39);
break;
}
if (cmd->scan_begin_src == TRIG_EXT)
err |= comedi_check_trigger_arg_max(&cmd->scan_begin_arg, 39);
if (cmd->convert_src == TRIG_EXT)
err |= comedi_check_trigger_arg_max(&cmd->convert_arg, 39);
#define S626_MAX_SPEED 200000 /* in nanoseconds */
#define S626_MIN_SPEED 2000000000 /* in nanoseconds */
if (cmd->scan_begin_src == TRIG_TIMER) {
err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
S626_MAX_SPEED);
err |= comedi_check_trigger_arg_max(&cmd->scan_begin_arg,
S626_MIN_SPEED);
} else {
/*
* external trigger
* should be level/edge, hi/lo specification here
* should specify multiple external triggers
* err |= comedi_check_trigger_arg_max(&cmd->scan_begin_arg, 9);
*/
}
if (cmd->convert_src == TRIG_TIMER) {
err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
S626_MAX_SPEED);
err |= comedi_check_trigger_arg_max(&cmd->convert_arg,
S626_MIN_SPEED);
} else {
/*
* external trigger - see above
* err |= comedi_check_trigger_arg_max(&cmd->scan_begin_arg, 9);
*/
}
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
if (cmd->stop_src == TRIG_COUNT)
err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
else /* TRIG_NONE */
err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
if (err)
return 3;
/* step 4: fix up any arguments */
if (cmd->scan_begin_src == TRIG_TIMER) {
arg = cmd->scan_begin_arg;
s626_ns_to_timer(&arg, cmd->flags);
err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
}
if (cmd->convert_src == TRIG_TIMER) {
arg = cmd->convert_arg;
s626_ns_to_timer(&arg, cmd->flags);
err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
if (cmd->scan_begin_src == TRIG_TIMER) {
arg = cmd->convert_arg * cmd->scan_end_arg;
err |= comedi_check_trigger_arg_min(
&cmd->scan_begin_arg, arg);
}
}
if (err)
return 4;
return 0;
}
static int s626_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct s626_private *devpriv = dev->private;
/* Stop RPS program in case it is currently running */
s626_mc_disable(dev, S626_MC1_ERPS1, S626_P_MC1);
/* disable master interrupt */
writel(0, dev->mmio + S626_P_IER);
devpriv->ai_cmd_running = 0;
return 0;
}
static int s626_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
int i;
for (i = 0; i < insn->n; i++) {
s16 dacdata = (s16)data[i];
int ret;
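		/*
		 * Convert the unsigned comedi value (0x0000..0x3fff) to a
		 * signed setpoint around mid-scale; s626_set_dac() clamps
		 * the magnitude to 0x1fff.
		 */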
dacdata -= (0x1fff);
ret = s626_set_dac(dev, chan, dacdata);
if (ret)
return ret;
s->readback[chan] = data[i];
}
return insn->n;
}
/* *************** DIGITAL I/O FUNCTIONS *************** */
/*
* All DIO functions address a group of DIO channels by means of
* "group" argument. group may be 0, 1 or 2, which correspond to DIO
* ports A, B and C, respectively.
*/
static void s626_dio_init(struct comedi_device *dev)
{
u16 group;
/* Prepare to treat writes to WRCapSel as capture disables. */
s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_NOEDCAP);
/* For each group of sixteen channels ... */
for (group = 0; group < S626_DIO_BANKS; group++) {
/* Disable all interrupts */
s626_debi_write(dev, S626_LP_WRINTSEL(group), 0);
/* Disable all event captures */
s626_debi_write(dev, S626_LP_WRCAPSEL(group), 0xffff);
/* Init all DIOs to default edge polarity */
s626_debi_write(dev, S626_LP_WREDGSEL(group), 0);
/* Program all outputs to inactive state */
s626_debi_write(dev, S626_LP_WRDOUT(group), 0);
}
}
static int s626_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned long group = (unsigned long)s->private;
if (comedi_dio_update_state(s, data))
s626_debi_write(dev, S626_LP_WRDOUT(group), s->state);
data[1] = s626_debi_read(dev, S626_LP_RDDIN(group));
return insn->n;
}
static int s626_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned long group = (unsigned long)s->private;
int ret;
ret = comedi_dio_insn_config(dev, s, insn, data, 0);
if (ret)
return ret;
s626_debi_write(dev, S626_LP_WRDOUT(group), s->io_bits);
return insn->n;
}
/*
* This function initializes the counter value from data[0] and sets
* up the subdevice. Trigger and interrupt configuration remain to be
* completed.
*
* FIXME: data[0] is supposed to be an INSN_CONFIG_xxx constant indicating
* what is being configured, but this function appears to be using data[0]
* as a variable.
*/
static int s626_enc_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
u16 setup =
/* Preload upon index. */
S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) |
/* Disable hardware index. */
S626_SET_STD_INDXSRC(S626_INDXSRC_SOFT) |
/* Operating mode is Counter. */
S626_SET_STD_ENCMODE(S626_ENCMODE_COUNTER) |
/* Active high clock. */
S626_SET_STD_CLKPOL(S626_CLKPOL_POS) |
/* Clock multiplier is 1x. */
S626_SET_STD_CLKMULT(S626_CLKMULT_1X) |
/* Enabled by index */
S626_SET_STD_CLKENAB(S626_CLKENAB_INDEX);
/* uint16_t disable_int_src = true; */
/* uint32_t Preloadvalue; //Counter initial value */
u16 value_latchsrc = S626_LATCHSRC_AB_READ;
u16 enab = S626_CLKENAB_ALWAYS;
/* (data==NULL) ? (Preloadvalue=0) : (Preloadvalue=data[0]); */
s626_set_mode(dev, chan, setup, true);
s626_preload(dev, chan, data[0]);
s626_pulse_index(dev, chan);
s626_set_latch_source(dev, chan, value_latchsrc);
s626_set_enable(dev, chan, (enab != 0));
return insn->n;
}
static int s626_enc_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
u16 cntr_latch_reg = S626_LP_CNTR(chan);
int i;
for (i = 0; i < insn->n; i++) {
unsigned int val;
/*
* Read the counter's output latch LSW/MSW.
* Latches on LSW read.
*/
val = s626_debi_read(dev, cntr_latch_reg);
val |= (s626_debi_read(dev, cntr_latch_reg + 2) << 16);
data[i] = val;
}
return insn->n;
}
static int s626_enc_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
/* Set the preload register */
s626_preload(dev, chan, data[0]);
/*
* Software index pulse forces the preload register to load
* into the counter
*/
s626_set_load_trig(dev, chan, 0);
s626_pulse_index(dev, chan);
s626_set_load_trig(dev, chan, 2);
return 1;
}
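/*
 * Write a new image to the MISC2 register. MISC2 writes apparently
 * only take effect while writes are enabled, so the access is
 * bracketed by MISC1 write-enable/disable commands.
 */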
static void s626_write_misc2(struct comedi_device *dev, u16 new_image)
{
s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_WENABLE);
s626_debi_write(dev, S626_LP_WRMISC2, new_image);
s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_WDISABLE);
}
static void s626_counters_init(struct comedi_device *dev)
{
int chan;
u16 setup =
/* Preload upon index. */
S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) |
/* Disable hardware index. */
S626_SET_STD_INDXSRC(S626_INDXSRC_SOFT) |
/* Operating mode is counter. */
S626_SET_STD_ENCMODE(S626_ENCMODE_COUNTER) |
/* Active high clock. */
S626_SET_STD_CLKPOL(S626_CLKPOL_POS) |
/* Clock multiplier is 1x. */
S626_SET_STD_CLKMULT(S626_CLKMULT_1X) |
/* Enabled by index */
S626_SET_STD_CLKENAB(S626_CLKENAB_INDEX);
/*
* Disable all counter interrupts and clear any captured counter events.
*/
for (chan = 0; chan < S626_ENCODER_CHANNELS; chan++) {
s626_set_mode(dev, chan, setup, true);
s626_set_int_src(dev, chan, 0);
s626_reset_cap_flags(dev, chan);
s626_set_enable(dev, chan, S626_CLKENAB_ALWAYS);
}
}
static int s626_allocate_dma_buffers(struct comedi_device *dev)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct s626_private *devpriv = dev->private;
void *addr;
dma_addr_t appdma;
addr = dma_alloc_coherent(&pcidev->dev, S626_DMABUF_SIZE, &appdma,
GFP_KERNEL);
if (!addr)
return -ENOMEM;
devpriv->ana_buf.logical_base = addr;
devpriv->ana_buf.physical_base = appdma;
addr = dma_alloc_coherent(&pcidev->dev, S626_DMABUF_SIZE, &appdma,
GFP_KERNEL);
if (!addr)
return -ENOMEM;
devpriv->rps_buf.logical_base = addr;
devpriv->rps_buf.physical_base = appdma;
return 0;
}
static void s626_free_dma_buffers(struct comedi_device *dev)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct s626_private *devpriv = dev->private;
if (!devpriv)
return;
if (devpriv->rps_buf.logical_base)
dma_free_coherent(&pcidev->dev, S626_DMABUF_SIZE,
devpriv->rps_buf.logical_base,
devpriv->rps_buf.physical_base);
if (devpriv->ana_buf.logical_base)
dma_free_coherent(&pcidev->dev, S626_DMABUF_SIZE,
devpriv->ana_buf.logical_base,
devpriv->ana_buf.physical_base);
}
static int s626_initialize(struct comedi_device *dev)
{
struct s626_private *devpriv = dev->private;
dma_addr_t phys_buf;
u16 chan;
int i;
int ret;
/* Enable DEBI and audio pins, enable I2C interface */
s626_mc_enable(dev, S626_MC1_DEBI | S626_MC1_AUDIO | S626_MC1_I2C,
S626_P_MC1);
/*
* Configure DEBI operating mode
*
* Local bus is 16 bits wide
* Declare DEBI transfer timeout interval
* Set up byte lane steering
* Intel-compatible local bus (DEBI never times out)
*/
writel(S626_DEBI_CFG_SLAVE16 |
(S626_DEBI_TOUT << S626_DEBI_CFG_TOUT_BIT) | S626_DEBI_SWAP |
S626_DEBI_CFG_INTEL, dev->mmio + S626_P_DEBICFG);
/* Disable MMU paging */
writel(S626_DEBI_PAGE_DISABLE, dev->mmio + S626_P_DEBIPAGE);
/* Init GPIO so that ADC Start* is negated */
writel(S626_GPIO_BASE | S626_GPIO1_HI, dev->mmio + S626_P_GPIO);
/* I2C device address for onboard eeprom (revb) */
devpriv->i2c_adrs = 0xA0;
/*
* Issue an I2C ABORT command to halt any I2C
* operation in progress and reset BUSY flag.
*/
writel(S626_I2C_CLKSEL | S626_I2C_ABORT,
dev->mmio + S626_P_I2CSTAT);
s626_mc_enable(dev, S626_MC2_UPLD_IIC, S626_P_MC2);
ret = comedi_timeout(dev, NULL, NULL, s626_i2c_handshake_eoc, 0);
if (ret)
return ret;
/*
* Per SAA7146 data sheet, write to STATUS
* reg twice to reset all I2C error flags.
*/
for (i = 0; i < 2; i++) {
writel(S626_I2C_CLKSEL, dev->mmio + S626_P_I2CSTAT);
s626_mc_enable(dev, S626_MC2_UPLD_IIC, S626_P_MC2);
ret = comedi_timeout(dev, NULL,
NULL, s626_i2c_handshake_eoc, 0);
if (ret)
return ret;
}
/*
* Init audio interface functional attributes: set DAC/ADC
* serial clock rates, invert DAC serial clock so that
* DAC data setup times are satisfied, enable DAC serial
* clock out.
*/
writel(S626_ACON2_INIT, dev->mmio + S626_P_ACON2);
/*
* Set up TSL1 slot list, which is used to control the
* accumulation of ADC data: S626_RSD1 = shift data in on SD1.
* S626_SIB_A1 = store data byte at next available location
* in FB BUFFER1 register.
*/
writel(S626_RSD1 | S626_SIB_A1, dev->mmio + S626_P_TSL1);
writel(S626_RSD1 | S626_SIB_A1 | S626_EOS,
dev->mmio + S626_P_TSL1 + 4);
/* Enable TSL1 slot list so that it executes all the time */
writel(S626_ACON1_ADCSTART, dev->mmio + S626_P_ACON1);
/*
* Initialize RPS registers used for ADC
*/
/* Physical start of RPS program */
writel((u32)devpriv->rps_buf.physical_base,
dev->mmio + S626_P_RPSADDR1);
/* RPS program performs no explicit mem writes */
writel(0, dev->mmio + S626_P_RPSPAGE1);
/* Disable RPS timeouts */
writel(0, dev->mmio + S626_P_RPS1_TOUT);
#if 0
/*
* SAA7146 BUG WORKAROUND
*
* Initialize SAA7146 ADC interface to a known state by
* invoking ADCs until FB BUFFER 1 register shows that it
* is correctly receiving ADC data. This is necessary
* because the SAA7146 ADC interface does not start up in
* a defined state after a PCI reset.
*/
{
struct comedi_subdevice *s = dev->read_subdev;
u8 poll_list;
u16 adc_data;
u16 start_val;
u16 index;
unsigned int data[16];
/* Create a simple polling list for analog input channel 0 */
poll_list = S626_EOPL;
s626_reset_adc(dev, &poll_list);
/* Get initial ADC value */
s626_ai_rinsn(dev, s, NULL, data);
start_val = data[0];
/*
* VERSION 2.01 CHANGE: TIMEOUT ADDED TO PREVENT HANGED
* EXECUTION.
*
* Invoke ADCs until the new ADC value differs from the initial
* value or a timeout occurs. The timeout protects against the
* possibility that the driver is restarting and the ADC data is
* a fixed value resulting from the applied ADC analog input
* being unusually quiet or at the rail.
*/
for (index = 0; index < 500; index++) {
s626_ai_rinsn(dev, s, NULL, data);
adc_data = data[0];
if (adc_data != start_val)
break;
}
}
#endif /* SAA7146 BUG WORKAROUND */
/*
* Initialize the DAC interface
*/
/*
* Init Audio2's output DMAC attributes:
* burst length = 1 DWORD
* threshold = 1 DWORD.
*/
writel(0, dev->mmio + S626_P_PCI_BT_A);
/*
* Init Audio2's output DMA physical addresses. The protection
* address is set to 1 DWORD past the base address so that a
* single DWORD will be transferred each time a DMA transfer is
* enabled.
*/
phys_buf = devpriv->ana_buf.physical_base +
(S626_DAC_WDMABUF_OS * sizeof(u32));
writel((u32)phys_buf, dev->mmio + S626_P_BASEA2_OUT);
writel((u32)(phys_buf + sizeof(u32)),
dev->mmio + S626_P_PROTA2_OUT);
/*
* Cache Audio2's output DMA buffer logical address. This is
* where DAC data is buffered for A2 output DMA transfers.
*/
devpriv->dac_wbuf = (u32 *)devpriv->ana_buf.logical_base +
S626_DAC_WDMABUF_OS;
/*
* Audio2's output channel does not use paging. The
* protection violation handling bit is set so that the
* DMAC will automatically halt and its PCI address pointer
* will be reset when the protection address is reached.
*/
writel(8, dev->mmio + S626_P_PAGEA2_OUT);
/*
* Initialize time slot list 2 (TSL2), which is used to control
* the clock generation for and serialization of data to be sent
* to the DAC devices. Slot 0 is a NOP that is used to trap TSL
* execution; this permits other slots to be safely modified
* without first turning off the TSL sequencer (which is
* apparently impossible to do). Also, SD3 (which is driven by a
* pull-up resistor) is shifted in and stored to the MSB of
* FB_BUFFER2 to be used as evidence that the slot sequence has
* not yet finished executing.
*/
/* Slot 0: Trap TSL execution, shift 0xFF into FB_BUFFER2 */
writel(S626_XSD2 | S626_RSD3 | S626_SIB_A2 | S626_EOS,
dev->mmio + S626_VECTPORT(0));
/*
* Initialize slot 1, which is constant. Slot 1 causes a
* DWORD to be transferred from audio channel 2's output FIFO
* to the FIFO's output buffer so that it can be serialized
* and sent to the DAC during subsequent slots. All remaining
* slots are dynamically populated as required by the target
* DAC device.
*/
/* Slot 1: Fetch DWORD from Audio2's output FIFO */
writel(S626_LF_A2, dev->mmio + S626_VECTPORT(1));
/* Start DAC's audio interface (TSL2) running */
writel(S626_ACON1_DACSTART, dev->mmio + S626_P_ACON1);
/*
* Init Trim DACs to calibrated values. Do it twice because the
* SAA7146 audio channel does not always reset properly and
* sometimes causes the first few TrimDAC writes to malfunction.
*/
s626_load_trim_dacs(dev);
ret = s626_load_trim_dacs(dev);
if (ret)
return ret;
/*
* Manually init all gate array hardware in case this is a soft
* reset (we have no way of determining whether this is a warm
* or cold start). This is necessary because the gate array will
* reset only in response to a PCI hard reset; there is no soft
* reset function.
*/
/*
* Init all DAC outputs to 0V and init all DAC setpoint and
* polarity images.
*/
for (chan = 0; chan < S626_DAC_CHANNELS; chan++) {
ret = s626_set_dac(dev, chan, 0);
if (ret)
return ret;
}
/* Init counters */
s626_counters_init(dev);
/*
* Without modifying the state of the Battery Backup enable, disable
* the watchdog timer, set DIO channels 0-5 to operate in the
* standard DIO (vs. counter overflow) mode, disable the battery
* charger, and reset the watchdog interval selector to zero.
*/
s626_write_misc2(dev, (s626_debi_read(dev, S626_LP_RDMISC2) &
S626_MISC2_BATT_ENABLE));
/* Initialize the digital I/O subsystem */
s626_dio_init(dev);
return 0;
}
static int s626_auto_attach(struct comedi_device *dev,
unsigned long context_unused)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct s626_private *devpriv;
struct comedi_subdevice *s;
int ret;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
ret = comedi_pci_enable(dev);
if (ret)
return ret;
dev->mmio = pci_ioremap_bar(pcidev, 0);
if (!dev->mmio)
return -ENOMEM;
/* disable master interrupt */
writel(0, dev->mmio + S626_P_IER);
/* soft reset */
writel(S626_MC1_SOFT_RESET, dev->mmio + S626_P_MC1);
/* DMA FIXME DMA// */
ret = s626_allocate_dma_buffers(dev);
if (ret)
return ret;
if (pcidev->irq) {
ret = request_irq(pcidev->irq, s626_irq_handler, IRQF_SHARED,
dev->board_name, dev);
if (ret == 0)
dev->irq = pcidev->irq;
}
ret = comedi_alloc_subdevices(dev, 6);
if (ret)
return ret;
s = &dev->subdevices[0];
/* analog input subdevice */
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_DIFF;
s->n_chan = S626_ADC_CHANNELS;
s->maxdata = 0x3fff;
s->range_table = &s626_range_table;
s->len_chanlist = S626_ADC_CHANNELS;
s->insn_read = s626_ai_insn_read;
if (dev->irq) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
s->do_cmd = s626_ai_cmd;
s->do_cmdtest = s626_ai_cmdtest;
s->cancel = s626_ai_cancel;
}
s = &dev->subdevices[1];
/* analog output subdevice */
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
s->n_chan = S626_DAC_CHANNELS;
s->maxdata = 0x3fff;
s->range_table = &range_bipolar10;
s->insn_write = s626_ao_insn_write;
ret = comedi_alloc_subdev_readback(s);
if (ret)
return ret;
s = &dev->subdevices[2];
/* digital I/O subdevice */
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
s->n_chan = 16;
s->maxdata = 1;
s->io_bits = 0xffff;
s->private = (void *)0; /* DIO group 0 */
s->range_table = &range_digital;
s->insn_config = s626_dio_insn_config;
s->insn_bits = s626_dio_insn_bits;
s = &dev->subdevices[3];
/* digital I/O subdevice */
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
s->n_chan = 16;
s->maxdata = 1;
s->io_bits = 0xffff;
s->private = (void *)1; /* DIO group 1 */
s->range_table = &range_digital;
s->insn_config = s626_dio_insn_config;
s->insn_bits = s626_dio_insn_bits;
s = &dev->subdevices[4];
/* digital I/O subdevice */
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
s->n_chan = 16;
s->maxdata = 1;
s->io_bits = 0xffff;
s->private = (void *)2; /* DIO group 2 */
s->range_table = &range_digital;
s->insn_config = s626_dio_insn_config;
s->insn_bits = s626_dio_insn_bits;
s = &dev->subdevices[5];
/* encoder (counter) subdevice */
s->type = COMEDI_SUBD_COUNTER;
s->subdev_flags = SDF_WRITABLE | SDF_READABLE | SDF_LSAMPL;
s->n_chan = S626_ENCODER_CHANNELS;
s->maxdata = 0xffffff;
s->range_table = &range_unknown;
s->insn_config = s626_enc_insn_config;
s->insn_read = s626_enc_insn_read;
s->insn_write = s626_enc_insn_write;
return s626_initialize(dev);
}
static void s626_detach(struct comedi_device *dev)
{
struct s626_private *devpriv = dev->private;
if (devpriv) {
/* stop ai_command */
devpriv->ai_cmd_running = 0;
if (dev->mmio) {
/* interrupt mask */
/* Disable master interrupt */
writel(0, dev->mmio + S626_P_IER);
/* Clear board's IRQ status flag */
writel(S626_IRQ_GPIO3 | S626_IRQ_RPS1,
dev->mmio + S626_P_ISR);
/* Disable the watchdog timer and battery charger. */
s626_write_misc2(dev, 0);
/* Close all interfaces on 7146 device */
writel(S626_MC1_SHUTDOWN, dev->mmio + S626_P_MC1);
writel(S626_ACON1_BASE, dev->mmio + S626_P_ACON1);
}
}
comedi_pci_detach(dev);
s626_free_dma_buffers(dev);
}
static struct comedi_driver s626_driver = {
.driver_name = "s626",
.module = THIS_MODULE,
.auto_attach = s626_auto_attach,
.detach = s626_detach,
};
static int s626_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
return comedi_pci_auto_config(dev, &s626_driver, id->driver_data);
}
/*
* For devices with vendor:device id == 0x1131:0x7146 you must also
* specify the subvendor:subdevice ids, because otherwise the driver
* will conflict with Philips SAA7146 media/dvb based cards.
*/
static const struct pci_device_id s626_pci_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_PHILIPS, PCI_DEVICE_ID_PHILIPS_SAA7146,
0x6000, 0x0272) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, s626_pci_table);
static struct pci_driver s626_pci_driver = {
.name = "s626",
.id_table = s626_pci_table,
.probe = s626_pci_probe,
.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(s626_driver, s626_pci_driver);
MODULE_AUTHOR("Gianluca Palli <[email protected]>");
MODULE_DESCRIPTION("Sensoray 626 Comedi driver module");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/s626.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* vmk80xx.c
* Velleman USB Board Low-Level Driver
*
* Copyright (C) 2009 Manuel Gebele <[email protected]>, Germany
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <[email protected]>
*/
/*
* Driver: vmk80xx
* Description: Velleman USB Board Low-Level Driver
* Devices: [Velleman] K8055 (K8055/VM110), K8061 (K8061/VM140),
* VM110 (K8055/VM110), VM140 (K8061/VM140)
* Author: Manuel Gebele <[email protected]>
* Updated: Sun, 10 May 2009 11:14:59 +0200
* Status: works
*
* Supports:
* - analog input
* - analog output
* - digital input
* - digital output
* - counter
* - pwm
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/comedi/comedi_usb.h>
enum {
DEVICE_VMK8055,
DEVICE_VMK8061
};
#define VMK8055_DI_REG 0x00
#define VMK8055_DO_REG 0x01
#define VMK8055_AO1_REG 0x02
#define VMK8055_AO2_REG 0x03
#define VMK8055_AI1_REG 0x02
#define VMK8055_AI2_REG 0x03
#define VMK8055_CNT1_REG 0x04
#define VMK8055_CNT2_REG 0x06
#define VMK8061_CH_REG 0x01
#define VMK8061_DI_REG 0x01
#define VMK8061_DO_REG 0x01
#define VMK8061_PWM_REG1 0x01
#define VMK8061_PWM_REG2 0x02
#define VMK8061_CNT_REG 0x02
#define VMK8061_AO_REG 0x02
#define VMK8061_AI_REG1 0x02
#define VMK8061_AI_REG2 0x03
#define VMK8055_CMD_RST 0x00
#define VMK8055_CMD_DEB1_TIME 0x01
#define VMK8055_CMD_DEB2_TIME 0x02
#define VMK8055_CMD_RST_CNT1 0x03
#define VMK8055_CMD_RST_CNT2 0x04
#define VMK8055_CMD_WRT_AD 0x05
#define VMK8061_CMD_RD_AI 0x00
#define VMK8061_CMD_RD_ALL_AI 0x01 /* !non-active! */
#define VMK8061_CMD_SET_AO 0x02
#define VMK8061_CMD_SET_ALL_AO 0x03 /* !non-active! */
#define VMK8061_CMD_OUT_PWM 0x04
#define VMK8061_CMD_RD_DI 0x05
#define VMK8061_CMD_DO 0x06 /* !non-active! */
#define VMK8061_CMD_CLR_DO 0x07
#define VMK8061_CMD_SET_DO 0x08
#define VMK8061_CMD_RD_CNT 0x09 /* TODO: completely pointless? */
#define VMK8061_CMD_RST_CNT 0x0a /* TODO: completely pointless? */
#define VMK8061_CMD_RD_VERSION 0x0b /* internal usage */
#define VMK8061_CMD_RD_JMP_STAT 0x0c /* TODO: not implemented yet */
#define VMK8061_CMD_RD_PWR_STAT 0x0d /* internal usage */
#define VMK8061_CMD_RD_DO 0x0e
#define VMK8061_CMD_RD_AO 0x0f
#define VMK8061_CMD_RD_PWM 0x10
#define IC3_VERSION BIT(0)
#define IC6_VERSION BIT(1)
#define MIN_BUF_SIZE 64
#define PACKET_TIMEOUT 10000 /* ms */
enum vmk80xx_model {
VMK8055_MODEL,
VMK8061_MODEL
};
static const struct comedi_lrange vmk8061_range = {
2, {
UNI_RANGE(5),
UNI_RANGE(10)
}
};
struct vmk80xx_board {
const char *name;
enum vmk80xx_model model;
const struct comedi_lrange *range;
int ai_nchans;
unsigned int ai_maxdata;
int ao_nchans;
int di_nchans;
unsigned int cnt_maxdata;
int pwm_nchans;
unsigned int pwm_maxdata;
};
static const struct vmk80xx_board vmk80xx_boardinfo[] = {
[DEVICE_VMK8055] = {
.name = "K8055 (VM110)",
.model = VMK8055_MODEL,
.range = &range_unipolar5,
.ai_nchans = 2,
.ai_maxdata = 0x00ff,
.ao_nchans = 2,
.di_nchans = 6,
.cnt_maxdata = 0xffff,
},
[DEVICE_VMK8061] = {
.name = "K8061 (VM140)",
.model = VMK8061_MODEL,
.range = &vmk8061_range,
.ai_nchans = 8,
.ai_maxdata = 0x03ff,
.ao_nchans = 8,
.di_nchans = 8,
.cnt_maxdata = 0, /* unknown, device is not writeable */
.pwm_nchans = 1,
.pwm_maxdata = 0x03ff,
},
};
struct vmk80xx_private {
struct usb_endpoint_descriptor *ep_rx;
struct usb_endpoint_descriptor *ep_tx;
struct semaphore limit_sem;
unsigned char *usb_rx_buf;
unsigned char *usb_tx_buf;
enum vmk80xx_model model;
};
static void vmk80xx_do_bulk_msg(struct comedi_device *dev)
{
struct vmk80xx_private *devpriv = dev->private;
struct usb_device *usb = comedi_to_usb_dev(dev);
__u8 tx_addr;
__u8 rx_addr;
unsigned int tx_pipe;
unsigned int rx_pipe;
size_t tx_size;
size_t rx_size;
tx_addr = devpriv->ep_tx->bEndpointAddress;
rx_addr = devpriv->ep_rx->bEndpointAddress;
tx_pipe = usb_sndbulkpipe(usb, tx_addr);
rx_pipe = usb_rcvbulkpipe(usb, rx_addr);
tx_size = usb_endpoint_maxp(devpriv->ep_tx);
rx_size = usb_endpoint_maxp(devpriv->ep_rx);
usb_bulk_msg(usb, tx_pipe, devpriv->usb_tx_buf, tx_size, NULL,
PACKET_TIMEOUT);
usb_bulk_msg(usb, rx_pipe, devpriv->usb_rx_buf, rx_size, NULL,
PACKET_TIMEOUT);
}
static int vmk80xx_read_packet(struct comedi_device *dev)
{
struct vmk80xx_private *devpriv = dev->private;
struct usb_device *usb = comedi_to_usb_dev(dev);
struct usb_endpoint_descriptor *ep;
unsigned int pipe;
if (devpriv->model == VMK8061_MODEL) {
vmk80xx_do_bulk_msg(dev);
return 0;
}
ep = devpriv->ep_rx;
pipe = usb_rcvintpipe(usb, ep->bEndpointAddress);
return usb_interrupt_msg(usb, pipe, devpriv->usb_rx_buf,
usb_endpoint_maxp(ep), NULL,
PACKET_TIMEOUT);
}
static int vmk80xx_write_packet(struct comedi_device *dev, int cmd)
{
struct vmk80xx_private *devpriv = dev->private;
struct usb_device *usb = comedi_to_usb_dev(dev);
struct usb_endpoint_descriptor *ep;
unsigned int pipe;
devpriv->usb_tx_buf[0] = cmd;
if (devpriv->model == VMK8061_MODEL) {
vmk80xx_do_bulk_msg(dev);
return 0;
}
ep = devpriv->ep_tx;
pipe = usb_sndintpipe(usb, ep->bEndpointAddress);
return usb_interrupt_msg(usb, pipe, devpriv->usb_tx_buf,
usb_endpoint_maxp(ep), NULL,
PACKET_TIMEOUT);
}
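/*
 * Packet framing, as far as it can be inferred from the register
 * defines above (hedged; no vendor protocol document was consulted):
 * byte 0 of every transfer carries the command and the remaining
 * bytes are the "registers", e.g. a K8055 output packet carries the
 * digital output byte at offset VMK8055_DO_REG and the two analog
 * outputs at offsets VMK8055_AO1_REG and VMK8055_AO2_REG. The frame
 * length is fixed by the endpoint's max packet size in both cases.
 */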
static int vmk80xx_reset_device(struct comedi_device *dev)
{
struct vmk80xx_private *devpriv = dev->private;
size_t size;
int retval;
size = usb_endpoint_maxp(devpriv->ep_tx);
memset(devpriv->usb_tx_buf, 0, size);
retval = vmk80xx_write_packet(dev, VMK8055_CMD_RST);
if (retval)
return retval;
/* set outputs to known state as we cannot read them */
return vmk80xx_write_packet(dev, VMK8055_CMD_WRT_AD);
}
static int vmk80xx_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct vmk80xx_private *devpriv = dev->private;
int chan;
int reg[2];
int n;
down(&devpriv->limit_sem);
chan = CR_CHAN(insn->chanspec);
switch (devpriv->model) {
case VMK8055_MODEL:
if (!chan)
reg[0] = VMK8055_AI1_REG;
else
reg[0] = VMK8055_AI2_REG;
break;
case VMK8061_MODEL:
default:
reg[0] = VMK8061_AI_REG1;
reg[1] = VMK8061_AI_REG2;
devpriv->usb_tx_buf[0] = VMK8061_CMD_RD_AI;
devpriv->usb_tx_buf[VMK8061_CH_REG] = chan;
break;
}
for (n = 0; n < insn->n; n++) {
if (vmk80xx_read_packet(dev))
break;
if (devpriv->model == VMK8055_MODEL) {
data[n] = devpriv->usb_rx_buf[reg[0]];
continue;
}
/* VMK8061_MODEL */
data[n] = devpriv->usb_rx_buf[reg[0]] + 256 *
devpriv->usb_rx_buf[reg[1]];
}
up(&devpriv->limit_sem);
return n;
}
static int vmk80xx_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct vmk80xx_private *devpriv = dev->private;
int chan;
int cmd;
int reg;
int n;
down(&devpriv->limit_sem);
chan = CR_CHAN(insn->chanspec);
switch (devpriv->model) {
case VMK8055_MODEL:
cmd = VMK8055_CMD_WRT_AD;
if (!chan)
reg = VMK8055_AO1_REG;
else
reg = VMK8055_AO2_REG;
break;
default: /* NOTE: avoid compiler warnings */
cmd = VMK8061_CMD_SET_AO;
reg = VMK8061_AO_REG;
devpriv->usb_tx_buf[VMK8061_CH_REG] = chan;
break;
}
for (n = 0; n < insn->n; n++) {
devpriv->usb_tx_buf[reg] = data[n];
if (vmk80xx_write_packet(dev, cmd))
break;
}
up(&devpriv->limit_sem);
return n;
}
static int vmk80xx_ao_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct vmk80xx_private *devpriv = dev->private;
int chan;
int reg;
int n;
down(&devpriv->limit_sem);
chan = CR_CHAN(insn->chanspec);
reg = VMK8061_AO_REG - 1;
devpriv->usb_tx_buf[0] = VMK8061_CMD_RD_AO;
for (n = 0; n < insn->n; n++) {
if (vmk80xx_read_packet(dev))
break;
data[n] = devpriv->usb_rx_buf[reg + chan];
}
up(&devpriv->limit_sem);
return n;
}
static int vmk80xx_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct vmk80xx_private *devpriv = dev->private;
unsigned char *rx_buf;
int reg;
int retval;
down(&devpriv->limit_sem);
rx_buf = devpriv->usb_rx_buf;
if (devpriv->model == VMK8061_MODEL) {
reg = VMK8061_DI_REG;
devpriv->usb_tx_buf[0] = VMK8061_CMD_RD_DI;
} else {
reg = VMK8055_DI_REG;
}
retval = vmk80xx_read_packet(dev);
if (!retval) {
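/*
 * The K8055 packs its digital inputs non-contiguously into the
 * status byte; reading the mask arithmetic below, rx bits 4,5
 * map to channels 0,1, rx bit 0 to channel 2, and rx bits 6,7 to
 * channels 3,4 (an inference from the code, not a datasheet).
 */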
if (devpriv->model == VMK8055_MODEL)
data[1] = (((rx_buf[reg] >> 4) & 0x03) |
((rx_buf[reg] << 2) & 0x04) |
((rx_buf[reg] >> 3) & 0x18));
else
data[1] = rx_buf[reg];
retval = 2;
}
up(&devpriv->limit_sem);
return retval;
}
static int vmk80xx_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct vmk80xx_private *devpriv = dev->private;
unsigned char *rx_buf = devpriv->usb_rx_buf;
unsigned char *tx_buf = devpriv->usb_tx_buf;
int reg, cmd;
int ret = 0;
if (devpriv->model == VMK8061_MODEL) {
reg = VMK8061_DO_REG;
cmd = VMK8061_CMD_DO;
} else { /* VMK8055_MODEL */
reg = VMK8055_DO_REG;
cmd = VMK8055_CMD_WRT_AD;
}
down(&devpriv->limit_sem);
if (comedi_dio_update_state(s, data)) {
tx_buf[reg] = s->state;
ret = vmk80xx_write_packet(dev, cmd);
if (ret)
goto out;
}
if (devpriv->model == VMK8061_MODEL) {
tx_buf[0] = VMK8061_CMD_RD_DO;
ret = vmk80xx_read_packet(dev);
if (ret)
goto out;
data[1] = rx_buf[reg];
} else {
data[1] = s->state;
}
out:
up(&devpriv->limit_sem);
return ret ? ret : insn->n;
}
static int vmk80xx_cnt_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct vmk80xx_private *devpriv = dev->private;
int chan;
int reg[2];
int n;
down(&devpriv->limit_sem);
chan = CR_CHAN(insn->chanspec);
switch (devpriv->model) {
case VMK8055_MODEL:
if (!chan)
reg[0] = VMK8055_CNT1_REG;
else
reg[0] = VMK8055_CNT2_REG;
break;
case VMK8061_MODEL:
default:
reg[0] = VMK8061_CNT_REG;
reg[1] = VMK8061_CNT_REG;
devpriv->usb_tx_buf[0] = VMK8061_CMD_RD_CNT;
break;
}
for (n = 0; n < insn->n; n++) {
if (vmk80xx_read_packet(dev))
break;
if (devpriv->model == VMK8055_MODEL)
data[n] = devpriv->usb_rx_buf[reg[0]];
else /* VMK8061_MODEL */
data[n] = devpriv->usb_rx_buf[reg[0] * (chan + 1) + 1]
+ 256 * devpriv->usb_rx_buf[reg[1] * 2 + 2];
}
up(&devpriv->limit_sem);
return n;
}
static int vmk80xx_cnt_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct vmk80xx_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
int cmd;
int reg;
int ret;
down(&devpriv->limit_sem);
switch (data[0]) {
case INSN_CONFIG_RESET:
if (devpriv->model == VMK8055_MODEL) {
if (!chan) {
cmd = VMK8055_CMD_RST_CNT1;
reg = VMK8055_CNT1_REG;
} else {
cmd = VMK8055_CMD_RST_CNT2;
reg = VMK8055_CNT2_REG;
}
devpriv->usb_tx_buf[reg] = 0x00;
} else {
cmd = VMK8061_CMD_RST_CNT;
}
ret = vmk80xx_write_packet(dev, cmd);
break;
default:
ret = -EINVAL;
break;
}
up(&devpriv->limit_sem);
return ret ? ret : insn->n;
}
static int vmk80xx_cnt_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct vmk80xx_private *devpriv = dev->private;
unsigned long debtime;
unsigned long val;
int chan;
int cmd;
int n;
down(&devpriv->limit_sem);
chan = CR_CHAN(insn->chanspec);
if (!chan)
cmd = VMK8055_CMD_DEB1_TIME;
else
cmd = VMK8055_CMD_DEB2_TIME;
for (n = 0; n < insn->n; n++) {
debtime = data[n];
if (debtime == 0)
debtime = 1;
/* TODO: Prevent overflows */
if (debtime > 7450)
debtime = 7450;
val = int_sqrt(debtime * 1000 / 115);
if (((val + 1) * val) < debtime * 1000 / 115)
val += 1;
devpriv->usb_tx_buf[6 + chan] = val;
if (vmk80xx_write_packet(dev, cmd))
break;
}
up(&devpriv->limit_sem);
return n;
}
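/*
 * Worked example for the debounce conversion above (a reading of the
 * code, not a datasheet figure): the hardware debounce time appears
 * to be roughly 0.115 * val^2 ms, so for data[n] = 100 ms the driver
 * computes val = int_sqrt(100 * 1000 / 115) = int_sqrt(869) = 29 and
 * the round-up test (30 * 29 = 870 >= 869) leaves val at 29.
 */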
static int vmk80xx_pwm_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct vmk80xx_private *devpriv = dev->private;
unsigned char *tx_buf;
unsigned char *rx_buf;
int reg[2];
int n;
down(&devpriv->limit_sem);
tx_buf = devpriv->usb_tx_buf;
rx_buf = devpriv->usb_rx_buf;
reg[0] = VMK8061_PWM_REG1;
reg[1] = VMK8061_PWM_REG2;
tx_buf[0] = VMK8061_CMD_RD_PWM;
for (n = 0; n < insn->n; n++) {
if (vmk80xx_read_packet(dev))
break;
data[n] = rx_buf[reg[0]] + 4 * rx_buf[reg[1]];
}
up(&devpriv->limit_sem);
return n;
}
static int vmk80xx_pwm_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct vmk80xx_private *devpriv = dev->private;
unsigned char *tx_buf;
int reg[2];
int cmd;
int n;
down(&devpriv->limit_sem);
tx_buf = devpriv->usb_tx_buf;
reg[0] = VMK8061_PWM_REG1;
reg[1] = VMK8061_PWM_REG2;
cmd = VMK8061_CMD_OUT_PWM;
/*
* The following piece of code was translated from the inline
* assembler code in the DLL source code.
*
* asm
* mov eax, k ; k is the value (data[n])
* and al, 03h ; al are the lower 8 bits of eax
* mov lo, al ; lo is the low part (tx_buf[reg[0]])
* mov eax, k
* shr eax, 2 ; right shift eax register by 2
* mov hi, al ; hi is the high part (tx_buf[reg[1]])
* end;
*/
for (n = 0; n < insn->n; n++) {
tx_buf[reg[0]] = (unsigned char)(data[n] & 0x03);
tx_buf[reg[1]] = (unsigned char)(data[n] >> 2) & 0xff;
if (vmk80xx_write_packet(dev, cmd))
break;
}
up(&devpriv->limit_sem);
return n;
}
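/*
 * PWM value round trip, for clarity: a 10-bit value such as 683
 * (0x2ab) is split on write into tx_buf[reg[0]] = 683 & 0x03 = 3 and
 * tx_buf[reg[1]] = 683 >> 2 = 170; the read path above reassembles
 * it as 3 + 4 * 170 = 683.
 */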
static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
{
struct vmk80xx_private *devpriv = dev->private;
struct usb_interface *intf = comedi_to_usb_interface(dev);
struct usb_host_interface *iface_desc = intf->cur_altsetting;
struct usb_endpoint_descriptor *ep_desc;
int i;
if (iface_desc->desc.bNumEndpoints != 2)
return -ENODEV;
for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
ep_desc = &iface_desc->endpoint[i].desc;
if (usb_endpoint_is_int_in(ep_desc) ||
usb_endpoint_is_bulk_in(ep_desc)) {
if (!devpriv->ep_rx)
devpriv->ep_rx = ep_desc;
continue;
}
if (usb_endpoint_is_int_out(ep_desc) ||
usb_endpoint_is_bulk_out(ep_desc)) {
if (!devpriv->ep_tx)
devpriv->ep_tx = ep_desc;
continue;
}
}
if (!devpriv->ep_rx || !devpriv->ep_tx)
return -ENODEV;
if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx))
return -EINVAL;
return 0;
}
static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
{
struct vmk80xx_private *devpriv = dev->private;
size_t size;
size = max(usb_endpoint_maxp(devpriv->ep_rx), MIN_BUF_SIZE);
devpriv->usb_rx_buf = kzalloc(size, GFP_KERNEL);
if (!devpriv->usb_rx_buf)
return -ENOMEM;
size = max(usb_endpoint_maxp(devpriv->ep_tx), MIN_BUF_SIZE);
devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
if (!devpriv->usb_tx_buf)
return -ENOMEM;
return 0;
}
static int vmk80xx_init_subdevices(struct comedi_device *dev)
{
const struct vmk80xx_board *board = dev->board_ptr;
struct vmk80xx_private *devpriv = dev->private;
struct comedi_subdevice *s;
int n_subd;
int ret;
down(&devpriv->limit_sem);
if (devpriv->model == VMK8055_MODEL)
n_subd = 5;
else
n_subd = 6;
ret = comedi_alloc_subdevices(dev, n_subd);
if (ret) {
up(&devpriv->limit_sem);
return ret;
}
/* Analog input subdevice */
s = &dev->subdevices[0];
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_GROUND;
s->n_chan = board->ai_nchans;
s->maxdata = board->ai_maxdata;
s->range_table = board->range;
s->insn_read = vmk80xx_ai_insn_read;
/* Analog output subdevice */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
s->n_chan = board->ao_nchans;
s->maxdata = 0x00ff;
s->range_table = board->range;
s->insn_write = vmk80xx_ao_insn_write;
if (devpriv->model == VMK8061_MODEL) {
s->subdev_flags |= SDF_READABLE;
s->insn_read = vmk80xx_ao_insn_read;
}
/* Digital input subdevice */
s = &dev->subdevices[2];
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = board->di_nchans;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = vmk80xx_di_insn_bits;
/* Digital output subdevice */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 8;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = vmk80xx_do_insn_bits;
/* Counter subdevice */
s = &dev->subdevices[4];
s->type = COMEDI_SUBD_COUNTER;
s->subdev_flags = SDF_READABLE;
s->n_chan = 2;
s->maxdata = board->cnt_maxdata;
s->insn_read = vmk80xx_cnt_insn_read;
s->insn_config = vmk80xx_cnt_insn_config;
if (devpriv->model == VMK8055_MODEL) {
s->subdev_flags |= SDF_WRITABLE;
s->insn_write = vmk80xx_cnt_insn_write;
}
/* PWM subdevice */
if (devpriv->model == VMK8061_MODEL) {
s = &dev->subdevices[5];
s->type = COMEDI_SUBD_PWM;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = board->pwm_nchans;
s->maxdata = board->pwm_maxdata;
s->insn_read = vmk80xx_pwm_insn_read;
s->insn_write = vmk80xx_pwm_insn_write;
}
up(&devpriv->limit_sem);
return 0;
}
static int vmk80xx_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct usb_interface *intf = comedi_to_usb_interface(dev);
const struct vmk80xx_board *board = NULL;
struct vmk80xx_private *devpriv;
int ret;
if (context < ARRAY_SIZE(vmk80xx_boardinfo))
board = &vmk80xx_boardinfo[context];
if (!board)
return -ENODEV;
dev->board_ptr = board;
dev->board_name = board->name;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
devpriv->model = board->model;
sema_init(&devpriv->limit_sem, 8);
ret = vmk80xx_find_usb_endpoints(dev);
if (ret)
return ret;
ret = vmk80xx_alloc_usb_buffers(dev);
if (ret)
return ret;
usb_set_intfdata(intf, devpriv);
if (devpriv->model == VMK8055_MODEL)
vmk80xx_reset_device(dev);
return vmk80xx_init_subdevices(dev);
}
static void vmk80xx_detach(struct comedi_device *dev)
{
struct usb_interface *intf = comedi_to_usb_interface(dev);
struct vmk80xx_private *devpriv = dev->private;
if (!devpriv)
return;
down(&devpriv->limit_sem);
usb_set_intfdata(intf, NULL);
kfree(devpriv->usb_rx_buf);
kfree(devpriv->usb_tx_buf);
up(&devpriv->limit_sem);
}
static struct comedi_driver vmk80xx_driver = {
.module = THIS_MODULE,
.driver_name = "vmk80xx",
.auto_attach = vmk80xx_auto_attach,
.detach = vmk80xx_detach,
};
static int vmk80xx_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return comedi_usb_auto_config(intf, &vmk80xx_driver, id->driver_info);
}
static const struct usb_device_id vmk80xx_usb_id_table[] = {
{ USB_DEVICE(0x10cf, 0x5500), .driver_info = DEVICE_VMK8055 },
{ USB_DEVICE(0x10cf, 0x5501), .driver_info = DEVICE_VMK8055 },
{ USB_DEVICE(0x10cf, 0x5502), .driver_info = DEVICE_VMK8055 },
{ USB_DEVICE(0x10cf, 0x5503), .driver_info = DEVICE_VMK8055 },
{ USB_DEVICE(0x10cf, 0x8061), .driver_info = DEVICE_VMK8061 },
{ USB_DEVICE(0x10cf, 0x8062), .driver_info = DEVICE_VMK8061 },
{ USB_DEVICE(0x10cf, 0x8063), .driver_info = DEVICE_VMK8061 },
{ USB_DEVICE(0x10cf, 0x8064), .driver_info = DEVICE_VMK8061 },
{ USB_DEVICE(0x10cf, 0x8065), .driver_info = DEVICE_VMK8061 },
{ USB_DEVICE(0x10cf, 0x8066), .driver_info = DEVICE_VMK8061 },
{ USB_DEVICE(0x10cf, 0x8067), .driver_info = DEVICE_VMK8061 },
{ USB_DEVICE(0x10cf, 0x8068), .driver_info = DEVICE_VMK8061 },
{ }
};
MODULE_DEVICE_TABLE(usb, vmk80xx_usb_id_table);
static struct usb_driver vmk80xx_usb_driver = {
.name = "vmk80xx",
.id_table = vmk80xx_usb_id_table,
.probe = vmk80xx_usb_probe,
.disconnect = comedi_usb_auto_unconfig,
};
module_comedi_usb_driver(vmk80xx_driver, vmk80xx_usb_driver);
MODULE_AUTHOR("Manuel Gebele <[email protected]>");
MODULE_DESCRIPTION("Velleman USB Board Low-Level Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/vmk80xx.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* addi_apci_1500.c
* Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
*
* ADDI-DATA GmbH
* Dieselstrasse 3
* D-77833 Ottersweier
* Tel: +49(0)7223/9493-0
* Fax: +49(0)7223/9493-92
* http://www.addi-data.com
* [email protected]
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/comedi/comedi_pci.h>
#include "amcc_s5933.h"
#include "z8536.h"
/*
* PCI Bar 0 Register map (devpriv->amcc)
* see amcc_s5933.h for register and bit defines
*/
/*
* PCI Bar 1 Register map (dev->iobase)
* see z8536.h for Z8536 internal registers and bit defines
*/
#define APCI1500_Z8536_PORTC_REG 0x00
#define APCI1500_Z8536_PORTB_REG 0x01
#define APCI1500_Z8536_PORTA_REG 0x02
#define APCI1500_Z8536_CTRL_REG 0x03
/*
* PCI Bar 2 Register map (devpriv->addon)
*/
#define APCI1500_CLK_SEL_REG 0x00
#define APCI1500_DI_REG 0x00
#define APCI1500_DO_REG 0x02
struct apci1500_private {
unsigned long amcc;
unsigned long addon;
unsigned int clk_src;
/* Digital trigger configuration [0]=AND [1]=OR */
unsigned int pm[2]; /* Pattern Mask */
unsigned int pt[2]; /* Pattern Transition */
unsigned int pp[2]; /* Pattern Polarity */
};
static unsigned int z8536_read(struct comedi_device *dev, unsigned int reg)
{
unsigned long flags;
unsigned int val;
spin_lock_irqsave(&dev->spinlock, flags);
outb(reg, dev->iobase + APCI1500_Z8536_CTRL_REG);
val = inb(dev->iobase + APCI1500_Z8536_CTRL_REG);
spin_unlock_irqrestore(&dev->spinlock, flags);
return val;
}
static void z8536_write(struct comedi_device *dev,
unsigned int val, unsigned int reg)
{
unsigned long flags;
spin_lock_irqsave(&dev->spinlock, flags);
outb(reg, dev->iobase + APCI1500_Z8536_CTRL_REG);
outb(val, dev->iobase + APCI1500_Z8536_CTRL_REG);
spin_unlock_irqrestore(&dev->spinlock, flags);
}
static void z8536_reset(struct comedi_device *dev)
{
unsigned long flags;
/*
* Even if the state of the Z8536 is not known, the following
* sequence will reset it and put it in State 0.
*/
spin_lock_irqsave(&dev->spinlock, flags);
inb(dev->iobase + APCI1500_Z8536_CTRL_REG);
outb(0, dev->iobase + APCI1500_Z8536_CTRL_REG);
inb(dev->iobase + APCI1500_Z8536_CTRL_REG);
outb(0, dev->iobase + APCI1500_Z8536_CTRL_REG);
outb(1, dev->iobase + APCI1500_Z8536_CTRL_REG);
outb(0, dev->iobase + APCI1500_Z8536_CTRL_REG);
spin_unlock_irqrestore(&dev->spinlock, flags);
/* Disable all Ports and Counter/Timers */
z8536_write(dev, 0x00, Z8536_CFG_CTRL_REG);
/*
* Port A is connected to Digital Input channels 0-7.
* Configure the port to allow interrupt detection.
*/
z8536_write(dev, Z8536_PAB_MODE_PTS_BIT |
Z8536_PAB_MODE_SB |
Z8536_PAB_MODE_PMS_DISABLE,
Z8536_PA_MODE_REG);
z8536_write(dev, 0xff, Z8536_PB_DPP_REG);
z8536_write(dev, 0xff, Z8536_PA_DD_REG);
/*
* Port B is connected to Digital Input channels 8-13.
* Configure the port to allow interrupt detection.
*
* NOTE: Bits 7 and 6 of Port B are connected to internal
* diagnostic signals and bit 7 is inverted.
*/
z8536_write(dev, Z8536_PAB_MODE_PTS_BIT |
Z8536_PAB_MODE_SB |
Z8536_PAB_MODE_PMS_DISABLE,
Z8536_PB_MODE_REG);
z8536_write(dev, 0x7f, Z8536_PB_DPP_REG);
z8536_write(dev, 0xff, Z8536_PB_DD_REG);
/*
* Not sure what Port C is connected to...
*/
z8536_write(dev, 0x09, Z8536_PC_DPP_REG);
z8536_write(dev, 0x0e, Z8536_PC_DD_REG);
/*
* Clear and disable all interrupt sources.
*
* Just in case, the reset of the Z8536 should have already
* done this.
*/
z8536_write(dev, Z8536_CMD_CLR_IP_IUS, Z8536_PA_CMDSTAT_REG);
z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_PA_CMDSTAT_REG);
z8536_write(dev, Z8536_CMD_CLR_IP_IUS, Z8536_PB_CMDSTAT_REG);
z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_PB_CMDSTAT_REG);
z8536_write(dev, Z8536_CMD_CLR_IP_IUS, Z8536_CT_CMDSTAT_REG(0));
z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_CT_CMDSTAT_REG(0));
z8536_write(dev, Z8536_CMD_CLR_IP_IUS, Z8536_CT_CMDSTAT_REG(1));
z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_CT_CMDSTAT_REG(1));
z8536_write(dev, Z8536_CMD_CLR_IP_IUS, Z8536_CT_CMDSTAT_REG(2));
z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_CT_CMDSTAT_REG(2));
/* Disable all interrupts */
z8536_write(dev, 0x00, Z8536_INT_CTRL_REG);
}
static void apci1500_port_enable(struct comedi_device *dev, bool enable)
{
unsigned int cfg;
cfg = z8536_read(dev, Z8536_CFG_CTRL_REG);
if (enable)
cfg |= (Z8536_CFG_CTRL_PAE | Z8536_CFG_CTRL_PBE);
else
cfg &= ~(Z8536_CFG_CTRL_PAE | Z8536_CFG_CTRL_PBE);
z8536_write(dev, cfg, Z8536_CFG_CTRL_REG);
}
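/*
 * The trigger setup and cancel paths below always bracket their
 * pattern-register writes with apci1500_port_enable(dev, false)
 * ... apci1500_port_enable(dev, true), presumably because the
 * Z8536 pattern-match registers must not be rewritten while the
 * ports are enabled.
 */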
static void apci1500_timer_enable(struct comedi_device *dev,
unsigned int chan, bool enable)
{
unsigned int bit;
unsigned int cfg;
if (chan == 0)
bit = Z8536_CFG_CTRL_CT1E;
else if (chan == 1)
bit = Z8536_CFG_CTRL_CT2E;
else
bit = Z8536_CFG_CTRL_PCE_CT3E;
cfg = z8536_read(dev, Z8536_CFG_CTRL_REG);
if (enable) {
cfg |= bit;
} else {
cfg &= ~bit;
z8536_write(dev, 0x00, Z8536_CT_CMDSTAT_REG(chan));
}
z8536_write(dev, cfg, Z8536_CFG_CTRL_REG);
}
static bool apci1500_ack_irq(struct comedi_device *dev,
unsigned int reg)
{
unsigned int val;
val = z8536_read(dev, reg);
if ((val & Z8536_STAT_IE_IP) == Z8536_STAT_IE_IP) {
val &= 0x0f; /* preserve any write bits */
val |= Z8536_CMD_CLR_IP_IUS;
z8536_write(dev, val, reg);
return true;
}
return false;
}
static irqreturn_t apci1500_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct apci1500_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
unsigned short status = 0;
unsigned int val;
val = inl(devpriv->amcc + AMCC_OP_REG_INTCSR);
if (!(val & INTCSR_INTR_ASSERTED))
return IRQ_NONE;
if (apci1500_ack_irq(dev, Z8536_PA_CMDSTAT_REG))
status |= 0x01; /* port a event (inputs 0-7) */
if (apci1500_ack_irq(dev, Z8536_PB_CMDSTAT_REG)) {
/* Tests if this is an external error */
val = inb(dev->iobase + APCI1500_Z8536_PORTB_REG);
val &= 0xc0;
if (val) {
if (val & 0x80) /* voltage error */
status |= 0x40;
if (val & 0x40) /* short circuit error */
status |= 0x80;
} else {
status |= 0x02; /* port b event (inputs 8-13) */
}
}
/*
* NOTE: The 'status' returned by the sample matches the
* interrupt mask information from the APCI-1500 Users Manual.
*
* Mask Meaning
* ---------- ------------------------------------------
* 0b00000001 Event 1 has occurred
* 0b00000010 Event 2 has occurred
* 0b00000100 Counter/timer 1 has run down (not implemented)
* 0b00001000 Counter/timer 2 has run down (not implemented)
* 0b00010000 Counter 3 has run down (not implemented)
* 0b00100000 Watchdog has run down (not implemented)
* 0b01000000 Voltage error
* 0b10000000 Short-circuit error
*/
comedi_buf_write_samples(s, &status, 1);
comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
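/*
 * Decoding example for the status sample above: a value of 0x41
 * means event 1 (a Port A pattern match) occurred together with a
 * voltage error, while 0x02 alone is a plain Port B pattern match.
 */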
static int apci1500_di_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
/* Disables the main interrupt on the board */
z8536_write(dev, 0x00, Z8536_INT_CTRL_REG);
/* Disable Ports A & B */
apci1500_port_enable(dev, false);
/* Ack any pending interrupts */
apci1500_ack_irq(dev, Z8536_PA_CMDSTAT_REG);
apci1500_ack_irq(dev, Z8536_PB_CMDSTAT_REG);
/* Disable pattern interrupts */
z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_PA_CMDSTAT_REG);
z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_PB_CMDSTAT_REG);
/* Enable Ports A & B */
apci1500_port_enable(dev, true);
return 0;
}
static int apci1500_di_inttrig_start(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int trig_num)
{
struct apci1500_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
unsigned int pa_mode = Z8536_PAB_MODE_PMS_DISABLE;
unsigned int pb_mode = Z8536_PAB_MODE_PMS_DISABLE;
unsigned int pa_trig = trig_num & 0x01;
unsigned int pb_trig = (trig_num >> 1) & 0x01;
bool valid_trig = false;
unsigned int val;
if (trig_num != cmd->start_arg)
return -EINVAL;
/* Disable Ports A & B */
apci1500_port_enable(dev, false);
/* Set Port A for selected trigger pattern */
z8536_write(dev, devpriv->pm[pa_trig] & 0xff, Z8536_PA_PM_REG);
z8536_write(dev, devpriv->pt[pa_trig] & 0xff, Z8536_PA_PT_REG);
z8536_write(dev, devpriv->pp[pa_trig] & 0xff, Z8536_PA_PP_REG);
/* Set Port B for selected trigger pattern */
z8536_write(dev, (devpriv->pm[pb_trig] >> 8) & 0xff, Z8536_PB_PM_REG);
z8536_write(dev, (devpriv->pt[pb_trig] >> 8) & 0xff, Z8536_PB_PT_REG);
z8536_write(dev, (devpriv->pp[pb_trig] >> 8) & 0xff, Z8536_PB_PP_REG);
/* Set Port A trigger mode (if enabled) and enable interrupt */
if (devpriv->pm[pa_trig] & 0xff) {
pa_mode = pa_trig ? Z8536_PAB_MODE_PMS_AND
: Z8536_PAB_MODE_PMS_OR;
val = z8536_read(dev, Z8536_PA_MODE_REG);
val &= ~Z8536_PAB_MODE_PMS_MASK;
val |= (pa_mode | Z8536_PAB_MODE_IMO);
z8536_write(dev, val, Z8536_PA_MODE_REG);
z8536_write(dev, Z8536_CMD_SET_IE, Z8536_PA_CMDSTAT_REG);
valid_trig = true;
dev_dbg(dev->class_dev,
"Port A configured for %s mode pattern detection\n",
pa_trig ? "AND" : "OR");
}
/* Set Port B trigger mode (if enabled) and enable interrupt */
if (devpriv->pm[pb_trig] & 0xff00) {
pb_mode = pb_trig ? Z8536_PAB_MODE_PMS_AND
: Z8536_PAB_MODE_PMS_OR;
val = z8536_read(dev, Z8536_PB_MODE_REG);
val &= ~Z8536_PAB_MODE_PMS_MASK;
val |= (pb_mode | Z8536_PAB_MODE_IMO);
z8536_write(dev, val, Z8536_PB_MODE_REG);
z8536_write(dev, Z8536_CMD_SET_IE, Z8536_PB_CMDSTAT_REG);
valid_trig = true;
dev_dbg(dev->class_dev,
"Port B configured for %s mode pattern detection\n",
pb_trig ? "AND" : "OR");
}
/* Enable Ports A & B */
apci1500_port_enable(dev, true);
if (!valid_trig) {
dev_dbg(dev->class_dev,
"digital trigger %d is not configured\n", trig_num);
return -EINVAL;
}
/* Authorizes the main interrupt on the board */
z8536_write(dev, Z8536_INT_CTRL_MIE | Z8536_INT_CTRL_DLC,
Z8536_INT_CTRL_REG);
return 0;
}
static int apci1500_di_cmd(struct comedi_device *dev,
struct comedi_subdevice *s)
{
s->async->inttrig = apci1500_di_inttrig_start;
return 0;
}
static int apci1500_di_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
int err = 0;
/* Step 1 : check if triggers are trivially valid */
err |= comedi_check_trigger_src(&cmd->start_src, TRIG_INT);
err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW);
err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_NONE);
if (err)
return 1;
/* Step 2a : make sure trigger sources are unique */
/* Step 2b : and mutually compatible */
/* Step 3: check if arguments are trivially valid */
/*
* Internal start source triggers:
*
* 0 AND mode for Port A (digital inputs 0-7)
* AND mode for Port B (digital inputs 8-13 and internal signals)
*
* 1 OR mode for Port A (digital inputs 0-7)
* AND mode for Port B (digital inputs 8-13 and internal signals)
*
* 2 AND mode for Port A (digital inputs 0-7)
* OR mode for Port B (digital inputs 8-13 and internal signals)
*
* 3 OR mode for Port A (digital inputs 0-7)
* OR mode for Port B (digital inputs 8-13 and internal signals)
*/
err |= comedi_check_trigger_arg_max(&cmd->start_arg, 3);
err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
if (err)
return 3;
/* Step 4: fix up any arguments */
/* Step 5: check channel list if it exists */
return 0;
}
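/*
 * Hedged user-space sketch of a command that passes the cmdtest
 * above (assumes <comedilib.h> and <string.h>; the DI subdevice is
 * index 0 per apci1500_auto_attach below). The acquisition is then
 * started with comedi_internal_trigger(it, 0, 3), matching
 * start_arg.
 */
#if 0	/* example only, not built with the driver */
static void setup_di_cmd(comedi_cmd *cmd, unsigned int *chanlist)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->subdev = 0;		/* DI subdevice */
	cmd->start_src = TRIG_INT;
	cmd->start_arg = 3;		/* OR mode on Ports A and B */
	cmd->scan_begin_src = TRIG_EXT;	/* scan_begin_arg must be 0 */
	cmd->convert_src = TRIG_FOLLOW;
	cmd->scan_end_src = TRIG_COUNT;
	cmd->scan_end_arg = 1;		/* == chanlist_len */
	cmd->stop_src = TRIG_NONE;
	chanlist[0] = CR_PACK(0, 0, AREF_GROUND);
	cmd->chanlist = chanlist;
	cmd->chanlist_len = 1;
}
#endif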
/*
* The pattern-recognition logic must be configured before the digital
* input async command is started.
*
* Digital input channels 0 to 13 can generate interrupts. Channels 14
* and 15 are connected to internal board status/diagnostic signals.
*
* Channel 14 - Voltage error (the external supply is < 5V)
* Channel 15 - Short-circuit/overtemperature error
*
* data[0] : INSN_CONFIG_DIGITAL_TRIG
* data[1] : trigger number
* 0 = AND mode
* 1 = OR mode
* data[2] : configuration operation:
* COMEDI_DIGITAL_TRIG_DISABLE = no interrupts
* COMEDI_DIGITAL_TRIG_ENABLE_EDGES = edge interrupts
* COMEDI_DIGITAL_TRIG_ENABLE_LEVELS = level interrupts
* data[3] : left-shift for data[4] and data[5]
* data[4] : rising-edge/high level channels
* data[5] : falling-edge/low level channels
*/
static int apci1500_di_cfg_trig(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct apci1500_private *devpriv = dev->private;
unsigned int trig = data[1];
unsigned int shift = data[3];
unsigned int hi_mask;
unsigned int lo_mask;
unsigned int chan_mask;
unsigned int old_mask;
unsigned int pm;
unsigned int pt;
unsigned int pp;
unsigned int invalid_chan;
if (trig > 1) {
dev_dbg(dev->class_dev,
"invalid digital trigger number (0=AND, 1=OR)\n");
return -EINVAL;
}
if (shift <= 16) {
hi_mask = data[4] << shift;
lo_mask = data[5] << shift;
old_mask = (1U << shift) - 1;
invalid_chan = (data[4] | data[5]) >> (16 - shift);
} else {
hi_mask = 0;
lo_mask = 0;
old_mask = 0xffff;
invalid_chan = data[4] | data[5];
}
chan_mask = hi_mask | lo_mask;
if (invalid_chan) {
dev_dbg(dev->class_dev, "invalid digital trigger channel\n");
return -EINVAL;
}
pm = devpriv->pm[trig] & old_mask;
pt = devpriv->pt[trig] & old_mask;
pp = devpriv->pp[trig] & old_mask;
switch (data[2]) {
case COMEDI_DIGITAL_TRIG_DISABLE:
/* clear trigger configuration */
pm = 0;
pt = 0;
pp = 0;
break;
case COMEDI_DIGITAL_TRIG_ENABLE_EDGES:
pm |= chan_mask; /* enable channels */
pt |= chan_mask; /* enable edge detection */
pp |= hi_mask; /* rising-edge channels */
pp &= ~lo_mask; /* falling-edge channels */
break;
case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS:
pm |= chan_mask; /* enable channels */
pt &= ~chan_mask; /* enable level detection */
pp |= hi_mask; /* high level channels */
pp &= ~lo_mask; /* low level channels */
break;
default:
return -EINVAL;
}
/*
* The AND mode trigger can only have one channel (max) enabled
* for edge detection.
*/
if (trig == 0) {
int ret = 0;
unsigned int src;
src = pt & 0xff;
if (src)
ret |= comedi_check_trigger_is_unique(src);
src = (pt >> 8) & 0xff;
if (src)
ret |= comedi_check_trigger_is_unique(src);
if (ret) {
dev_dbg(dev->class_dev,
"invalid AND trigger configuration\n");
return ret;
}
}
/* save the trigger configuration */
devpriv->pm[trig] = pm;
devpriv->pt[trig] = pt;
devpriv->pp[trig] = pp;
return insn->n;
}
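/*
 * Hedged user-space sketch of the configuration described above
 * (assumes <comedilib.h> and <string.h>): enable the OR trigger
 * (trigger 1) for rising edges on digital input channels 0 and 1.
 */
#if 0	/* example only, not built with the driver */
static int cfg_or_trigger(comedi_t *it)
{
	comedi_insn insn;
	lsampl_t data[6] = {
		INSN_CONFIG_DIGITAL_TRIG,
		1,				/* trigger 1 = OR mode */
		COMEDI_DIGITAL_TRIG_ENABLE_EDGES,
		0,				/* no left-shift */
		0x3,				/* rising edge: chans 0,1 */
		0,				/* no falling-edge chans */
	};

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_CONFIG;
	insn.n = 6;
	insn.data = data;
	insn.subdev = 0;			/* DI subdevice */
	return comedi_do_insn(it, &insn);
}
#endif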
static int apci1500_di_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
switch (data[0]) {
case INSN_CONFIG_DIGITAL_TRIG:
return apci1500_di_cfg_trig(dev, s, insn, data);
default:
return -EINVAL;
}
}
static int apci1500_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct apci1500_private *devpriv = dev->private;
data[1] = inw(devpriv->addon + APCI1500_DI_REG);
return insn->n;
}
static int apci1500_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct apci1500_private *devpriv = dev->private;
if (comedi_dio_update_state(s, data))
outw(s->state, devpriv->addon + APCI1500_DO_REG);
data[1] = s->state;
return insn->n;
}
static int apci1500_timer_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct apci1500_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int val;
switch (data[0]) {
case INSN_CONFIG_ARM:
val = data[1] & s->maxdata;
z8536_write(dev, val & 0xff, Z8536_CT_RELOAD_LSB_REG(chan));
z8536_write(dev, (val >> 8) & 0xff,
Z8536_CT_RELOAD_MSB_REG(chan));
apci1500_timer_enable(dev, chan, true);
z8536_write(dev, Z8536_CT_CMDSTAT_GCB,
Z8536_CT_CMDSTAT_REG(chan));
break;
case INSN_CONFIG_DISARM:
apci1500_timer_enable(dev, chan, false);
break;
case INSN_CONFIG_GET_COUNTER_STATUS:
data[1] = 0;
val = z8536_read(dev, Z8536_CT_CMDSTAT_REG(chan));
if (val & Z8536_CT_STAT_CIP)
data[1] |= COMEDI_COUNTER_COUNTING;
if (val & Z8536_CT_CMDSTAT_GCB)
data[1] |= COMEDI_COUNTER_ARMED;
if (val & Z8536_STAT_IP) {
data[1] |= COMEDI_COUNTER_TERMINAL_COUNT;
apci1500_ack_irq(dev, Z8536_CT_CMDSTAT_REG(chan));
}
data[2] = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING |
COMEDI_COUNTER_TERMINAL_COUNT;
break;
case INSN_CONFIG_SET_COUNTER_MODE:
/* Simulate the 8254 timer modes */
switch (data[1]) {
case I8254_MODE0:
/* Interrupt on Terminal Count */
val = Z8536_CT_MODE_ECE |
Z8536_CT_MODE_DCS_ONESHOT;
break;
case I8254_MODE1:
/* Hardware Retriggerable One-Shot */
val = Z8536_CT_MODE_ETE |
Z8536_CT_MODE_DCS_ONESHOT;
break;
case I8254_MODE2:
/* Rate Generator */
val = Z8536_CT_MODE_CSC |
Z8536_CT_MODE_DCS_PULSE;
break;
case I8254_MODE3:
/* Square Wave Mode */
val = Z8536_CT_MODE_CSC |
Z8536_CT_MODE_DCS_SQRWAVE;
break;
case I8254_MODE4:
/* Software Triggered Strobe */
val = Z8536_CT_MODE_REB |
Z8536_CT_MODE_DCS_PULSE;
break;
case I8254_MODE5:
/* Hardware Triggered Strobe (watchdog) */
val = Z8536_CT_MODE_EOE |
Z8536_CT_MODE_ETE |
Z8536_CT_MODE_REB |
Z8536_CT_MODE_DCS_PULSE;
break;
default:
return -EINVAL;
}
apci1500_timer_enable(dev, chan, false);
z8536_write(dev, val, Z8536_CT_MODE_REG(chan));
break;
case INSN_CONFIG_SET_CLOCK_SRC:
if (data[1] > 2)
return -EINVAL;
devpriv->clk_src = data[1];
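/*
 * The add-on logic apparently uses select value 3, not 2, for the
 * slowest time base (see the GET_CLOCK_SRC cases below), hence the
 * remap.
 */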
if (devpriv->clk_src == 2)
devpriv->clk_src = 3;
outw(devpriv->clk_src, devpriv->addon + APCI1500_CLK_SEL_REG);
break;
case INSN_CONFIG_GET_CLOCK_SRC:
switch (devpriv->clk_src) {
case 0:
data[1] = 0; /* 111.86 kHz / 2 */
data[2] = 17879; /* 17879 ns (approx) */
break;
case 1:
data[1] = 1; /* 3.49 kHz / 2 */
data[2] = 573066; /* 573066 ns (approx) */
break;
case 3:
data[1] = 2; /* 1.747 kHz / 2 */
data[2] = 1164822; /* 1164822 ns (approx) */
break;
default:
return -EINVAL;
}
break;
case INSN_CONFIG_SET_GATE_SRC:
if (chan == 0)
return -EINVAL;
val = z8536_read(dev, Z8536_CT_MODE_REG(chan));
val &= Z8536_CT_MODE_EGE;
if (data[1] == 1)
val |= Z8536_CT_MODE_EGE;
else if (data[1] > 1)
return -EINVAL;
z8536_write(dev, val, Z8536_CT_MODE_REG(chan));
break;
case INSN_CONFIG_GET_GATE_SRC:
if (chan == 0)
return -EINVAL;
break;
default:
return -EINVAL;
}
return insn->n;
}
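/*
 * Hedged user-space sketch of the config protocol above (assumes
 * <comedilib.h> and <string.h>; the timer subdevice is index 2 per
 * apci1500_auto_attach below): program timer 0 as an 8254-style
 * rate generator and arm it with a reload value of 1000 ticks.
 */
#if 0	/* example only, not built with the driver */
static int start_rate_generator(comedi_t *it)
{
	comedi_insn insn;
	lsampl_t data[2];
	int ret;

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_CONFIG;
	insn.data = data;
	insn.subdev = 2;			/* timer subdevice */
	insn.chanspec = CR_PACK(0, 0, 0);

	data[0] = INSN_CONFIG_SET_COUNTER_MODE;
	data[1] = I8254_MODE2;			/* rate generator */
	insn.n = 2;
	ret = comedi_do_insn(it, &insn);
	if (ret < 0)
		return ret;

	data[0] = INSN_CONFIG_ARM;
	data[1] = 1000;				/* reload value */
	insn.n = 2;
	return comedi_do_insn(it, &insn);
}
#endif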
static int apci1500_timer_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int cmd;
cmd = z8536_read(dev, Z8536_CT_CMDSTAT_REG(chan));
cmd &= Z8536_CT_CMDSTAT_GCB; /* preserve gate */
cmd |= Z8536_CT_CMD_TCB; /* set trigger */
/* software trigger a timer, it only makes sense to do one write */
if (insn->n)
z8536_write(dev, cmd, Z8536_CT_CMDSTAT_REG(chan));
return insn->n;
}
static int apci1500_timer_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int cmd;
unsigned int val;
int i;
cmd = z8536_read(dev, Z8536_CT_CMDSTAT_REG(chan));
cmd &= Z8536_CT_CMDSTAT_GCB; /* preserve gate */
cmd |= Z8536_CT_CMD_RCC; /* set RCC */
for (i = 0; i < insn->n; i++) {
z8536_write(dev, cmd, Z8536_CT_CMDSTAT_REG(chan));
val = z8536_read(dev, Z8536_CT_VAL_MSB_REG(chan)) << 8;
val |= z8536_read(dev, Z8536_CT_VAL_LSB_REG(chan));
data[i] = val;
}
return insn->n;
}
static int apci1500_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct apci1500_private *devpriv;
struct comedi_subdevice *s;
int ret;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
ret = comedi_pci_enable(dev);
if (ret)
return ret;
dev->iobase = pci_resource_start(pcidev, 1);
devpriv->amcc = pci_resource_start(pcidev, 0);
devpriv->addon = pci_resource_start(pcidev, 2);
z8536_reset(dev);
if (pcidev->irq > 0) {
ret = request_irq(pcidev->irq, apci1500_interrupt, IRQF_SHARED,
dev->board_name, dev);
if (ret == 0)
dev->irq = pcidev->irq;
}
ret = comedi_alloc_subdevices(dev, 3);
if (ret)
return ret;
/* Digital Input subdevice */
s = &dev->subdevices[0];
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = apci1500_di_insn_bits;
if (dev->irq) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
s->len_chanlist = 1;
s->insn_config = apci1500_di_insn_config;
s->do_cmdtest = apci1500_di_cmdtest;
s->do_cmd = apci1500_di_cmd;
s->cancel = apci1500_di_cancel;
}
/* Digital Output subdevice */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = apci1500_do_insn_bits;
/* reset all the digital outputs */
outw(0x0, devpriv->addon + APCI1500_DO_REG);
/* Counter/Timer(Watchdog) subdevice */
s = &dev->subdevices[2];
s->type = COMEDI_SUBD_TIMER;
s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
s->n_chan = 3;
s->maxdata = 0xffff;
s->range_table = &range_unknown;
s->insn_config = apci1500_timer_insn_config;
s->insn_write = apci1500_timer_insn_write;
s->insn_read = apci1500_timer_insn_read;
/* Enable the PCI interrupt */
if (dev->irq) {
outl(0x2000 | INTCSR_INBOX_FULL_INT,
devpriv->amcc + AMCC_OP_REG_INTCSR);
inl(devpriv->amcc + AMCC_OP_REG_IMB1);
inl(devpriv->amcc + AMCC_OP_REG_INTCSR);
outl(INTCSR_INBOX_INTR_STATUS | 0x2000 | INTCSR_INBOX_FULL_INT,
devpriv->amcc + AMCC_OP_REG_INTCSR);
}
return 0;
}
static void apci1500_detach(struct comedi_device *dev)
{
struct apci1500_private *devpriv = dev->private;
if (devpriv->amcc)
outl(0x0, devpriv->amcc + AMCC_OP_REG_INTCSR);
comedi_pci_detach(dev);
}
static struct comedi_driver apci1500_driver = {
.driver_name = "addi_apci_1500",
.module = THIS_MODULE,
.auto_attach = apci1500_auto_attach,
.detach = apci1500_detach,
};
static int apci1500_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
return comedi_pci_auto_config(dev, &apci1500_driver, id->driver_data);
}
static const struct pci_device_id apci1500_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMCC, 0x80fc) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, apci1500_pci_table);
static struct pci_driver apci1500_pci_driver = {
.name = "addi_apci_1500",
.id_table = apci1500_pci_table,
.probe = apci1500_pci_probe,
.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(apci1500_driver, apci1500_pci_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("ADDI-DATA APCI-1500, 16 channel DI / 16 channel DO boards");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/addi_apci_1500.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_labpc_isadma.c
* ISA DMA support for National Instruments Lab-PC series boards and
* compatibles.
*
* Extracted from ni_labpc.c:
* Copyright (C) 2001-2003 Frank Mori Hess <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/comedi/comedidev.h>
#include <linux/comedi/comedi_isadma.h>
#include "ni_labpc.h"
#include "ni_labpc_regs.h"
#include "ni_labpc_isadma.h"
/* size in bytes of dma buffer */
#define LABPC_ISADMA_BUFFER_SIZE 0xff00
/* utility function that suggests a dma transfer size in bytes */
static unsigned int labpc_suggest_transfer_size(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int maxbytes)
{
struct comedi_cmd *cmd = &s->async->cmd;
unsigned int sample_size = comedi_bytes_per_sample(s);
unsigned int size;
unsigned int freq;
if (cmd->convert_src == TRIG_TIMER)
freq = 1000000000 / cmd->convert_arg;
else
/* return some default value */
freq = 0xffffffff;
/* make buffer fill in no more than 1/3 second */
size = (freq / 3) * sample_size;
/* set a minimum and maximum size allowed */
if (size > maxbytes)
size = maxbytes;
else if (size < sample_size)
size = sample_size;
return size;
}
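/*
 * Worked example: with convert_arg = 10000 ns the sample rate is
 * 1000000000 / 10000 = 100000 samples/s; at 2 bytes per sample the
 * 1/3 second heuristic suggests (100000 / 3) * 2 = 66666 bytes,
 * which the caller's maxbytes (the 0xff00-byte DMA buffer) then
 * clamps to 65280.
 */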
void labpc_setup_dma(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct labpc_private *devpriv = dev->private;
struct comedi_isadma_desc *desc = &devpriv->dma->desc[0];
struct comedi_cmd *cmd = &s->async->cmd;
unsigned int sample_size = comedi_bytes_per_sample(s);
/* set appropriate size of transfer */
desc->size = labpc_suggest_transfer_size(dev, s, desc->maxsize);
if (cmd->stop_src == TRIG_COUNT &&
devpriv->count * sample_size < desc->size)
desc->size = devpriv->count * sample_size;
comedi_isadma_program(desc);
/* set CMD3 bits for caller to enable DMA and interrupt */
devpriv->cmd3 |= (CMD3_DMAEN | CMD3_DMATCINTEN);
}
EXPORT_SYMBOL_GPL(labpc_setup_dma);
void labpc_drain_dma(struct comedi_device *dev)
{
struct labpc_private *devpriv = dev->private;
struct comedi_isadma_desc *desc = &devpriv->dma->desc[0];
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
unsigned int max_samples = comedi_bytes_to_samples(s, desc->size);
unsigned int residue;
unsigned int nsamples;
unsigned int leftover;
/*
* residue is the number of bytes left to be done on the dma
* transfer. It should always be zero at this point unless
* the stop_src is set to external triggering.
*/
residue = comedi_isadma_disable(desc->chan);
/*
* Figure out how many samples to read for this transfer and
* how many will be stored for next time.
*/
nsamples = max_samples - comedi_bytes_to_samples(s, residue);
if (cmd->stop_src == TRIG_COUNT) {
if (devpriv->count <= nsamples) {
nsamples = devpriv->count;
leftover = 0;
} else {
leftover = devpriv->count - nsamples;
if (leftover > max_samples)
leftover = max_samples;
}
devpriv->count -= nsamples;
} else {
leftover = max_samples;
}
desc->size = comedi_samples_to_bytes(s, leftover);
comedi_buf_write_samples(s, desc->virt_addr, nsamples);
}
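/*
 * Example of the residue handling above: with desc->size = 1000
 * bytes and 2-byte samples, max_samples is 500; if the ISA DMA
 * controller reports a 100-byte residue, 450 samples are copied to
 * the comedi buffer, and a TRIG_COUNT command with 750 samples still
 * outstanding programs the next transfer for the remaining 300
 * samples (600 bytes).
 */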
EXPORT_SYMBOL_GPL(labpc_drain_dma);
static void handle_isa_dma(struct comedi_device *dev)
{
struct labpc_private *devpriv = dev->private;
struct comedi_isadma_desc *desc = &devpriv->dma->desc[0];
labpc_drain_dma(dev);
if (desc->size)
comedi_isadma_program(desc);
/* clear dma tc interrupt */
devpriv->write_byte(dev, 0x1, DMATC_CLEAR_REG);
}
void labpc_handle_dma_status(struct comedi_device *dev)
{
const struct labpc_boardinfo *board = dev->board_ptr;
struct labpc_private *devpriv = dev->private;
/*
* if a dma terminal count or external stop trigger
* has occurred
*/
if (devpriv->stat1 & STAT1_GATA0 ||
(board->is_labpc1200 && devpriv->stat2 & STAT2_OUTA1))
handle_isa_dma(dev);
}
EXPORT_SYMBOL_GPL(labpc_handle_dma_status);
void labpc_init_dma_chan(struct comedi_device *dev, unsigned int dma_chan)
{
struct labpc_private *devpriv = dev->private;
/* only DMA channels 3 and 1 are valid */
if (dma_chan != 1 && dma_chan != 3)
return;
/* DMA uses 1 buffer */
devpriv->dma = comedi_isadma_alloc(dev, 1, dma_chan, dma_chan,
LABPC_ISADMA_BUFFER_SIZE,
COMEDI_ISADMA_READ);
}
EXPORT_SYMBOL_GPL(labpc_init_dma_chan);
void labpc_free_dma_chan(struct comedi_device *dev)
{
struct labpc_private *devpriv = dev->private;
if (devpriv)
comedi_isadma_free(devpriv->dma);
}
EXPORT_SYMBOL_GPL(labpc_free_dma_chan);
static int __init ni_labpc_isadma_init_module(void)
{
return 0;
}
module_init(ni_labpc_isadma_init_module);
static void __exit ni_labpc_isadma_cleanup_module(void)
{
}
module_exit(ni_labpc_isadma_cleanup_module);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi NI Lab-PC ISA DMA support");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/ni_labpc_isadma.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/daqboard2000.c
* hardware driver for IOtech DAQboard/2000
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1999 Anders Blomdell <[email protected]>
*/
/*
* Driver: daqboard2000
* Description: IOTech DAQBoard/2000
* Author: Anders Blomdell <[email protected]>
* Status: works
* Updated: Mon, 14 Apr 2008 15:28:52 +0100
* Devices: [IOTech] DAQBoard/2000 (daqboard2000)
*
* Much of the functionality of this driver was determined from reading
* the source code for the Windows driver.
*
* The FPGA on the board requires firmware, which is available from
* https://www.comedi.org in the comedi_nonfree_firmware tarball.
*
* Configuration options: not applicable, uses PCI auto config
*/
/*
* This card was obviously never intended to leave the Windows world,
* since it lacked all kinds of hardware documentation (except for cable
* pinouts, plug and pray has something to catch up with yet).
*
* With some help from our Swedish distributor, we got the Windows source code
* for the card, and here are the findings so far.
*
* 1. A good document that describes the PCI interface chip is 9080db-106.pdf
* available from http://www.plxtech.com/products/io/pci9080
*
* 2. The initialization done so far is:
* a. program the FPGA (windows code sans a lot of error messages)
* b.
*
* 3. Analog out seems to work OK with DACs disabled; if DACs are enabled,
* you have to output values to all enabled DACs until a result appears. I
* guess that it has something to do with pacer clocks, but the source
* gives me no clues. I'll keep it simple so far.
*
* 4. Analog in.
* Each channel in the scanlist seems to be controlled by four
* control words:
*
* Word0:
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* ! | | | ! | | | ! | | | ! | | | !
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* Word1:
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* ! | | | ! | | | ! | | | ! | | | !
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | | | | | | |
* +------+------+ | | | | +-- Digital input (??)
* | | | | +---- 10 us settling time
* | | | +------ Suspend acquisition (last to scan)
* | | +-------- Simultaneous sample and hold
* | +---------- Signed data format
* +------------------------- Correction offset low
*
* Word2:
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* ! | | | ! | | | ! | | | ! | | | !
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | | | | | | | | | |
* +-----+ +--+--+ +++ +++ +--+--+
* | | | | +----- Expansion channel
* | | | +----------- Expansion gain
* | | +--------------- Channel (low)
* | +--------------------- Correction offset high
* +----------------------------- Correction gain low
* Word3:
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* ! | | | ! | | | ! | | | ! | | | !
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | | | | | | | | |
* +------+------+ | | +-+-+ | | +-- Low bank enable
* | | | | | +---- High bank enable
* | | | | +------ Hi/low select
* | | | +---------- Gain (1,?,2,4,8,16,32,64)
* | | +-------------- differential/single ended
* | +---------------- Unipolar
* +------------------------- Correction gain high
*
* 999. The card seems to have an incredible amount of capabilities, but
* trying to reverse engineer them from the Windows source is beyond my
* patience.
*
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/comedi/comedi_pci.h>
#include <linux/comedi/comedi_8255.h>
#include "plx9080.h"
#define DB2K_FIRMWARE "daqboard2000_firmware.bin"
static const struct comedi_lrange db2k_ai_range = {
13, {
BIP_RANGE(10),
BIP_RANGE(5),
BIP_RANGE(2.5),
BIP_RANGE(1.25),
BIP_RANGE(0.625),
BIP_RANGE(0.3125),
BIP_RANGE(0.156),
UNI_RANGE(10),
UNI_RANGE(5),
UNI_RANGE(2.5),
UNI_RANGE(1.25),
UNI_RANGE(0.625),
UNI_RANGE(0.3125)
}
};
/*
* Register Memory Map
*/
#define DB2K_REG_ACQ_CONTROL 0x00 /* u16 (w) */
#define DB2K_REG_ACQ_STATUS 0x00 /* u16 (r) */
#define DB2K_REG_ACQ_SCAN_LIST_FIFO 0x02 /* u16 */
#define DB2K_REG_ACQ_PACER_CLOCK_DIV_LOW 0x04 /* u32 */
#define DB2K_REG_ACQ_SCAN_COUNTER 0x08 /* u16 */
#define DB2K_REG_ACQ_PACER_CLOCK_DIV_HIGH 0x0a /* u16 */
#define DB2K_REG_ACQ_TRIGGER_COUNT 0x0c /* u16 */
#define DB2K_REG_ACQ_RESULTS_FIFO 0x10 /* u16 */
#define DB2K_REG_ACQ_RESULTS_SHADOW 0x14 /* u16 */
#define DB2K_REG_ACQ_ADC_RESULT 0x18 /* u16 */
#define DB2K_REG_DAC_SCAN_COUNTER 0x1c /* u16 */
#define DB2K_REG_DAC_CONTROL 0x20 /* u16 (w) */
#define DB2K_REG_DAC_STATUS 0x20 /* u16 (r) */
#define DB2K_REG_DAC_FIFO 0x24 /* s16 */
#define DB2K_REG_DAC_PACER_CLOCK_DIV 0x2a /* u16 */
#define DB2K_REG_REF_DACS 0x2c /* u16 */
#define DB2K_REG_DIO_CONTROL 0x30 /* u16 */
#define DB2K_REG_P3_HSIO_DATA 0x32 /* s16 */
#define DB2K_REG_P3_CONTROL 0x34 /* u16 */
#define DB2K_REG_CAL_EEPROM_CONTROL 0x36 /* u16 */
#define DB2K_REG_DAC_SETTING(x) (0x38 + (x) * 2) /* s16 */
#define DB2K_REG_DIO_P2_EXP_IO_8_BIT 0x40 /* s16 */
#define DB2K_REG_COUNTER_TIMER_CONTROL 0x80 /* u16 */
#define DB2K_REG_COUNTER_INPUT(x) (0x88 + (x) * 2) /* s16 */
#define DB2K_REG_TIMER_DIV(x) (0xa0 + (x) * 2) /* u16 */
#define DB2K_REG_DMA_CONTROL 0xb0 /* u16 */
#define DB2K_REG_TRIG_CONTROL 0xb2 /* u16 */
#define DB2K_REG_CAL_EEPROM 0xb8 /* u16 */
#define DB2K_REG_ACQ_DIGITAL_MARK 0xba /* u16 */
#define DB2K_REG_TRIG_DACS 0xbc /* u16 */
#define DB2K_REG_DIO_P2_EXP_IO_16_BIT(x) (0xc0 + (x) * 2) /* s16 */
/* CPLD registers */
#define DB2K_REG_CPLD_STATUS 0x1000 /* u16 (r) */
#define DB2K_REG_CPLD_WDATA 0x1000 /* u16 (w) */
/* Scan Sequencer programming */
#define DB2K_ACQ_CONTROL_SEQ_START_SCAN_LIST 0x0011
#define DB2K_ACQ_CONTROL_SEQ_STOP_SCAN_LIST 0x0010
/* Prepare for acquisition */
#define DB2K_ACQ_CONTROL_RESET_SCAN_LIST_FIFO 0x0004
#define DB2K_ACQ_CONTROL_RESET_RESULTS_FIFO 0x0002
#define DB2K_ACQ_CONTROL_RESET_CONFIG_PIPE 0x0001
/* Pacer Clock Control */
#define DB2K_ACQ_CONTROL_ADC_PACER_INTERNAL 0x0030
#define DB2K_ACQ_CONTROL_ADC_PACER_EXTERNAL 0x0032
#define DB2K_ACQ_CONTROL_ADC_PACER_ENABLE 0x0031
#define DB2K_ACQ_CONTROL_ADC_PACER_ENABLE_DAC_PACER 0x0034
#define DB2K_ACQ_CONTROL_ADC_PACER_DISABLE 0x0030
#define DB2K_ACQ_CONTROL_ADC_PACER_NORMAL_MODE 0x0060
#define DB2K_ACQ_CONTROL_ADC_PACER_COMPATIBILITY_MODE 0x0061
#define DB2K_ACQ_CONTROL_ADC_PACER_INTERNAL_OUT_ENABLE 0x0008
#define DB2K_ACQ_CONTROL_ADC_PACER_EXTERNAL_RISING 0x0100
/* Acquisition status bits */
#define DB2K_ACQ_STATUS_RESULTS_FIFO_MORE_1_SAMPLE 0x0001
#define DB2K_ACQ_STATUS_RESULTS_FIFO_HAS_DATA 0x0002
#define DB2K_ACQ_STATUS_RESULTS_FIFO_OVERRUN 0x0004
#define DB2K_ACQ_STATUS_LOGIC_SCANNING 0x0008
#define DB2K_ACQ_STATUS_CONFIG_PIPE_FULL 0x0010
#define DB2K_ACQ_STATUS_SCAN_LIST_FIFO_EMPTY 0x0020
#define DB2K_ACQ_STATUS_ADC_NOT_READY 0x0040
#define DB2K_ACQ_STATUS_ARBITRATION_FAILURE 0x0080
#define DB2K_ACQ_STATUS_ADC_PACER_OVERRUN 0x0100
#define DB2K_ACQ_STATUS_DAC_PACER_OVERRUN 0x0200
/* DAC status */
#define DB2K_DAC_STATUS_DAC_FULL 0x0001
#define DB2K_DAC_STATUS_REF_BUSY 0x0002
#define DB2K_DAC_STATUS_TRIG_BUSY 0x0004
#define DB2K_DAC_STATUS_CAL_BUSY 0x0008
#define DB2K_DAC_STATUS_DAC_BUSY(x) (0x0010 << (x))
/* DAC control */
#define DB2K_DAC_CONTROL_ENABLE_BIT 0x0001
#define DB2K_DAC_CONTROL_DATA_IS_SIGNED 0x0002
#define DB2K_DAC_CONTROL_RESET_FIFO 0x0004
#define DB2K_DAC_CONTROL_DAC_DISABLE(x) (0x0020 + ((x) << 4))
#define DB2K_DAC_CONTROL_DAC_ENABLE(x) (0x0021 + ((x) << 4))
#define DB2K_DAC_CONTROL_PATTERN_DISABLE 0x0060
#define DB2K_DAC_CONTROL_PATTERN_ENABLE 0x0061
/* Trigger Control */
#define DB2K_TRIG_CONTROL_TYPE_ANALOG 0x0000
#define DB2K_TRIG_CONTROL_TYPE_TTL 0x0010
#define DB2K_TRIG_CONTROL_EDGE_HI_LO 0x0004
#define DB2K_TRIG_CONTROL_EDGE_LO_HI 0x0000
#define DB2K_TRIG_CONTROL_LEVEL_ABOVE 0x0000
#define DB2K_TRIG_CONTROL_LEVEL_BELOW 0x0004
#define DB2K_TRIG_CONTROL_SENSE_LEVEL 0x0002
#define DB2K_TRIG_CONTROL_SENSE_EDGE 0x0000
#define DB2K_TRIG_CONTROL_ENABLE 0x0001
#define DB2K_TRIG_CONTROL_DISABLE 0x0000
/* Reference Dac Selection */
#define DB2K_REF_DACS_SET 0x0080
#define DB2K_REF_DACS_SELECT_POS_REF 0x0100
#define DB2K_REF_DACS_SELECT_NEG_REF 0x0000
/* CPLD status bits */
#define DB2K_CPLD_STATUS_INIT 0x0002
#define DB2K_CPLD_STATUS_TXREADY 0x0004
#define DB2K_CPLD_VERSION_MASK 0xf000
/* "New CPLD" signature. */
#define DB2K_CPLD_VERSION_NEW 0x5000
enum db2k_boardid {
BOARD_DAQBOARD2000,
BOARD_DAQBOARD2001
};
struct db2k_boardtype {
const char *name;
unsigned int has_2_ao:1; /* false: 4 AO chans; true: 2 AO chans */
};
static const struct db2k_boardtype db2k_boardtypes[] = {
[BOARD_DAQBOARD2000] = {
.name = "daqboard2000",
.has_2_ao = true,
},
[BOARD_DAQBOARD2001] = {
.name = "daqboard2001",
},
};
struct db2k_private {
void __iomem *plx;
};
static void db2k_write_acq_scan_list_entry(struct comedi_device *dev, u16 entry)
{
writew(entry & 0x00ff, dev->mmio + DB2K_REG_ACQ_SCAN_LIST_FIFO);
writew((entry >> 8) & 0x00ff,
dev->mmio + DB2K_REG_ACQ_SCAN_LIST_FIFO);
}
static void db2k_setup_sampling(struct comedi_device *dev, int chan, int gain)
{
u16 word0, word1, word2, word3;
/* Channel 0-7 diff, channel 8-23 single ended */
word0 = 0;
word1 = 0x0004; /* Last scan */
word2 = (chan << 6) & 0x00c0;
switch (chan / 4) {
case 0:
word3 = 0x0001;
break;
case 1:
word3 = 0x0002;
break;
case 2:
word3 = 0x0005;
break;
case 3:
word3 = 0x0006;
break;
case 4:
word3 = 0x0041;
break;
case 5:
word3 = 0x0042;
break;
default:
word3 = 0;
break;
}
/* These should be read from EEPROM */
word2 |= 0x0800; /* offset */
word3 |= 0xc000; /* gain */
db2k_write_acq_scan_list_entry(dev, word0);
db2k_write_acq_scan_list_entry(dev, word1);
db2k_write_acq_scan_list_entry(dev, word2);
db2k_write_acq_scan_list_entry(dev, word3);
}
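/*
 * Worked example of the word encoding above for chan = 5: word2 =
 * ((5 << 6) & 0x00c0) | 0x0800 = 0x0840 (channel low bits plus the
 * placeholder correction offset), and since 5 / 4 == 1, word3 =
 * 0x0002 | 0xc000 = 0xc002 (bank select plus placeholder gain).
 */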
static int db2k_ai_status(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned long context)
{
unsigned int status;
status = readw(dev->mmio + DB2K_REG_ACQ_STATUS);
if (status & context)
return 0;
return -EBUSY;
}
static int db2k_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
int gain, chan;
int ret;
int i;
writew(DB2K_ACQ_CONTROL_RESET_SCAN_LIST_FIFO |
DB2K_ACQ_CONTROL_RESET_RESULTS_FIFO |
DB2K_ACQ_CONTROL_RESET_CONFIG_PIPE,
dev->mmio + DB2K_REG_ACQ_CONTROL);
/*
* If pacer clock is not set to some high value (> 10 us), we
* risk multiple samples to be put into the result FIFO.
*/
/* 1 second, should be long enough */
writel(1000000, dev->mmio + DB2K_REG_ACQ_PACER_CLOCK_DIV_LOW);
writew(0, dev->mmio + DB2K_REG_ACQ_PACER_CLOCK_DIV_HIGH);
gain = CR_RANGE(insn->chanspec);
chan = CR_CHAN(insn->chanspec);
/*
* This doesn't look efficient. I decided to take the conservative
* approach when I did the insn conversion. Perhaps it would be
* better to have broken it completely, then someone would have been
* forced to fix it. --ds
*/
for (i = 0; i < insn->n; i++) {
db2k_setup_sampling(dev, chan, gain);
/* Enable reading from the scanlist FIFO */
writew(DB2K_ACQ_CONTROL_SEQ_START_SCAN_LIST,
dev->mmio + DB2K_REG_ACQ_CONTROL);
ret = comedi_timeout(dev, s, insn, db2k_ai_status,
DB2K_ACQ_STATUS_CONFIG_PIPE_FULL);
if (ret)
return ret;
writew(DB2K_ACQ_CONTROL_ADC_PACER_ENABLE,
dev->mmio + DB2K_REG_ACQ_CONTROL);
ret = comedi_timeout(dev, s, insn, db2k_ai_status,
DB2K_ACQ_STATUS_LOGIC_SCANNING);
if (ret)
return ret;
ret =
comedi_timeout(dev, s, insn, db2k_ai_status,
DB2K_ACQ_STATUS_RESULTS_FIFO_HAS_DATA);
if (ret)
return ret;
data[i] = readw(dev->mmio + DB2K_REG_ACQ_RESULTS_FIFO);
writew(DB2K_ACQ_CONTROL_ADC_PACER_DISABLE,
dev->mmio + DB2K_REG_ACQ_CONTROL);
writew(DB2K_ACQ_CONTROL_SEQ_STOP_SCAN_LIST,
dev->mmio + DB2K_REG_ACQ_CONTROL);
}
return i;
}
static int db2k_ao_eoc(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned long context)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int status;
status = readw(dev->mmio + DB2K_REG_DAC_STATUS);
if ((status & DB2K_DAC_STATUS_DAC_BUSY(chan)) == 0)
return 0;
return -EBUSY;
}
static int db2k_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
int i;
for (i = 0; i < insn->n; i++) {
unsigned int val = data[i];
int ret;
writew(val, dev->mmio + DB2K_REG_DAC_SETTING(chan));
ret = comedi_timeout(dev, s, insn, db2k_ao_eoc, 0);
if (ret)
return ret;
s->readback[chan] = val;
}
return insn->n;
}
static void db2k_reset_local_bus(struct comedi_device *dev)
{
struct db2k_private *devpriv = dev->private;
u32 cntrl;
cntrl = readl(devpriv->plx + PLX_REG_CNTRL);
cntrl |= PLX_CNTRL_RESET;
writel(cntrl, devpriv->plx + PLX_REG_CNTRL);
mdelay(10);
cntrl &= ~PLX_CNTRL_RESET;
writel(cntrl, devpriv->plx + PLX_REG_CNTRL);
mdelay(10);
}
static void db2k_reload_plx(struct comedi_device *dev)
{
struct db2k_private *devpriv = dev->private;
u32 cntrl;
cntrl = readl(devpriv->plx + PLX_REG_CNTRL);
cntrl &= ~PLX_CNTRL_EERELOAD;
writel(cntrl, devpriv->plx + PLX_REG_CNTRL);
mdelay(10);
cntrl |= PLX_CNTRL_EERELOAD;
writel(cntrl, devpriv->plx + PLX_REG_CNTRL);
mdelay(10);
cntrl &= ~PLX_CNTRL_EERELOAD;
writel(cntrl, devpriv->plx + PLX_REG_CNTRL);
mdelay(10);
}
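/*
 * The PLX USERO general-purpose output is presumably wired to the FPGA's
 * PROGRAM pin; pulsing it puts the FPGA into configuration mode so that
 * new firmware can be clocked in through the CPLD.
 */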
static void db2k_pulse_prog_pin(struct comedi_device *dev)
{
struct db2k_private *devpriv = dev->private;
u32 cntrl;
cntrl = readl(devpriv->plx + PLX_REG_CNTRL);
cntrl |= PLX_CNTRL_USERO;
writel(cntrl, devpriv->plx + PLX_REG_CNTRL);
mdelay(10);
cntrl &= ~PLX_CNTRL_USERO;
writel(cntrl, devpriv->plx + PLX_REG_CNTRL);
mdelay(10); /* Not in the original code, but I like symmetry... */
}
static int db2k_wait_cpld_init(struct comedi_device *dev)
{
int result = -ETIMEDOUT;
int i;
u16 cpld;
/* timeout after 50 tries -> 5ms */
for (i = 0; i < 50; i++) {
cpld = readw(dev->mmio + DB2K_REG_CPLD_STATUS);
if (cpld & DB2K_CPLD_STATUS_INIT) {
result = 0;
break;
}
usleep_range(100, 1000);
}
udelay(5);
return result;
}
static int db2k_wait_cpld_txready(struct comedi_device *dev)
{
int i;
for (i = 0; i < 100; i++) {
if (readw(dev->mmio + DB2K_REG_CPLD_STATUS) &
DB2K_CPLD_STATUS_TXREADY) {
return 0;
}
udelay(1);
}
return -ETIMEDOUT;
}
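/*
 * Newer CPLD revisions expose a TXREADY flag that can be polled before
 * each word is written; older ones provide no handshake, so a short
 * delay between words is used instead.
 */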
static int db2k_write_cpld(struct comedi_device *dev, u16 data, bool new_cpld)
{
int result = 0;
if (new_cpld) {
result = db2k_wait_cpld_txready(dev);
if (result)
return result;
} else {
usleep_range(10, 20);
}
writew(data, dev->mmio + DB2K_REG_CPLD_WDATA);
if (!(readw(dev->mmio + DB2K_REG_CPLD_STATUS) & DB2K_CPLD_STATUS_INIT))
result = -EIO;
return result;
}
static int db2k_wait_fpga_programmed(struct comedi_device *dev)
{
struct db2k_private *devpriv = dev->private;
int i;
/* Time out after 200 tries -> 20ms */
for (i = 0; i < 200; i++) {
u32 cntrl = readl(devpriv->plx + PLX_REG_CNTRL);
/* General Purpose Input (USERI) set on FPGA "DONE". */
if (cntrl & PLX_CNTRL_USERI)
return 0;
usleep_range(100, 1000);
}
return -ETIMEDOUT;
}
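/*
 * Firmware download: locate the FPGA start sequence (0xff 0x20) in the
 * image, strip everything before it, then for up to three attempts reset
 * the local bus, reload the PLX from its EEPROM, pulse the PROGRAM pin,
 * and stream the image through the CPLD as big-endian 16-bit words until
 * the FPGA signals DONE via the PLX USERI input.
 */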
static int db2k_load_firmware(struct comedi_device *dev, const u8 *cpld_array,
size_t len, unsigned long context)
{
struct db2k_private *devpriv = dev->private;
int result = -EIO;
u32 cntrl;
int retry;
size_t i;
bool new_cpld;
/* Look for FPGA start sequence in firmware. */
for (i = 0; i + 1 < len; i++) {
if (cpld_array[i] == 0xff && cpld_array[i + 1] == 0x20)
break;
}
if (i + 1 >= len) {
dev_err(dev->class_dev, "bad firmware - no start sequence\n");
return -EINVAL;
}
/* Check length is even. */
if ((len - i) & 1) {
dev_err(dev->class_dev,
"bad firmware - odd length (%zu = %zu - %zu)\n",
len - i, len, i);
return -EINVAL;
}
/* Strip firmware header. */
cpld_array += i;
len -= i;
/* Check to make sure the serial eeprom is present on the board */
cntrl = readl(devpriv->plx + PLX_REG_CNTRL);
if (!(cntrl & PLX_CNTRL_EEPRESENT))
return -EIO;
for (retry = 0; retry < 3; retry++) {
db2k_reset_local_bus(dev);
db2k_reload_plx(dev);
db2k_pulse_prog_pin(dev);
result = db2k_wait_cpld_init(dev);
if (result)
continue;
new_cpld = (readw(dev->mmio + DB2K_REG_CPLD_STATUS) &
DB2K_CPLD_VERSION_MASK) == DB2K_CPLD_VERSION_NEW;
		for (i = 0; i < len; i += 2) {
u16 data = (cpld_array[i] << 8) + cpld_array[i + 1];
result = db2k_write_cpld(dev, data, new_cpld);
if (result)
break;
}
if (result == 0)
result = db2k_wait_fpga_programmed(dev);
if (result == 0) {
db2k_reset_local_bus(dev);
db2k_reload_plx(dev);
break;
}
}
return result;
}
static void db2k_adc_stop_dma_transfer(struct comedi_device *dev)
{
}
static void db2k_adc_disarm(struct comedi_device *dev)
{
/* Disable hardware triggers */
udelay(2);
writew(DB2K_TRIG_CONTROL_TYPE_ANALOG | DB2K_TRIG_CONTROL_DISABLE,
dev->mmio + DB2K_REG_TRIG_CONTROL);
udelay(2);
writew(DB2K_TRIG_CONTROL_TYPE_TTL | DB2K_TRIG_CONTROL_DISABLE,
dev->mmio + DB2K_REG_TRIG_CONTROL);
/* Stop the scan list FIFO from loading the configuration pipe */
udelay(2);
writew(DB2K_ACQ_CONTROL_SEQ_STOP_SCAN_LIST,
dev->mmio + DB2K_REG_ACQ_CONTROL);
/* Stop the pacer clock */
udelay(2);
writew(DB2K_ACQ_CONTROL_ADC_PACER_DISABLE,
dev->mmio + DB2K_REG_ACQ_CONTROL);
/* Stop the input dma (abort channel 1) */
db2k_adc_stop_dma_transfer(dev);
}
static void db2k_activate_reference_dacs(struct comedi_device *dev)
{
unsigned int val;
int timeout;
/* Set the + reference dac value in the FPGA */
writew(DB2K_REF_DACS_SET | DB2K_REF_DACS_SELECT_POS_REF,
dev->mmio + DB2K_REG_REF_DACS);
for (timeout = 0; timeout < 20; timeout++) {
val = readw(dev->mmio + DB2K_REG_DAC_STATUS);
if ((val & DB2K_DAC_STATUS_REF_BUSY) == 0)
break;
udelay(2);
}
/* Set the - reference dac value in the FPGA */
writew(DB2K_REF_DACS_SET | DB2K_REF_DACS_SELECT_NEG_REF,
dev->mmio + DB2K_REG_REF_DACS);
for (timeout = 0; timeout < 20; timeout++) {
val = readw(dev->mmio + DB2K_REG_DAC_STATUS);
if ((val & DB2K_DAC_STATUS_REF_BUSY) == 0)
break;
udelay(2);
}
}
static void db2k_initialize_ctrs(struct comedi_device *dev)
{
}
static void db2k_initialize_tmrs(struct comedi_device *dev)
{
}
static void db2k_dac_disarm(struct comedi_device *dev)
{
}
static void db2k_initialize_adc(struct comedi_device *dev)
{
db2k_adc_disarm(dev);
db2k_activate_reference_dacs(dev);
db2k_initialize_ctrs(dev);
db2k_initialize_tmrs(dev);
}
static int db2k_8255_cb(struct comedi_device *dev, int dir, int port, int data,
unsigned long iobase)
{
if (dir) {
writew(data, dev->mmio + iobase + port * 2);
return 0;
}
return readw(dev->mmio + iobase + port * 2);
}
static int db2k_auto_attach(struct comedi_device *dev, unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct db2k_boardtype *board;
struct db2k_private *devpriv;
struct comedi_subdevice *s;
int result;
if (context >= ARRAY_SIZE(db2k_boardtypes))
return -ENODEV;
board = &db2k_boardtypes[context];
if (!board->name)
return -ENODEV;
dev->board_ptr = board;
dev->board_name = board->name;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
result = comedi_pci_enable(dev);
if (result)
return result;
devpriv->plx = pci_ioremap_bar(pcidev, 0);
dev->mmio = pci_ioremap_bar(pcidev, 2);
if (!devpriv->plx || !dev->mmio)
return -ENOMEM;
result = comedi_alloc_subdevices(dev, 3);
if (result)
return result;
result = comedi_load_firmware(dev, &comedi_to_pci_dev(dev)->dev,
DB2K_FIRMWARE, db2k_load_firmware, 0);
if (result < 0)
return result;
db2k_initialize_adc(dev);
db2k_dac_disarm(dev);
s = &dev->subdevices[0];
/* ai subdevice */
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_GROUND;
s->n_chan = 24;
s->maxdata = 0xffff;
s->insn_read = db2k_ai_insn_read;
s->range_table = &db2k_ai_range;
s = &dev->subdevices[1];
/* ao subdevice */
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = board->has_2_ao ? 2 : 4;
s->maxdata = 0xffff;
s->insn_write = db2k_ao_insn_write;
s->range_table = &range_bipolar10;
result = comedi_alloc_subdev_readback(s);
if (result)
return result;
s = &dev->subdevices[2];
return subdev_8255_init(dev, s, db2k_8255_cb,
DB2K_REG_DIO_P2_EXP_IO_8_BIT);
}
static void db2k_detach(struct comedi_device *dev)
{
struct db2k_private *devpriv = dev->private;
if (devpriv && devpriv->plx)
iounmap(devpriv->plx);
comedi_pci_detach(dev);
}
static struct comedi_driver db2k_driver = {
.driver_name = "daqboard2000",
.module = THIS_MODULE,
.auto_attach = db2k_auto_attach,
.detach = db2k_detach,
};
static int db2k_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
return comedi_pci_auto_config(dev, &db2k_driver, id->driver_data);
}
static const struct pci_device_id db2k_pci_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_IOTECH, 0x0409, PCI_VENDOR_ID_IOTECH,
0x0002), .driver_data = BOARD_DAQBOARD2000, },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_IOTECH, 0x0409, PCI_VENDOR_ID_IOTECH,
0x0004), .driver_data = BOARD_DAQBOARD2001, },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, db2k_pci_table);
static struct pci_driver db2k_pci_driver = {
.name = "daqboard2000",
.id_table = db2k_pci_table,
.probe = db2k_pci_probe,
.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(db2k_driver, db2k_pci_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(DB2K_FIRMWARE);
| linux-master | drivers/comedi/drivers/daqboard2000.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* pcmuio.c
* Comedi driver for Winsystems PC-104 based 48/96-channel DIO boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2006 Calin A. Culianu <[email protected]>
*/
/*
* Driver: pcmuio
* Description: Winsystems PC-104 based 48/96-channel DIO boards.
* Devices: [Winsystems] PCM-UIO48A (pcmuio48), PCM-UIO96A (pcmuio96)
* Author: Calin Culianu <[email protected]>
* Updated: Fri, 13 Jan 2006 12:01:01 -0500
* Status: works
*
* A driver for the relatively straightforward-to-program PCM-UIO48A and
* PCM-UIO96A boards from Winsystems. These boards use either one or two
* (in the 96-DIO version) WS16C48 ASIC HighDensity I/O Chips (HDIO). This
* chip is interesting in that each I/O line is individually programmable
* for INPUT or OUTPUT (thus comedi_dio_config can be done on a per-channel
* basis). Also, each chip supports edge-triggered interrupts for the first
* 24 I/O lines. Of course, since the 96-channel version of the board has
* two ASICs, it can detect polarity changes on up to 48 I/O lines. Since
* this is essentially an (non-PnP) ISA board, I/O Address and IRQ selection
* are done through jumpers on the board. You need to pass that information
* to this driver as the first and second comedi_config option, respectively.
 * Note that the 48-channel version uses 16 bytes of I/O memory and the
 * 96-channel version uses 32 bytes (in case you are worried about conflicts).
 * The 48-channel board is split into two 24-channel comedi subdevices. The
 * 96-channel board is split into four 24-channel DIO subdevices.
*
* Note that IRQ support has been added, but it is untested.
*
 * To use edge-detection IRQ support, pass the IRQs of both ASICs (for the
 * 96-channel version) or just 1 ASIC (for the 48-channel version). Then, use
* comedi_commands with TRIG_NOW. Your callback will be called each time an
* edge is triggered, and the data values will be two sample_t's, which
* should be concatenated to form one 32-bit unsigned int. This value is
* the mask of channels that had edges detected from your channel list. Note
 * that the bit positions in the mask correspond to positions in your
 * chanlist when you specified the command and *not* channel ids!
*
* To set the polarity of the edge-detection interrupts pass a nonzero value
* for either CR_RANGE or CR_AREF for edge-up polarity, or a zero value for
* both CR_RANGE and CR_AREF if you want edge-down polarity.
*
* In the 48-channel version:
*
* On subdev 0, the first 24 channels are edge-detect channels.
*
* In the 96-channel board you have the following channels that can do edge
* detection:
*
 * subdev 0, channels 0-23 (first 24 channels of 1st ASIC)
 * subdev 2, channels 0-23 (first 24 channels of 2nd ASIC)
*
* Configuration Options:
* [0] - I/O port base address
* [1] - IRQ (for first ASIC, or first 24 channels)
 * [2] - IRQ (for second ASIC, pcmuio96 only - IRQ for chans 48-71
 * can be the same as first irq!)
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/comedi/comedidev.h>
/*
* Register I/O map
*
* Offset Page 0 Page 1 Page 2 Page 3
* ------ ----------- ----------- ----------- -----------
* 0x00 Port 0 I/O Port 0 I/O Port 0 I/O Port 0 I/O
* 0x01 Port 1 I/O Port 1 I/O Port 1 I/O Port 1 I/O
* 0x02 Port 2 I/O Port 2 I/O Port 2 I/O Port 2 I/O
* 0x03 Port 3 I/O Port 3 I/O Port 3 I/O Port 3 I/O
* 0x04 Port 4 I/O Port 4 I/O Port 4 I/O Port 4 I/O
* 0x05 Port 5 I/O Port 5 I/O Port 5 I/O Port 5 I/O
* 0x06 INT_PENDING INT_PENDING INT_PENDING INT_PENDING
* 0x07 Page/Lock Page/Lock Page/Lock Page/Lock
* 0x08 N/A POL_0 ENAB_0 INT_ID0
* 0x09 N/A POL_1 ENAB_1 INT_ID1
* 0x0a N/A POL_2 ENAB_2 INT_ID2
*/
#define PCMUIO_PORT_REG(x) (0x00 + (x))
#define PCMUIO_INT_PENDING_REG 0x06
#define PCMUIO_PAGE_LOCK_REG 0x07
#define PCMUIO_LOCK_PORT(x) ((1 << (x)) & 0x3f)
#define PCMUIO_PAGE(x) (((x) & 0x3) << 6)
#define PCMUIO_PAGE_MASK PCMUIO_PAGE(3)
#define PCMUIO_PAGE_POL 1
#define PCMUIO_PAGE_ENAB 2
#define PCMUIO_PAGE_INT_ID 3
#define PCMUIO_PAGE_REG(x) (0x08 + (x))
#define PCMUIO_ASIC_IOSIZE 0x10
#define PCMUIO_MAX_ASICS 2
struct pcmuio_board {
const char *name;
const int num_asics;
};
static const struct pcmuio_board pcmuio_boards[] = {
{
.name = "pcmuio48",
.num_asics = 1,
}, {
.name = "pcmuio96",
.num_asics = 2,
},
};
struct pcmuio_asic {
spinlock_t pagelock; /* protects the page registers */
spinlock_t spinlock; /* protects member variables */
unsigned int enabled_mask;
unsigned int active:1;
};
struct pcmuio_private {
struct pcmuio_asic asics[PCMUIO_MAX_ASICS];
unsigned int irq2;
};
static inline unsigned long pcmuio_asic_iobase(struct comedi_device *dev,
int asic)
{
return dev->iobase + (asic * PCMUIO_ASIC_IOSIZE);
}
static inline int pcmuio_subdevice_to_asic(struct comedi_subdevice *s)
{
/*
* subdevice 0 and 1 are handled by the first asic
* subdevice 2 and 3 are handled by the second asic
*/
return s->index / 2;
}
static inline int pcmuio_subdevice_to_port(struct comedi_subdevice *s)
{
/*
* subdevice 0 and 2 use port registers 0-2
* subdevice 1 and 3 use port registers 3-5
*/
return (s->index % 2) ? 3 : 0;
}
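/*
 * Register access is paged: the six port registers are visible on every
 * page, but the polarity/enable/interrupt-id registers require selecting
 * the right page in the page/lock register first. The pagelock spinlock
 * keeps the page selection and the subsequent accesses atomic.
 */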
static void pcmuio_write(struct comedi_device *dev, unsigned int val,
int asic, int page, int port)
{
struct pcmuio_private *devpriv = dev->private;
struct pcmuio_asic *chip = &devpriv->asics[asic];
unsigned long iobase = pcmuio_asic_iobase(dev, asic);
unsigned long flags;
spin_lock_irqsave(&chip->pagelock, flags);
if (page == 0) {
/* Port registers are valid for any page */
outb(val & 0xff, iobase + PCMUIO_PORT_REG(port + 0));
outb((val >> 8) & 0xff, iobase + PCMUIO_PORT_REG(port + 1));
outb((val >> 16) & 0xff, iobase + PCMUIO_PORT_REG(port + 2));
} else {
outb(PCMUIO_PAGE(page), iobase + PCMUIO_PAGE_LOCK_REG);
outb(val & 0xff, iobase + PCMUIO_PAGE_REG(0));
outb((val >> 8) & 0xff, iobase + PCMUIO_PAGE_REG(1));
outb((val >> 16) & 0xff, iobase + PCMUIO_PAGE_REG(2));
}
spin_unlock_irqrestore(&chip->pagelock, flags);
}
static unsigned int pcmuio_read(struct comedi_device *dev,
int asic, int page, int port)
{
struct pcmuio_private *devpriv = dev->private;
struct pcmuio_asic *chip = &devpriv->asics[asic];
unsigned long iobase = pcmuio_asic_iobase(dev, asic);
unsigned long flags;
unsigned int val;
spin_lock_irqsave(&chip->pagelock, flags);
if (page == 0) {
/* Port registers are valid for any page */
val = inb(iobase + PCMUIO_PORT_REG(port + 0));
val |= (inb(iobase + PCMUIO_PORT_REG(port + 1)) << 8);
val |= (inb(iobase + PCMUIO_PORT_REG(port + 2)) << 16);
} else {
outb(PCMUIO_PAGE(page), iobase + PCMUIO_PAGE_LOCK_REG);
val = inb(iobase + PCMUIO_PAGE_REG(0));
val |= (inb(iobase + PCMUIO_PAGE_REG(1)) << 8);
val |= (inb(iobase + PCMUIO_PAGE_REG(2)) << 16);
}
spin_unlock_irqrestore(&chip->pagelock, flags);
return val;
}
/*
* Each channel can be individually programmed for input or output.
* Writing a '0' to a channel causes the corresponding output pin
* to go to a high-z state (pulled high by an external 10K resistor).
* This allows it to be used as an input. When used in the input mode,
* a read reflects the inverted state of the I/O pin, such that a
* high on the pin will read as a '0' in the register. Writing a '1'
* to a bit position causes the pin to sink current (up to 12mA),
* effectively pulling it low.
*/
static int pcmuio_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
int asic = pcmuio_subdevice_to_asic(s);
int port = pcmuio_subdevice_to_port(s);
unsigned int chanmask = (1 << s->n_chan) - 1;
unsigned int mask;
unsigned int val;
mask = comedi_dio_update_state(s, data);
if (mask) {
/*
* Outputs are inverted, invert the state and
* update the channels.
*
* The s->io_bits mask makes sure the input channels
		 * are '0' so that the output pins stay in a high-z
		 * state.
*/
val = ~s->state & chanmask;
val &= s->io_bits;
pcmuio_write(dev, val, asic, 0, port);
}
/* get inverted state of the channels from the port */
val = pcmuio_read(dev, asic, 0, port);
/* return the true state of the channels */
data[1] = ~val & chanmask;
return insn->n;
}
static int pcmuio_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
int asic = pcmuio_subdevice_to_asic(s);
int port = pcmuio_subdevice_to_port(s);
int ret;
ret = comedi_dio_insn_config(dev, s, insn, data, 0);
if (ret)
return ret;
if (data[0] == INSN_CONFIG_DIO_INPUT)
pcmuio_write(dev, s->io_bits, asic, 0, port);
return insn->n;
}
static void pcmuio_reset(struct comedi_device *dev)
{
const struct pcmuio_board *board = dev->board_ptr;
int asic;
for (asic = 0; asic < board->num_asics; ++asic) {
/* first, clear all the DIO port bits */
pcmuio_write(dev, 0, asic, 0, 0);
pcmuio_write(dev, 0, asic, 0, 3);
/* Next, clear all the paged registers for each page */
pcmuio_write(dev, 0, asic, PCMUIO_PAGE_POL, 0);
pcmuio_write(dev, 0, asic, PCMUIO_PAGE_ENAB, 0);
pcmuio_write(dev, 0, asic, PCMUIO_PAGE_INT_ID, 0);
}
}
/* chip->spinlock is already locked */
static void pcmuio_stop_intr(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct pcmuio_private *devpriv = dev->private;
int asic = pcmuio_subdevice_to_asic(s);
struct pcmuio_asic *chip = &devpriv->asics[asic];
chip->enabled_mask = 0;
chip->active = 0;
s->async->inttrig = NULL;
/* disable all intrs for this subdev.. */
pcmuio_write(dev, 0, asic, PCMUIO_PAGE_ENAB, 0);
}
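/*
 * Translate the hardware mask of channels that saw an edge into a mask of
 * positions within the user's chanlist, and push that as one 32-bit
 * sample into the async buffer.
 */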
static void pcmuio_handle_intr_subdev(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int triggered)
{
struct pcmuio_private *devpriv = dev->private;
int asic = pcmuio_subdevice_to_asic(s);
struct pcmuio_asic *chip = &devpriv->asics[asic];
struct comedi_cmd *cmd = &s->async->cmd;
unsigned int val = 0;
unsigned long flags;
unsigned int i;
spin_lock_irqsave(&chip->spinlock, flags);
if (!chip->active)
goto done;
if (!(triggered & chip->enabled_mask))
goto done;
for (i = 0; i < cmd->chanlist_len; i++) {
unsigned int chan = CR_CHAN(cmd->chanlist[i]);
if (triggered & (1 << chan))
val |= (1 << i);
}
comedi_buf_write_samples(s, &val, 1);
if (cmd->stop_src == TRIG_COUNT &&
s->async->scans_done >= cmd->stop_arg)
s->async->events |= COMEDI_CB_EOA;
done:
spin_unlock_irqrestore(&chip->spinlock, flags);
comedi_handle_events(dev, s);
}
static int pcmuio_handle_asic_interrupt(struct comedi_device *dev, int asic)
{
	/* there could be two asics so we can't use dev->read_subdev */
struct comedi_subdevice *s = &dev->subdevices[asic * 2];
unsigned long iobase = pcmuio_asic_iobase(dev, asic);
unsigned int val;
/* are there any interrupts pending */
val = inb(iobase + PCMUIO_INT_PENDING_REG) & 0x07;
if (!val)
return 0;
/* get, and clear, the pending interrupts */
val = pcmuio_read(dev, asic, PCMUIO_PAGE_INT_ID, 0);
pcmuio_write(dev, 0, asic, PCMUIO_PAGE_INT_ID, 0);
/* handle the pending interrupts */
pcmuio_handle_intr_subdev(dev, s, val);
return 1;
}
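/*
 * Both ASICs may share one IRQ line (or use separate ones), so check each
 * ASIC whose IRQ matches the one that fired.
 */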
static irqreturn_t pcmuio_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct pcmuio_private *devpriv = dev->private;
int handled = 0;
if (irq == dev->irq)
handled += pcmuio_handle_asic_interrupt(dev, 0);
if (irq == devpriv->irq2)
handled += pcmuio_handle_asic_interrupt(dev, 1);
return handled ? IRQ_HANDLED : IRQ_NONE;
}
/* chip->spinlock is already locked */
static void pcmuio_start_intr(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct pcmuio_private *devpriv = dev->private;
int asic = pcmuio_subdevice_to_asic(s);
struct pcmuio_asic *chip = &devpriv->asics[asic];
struct comedi_cmd *cmd = &s->async->cmd;
unsigned int bits = 0;
unsigned int pol_bits = 0;
int i;
chip->enabled_mask = 0;
chip->active = 1;
if (cmd->chanlist) {
for (i = 0; i < cmd->chanlist_len; i++) {
unsigned int chanspec = cmd->chanlist[i];
unsigned int chan = CR_CHAN(chanspec);
unsigned int range = CR_RANGE(chanspec);
unsigned int aref = CR_AREF(chanspec);
bits |= (1 << chan);
pol_bits |= ((aref || range) ? 1 : 0) << chan;
}
}
bits &= ((1 << s->n_chan) - 1);
chip->enabled_mask = bits;
/* set pol and enab intrs for this subdev.. */
pcmuio_write(dev, pol_bits, asic, PCMUIO_PAGE_POL, 0);
pcmuio_write(dev, bits, asic, PCMUIO_PAGE_ENAB, 0);
}
static int pcmuio_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct pcmuio_private *devpriv = dev->private;
int asic = pcmuio_subdevice_to_asic(s);
struct pcmuio_asic *chip = &devpriv->asics[asic];
unsigned long flags;
spin_lock_irqsave(&chip->spinlock, flags);
if (chip->active)
pcmuio_stop_intr(dev, s);
spin_unlock_irqrestore(&chip->spinlock, flags);
return 0;
}
static int pcmuio_inttrig_start_intr(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int trig_num)
{
struct pcmuio_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
int asic = pcmuio_subdevice_to_asic(s);
struct pcmuio_asic *chip = &devpriv->asics[asic];
unsigned long flags;
if (trig_num != cmd->start_arg)
return -EINVAL;
spin_lock_irqsave(&chip->spinlock, flags);
s->async->inttrig = NULL;
if (chip->active)
pcmuio_start_intr(dev, s);
spin_unlock_irqrestore(&chip->spinlock, flags);
return 1;
}
/*
* 'do_cmd' function for an 'INTERRUPT' subdevice.
*/
static int pcmuio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct pcmuio_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
int asic = pcmuio_subdevice_to_asic(s);
struct pcmuio_asic *chip = &devpriv->asics[asic];
unsigned long flags;
spin_lock_irqsave(&chip->spinlock, flags);
chip->active = 1;
/* Set up start of acquisition. */
if (cmd->start_src == TRIG_INT)
s->async->inttrig = pcmuio_inttrig_start_intr;
else /* TRIG_NOW */
pcmuio_start_intr(dev, s);
spin_unlock_irqrestore(&chip->spinlock, flags);
return 0;
}
static int pcmuio_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
int err = 0;
/* Step 1 : check if triggers are trivially valid */
err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT);
err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
if (err)
return 1;
/* Step 2a : make sure trigger sources are unique */
err |= comedi_check_trigger_is_unique(cmd->start_src);
err |= comedi_check_trigger_is_unique(cmd->stop_src);
/* Step 2b : and mutually compatible */
if (err)
return 2;
/* Step 3: check if arguments are trivially valid */
err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
if (cmd->stop_src == TRIG_COUNT)
err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
else /* TRIG_NONE */
err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
if (err)
return 3;
/* step 4: fix up any arguments */
/* if (err) return 4; */
return 0;
}
static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct pcmuio_board *board = dev->board_ptr;
struct comedi_subdevice *s;
struct pcmuio_private *devpriv;
int ret;
int i;
ret = comedi_request_region(dev, it->options[0],
board->num_asics * PCMUIO_ASIC_IOSIZE);
if (ret)
return ret;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
for (i = 0; i < PCMUIO_MAX_ASICS; ++i) {
struct pcmuio_asic *chip = &devpriv->asics[i];
spin_lock_init(&chip->pagelock);
spin_lock_init(&chip->spinlock);
}
pcmuio_reset(dev);
if (it->options[1]) {
/* request the irq for the 1st asic */
ret = request_irq(it->options[1], pcmuio_interrupt, 0,
dev->board_name, dev);
if (ret == 0)
dev->irq = it->options[1];
}
if (board->num_asics == 2) {
if (it->options[2] == dev->irq) {
/* the same irq (or none) is used by both asics */
devpriv->irq2 = it->options[2];
} else if (it->options[2]) {
/* request the irq for the 2nd asic */
ret = request_irq(it->options[2], pcmuio_interrupt, 0,
dev->board_name, dev);
if (ret == 0)
devpriv->irq2 = it->options[2];
}
}
ret = comedi_alloc_subdevices(dev, board->num_asics * 2);
if (ret)
return ret;
for (i = 0; i < dev->n_subdevices; ++i) {
s = &dev->subdevices[i];
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = 24;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = pcmuio_dio_insn_bits;
s->insn_config = pcmuio_dio_insn_config;
/* subdevices 0 and 2 can support interrupts */
if ((i == 0 && dev->irq) || (i == 2 && devpriv->irq2)) {
/* setup the interrupt subdevice */
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ | SDF_LSAMPL |
SDF_PACKED;
s->len_chanlist = s->n_chan;
s->cancel = pcmuio_cancel;
s->do_cmd = pcmuio_cmd;
s->do_cmdtest = pcmuio_cmdtest;
}
}
return 0;
}
static void pcmuio_detach(struct comedi_device *dev)
{
struct pcmuio_private *devpriv = dev->private;
if (devpriv) {
pcmuio_reset(dev);
/* free the 2nd irq if used, the core will free the 1st one */
if (devpriv->irq2 && devpriv->irq2 != dev->irq)
free_irq(devpriv->irq2, dev);
}
comedi_legacy_detach(dev);
}
static struct comedi_driver pcmuio_driver = {
.driver_name = "pcmuio",
.module = THIS_MODULE,
.attach = pcmuio_attach,
.detach = pcmuio_detach,
.board_name = &pcmuio_boards[0].name,
.offset = sizeof(struct pcmuio_board),
.num_names = ARRAY_SIZE(pcmuio_boards),
};
module_comedi_driver(pcmuio_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/pcmuio.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* COMEDI driver for the watchdog subdevice found on some addi-data boards
* Copyright (c) 2013 H Hartley Sweeten <[email protected]>
*
* Based on implementations in various addi-data COMEDI drivers.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998 David A. Schleef <[email protected]>
*/
#include <linux/module.h>
#include <linux/comedi/comedidev.h>
#include "addi_tcw.h"
#include "addi_watchdog.h"
struct addi_watchdog_private {
unsigned long iobase;
unsigned int wdog_ctrl;
};
/*
* The watchdog subdevice is configured with two INSN_CONFIG instructions:
*
* Enable the watchdog and set the reload timeout:
* data[0] = INSN_CONFIG_ARM
* data[1] = timeout reload value
*
* Disable the watchdog:
* data[0] = INSN_CONFIG_DISARM
*/
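/*
 * With the 20 ms time base and the 8-bit reload register used below, a
 * reload value of N gives a timeout of 20 * N + 20 ms; for example,
 * arming with data[1] = 49 gives a 1000 ms timeout, and the maximum
 * (data[1] = 255) gives 5120 ms.
 */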
static int addi_watchdog_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct addi_watchdog_private *spriv = s->private;
unsigned int reload;
switch (data[0]) {
case INSN_CONFIG_ARM:
spriv->wdog_ctrl = ADDI_TCW_CTRL_ENA;
reload = data[1] & s->maxdata;
outl(reload, spriv->iobase + ADDI_TCW_RELOAD_REG);
/* Time base is 20ms, let the user know the timeout */
dev_info(dev->class_dev, "watchdog enabled, timeout:%dms\n",
20 * reload + 20);
break;
case INSN_CONFIG_DISARM:
spriv->wdog_ctrl = 0;
break;
default:
return -EINVAL;
}
outl(spriv->wdog_ctrl, spriv->iobase + ADDI_TCW_CTRL_REG);
return insn->n;
}
static int addi_watchdog_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct addi_watchdog_private *spriv = s->private;
int i;
for (i = 0; i < insn->n; i++)
data[i] = inl(spriv->iobase + ADDI_TCW_STATUS_REG);
return insn->n;
}
static int addi_watchdog_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct addi_watchdog_private *spriv = s->private;
int i;
if (spriv->wdog_ctrl == 0) {
dev_warn(dev->class_dev, "watchdog is disabled\n");
return -EINVAL;
}
/* "ping" the watchdog */
for (i = 0; i < insn->n; i++) {
outl(spriv->wdog_ctrl | ADDI_TCW_CTRL_TRIG,
spriv->iobase + ADDI_TCW_CTRL_REG);
}
return insn->n;
}
void addi_watchdog_reset(unsigned long iobase)
{
outl(0x0, iobase + ADDI_TCW_CTRL_REG);
outl(0x0, iobase + ADDI_TCW_RELOAD_REG);
}
EXPORT_SYMBOL_GPL(addi_watchdog_reset);
int addi_watchdog_init(struct comedi_subdevice *s, unsigned long iobase)
{
struct addi_watchdog_private *spriv;
spriv = comedi_alloc_spriv(s, sizeof(*spriv));
if (!spriv)
return -ENOMEM;
spriv->iobase = iobase;
s->type = COMEDI_SUBD_TIMER;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 1;
s->maxdata = 0xff;
s->insn_config = addi_watchdog_insn_config;
s->insn_read = addi_watchdog_insn_read;
s->insn_write = addi_watchdog_insn_write;
return 0;
}
EXPORT_SYMBOL_GPL(addi_watchdog_init);
static int __init addi_watchdog_module_init(void)
{
return 0;
}
module_init(addi_watchdog_module_init);
static void __exit addi_watchdog_module_exit(void)
{
}
module_exit(addi_watchdog_module_exit);
MODULE_DESCRIPTION("ADDI-DATA Watchdog subdevice");
MODULE_AUTHOR("H Hartley Sweeten <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/addi_watchdog.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* cb_pcidas.c
* Developed by Ivan Martinez and Frank Mori Hess, with valuable help from
 * David Schleef and the rest of the Comedi developers community.
*
* Copyright (C) 2001-2003 Ivan Martinez <[email protected]>
* Copyright (C) 2001,2002 Frank Mori Hess <[email protected]>
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-8 David A. Schleef <[email protected]>
*/
/*
* Driver: cb_pcidas
* Description: MeasurementComputing PCI-DAS series
* with the AMCC S5933 PCI controller
* Devices: [Measurement Computing] PCI-DAS1602/16 (cb_pcidas),
* PCI-DAS1602/16jr, PCI-DAS1602/12, PCI-DAS1200, PCI-DAS1200jr,
 * PCI-DAS1000, PCI-DAS1001, PCI-DAS1002
* Author: Ivan Martinez <[email protected]>,
* Frank Mori Hess <[email protected]>
* Updated: 2003-3-11
*
* Status:
* There are many reports of the driver being used with most of the
 * supported cards. Although no detailed log is maintained, it can
 * be said that the driver is well tested and stable.
*
* The boards may be autocalibrated using the comedi_calibrate
* utility.
*
* Configuration options: not applicable, uses PCI auto config
*
* For commands, the scanned channels must be consecutive
 * (e.g. 4-5-6-7, 2-3-4, ...), and must all have the same
* range and aref.
*
* AI Triggering:
* For start_src == TRIG_EXT, the A/D EXTERNAL TRIGGER IN (pin 45) is used.
* For 1602 series, the start_arg is interpreted as follows:
* start_arg == 0 => gated trigger (level high)
* start_arg == CR_INVERT => gated trigger (level low)
* start_arg == CR_EDGE => Rising edge
* start_arg == CR_EDGE | CR_INVERT => Falling edge
* For the other boards the trigger will be done on rising edge
*/
/*
* TODO:
* analog triggering on 1602 series
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/comedi/comedi_pci.h>
#include <linux/comedi/comedi_8255.h>
#include <linux/comedi/comedi_8254.h>
#include "amcc_s5933.h"
#define AI_BUFFER_SIZE 1024 /* max ai fifo size */
#define AO_BUFFER_SIZE 1024 /* max ao fifo size */
/*
* PCI BAR1 Register map (devpriv->pcibar1)
*/
#define PCIDAS_CTRL_REG 0x00 /* INTERRUPT / ADC FIFO register */
#define PCIDAS_CTRL_INT(x) (((x) & 0x3) << 0)
#define PCIDAS_CTRL_INT_NONE PCIDAS_CTRL_INT(0) /* no int selected */
#define PCIDAS_CTRL_INT_EOS PCIDAS_CTRL_INT(1) /* int on end of scan */
#define PCIDAS_CTRL_INT_FHF PCIDAS_CTRL_INT(2) /* int on fifo half full */
#define PCIDAS_CTRL_INT_FNE PCIDAS_CTRL_INT(3) /* int on fifo not empty */
#define PCIDAS_CTRL_INT_MASK PCIDAS_CTRL_INT(3) /* mask of int select bits */
#define PCIDAS_CTRL_INTE BIT(2) /* int enable */
#define PCIDAS_CTRL_DAHFIE BIT(3) /* dac half full int enable */
#define PCIDAS_CTRL_EOAIE BIT(4) /* end of acq. int enable */
#define PCIDAS_CTRL_DAHFI BIT(5) /* dac half full status / clear */
#define PCIDAS_CTRL_EOAI BIT(6) /* end of acq. int status / clear */
#define PCIDAS_CTRL_INT_CLR BIT(7) /* int status / clear */
#define PCIDAS_CTRL_EOBI BIT(9) /* end of burst int status */
#define PCIDAS_CTRL_ADHFI BIT(10) /* half-full int status */
#define PCIDAS_CTRL_ADNEI BIT(11) /* fifo not empty int status (latch) */
#define PCIDAS_CTRL_ADNE BIT(12) /* fifo not empty status (realtime) */
#define PCIDAS_CTRL_DAEMIE BIT(12) /* dac empty int enable */
#define PCIDAS_CTRL_LADFUL BIT(13) /* fifo overflow / clear */
#define PCIDAS_CTRL_DAEMI BIT(14) /* dac fifo empty int status / clear */
#define PCIDAS_CTRL_AI_INT (PCIDAS_CTRL_EOAI | PCIDAS_CTRL_EOBI | \
PCIDAS_CTRL_ADHFI | PCIDAS_CTRL_ADNEI | \
PCIDAS_CTRL_LADFUL)
#define PCIDAS_CTRL_AO_INT (PCIDAS_CTRL_DAHFI | PCIDAS_CTRL_DAEMI)
#define PCIDAS_AI_REG 0x02 /* ADC CHANNEL MUX AND CONTROL reg */
#define PCIDAS_AI_FIRST(x) ((x) & 0xf)
#define PCIDAS_AI_LAST(x) (((x) & 0xf) << 4)
#define PCIDAS_AI_CHAN(x) (PCIDAS_AI_FIRST(x) | PCIDAS_AI_LAST(x))
#define PCIDAS_AI_GAIN(x) (((x) & 0x3) << 8)
#define PCIDAS_AI_SE BIT(10) /* Inputs in single-ended mode */
#define PCIDAS_AI_UNIP BIT(11) /* Analog front-end unipolar mode */
#define PCIDAS_AI_PACER(x) (((x) & 0x3) << 12)
#define PCIDAS_AI_PACER_SW PCIDAS_AI_PACER(0) /* software pacer */
#define PCIDAS_AI_PACER_INT PCIDAS_AI_PACER(1) /* int. pacer */
#define PCIDAS_AI_PACER_EXTN PCIDAS_AI_PACER(2) /* ext. falling edge */
#define PCIDAS_AI_PACER_EXTP PCIDAS_AI_PACER(3) /* ext. rising edge */
#define PCIDAS_AI_PACER_MASK PCIDAS_AI_PACER(3) /* pacer source bits */
#define PCIDAS_AI_EOC BIT(14) /* adc not busy */
#define PCIDAS_TRIG_REG 0x04 /* TRIGGER CONTROL/STATUS register */
#define PCIDAS_TRIG_SEL(x) (((x) & 0x3) << 0)
#define PCIDAS_TRIG_SEL_NONE PCIDAS_TRIG_SEL(0) /* no start trigger */
#define PCIDAS_TRIG_SEL_SW PCIDAS_TRIG_SEL(1) /* software start trigger */
#define PCIDAS_TRIG_SEL_EXT PCIDAS_TRIG_SEL(2) /* ext. start trigger */
#define PCIDAS_TRIG_SEL_ANALOG PCIDAS_TRIG_SEL(3) /* ext. analog trigger */
#define PCIDAS_TRIG_SEL_MASK PCIDAS_TRIG_SEL(3) /* start trigger mask */
#define PCIDAS_TRIG_POL BIT(2) /* invert trigger (1602 only) */
#define PCIDAS_TRIG_MODE BIT(3) /* edge/level triggered (1602 only) */
#define PCIDAS_TRIG_EN BIT(4) /* enable external start trigger */
#define PCIDAS_TRIG_BURSTE BIT(5) /* burst mode enable */
#define PCIDAS_TRIG_CLR BIT(7) /* clear external trigger */
#define PCIDAS_CALIB_REG 0x06 /* CALIBRATION register */
#define PCIDAS_CALIB_8800_SEL BIT(8) /* select 8800 caldac */
#define PCIDAS_CALIB_TRIM_SEL BIT(9) /* select ad7376 trim pot */
#define PCIDAS_CALIB_DAC08_SEL BIT(10) /* select dac08 caldac */
#define PCIDAS_CALIB_SRC(x) (((x) & 0x7) << 11)
#define PCIDAS_CALIB_EN BIT(14) /* calibration source enable */
#define PCIDAS_CALIB_DATA BIT(15) /* serial data bit going to caldac */
#define PCIDAS_AO_REG 0x08 /* dac control and status register */
#define PCIDAS_AO_EMPTY BIT(0) /* fifo empty, write clear (1602) */
#define PCIDAS_AO_DACEN BIT(1) /* dac enable */
#define PCIDAS_AO_START BIT(2) /* start/arm fifo (1602) */
#define PCIDAS_AO_PACER(x) (((x) & 0x3) << 3) /* (1602) */
#define PCIDAS_AO_PACER_SW PCIDAS_AO_PACER(0) /* software pacer */
#define PCIDAS_AO_PACER_INT PCIDAS_AO_PACER(1) /* int. pacer */
#define PCIDAS_AO_PACER_EXTN PCIDAS_AO_PACER(2) /* ext. falling edge */
#define PCIDAS_AO_PACER_EXTP PCIDAS_AO_PACER(3) /* ext. rising edge */
#define PCIDAS_AO_PACER_MASK PCIDAS_AO_PACER(3) /* pacer source bits */
#define PCIDAS_AO_CHAN_EN(c) BIT(5 + ((c) & 0x1))
#define PCIDAS_AO_CHAN_MASK (PCIDAS_AO_CHAN_EN(0) | PCIDAS_AO_CHAN_EN(1))
#define PCIDAS_AO_UPDATE_BOTH BIT(7) /* update both dacs */
#define PCIDAS_AO_RANGE(c, r) (((r) & 0x3) << (8 + 2 * ((c) & 0x1)))
#define PCIDAS_AO_RANGE_MASK(c) PCIDAS_AO_RANGE((c), 0x3)
/*
* PCI BAR2 Register map (devpriv->pcibar2)
*/
#define PCIDAS_AI_DATA_REG 0x00
#define PCIDAS_AI_FIFO_CLR_REG 0x02
/*
* PCI BAR3 Register map (dev->iobase)
*/
#define PCIDAS_AI_8254_BASE 0x00
#define PCIDAS_8255_BASE 0x04
#define PCIDAS_AO_8254_BASE 0x08
/*
* PCI BAR4 Register map (devpriv->pcibar4)
*/
#define PCIDAS_AO_DATA_REG(x) (0x00 + ((x) * 2))
#define PCIDAS_AO_FIFO_REG 0x00
#define PCIDAS_AO_FIFO_CLR_REG 0x02
/* analog input ranges for most boards */
static const struct comedi_lrange cb_pcidas_ranges = {
8, {
BIP_RANGE(10),
BIP_RANGE(5),
BIP_RANGE(2.5),
BIP_RANGE(1.25),
UNI_RANGE(10),
UNI_RANGE(5),
UNI_RANGE(2.5),
UNI_RANGE(1.25)
}
};
/* pci-das1001 input ranges */
static const struct comedi_lrange cb_pcidas_alt_ranges = {
8, {
BIP_RANGE(10),
BIP_RANGE(1),
BIP_RANGE(0.1),
BIP_RANGE(0.01),
UNI_RANGE(10),
UNI_RANGE(1),
UNI_RANGE(0.1),
UNI_RANGE(0.01)
}
};
/* analog output ranges */
static const struct comedi_lrange cb_pcidas_ao_ranges = {
4, {
BIP_RANGE(5),
BIP_RANGE(10),
UNI_RANGE(5),
UNI_RANGE(10)
}
};
enum cb_pcidas_boardid {
BOARD_PCIDAS1602_16,
BOARD_PCIDAS1200,
BOARD_PCIDAS1602_12,
BOARD_PCIDAS1200_JR,
BOARD_PCIDAS1602_16_JR,
BOARD_PCIDAS1000,
BOARD_PCIDAS1001,
BOARD_PCIDAS1002,
};
struct cb_pcidas_board {
const char *name;
int ai_speed; /* fastest conversion period in ns */
int ao_scan_speed; /* analog output scan speed for 1602 series */
int fifo_size; /* number of samples fifo can hold */
unsigned int is_16bit; /* ai/ao is 1=16-bit; 0=12-bit */
unsigned int use_alt_range:1; /* use alternate ai range table */
unsigned int has_ao:1; /* has 2 analog output channels */
unsigned int has_ao_fifo:1; /* analog output has fifo */
unsigned int has_ad8402:1; /* trimpot type 1=AD8402; 0=AD7376 */
unsigned int has_dac08:1;
unsigned int is_1602:1;
};
static const struct cb_pcidas_board cb_pcidas_boards[] = {
[BOARD_PCIDAS1602_16] = {
.name = "pci-das1602/16",
.ai_speed = 5000,
.ao_scan_speed = 10000,
.fifo_size = 512,
.is_16bit = 1,
.has_ao = 1,
.has_ao_fifo = 1,
.has_ad8402 = 1,
.has_dac08 = 1,
.is_1602 = 1,
},
[BOARD_PCIDAS1200] = {
.name = "pci-das1200",
.ai_speed = 3200,
.fifo_size = 1024,
.has_ao = 1,
},
[BOARD_PCIDAS1602_12] = {
.name = "pci-das1602/12",
.ai_speed = 3200,
.ao_scan_speed = 4000,
.fifo_size = 1024,
.has_ao = 1,
.has_ao_fifo = 1,
.is_1602 = 1,
},
[BOARD_PCIDAS1200_JR] = {
.name = "pci-das1200/jr",
.ai_speed = 3200,
.fifo_size = 1024,
},
[BOARD_PCIDAS1602_16_JR] = {
.name = "pci-das1602/16/jr",
.ai_speed = 5000,
.fifo_size = 512,
.is_16bit = 1,
.has_ad8402 = 1,
.has_dac08 = 1,
.is_1602 = 1,
},
[BOARD_PCIDAS1000] = {
.name = "pci-das1000",
.ai_speed = 4000,
.fifo_size = 1024,
},
[BOARD_PCIDAS1001] = {
.name = "pci-das1001",
.ai_speed = 6800,
.fifo_size = 1024,
.use_alt_range = 1,
.has_ao = 1,
},
[BOARD_PCIDAS1002] = {
.name = "pci-das1002",
.ai_speed = 6800,
.fifo_size = 1024,
.has_ao = 1,
},
};
struct cb_pcidas_private {
struct comedi_8254 *ao_pacer;
/* base addresses */
unsigned long amcc; /* pcibar0 */
unsigned long pcibar1;
unsigned long pcibar2;
unsigned long pcibar4;
/* bits to write to registers */
unsigned int ctrl;
unsigned int amcc_intcsr;
unsigned int ao_ctrl;
/* fifo buffers */
unsigned short ai_buffer[AI_BUFFER_SIZE];
unsigned short ao_buffer[AO_BUFFER_SIZE];
unsigned int calib_src;
};
static int cb_pcidas_ai_eoc(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context)
{
struct cb_pcidas_private *devpriv = dev->private;
unsigned int status;
status = inw(devpriv->pcibar1 + PCIDAS_AI_REG);
if (status & PCIDAS_AI_EOC)
return 0;
return -EBUSY;
}
static int cb_pcidas_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct cb_pcidas_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int range = CR_RANGE(insn->chanspec);
unsigned int aref = CR_AREF(insn->chanspec);
unsigned int bits;
int ret;
int n;
/* enable calibration input if appropriate */
if (insn->chanspec & CR_ALT_SOURCE) {
outw(PCIDAS_CALIB_EN | PCIDAS_CALIB_SRC(devpriv->calib_src),
devpriv->pcibar1 + PCIDAS_CALIB_REG);
chan = 0;
} else {
outw(0, devpriv->pcibar1 + PCIDAS_CALIB_REG);
}
/* set mux limits and gain */
bits = PCIDAS_AI_CHAN(chan) | PCIDAS_AI_GAIN(range);
/* set unipolar/bipolar */
if (comedi_range_is_unipolar(s, range))
bits |= PCIDAS_AI_UNIP;
/* set single-ended/differential */
if (aref != AREF_DIFF)
bits |= PCIDAS_AI_SE;
outw(bits, devpriv->pcibar1 + PCIDAS_AI_REG);
/* clear fifo */
outw(0, devpriv->pcibar2 + PCIDAS_AI_FIFO_CLR_REG);
/* convert n samples */
for (n = 0; n < insn->n; n++) {
/* trigger conversion */
outw(0, devpriv->pcibar2 + PCIDAS_AI_DATA_REG);
/* wait for conversion to end */
ret = comedi_timeout(dev, s, insn, cb_pcidas_ai_eoc, 0);
if (ret)
return ret;
/* read data */
data[n] = inw(devpriv->pcibar2 + PCIDAS_AI_DATA_REG);
}
/* return the number of samples read/written */
return n;
}
static int cb_pcidas_ai_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct cb_pcidas_private *devpriv = dev->private;
int id = data[0];
unsigned int source = data[1];
switch (id) {
case INSN_CONFIG_ALT_SOURCE:
if (source >= 8) {
dev_err(dev->class_dev,
"invalid calibration source: %i\n",
source);
return -EINVAL;
}
devpriv->calib_src = source;
break;
default:
return -EINVAL;
}
return insn->n;
}
/* analog output insn for pcidas-1000 and 1200 series */
static int cb_pcidas_ao_nofifo_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct cb_pcidas_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int range = CR_RANGE(insn->chanspec);
unsigned int val = s->readback[chan];
unsigned long flags;
int i;
/* set channel and range */
spin_lock_irqsave(&dev->spinlock, flags);
devpriv->ao_ctrl &= ~(PCIDAS_AO_UPDATE_BOTH |
PCIDAS_AO_RANGE_MASK(chan));
devpriv->ao_ctrl |= PCIDAS_AO_DACEN | PCIDAS_AO_RANGE(chan, range);
outw(devpriv->ao_ctrl, devpriv->pcibar1 + PCIDAS_AO_REG);
spin_unlock_irqrestore(&dev->spinlock, flags);
for (i = 0; i < insn->n; i++) {
val = data[i];
outw(val, devpriv->pcibar4 + PCIDAS_AO_DATA_REG(chan));
}
s->readback[chan] = val;
return insn->n;
}
/* analog output insn for pcidas-1602 series */
static int cb_pcidas_ao_fifo_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct cb_pcidas_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int range = CR_RANGE(insn->chanspec);
unsigned int val = s->readback[chan];
unsigned long flags;
int i;
/* clear dac fifo */
outw(0, devpriv->pcibar4 + PCIDAS_AO_FIFO_CLR_REG);
/* set channel and range */
spin_lock_irqsave(&dev->spinlock, flags);
devpriv->ao_ctrl &= ~(PCIDAS_AO_CHAN_MASK | PCIDAS_AO_RANGE_MASK(chan) |
PCIDAS_AO_PACER_MASK);
devpriv->ao_ctrl |= PCIDAS_AO_DACEN | PCIDAS_AO_RANGE(chan, range) |
PCIDAS_AO_CHAN_EN(chan) | PCIDAS_AO_START;
outw(devpriv->ao_ctrl, devpriv->pcibar1 + PCIDAS_AO_REG);
spin_unlock_irqrestore(&dev->spinlock, flags);
for (i = 0; i < insn->n; i++) {
val = data[i];
outw(val, devpriv->pcibar4 + PCIDAS_AO_FIFO_REG);
}
s->readback[chan] = val;
return insn->n;
}
static int cb_pcidas_eeprom_ready(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context)
{
struct cb_pcidas_private *devpriv = dev->private;
unsigned int status;
status = inb(devpriv->amcc + AMCC_OP_REG_MCSR_NVCMD);
if ((status & MCSR_NV_BUSY) == 0)
return 0;
return -EBUSY;
}
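/*
 * Reads go through the AMCC S5933's serial NVRAM interface: load the low
 * and high address bytes, issue a read command, then poll the busy flag
 * before fetching the returned data byte.
 */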
static int cb_pcidas_eeprom_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct cb_pcidas_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
int ret;
int i;
for (i = 0; i < insn->n; i++) {
/* make sure eeprom is ready */
ret = comedi_timeout(dev, s, insn, cb_pcidas_eeprom_ready, 0);
if (ret)
return ret;
/* set address (chan) and read operation */
outb(MCSR_NV_ENABLE | MCSR_NV_LOAD_LOW_ADDR,
devpriv->amcc + AMCC_OP_REG_MCSR_NVCMD);
outb(chan & 0xff, devpriv->amcc + AMCC_OP_REG_MCSR_NVDATA);
outb(MCSR_NV_ENABLE | MCSR_NV_LOAD_HIGH_ADDR,
devpriv->amcc + AMCC_OP_REG_MCSR_NVCMD);
outb((chan >> 8) & 0xff,
devpriv->amcc + AMCC_OP_REG_MCSR_NVDATA);
outb(MCSR_NV_ENABLE | MCSR_NV_READ,
devpriv->amcc + AMCC_OP_REG_MCSR_NVCMD);
/* wait for data to be returned */
ret = comedi_timeout(dev, s, insn, cb_pcidas_eeprom_ready, 0);
if (ret)
return ret;
data[i] = inb(devpriv->amcc + AMCC_OP_REG_MCSR_NVDATA);
}
return insn->n;
}
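/*
 * Bit-bang a value to one of the serial calibration devices: bits are
 * shifted out MSB first on the calibration register's data line, and the
 * value is latched when the device's select bit is released.
 */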
static void cb_pcidas_calib_write(struct comedi_device *dev,
unsigned int val, unsigned int len,
bool trimpot)
{
struct cb_pcidas_private *devpriv = dev->private;
unsigned int calib_bits;
unsigned int bit;
calib_bits = PCIDAS_CALIB_EN | PCIDAS_CALIB_SRC(devpriv->calib_src);
if (trimpot) {
/* select trimpot */
calib_bits |= PCIDAS_CALIB_TRIM_SEL;
outw(calib_bits, devpriv->pcibar1 + PCIDAS_CALIB_REG);
}
/* write bitstream to calibration device */
for (bit = 1 << (len - 1); bit; bit >>= 1) {
if (val & bit)
calib_bits |= PCIDAS_CALIB_DATA;
else
calib_bits &= ~PCIDAS_CALIB_DATA;
udelay(1);
outw(calib_bits, devpriv->pcibar1 + PCIDAS_CALIB_REG);
}
udelay(1);
calib_bits = PCIDAS_CALIB_EN | PCIDAS_CALIB_SRC(devpriv->calib_src);
if (!trimpot) {
/* select caldac */
outw(calib_bits | PCIDAS_CALIB_8800_SEL,
devpriv->pcibar1 + PCIDAS_CALIB_REG);
udelay(1);
}
/* latch value to trimpot/caldac */
outw(calib_bits, devpriv->pcibar1 + PCIDAS_CALIB_REG);
}
static int cb_pcidas_caldac_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
if (insn->n) {
unsigned int val = data[insn->n - 1];
if (s->readback[chan] != val) {
/* write 11-bit channel/value to caldac */
cb_pcidas_calib_write(dev, (chan << 8) | val, 11,
false);
s->readback[chan] = val;
}
}
return insn->n;
}
static void cb_pcidas_dac08_write(struct comedi_device *dev, unsigned int val)
{
struct cb_pcidas_private *devpriv = dev->private;
val |= PCIDAS_CALIB_EN | PCIDAS_CALIB_SRC(devpriv->calib_src);
/* latch the new value into the caldac */
outw(val, devpriv->pcibar1 + PCIDAS_CALIB_REG);
udelay(1);
outw(val | PCIDAS_CALIB_DAC08_SEL,
devpriv->pcibar1 + PCIDAS_CALIB_REG);
udelay(1);
outw(val, devpriv->pcibar1 + PCIDAS_CALIB_REG);
udelay(1);
}
static int cb_pcidas_dac08_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
if (insn->n) {
unsigned int val = data[insn->n - 1];
if (s->readback[chan] != val) {
cb_pcidas_dac08_write(dev, val);
s->readback[chan] = val;
}
}
return insn->n;
}
static void cb_pcidas_trimpot_write(struct comedi_device *dev,
unsigned int chan, unsigned int val)
{
const struct cb_pcidas_board *board = dev->board_ptr;
if (board->has_ad8402) {
/* write 10-bit channel/value to AD8402 trimpot */
cb_pcidas_calib_write(dev, (chan << 8) | val, 10, true);
} else {
/* write 7-bit value to AD7376 trimpot */
cb_pcidas_calib_write(dev, val, 7, true);
}
}
static int cb_pcidas_trimpot_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
if (insn->n) {
unsigned int val = data[insn->n - 1];
if (s->readback[chan] != val) {
cb_pcidas_trimpot_write(dev, chan, val);
s->readback[chan] = val;
}
}
return insn->n;
}
static int cb_pcidas_ai_check_chanlist(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
unsigned int chan0 = CR_CHAN(cmd->chanlist[0]);
unsigned int range0 = CR_RANGE(cmd->chanlist[0]);
int i;
for (i = 1; i < cmd->chanlist_len; i++) {
unsigned int chan = CR_CHAN(cmd->chanlist[i]);
unsigned int range = CR_RANGE(cmd->chanlist[i]);
if (chan != (chan0 + i) % s->n_chan) {
dev_dbg(dev->class_dev,
"entries in chanlist must be consecutive channels, counting upwards\n");
return -EINVAL;
}
if (range != range0) {
dev_dbg(dev->class_dev,
"entries in chanlist must all have the same gain\n");
return -EINVAL;
}
}
return 0;
}
static int cb_pcidas_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
const struct cb_pcidas_board *board = dev->board_ptr;
int err = 0;
unsigned int arg;
/* Step 1 : check if triggers are trivially valid */
err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
err |= comedi_check_trigger_src(&cmd->scan_begin_src,
TRIG_FOLLOW | TRIG_TIMER | TRIG_EXT);
err |= comedi_check_trigger_src(&cmd->convert_src,
TRIG_TIMER | TRIG_NOW | TRIG_EXT);
err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
if (err)
return 1;
/* Step 2a : make sure trigger sources are unique */
err |= comedi_check_trigger_is_unique(cmd->start_src);
err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
err |= comedi_check_trigger_is_unique(cmd->convert_src);
err |= comedi_check_trigger_is_unique(cmd->stop_src);
/* Step 2b : and mutually compatible */
if (cmd->scan_begin_src == TRIG_FOLLOW && cmd->convert_src == TRIG_NOW)
err |= -EINVAL;
if (cmd->scan_begin_src != TRIG_FOLLOW && cmd->convert_src != TRIG_NOW)
err |= -EINVAL;
if (cmd->start_src == TRIG_EXT &&
(cmd->convert_src == TRIG_EXT || cmd->scan_begin_src == TRIG_EXT))
err |= -EINVAL;
if (err)
return 2;
/* Step 3: check if arguments are trivially valid */
switch (cmd->start_src) {
case TRIG_NOW:
err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
break;
case TRIG_EXT:
/* External trigger, only CR_EDGE and CR_INVERT flags allowed */
if ((cmd->start_arg
& (CR_FLAGS_MASK & ~(CR_EDGE | CR_INVERT))) != 0) {
cmd->start_arg &= ~(CR_FLAGS_MASK &
~(CR_EDGE | CR_INVERT));
err |= -EINVAL;
}
if (!board->is_1602 && (cmd->start_arg & CR_INVERT)) {
cmd->start_arg &= (CR_FLAGS_MASK & ~CR_INVERT);
err |= -EINVAL;
}
break;
}
if (cmd->scan_begin_src == TRIG_TIMER) {
err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
board->ai_speed *
cmd->chanlist_len);
}
if (cmd->convert_src == TRIG_TIMER) {
err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
board->ai_speed);
}
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
if (cmd->stop_src == TRIG_COUNT)
err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
else /* TRIG_NONE */
err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
if (err)
return 3;
/* step 4: fix up any arguments */
if (cmd->scan_begin_src == TRIG_TIMER) {
arg = cmd->scan_begin_arg;
comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
}
if (cmd->convert_src == TRIG_TIMER) {
arg = cmd->convert_arg;
comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
}
if (err)
return 4;
/* Step 5: check channel list if it exists */
if (cmd->chanlist && cmd->chanlist_len > 0)
err |= cb_pcidas_ai_check_chanlist(dev, s, cmd);
if (err)
return 5;
return 0;
}
static int cb_pcidas_ai_cmd(struct comedi_device *dev,
struct comedi_subdevice *s)
{
const struct cb_pcidas_board *board = dev->board_ptr;
struct cb_pcidas_private *devpriv = dev->private;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
unsigned int range0 = CR_RANGE(cmd->chanlist[0]);
unsigned int bits;
unsigned long flags;
/* make sure PCIDAS_CALIB_EN is disabled */
outw(0, devpriv->pcibar1 + PCIDAS_CALIB_REG);
	/* initialize before setting pacer source and count values */
outw(PCIDAS_TRIG_SEL_NONE, devpriv->pcibar1 + PCIDAS_TRIG_REG);
/* clear fifo */
outw(0, devpriv->pcibar2 + PCIDAS_AI_FIFO_CLR_REG);
/* set mux limits, gain and pacer source */
bits = PCIDAS_AI_FIRST(CR_CHAN(cmd->chanlist[0])) |
PCIDAS_AI_LAST(CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1])) |
PCIDAS_AI_GAIN(range0);
/* set unipolar/bipolar */
if (comedi_range_is_unipolar(s, range0))
bits |= PCIDAS_AI_UNIP;
	/* set single-ended/differential */
if (CR_AREF(cmd->chanlist[0]) != AREF_DIFF)
bits |= PCIDAS_AI_SE;
/* set pacer source */
if (cmd->convert_src == TRIG_EXT || cmd->scan_begin_src == TRIG_EXT)
bits |= PCIDAS_AI_PACER_EXTP;
else
bits |= PCIDAS_AI_PACER_INT;
outw(bits, devpriv->pcibar1 + PCIDAS_AI_REG);
/* load counters */
if (cmd->scan_begin_src == TRIG_TIMER ||
cmd->convert_src == TRIG_TIMER) {
comedi_8254_update_divisors(dev->pacer);
comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
}
/* enable interrupts */
spin_lock_irqsave(&dev->spinlock, flags);
devpriv->ctrl |= PCIDAS_CTRL_INTE;
devpriv->ctrl &= ~PCIDAS_CTRL_INT_MASK;
if (cmd->flags & CMDF_WAKE_EOS) {
if (cmd->convert_src == TRIG_NOW && cmd->chanlist_len > 1) {
/* interrupt end of burst */
devpriv->ctrl |= PCIDAS_CTRL_INT_EOS;
} else {
/* interrupt fifo not empty */
devpriv->ctrl |= PCIDAS_CTRL_INT_FNE;
}
} else {
/* interrupt fifo half full */
devpriv->ctrl |= PCIDAS_CTRL_INT_FHF;
}
/* enable (and clear) interrupts */
outw(devpriv->ctrl |
PCIDAS_CTRL_EOAI | PCIDAS_CTRL_INT_CLR | PCIDAS_CTRL_LADFUL,
devpriv->pcibar1 + PCIDAS_CTRL_REG);
spin_unlock_irqrestore(&dev->spinlock, flags);
/* set start trigger and burst mode */
bits = 0;
if (cmd->start_src == TRIG_NOW) {
bits |= PCIDAS_TRIG_SEL_SW;
} else { /* TRIG_EXT */
bits |= PCIDAS_TRIG_SEL_EXT | PCIDAS_TRIG_EN | PCIDAS_TRIG_CLR;
if (board->is_1602) {
if (cmd->start_arg & CR_INVERT)
bits |= PCIDAS_TRIG_POL;
if (cmd->start_arg & CR_EDGE)
bits |= PCIDAS_TRIG_MODE;
}
}
if (cmd->convert_src == TRIG_NOW && cmd->chanlist_len > 1)
bits |= PCIDAS_TRIG_BURSTE;
outw(bits, devpriv->pcibar1 + PCIDAS_TRIG_REG);
return 0;
}
static int cb_pcidas_ao_check_chanlist(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
unsigned int chan0 = CR_CHAN(cmd->chanlist[0]);
if (cmd->chanlist_len > 1) {
unsigned int chan1 = CR_CHAN(cmd->chanlist[1]);
if (chan0 != 0 || chan1 != 1) {
dev_dbg(dev->class_dev,
"channels must be ordered channel 0, channel 1 in chanlist\n");
return -EINVAL;
}
}
return 0;
}
static int cb_pcidas_ao_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
const struct cb_pcidas_board *board = dev->board_ptr;
struct cb_pcidas_private *devpriv = dev->private;
int err = 0;
/* Step 1 : check if triggers are trivially valid */
err |= comedi_check_trigger_src(&cmd->start_src, TRIG_INT);
err |= comedi_check_trigger_src(&cmd->scan_begin_src,
TRIG_TIMER | TRIG_EXT);
err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
if (err)
return 1;
/* Step 2a : make sure trigger sources are unique */
err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
err |= comedi_check_trigger_is_unique(cmd->stop_src);
/* Step 2b : and mutually compatible */
if (err)
return 2;
/* Step 3: check if arguments are trivially valid */
err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
if (cmd->scan_begin_src == TRIG_TIMER) {
err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
board->ao_scan_speed);
}
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
if (cmd->stop_src == TRIG_COUNT)
err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
else /* TRIG_NONE */
err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
if (err)
return 3;
/* step 4: fix up any arguments */
if (cmd->scan_begin_src == TRIG_TIMER) {
unsigned int arg = cmd->scan_begin_arg;
comedi_8254_cascade_ns_to_timer(devpriv->ao_pacer,
&arg, cmd->flags);
err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
}
if (err)
return 4;
/* Step 5: check channel list if it exists */
if (cmd->chanlist && cmd->chanlist_len > 0)
err |= cb_pcidas_ao_check_chanlist(dev, s, cmd);
if (err)
return 5;
return 0;
}
static int cb_pcidas_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct cb_pcidas_private *devpriv = dev->private;
unsigned long flags;
spin_lock_irqsave(&dev->spinlock, flags);
/* disable interrupts */
devpriv->ctrl &= ~(PCIDAS_CTRL_INTE | PCIDAS_CTRL_EOAIE);
outw(devpriv->ctrl, devpriv->pcibar1 + PCIDAS_CTRL_REG);
spin_unlock_irqrestore(&dev->spinlock, flags);
/* disable start trigger source and burst mode */
outw(PCIDAS_TRIG_SEL_NONE, devpriv->pcibar1 + PCIDAS_TRIG_REG);
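/* reset the ai pacer source to software triggering */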
outw(PCIDAS_AI_PACER_SW, devpriv->pcibar1 + PCIDAS_AI_REG);
return 0;
}
static void cb_pcidas_ao_load_fifo(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int nsamples)
{
struct cb_pcidas_private *devpriv = dev->private;
unsigned int nbytes;
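/*
* comedi_buf_read_samples() returns a byte count and may supply fewer
* samples than requested, so clamp first and convert the byte count
* back to samples before bursting to the FIFO.
*/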
nsamples = comedi_nsamples_left(s, nsamples);
nbytes = comedi_buf_read_samples(s, devpriv->ao_buffer, nsamples);
nsamples = comedi_bytes_to_samples(s, nbytes);
outsw(devpriv->pcibar4 + PCIDAS_AO_FIFO_REG,
devpriv->ao_buffer, nsamples);
}
static int cb_pcidas_ao_inttrig(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int trig_num)
{
const struct cb_pcidas_board *board = dev->board_ptr;
struct cb_pcidas_private *devpriv = dev->private;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
unsigned long flags;
if (trig_num != cmd->start_arg)
return -EINVAL;
cb_pcidas_ao_load_fifo(dev, s, board->fifo_size);
/* enable dac half-full and empty interrupts */
spin_lock_irqsave(&dev->spinlock, flags);
devpriv->ctrl |= PCIDAS_CTRL_DAEMIE | PCIDAS_CTRL_DAHFIE;
/* enable and clear interrupts */
outw(devpriv->ctrl | PCIDAS_CTRL_DAEMI | PCIDAS_CTRL_DAHFI,
devpriv->pcibar1 + PCIDAS_CTRL_REG);
/* start dac */
devpriv->ao_ctrl |= PCIDAS_AO_START | PCIDAS_AO_DACEN | PCIDAS_AO_EMPTY;
outw(devpriv->ao_ctrl, devpriv->pcibar1 + PCIDAS_AO_REG);
spin_unlock_irqrestore(&dev->spinlock, flags);
async->inttrig = NULL;
return 0;
}
static int cb_pcidas_ao_cmd(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct cb_pcidas_private *devpriv = dev->private;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
unsigned int i;
unsigned long flags;
/* set channel limits, gain */
spin_lock_irqsave(&dev->spinlock, flags);
for (i = 0; i < cmd->chanlist_len; i++) {
unsigned int chan = CR_CHAN(cmd->chanlist[i]);
unsigned int range = CR_RANGE(cmd->chanlist[i]);
/* enable channel */
devpriv->ao_ctrl |= PCIDAS_AO_CHAN_EN(chan);
/* set range */
devpriv->ao_ctrl |= PCIDAS_AO_RANGE(chan, range);
}
/* disable analog output before setting pacer source and count values */
outw(devpriv->ao_ctrl, devpriv->pcibar1 + PCIDAS_AO_REG);
spin_unlock_irqrestore(&dev->spinlock, flags);
/* clear fifo */
outw(0, devpriv->pcibar4 + PCIDAS_AO_FIFO_CLR_REG);
/* load counters */
if (cmd->scan_begin_src == TRIG_TIMER) {
comedi_8254_update_divisors(devpriv->ao_pacer);
comedi_8254_pacer_enable(devpriv->ao_pacer, 1, 2, true);
}
/* set pacer source */
spin_lock_irqsave(&dev->spinlock, flags);
switch (cmd->scan_begin_src) {
case TRIG_TIMER:
devpriv->ao_ctrl |= PCIDAS_AO_PACER_INT;
break;
case TRIG_EXT:
devpriv->ao_ctrl |= PCIDAS_AO_PACER_EXTP;
break;
default:
spin_unlock_irqrestore(&dev->spinlock, flags);
dev_err(dev->class_dev, "error setting dac pacer source\n");
return -1;
}
spin_unlock_irqrestore(&dev->spinlock, flags);
async->inttrig = cb_pcidas_ao_inttrig;
return 0;
}
static int cb_pcidas_ao_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct cb_pcidas_private *devpriv = dev->private;
unsigned long flags;
spin_lock_irqsave(&dev->spinlock, flags);
/* disable interrupts */
devpriv->ctrl &= ~(PCIDAS_CTRL_DAHFIE | PCIDAS_CTRL_DAEMIE);
outw(devpriv->ctrl, devpriv->pcibar1 + PCIDAS_CTRL_REG);
/* disable output */
devpriv->ao_ctrl &= ~(PCIDAS_AO_DACEN | PCIDAS_AO_PACER_MASK);
outw(devpriv->ao_ctrl, devpriv->pcibar1 + PCIDAS_AO_REG);
spin_unlock_irqrestore(&dev->spinlock, flags);
return 0;
}
static unsigned int cb_pcidas_ao_interrupt(struct comedi_device *dev,
unsigned int status)
{
const struct cb_pcidas_board *board = dev->board_ptr;
struct cb_pcidas_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->write_subdev;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
unsigned int irq_clr = 0;
if (status & PCIDAS_CTRL_DAEMI) {
irq_clr |= PCIDAS_CTRL_DAEMI;
if (inw(devpriv->pcibar4 + PCIDAS_AO_REG) & PCIDAS_AO_EMPTY) {
if (cmd->stop_src == TRIG_COUNT &&
async->scans_done >= cmd->stop_arg) {
async->events |= COMEDI_CB_EOA;
} else {
dev_err(dev->class_dev, "dac fifo underflow\n");
async->events |= COMEDI_CB_ERROR;
}
}
} else if (status & PCIDAS_CTRL_DAHFI) {
irq_clr |= PCIDAS_CTRL_DAHFI;
cb_pcidas_ao_load_fifo(dev, s, board->fifo_size / 2);
}
comedi_handle_events(dev, s);
return irq_clr;
}
static unsigned int cb_pcidas_ai_interrupt(struct comedi_device *dev,
unsigned int status)
{
const struct cb_pcidas_board *board = dev->board_ptr;
struct cb_pcidas_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
unsigned int irq_clr = 0;
if (status & PCIDAS_CTRL_ADHFI) {
unsigned int num_samples;
irq_clr |= PCIDAS_CTRL_INT_CLR;
/* FIFO is half-full - read data */
num_samples = comedi_nsamples_left(s, board->fifo_size / 2);
insw(devpriv->pcibar2 + PCIDAS_AI_DATA_REG,
devpriv->ai_buffer, num_samples);
comedi_buf_write_samples(s, devpriv->ai_buffer, num_samples);
if (cmd->stop_src == TRIG_COUNT &&
async->scans_done >= cmd->stop_arg)
async->events |= COMEDI_CB_EOA;
} else if (status & (PCIDAS_CTRL_ADNEI | PCIDAS_CTRL_EOBI)) {
unsigned int i;
irq_clr |= PCIDAS_CTRL_INT_CLR;
/* FIFO is not empty - read data until empty or timeout */
for (i = 0; i < 10000; i++) {
unsigned short val;
/* break if fifo is empty */
if ((inw(devpriv->pcibar1 + PCIDAS_CTRL_REG) &
PCIDAS_CTRL_ADNE) == 0)
break;
val = inw(devpriv->pcibar2 + PCIDAS_AI_DATA_REG);
comedi_buf_write_samples(s, &val, 1);
if (cmd->stop_src == TRIG_COUNT &&
async->scans_done >= cmd->stop_arg) {
async->events |= COMEDI_CB_EOA;
break;
}
}
} else if (status & PCIDAS_CTRL_EOAI) {
irq_clr |= PCIDAS_CTRL_EOAI;
dev_err(dev->class_dev,
"bug! encountered end of acquisition interrupt?\n");
}
/* check for fifo overflow */
if (status & PCIDAS_CTRL_LADFUL) {
irq_clr |= PCIDAS_CTRL_LADFUL;
dev_err(dev->class_dev, "fifo overflow\n");
async->events |= COMEDI_CB_ERROR;
}
comedi_handle_events(dev, s);
return irq_clr;
}
static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct cb_pcidas_private *devpriv = dev->private;
unsigned int irq_clr = 0;
unsigned int amcc_status;
unsigned int status;
if (!dev->attached)
return IRQ_NONE;
amcc_status = inl(devpriv->amcc + AMCC_OP_REG_INTCSR);
if ((INTCSR_INTR_ASSERTED & amcc_status) == 0)
return IRQ_NONE;
/* make sure mailbox 4 is empty */
inl_p(devpriv->amcc + AMCC_OP_REG_IMB4);
/* clear interrupt on amcc s5933 */
outl(devpriv->amcc_intcsr | INTCSR_INBOX_INTR_STATUS,
devpriv->amcc + AMCC_OP_REG_INTCSR);
status = inw(devpriv->pcibar1 + PCIDAS_CTRL_REG);
/* handle analog output interrupts */
if (status & PCIDAS_CTRL_AO_INT)
irq_clr |= cb_pcidas_ao_interrupt(dev, status);
/* handle analog input interrupts */
if (status & PCIDAS_CTRL_AI_INT)
irq_clr |= cb_pcidas_ai_interrupt(dev, status);
if (irq_clr) {
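/*
* The latched status bits collected in irq_clr are cleared by
* writing 1s back to the control register, so OR them into the
* cached control value to ack without disturbing the enables.
*/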
unsigned long flags;
spin_lock_irqsave(&dev->spinlock, flags);
outw(devpriv->ctrl | irq_clr,
devpriv->pcibar1 + PCIDAS_CTRL_REG);
spin_unlock_irqrestore(&dev->spinlock, flags);
}
return IRQ_HANDLED;
}
static int cb_pcidas_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct cb_pcidas_board *board = NULL;
struct cb_pcidas_private *devpriv;
struct comedi_subdevice *s;
int i;
int ret;
if (context < ARRAY_SIZE(cb_pcidas_boards))
board = &cb_pcidas_boards[context];
if (!board)
return -ENODEV;
dev->board_ptr = board;
dev->board_name = board->name;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
ret = comedi_pci_enable(dev);
if (ret)
return ret;
devpriv->amcc = pci_resource_start(pcidev, 0);
devpriv->pcibar1 = pci_resource_start(pcidev, 1);
devpriv->pcibar2 = pci_resource_start(pcidev, 2);
dev->iobase = pci_resource_start(pcidev, 3);
if (board->has_ao)
devpriv->pcibar4 = pci_resource_start(pcidev, 4);
/* disable and clear interrupts on amcc s5933 */
outl(INTCSR_INBOX_INTR_STATUS,
devpriv->amcc + AMCC_OP_REG_INTCSR);
ret = request_irq(pcidev->irq, cb_pcidas_interrupt, IRQF_SHARED,
"cb_pcidas", dev);
if (ret) {
dev_dbg(dev->class_dev, "unable to allocate irq %d\n",
pcidev->irq);
return ret;
}
dev->irq = pcidev->irq;
dev->pacer = comedi_8254_init(dev->iobase + PCIDAS_AI_8254_BASE,
I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
if (!dev->pacer)
return -ENOMEM;
devpriv->ao_pacer = comedi_8254_init(dev->iobase + PCIDAS_AO_8254_BASE,
I8254_OSC_BASE_10MHZ,
I8254_IO8, 0);
if (!devpriv->ao_pacer)
return -ENOMEM;
ret = comedi_alloc_subdevices(dev, 7);
if (ret)
return ret;
/* Analog Input subdevice */
s = &dev->subdevices[0];
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
s->n_chan = 16;
s->maxdata = board->is_16bit ? 0xffff : 0x0fff;
s->range_table = board->use_alt_range ? &cb_pcidas_alt_ranges
: &cb_pcidas_ranges;
s->insn_read = cb_pcidas_ai_insn_read;
s->insn_config = cb_pcidas_ai_insn_config;
if (dev->irq) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
s->len_chanlist = s->n_chan;
s->do_cmd = cb_pcidas_ai_cmd;
s->do_cmdtest = cb_pcidas_ai_cmdtest;
s->cancel = cb_pcidas_ai_cancel;
}
/* Analog Output subdevice */
s = &dev->subdevices[1];
if (board->has_ao) {
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
s->n_chan = 2;
s->maxdata = board->is_16bit ? 0xffff : 0x0fff;
s->range_table = &cb_pcidas_ao_ranges;
s->insn_write = (board->has_ao_fifo)
? cb_pcidas_ao_fifo_insn_write
: cb_pcidas_ao_nofifo_insn_write;
ret = comedi_alloc_subdev_readback(s);
if (ret)
return ret;
if (dev->irq && board->has_ao_fifo) {
dev->write_subdev = s;
s->subdev_flags |= SDF_CMD_WRITE;
s->len_chanlist = s->n_chan;
s->do_cmdtest = cb_pcidas_ao_cmdtest;
s->do_cmd = cb_pcidas_ao_cmd;
s->cancel = cb_pcidas_ao_cancel;
}
} else {
s->type = COMEDI_SUBD_UNUSED;
}
/* 8255 */
s = &dev->subdevices[2];
ret = subdev_8255_init(dev, s, NULL, PCIDAS_8255_BASE);
if (ret)
return ret;
/* Memory subdevice - serial EEPROM */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_MEMORY;
s->subdev_flags = SDF_READABLE | SDF_INTERNAL;
s->n_chan = 256;
s->maxdata = 0xff;
s->insn_read = cb_pcidas_eeprom_insn_read;
/* Calibration subdevice - 8800 caldac */
s = &dev->subdevices[4];
s->type = COMEDI_SUBD_CALIB;
s->subdev_flags = SDF_WRITABLE | SDF_INTERNAL;
s->n_chan = 8;
s->maxdata = 0xff;
s->insn_write = cb_pcidas_caldac_insn_write;
ret = comedi_alloc_subdev_readback(s);
if (ret)
return ret;
for (i = 0; i < s->n_chan; i++) {
unsigned int val = s->maxdata / 2;
/* write 11-bit channel/value to caldac */
cb_pcidas_calib_write(dev, (i << 8) | val, 11, false);
s->readback[i] = val;
}
/* Calibration subdevice - trim potentiometer */
s = &dev->subdevices[5];
s->type = COMEDI_SUBD_CALIB;
s->subdev_flags = SDF_WRITABLE | SDF_INTERNAL;
if (board->has_ad8402) {
/*
* pci-das1602/16 have an AD8402 trimpot:
* chan 0 : adc gain
* chan 1 : adc postgain offset
*/
s->n_chan = 2;
s->maxdata = 0xff;
} else {
/* all other boards have an AD7376 trimpot */
s->n_chan = 1;
s->maxdata = 0x7f;
}
s->insn_write = cb_pcidas_trimpot_insn_write;
ret = comedi_alloc_subdev_readback(s);
if (ret)
return ret;
for (i = 0; i < s->n_chan; i++) {
cb_pcidas_trimpot_write(dev, i, s->maxdata / 2);
s->readback[i] = s->maxdata / 2;
}
/* Calibration subdevice - pci-das1602/16 pregain offset (dac08) */
s = &dev->subdevices[6];
if (board->has_dac08) {
s->type = COMEDI_SUBD_CALIB;
s->subdev_flags = SDF_WRITABLE | SDF_INTERNAL;
s->n_chan = 1;
s->maxdata = 0xff;
s->insn_write = cb_pcidas_dac08_insn_write;
ret = comedi_alloc_subdev_readback(s);
if (ret)
return ret;
for (i = 0; i < s->n_chan; i++) {
cb_pcidas_dac08_write(dev, s->maxdata / 2);
s->readback[i] = s->maxdata / 2;
}
} else {
s->type = COMEDI_SUBD_UNUSED;
}
/* make sure mailbox 4 is empty */
inl(devpriv->amcc + AMCC_OP_REG_IMB4);
/* Set bits to enable incoming mailbox interrupts on amcc s5933. */
devpriv->amcc_intcsr = INTCSR_INBOX_BYTE(3) | INTCSR_INBOX_SELECT(3) |
INTCSR_INBOX_FULL_INT;
/* clear and enable interrupt on amcc s5933 */
outl(devpriv->amcc_intcsr | INTCSR_INBOX_INTR_STATUS,
devpriv->amcc + AMCC_OP_REG_INTCSR);
return 0;
}
static void cb_pcidas_detach(struct comedi_device *dev)
{
struct cb_pcidas_private *devpriv = dev->private;
if (devpriv) {
if (devpriv->amcc)
outl(INTCSR_INBOX_INTR_STATUS,
devpriv->amcc + AMCC_OP_REG_INTCSR);
kfree(devpriv->ao_pacer);
}
comedi_pci_detach(dev);
}
static struct comedi_driver cb_pcidas_driver = {
.driver_name = "cb_pcidas",
.module = THIS_MODULE,
.auto_attach = cb_pcidas_auto_attach,
.detach = cb_pcidas_detach,
};
static int cb_pcidas_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
return comedi_pci_auto_config(dev, &cb_pcidas_driver,
id->driver_data);
}
static const struct pci_device_id cb_pcidas_pci_table[] = {
{ PCI_VDEVICE(CB, 0x0001), BOARD_PCIDAS1602_16 },
{ PCI_VDEVICE(CB, 0x000f), BOARD_PCIDAS1200 },
{ PCI_VDEVICE(CB, 0x0010), BOARD_PCIDAS1602_12 },
{ PCI_VDEVICE(CB, 0x0019), BOARD_PCIDAS1200_JR },
{ PCI_VDEVICE(CB, 0x001c), BOARD_PCIDAS1602_16_JR },
{ PCI_VDEVICE(CB, 0x004c), BOARD_PCIDAS1000 },
{ PCI_VDEVICE(CB, 0x001a), BOARD_PCIDAS1001 },
{ PCI_VDEVICE(CB, 0x001b), BOARD_PCIDAS1002 },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, cb_pcidas_pci_table);
static struct pci_driver cb_pcidas_pci_driver = {
.name = "cb_pcidas",
.id_table = cb_pcidas_pci_table,
.probe = cb_pcidas_pci_probe,
.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(cb_pcidas_driver, cb_pcidas_pci_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi driver for MeasurementComputing PCI-DAS series");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/cb_pcidas.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi_parport.c
* Comedi driver for standard parallel port
*
* For more information see:
* http://retired.beyondlogic.org/spp/parallel.htm
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998,2001 David A. Schleef <[email protected]>
*/
/*
* Driver: comedi_parport
* Description: Standard PC parallel port
* Author: ds
* Status: works in immediate mode
* Devices: [standard] parallel port (comedi_parport)
* Updated: Tue, 30 Apr 2002 21:11:45 -0700
*
* A cheap and easy way to get a few more digital I/O lines. Steal
* additional parallel ports from old computers or your neighbors'
* computers.
*
* Option list:
* 0: I/O port base for the parallel port.
* 1: IRQ (optional)
*
* Parallel Port Lines:
*
* pin subdev chan type name
* ----- ------ ---- ---- --------------
* 1 2 0 DO strobe
* 2 0 0 DIO data 0
* 3 0 1 DIO data 1
* 4 0 2 DIO data 2
* 5 0 3 DIO data 3
* 6 0 4 DIO data 4
* 7 0 5 DIO data 5
* 8 0 6 DIO data 6
* 9 0 7 DIO data 7
* 10 1 3 DI ack
* 11 1 4 DI busy
* 12 1 2 DI paper out
* 13 1 1 DI select in
* 14 2 1 DO auto LF
* 15 1 0 DI error
* 16 2 2 DO init
* 17 2 3 DO select printer
* 18-25 ground
*
* When an IRQ is configured, subdevice 3 pretends to be a digital
* input subdevice, but it always returns 0 when read. However, if
* you run a command with scan_begin_src=TRIG_EXT, it uses pin 10
* as an external trigger, which can be used to wake up tasks.
*/
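/*
* Illustrative user-space sketch (assumptions: comedilib is installed
* and the board was attached as /dev/comedi0; both are examples, not
* part of this driver). It raises DATA line 3 (pin 5) and reads the
* "busy" status input (pin 11):
*
*	comedi_t *it = comedi_open("/dev/comedi0");
*	lsampl_t busy;
*
*	comedi_dio_config(it, 0, 3, COMEDI_OUTPUT);
*	comedi_data_write(it, 0, 3, 0, AREF_GROUND, 1);
*	comedi_data_read(it, 1, 4, 0, AREF_GROUND, &busy);
*/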
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/comedi/comedidev.h>
/*
* Register map
*/
#define PARPORT_DATA_REG 0x00
#define PARPORT_STATUS_REG 0x01
#define PARPORT_CTRL_REG 0x02
#define PARPORT_CTRL_IRQ_ENA BIT(4)
#define PARPORT_CTRL_BIDIR_ENA BIT(5)
static int parport_data_reg_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
if (comedi_dio_update_state(s, data))
outb(s->state, dev->iobase + PARPORT_DATA_REG);
data[1] = inb(dev->iobase + PARPORT_DATA_REG);
return insn->n;
}
static int parport_data_reg_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int ctrl;
int ret;
ret = comedi_dio_insn_config(dev, s, insn, data, 0xff);
if (ret)
return ret;
ctrl = inb(dev->iobase + PARPORT_CTRL_REG);
if (s->io_bits)
ctrl &= ~PARPORT_CTRL_BIDIR_ENA;
else
ctrl |= PARPORT_CTRL_BIDIR_ENA;
outb(ctrl, dev->iobase + PARPORT_CTRL_REG);
return insn->n;
}
static int parport_status_reg_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
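/*
* Status bits 3..7 (error, select in, paper out, ack, busy) map to
* channels 0..4, hence the shift.
*/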
data[1] = inb(dev->iobase + PARPORT_STATUS_REG) >> 3;
return insn->n;
}
static int parport_ctrl_reg_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int ctrl;
if (comedi_dio_update_state(s, data)) {
ctrl = inb(dev->iobase + PARPORT_CTRL_REG);
ctrl &= (PARPORT_CTRL_IRQ_ENA | PARPORT_CTRL_BIDIR_ENA);
ctrl |= s->state;
outb(ctrl, dev->iobase + PARPORT_CTRL_REG);
}
data[1] = s->state;
return insn->n;
}
static int parport_intr_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
data[1] = 0;
return insn->n;
}
static int parport_intr_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
int err = 0;
/* Step 1 : check if triggers are trivially valid */
err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW);
err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_NONE);
if (err)
return 1;
/* Step 2a : make sure trigger sources are unique */
/* Step 2b : and mutually compatible */
/* Step 3: check if arguments are trivially valid */
err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
if (err)
return 3;
/* Step 4: fix up any arguments */
/* Step 5: check channel list if it exists */
return 0;
}
static int parport_intr_cmd(struct comedi_device *dev,
struct comedi_subdevice *s)
{
unsigned int ctrl;
ctrl = inb(dev->iobase + PARPORT_CTRL_REG);
ctrl |= PARPORT_CTRL_IRQ_ENA;
outb(ctrl, dev->iobase + PARPORT_CTRL_REG);
return 0;
}
static int parport_intr_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
unsigned int ctrl;
ctrl = inb(dev->iobase + PARPORT_CTRL_REG);
ctrl &= ~PARPORT_CTRL_IRQ_ENA;
outb(ctrl, dev->iobase + PARPORT_CTRL_REG);
return 0;
}
static irqreturn_t parport_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct comedi_subdevice *s = dev->read_subdev;
unsigned int ctrl;
unsigned short val = 0;
ctrl = inb(dev->iobase + PARPORT_CTRL_REG);
if (!(ctrl & PARPORT_CTRL_IRQ_ENA))
return IRQ_NONE;
comedi_buf_write_samples(s, &val, 1);
comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
static int parport_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
struct comedi_subdevice *s;
int ret;
ret = comedi_request_region(dev, it->options[0], 0x03);
if (ret)
return ret;
if (it->options[1]) {
ret = request_irq(it->options[1], parport_interrupt, 0,
dev->board_name, dev);
if (ret == 0)
dev->irq = it->options[1];
}
ret = comedi_alloc_subdevices(dev, dev->irq ? 4 : 3);
if (ret)
return ret;
/* Digital I/O subdevice - Parallel port DATA register */
s = &dev->subdevices[0];
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = 8;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = parport_data_reg_insn_bits;
s->insn_config = parport_data_reg_insn_config;
/* Digital Input subdevice - Parallel port STATUS register */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = 5;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = parport_status_reg_insn_bits;
/* Digital Output subdevice - Parallel port CONTROL register */
s = &dev->subdevices[2];
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 4;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = parport_ctrl_reg_insn_bits;
if (dev->irq) {
/* Digital Input subdevice - Interrupt support */
s = &dev->subdevices[3];
dev->read_subdev = s;
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
s->n_chan = 1;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = parport_intr_insn_bits;
s->len_chanlist = 1;
s->do_cmdtest = parport_intr_cmdtest;
s->do_cmd = parport_intr_cmd;
s->cancel = parport_intr_cancel;
}
outb(0, dev->iobase + PARPORT_DATA_REG);
outb(0, dev->iobase + PARPORT_CTRL_REG);
return 0;
}
static struct comedi_driver parport_driver = {
.driver_name = "comedi_parport",
.module = THIS_MODULE,
.attach = parport_attach,
.detach = comedi_legacy_detach,
};
module_comedi_driver(parport_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi: Standard parallel port driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/comedi_parport.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Comedi driver for NI PCMCIA MIO E series cards
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2000 David A. Schleef <[email protected]>
*/
/*
* Driver: ni_mio_cs
* Description: National Instruments DAQCard E series
* Author: ds
* Status: works
* Devices: [National Instruments] DAQCard-AI-16XE-50 (ni_mio_cs),
* DAQCard-AI-16E-4, DAQCard-6062E, DAQCard-6024E, DAQCard-6036E
* Updated: Thu Oct 23 19:43:17 CDT 2003
*
* See the notes in the ni_atmio.o driver.
*/
/*
* The real guts of the driver is in ni_mio_common.c, which is
* included by all the E series drivers.
*
* References for specifications:
* 341080a.pdf DAQCard E Series Register Level Programmer Manual
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/comedi/comedi_pcmcia.h>
#include <linux/comedi/comedi_8255.h>
#include "ni_stc.h"
/*
* AT specific setup
*/
static const struct ni_board_struct ni_boards[] = {
{
.name = "DAQCard-ai-16xe-50",
.device_id = 0x010d,
.n_adchan = 16,
.ai_maxdata = 0xffff,
.ai_fifo_depth = 1024,
.gainlkup = ai_gain_8,
.ai_speed = 5000,
.caldac = { dac8800, dac8043 },
}, {
.name = "DAQCard-ai-16e-4",
.device_id = 0x010c,
.n_adchan = 16,
.ai_maxdata = 0x0fff,
.ai_fifo_depth = 1024,
.gainlkup = ai_gain_16,
.ai_speed = 4000,
.caldac = { mb88341 }, /* verified */
}, {
.name = "DAQCard-6062E",
.device_id = 0x02c4,
.n_adchan = 16,
.ai_maxdata = 0x0fff,
.ai_fifo_depth = 8192,
.gainlkup = ai_gain_16,
.ai_speed = 2000,
.n_aochan = 2,
.ao_maxdata = 0x0fff,
.ao_fifo_depth = 2048,
.ao_range_table = &range_bipolar10,
.ao_speed = 1176,
.caldac = { ad8804_debug }, /* verified */
}, {
/* specs incorrect! */
.name = "DAQCard-6024E",
.device_id = 0x075e,
.n_adchan = 16,
.ai_maxdata = 0x0fff,
.ai_fifo_depth = 1024,
.gainlkup = ai_gain_4,
.ai_speed = 5000,
.n_aochan = 2,
.ao_maxdata = 0x0fff,
.ao_range_table = &range_bipolar10,
.ao_speed = 1000000,
.caldac = { ad8804_debug },
}, {
/* specs incorrect! */
.name = "DAQCard-6036E",
.device_id = 0x0245,
.n_adchan = 16,
.ai_maxdata = 0xffff,
.ai_fifo_depth = 1024,
.alwaysdither = 1,
.gainlkup = ai_gain_4,
.ai_speed = 5000,
.n_aochan = 2,
.ao_maxdata = 0xffff,
.ao_range_table = &range_bipolar10,
.ao_speed = 1000000,
.caldac = { ad8804_debug },
},
#if 0
{
.name = "DAQCard-6715",
.device_id = 0x0000, /* unknown */
.n_aochan = 8,
.ao_maxdata = 0x0fff,
.ao_671x = 8192,
.caldac = { mb88341, mb88341 },
},
#endif
};
#include "ni_mio_common.c"
static const void *ni_getboardtype(struct comedi_device *dev,
struct pcmcia_device *link)
{
static const struct ni_board_struct *board;
int i;
for (i = 0; i < ARRAY_SIZE(ni_boards); i++) {
board = &ni_boards[i];
if (board->device_id == link->card_id)
return board;
}
return NULL;
}
static int mio_pcmcia_config_loop(struct pcmcia_device *p_dev, void *priv_data)
{
int base, ret;
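/* try each 0x20-aligned base in the first 1K of I/O space */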
p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
for (base = 0x000; base < 0x400; base += 0x20) {
p_dev->resource[0]->start = base;
ret = pcmcia_request_io(p_dev);
if (!ret)
return 0;
}
return -ENODEV;
}
static int mio_cs_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
static const struct ni_board_struct *board;
int ret;
board = ni_getboardtype(dev, link);
if (!board)
return -ENODEV;
dev->board_ptr = board;
dev->board_name = board->name;
link->config_flags |= CONF_AUTO_SET_IO | CONF_ENABLE_IRQ;
ret = comedi_pcmcia_enable(dev, mio_pcmcia_config_loop);
if (ret)
return ret;
dev->iobase = link->resource[0]->start;
link->priv = dev;
ret = pcmcia_request_irq(link, ni_E_interrupt);
if (ret)
return ret;
dev->irq = link->irq;
ret = ni_alloc_private(dev);
if (ret)
return ret;
return ni_E_init(dev, 0, 1);
}
static void mio_cs_detach(struct comedi_device *dev)
{
mio_common_detach(dev);
comedi_pcmcia_disable(dev);
}
static struct comedi_driver driver_ni_mio_cs = {
.driver_name = "ni_mio_cs",
.module = THIS_MODULE,
.auto_attach = mio_cs_auto_attach,
.detach = mio_cs_detach,
};
static int cs_attach(struct pcmcia_device *link)
{
return comedi_pcmcia_auto_config(link, &driver_ni_mio_cs);
}
static const struct pcmcia_device_id ni_mio_cs_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0x010b, 0x010d), /* DAQCard-ai-16xe-50 */
PCMCIA_DEVICE_MANF_CARD(0x010b, 0x010c), /* DAQCard-ai-16e-4 */
PCMCIA_DEVICE_MANF_CARD(0x010b, 0x02c4), /* DAQCard-6062E */
PCMCIA_DEVICE_MANF_CARD(0x010b, 0x075e), /* DAQCard-6024E */
PCMCIA_DEVICE_MANF_CARD(0x010b, 0x0245), /* DAQCard-6036E */
PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, ni_mio_cs_ids);
static struct pcmcia_driver ni_mio_cs_driver = {
.name = "ni_mio_cs",
.owner = THIS_MODULE,
.id_table = ni_mio_cs_ids,
.probe = cs_attach,
.remove = comedi_pcmcia_auto_unconfig,
};
module_comedi_pcmcia_driver(driver_ni_mio_cs, ni_mio_cs_driver);
MODULE_DESCRIPTION("Comedi driver for National Instruments DAQCard E series");
MODULE_AUTHOR("David A. Schleef <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/ni_mio_cs.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/cb_pcidda.c
* Driver for the ComputerBoards / Measurement Computing PCI-DDA series.
*
* Copyright (C) 2001 Ivan Martinez <[email protected]>
* Copyright (C) 2001 Frank Mori Hess <[email protected]>
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-8 David A. Schleef <[email protected]>
*/
/*
* Driver: cb_pcidda
* Description: MeasurementComputing PCI-DDA series
* Devices: [Measurement Computing] PCI-DDA08/12 (pci-dda08/12),
* PCI-DDA04/12 (pci-dda04/12), PCI-DDA02/12 (pci-dda02/12),
* PCI-DDA08/16 (pci-dda08/16), PCI-DDA04/16 (pci-dda04/16),
* PCI-DDA02/16 (pci-dda02/16)
* Author: Ivan Martinez <[email protected]>
* Frank Mori Hess <[email protected]>
* Status: works
*
* Configuration options: not applicable, uses PCI auto config
*
* Only simple analog output writing is supported.
*/
#include <linux/module.h>
#include <linux/comedi/comedi_pci.h>
#include <linux/comedi/comedi_8255.h>
#define EEPROM_SIZE 128 /* number of entries in eeprom */
/* maximum number of ao channels for supported boards */
#define MAX_AO_CHANNELS 8
/* Digital I/O registers */
#define CB_DDA_DIO0_8255_BASE 0x00
#define CB_DDA_DIO1_8255_BASE 0x04
/* DAC registers */
#define CB_DDA_DA_CTRL_REG 0x00 /* D/A Control Register */
#define CB_DDA_DA_CTRL_SU BIT(0) /* Simultaneous update */
#define CB_DDA_DA_CTRL_EN BIT(1) /* Enable specified DAC */
#define CB_DDA_DA_CTRL_DAC(x) ((x) << 2) /* Specify DAC channel */
#define CB_DDA_DA_CTRL_RANGE2V5 (0 << 6) /* 2.5V range */
#define CB_DDA_DA_CTRL_RANGE5V (2 << 6) /* 5V range */
#define CB_DDA_DA_CTRL_RANGE10V (3 << 6) /* 10V range */
#define CB_DDA_DA_CTRL_UNIP BIT(8) /* Unipolar range */
#define DACALIBRATION1 4 /* D/A CALIBRATION REGISTER 1 */
/* write bits */
/* serial data input for eeprom, caldacs, reference dac */
#define SERIAL_IN_BIT 0x1
#define CAL_CHANNEL_MASK (0x7 << 1)
#define CAL_CHANNEL_BITS(channel) (((channel) << 1) & CAL_CHANNEL_MASK)
/* read bits */
#define CAL_COUNTER_MASK 0x1f
/* calibration counter overflow status bit */
#define CAL_COUNTER_OVERFLOW_BIT 0x20
/* analog output is less than reference dac voltage */
#define AO_BELOW_REF_BIT 0x40
#define SERIAL_OUT_BIT 0x80 /* serial data out, for reading from eeprom */
#define DACALIBRATION2 6 /* D/A CALIBRATION REGISTER 2 */
#define SELECT_EEPROM_BIT 0x1 /* send serial data in to eeprom */
/* don't send serial data to MAX542 reference dac */
#define DESELECT_REF_DAC_BIT 0x2
/* don't send serial data to caldac n */
#define DESELECT_CALDAC_BIT(n) (0x4 << (n))
/* manual says to set this bit with no explanation */
#define DUMMY_BIT 0x40
#define CB_DDA_DA_DATA_REG(x) (0x08 + ((x) * 2))
/* Offsets for the caldac channels */
#define CB_DDA_CALDAC_FINE_GAIN 0
#define CB_DDA_CALDAC_COURSE_GAIN 1
#define CB_DDA_CALDAC_COURSE_OFFSET 2
#define CB_DDA_CALDAC_FINE_OFFSET 3
static const struct comedi_lrange cb_pcidda_ranges = {
6, {
BIP_RANGE(10),
BIP_RANGE(5),
BIP_RANGE(2.5),
UNI_RANGE(10),
UNI_RANGE(5),
UNI_RANGE(2.5)
}
};
enum cb_pcidda_boardid {
BOARD_DDA02_12,
BOARD_DDA04_12,
BOARD_DDA08_12,
BOARD_DDA02_16,
BOARD_DDA04_16,
BOARD_DDA08_16,
};
struct cb_pcidda_board {
const char *name;
int ao_chans;
int ao_bits;
};
static const struct cb_pcidda_board cb_pcidda_boards[] = {
[BOARD_DDA02_12] = {
.name = "pci-dda02/12",
.ao_chans = 2,
.ao_bits = 12,
},
[BOARD_DDA04_12] = {
.name = "pci-dda04/12",
.ao_chans = 4,
.ao_bits = 12,
},
[BOARD_DDA08_12] = {
.name = "pci-dda08/12",
.ao_chans = 8,
.ao_bits = 12,
},
[BOARD_DDA02_16] = {
.name = "pci-dda02/16",
.ao_chans = 2,
.ao_bits = 16,
},
[BOARD_DDA04_16] = {
.name = "pci-dda04/16",
.ao_chans = 4,
.ao_bits = 16,
},
[BOARD_DDA08_16] = {
.name = "pci-dda08/16",
.ao_chans = 8,
.ao_bits = 16,
},
};
struct cb_pcidda_private {
unsigned long daqio;
/* bits last written to da calibration register 1 */
unsigned int dac_cal1_bits;
/* current range settings for output channels */
unsigned int ao_range[MAX_AO_CHANNELS];
u16 eeprom_data[EEPROM_SIZE]; /* software copy of board's eeprom */
};
/* lowlevel read from eeprom */
static unsigned int cb_pcidda_serial_in(struct comedi_device *dev)
{
struct cb_pcidda_private *devpriv = dev->private;
unsigned int value = 0;
int i;
const int value_width = 16; /* width of each value, in bits */
for (i = 1; i <= value_width; i++) {
/* read bits most significant bit first */
if (inw_p(devpriv->daqio + DACALIBRATION1) & SERIAL_OUT_BIT)
value |= 1 << (value_width - i);
}
return value;
}
/* lowlevel write to eeprom/dac */
static void cb_pcidda_serial_out(struct comedi_device *dev, unsigned int value,
unsigned int num_bits)
{
struct cb_pcidda_private *devpriv = dev->private;
int i;
for (i = 1; i <= num_bits; i++) {
/* send bits most significant bit first */
if (value & (1 << (num_bits - i)))
devpriv->dac_cal1_bits |= SERIAL_IN_BIT;
else
devpriv->dac_cal1_bits &= ~SERIAL_IN_BIT;
outw_p(devpriv->dac_cal1_bits, devpriv->daqio + DACALIBRATION1);
}
}
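/*
* Example: cb_pcidda_serial_out(dev, 0x6, 3) clocks out the bits 1, 1, 0
* (most significant bit first); this is exactly the eeprom read
* instruction sent by cb_pcidda_read_eeprom() below.
*/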
/* reads a 16 bit value from board's eeprom */
static unsigned int cb_pcidda_read_eeprom(struct comedi_device *dev,
unsigned int address)
{
struct cb_pcidda_private *devpriv = dev->private;
unsigned int i;
unsigned int cal2_bits;
unsigned int value;
/* one caldac for every two dac channels */
const int max_num_caldacs = 4;
/* bits to send to tell eeprom we want to read */
const int read_instruction = 0x6;
const int instruction_length = 3;
const int address_length = 8;
/* send serial output stream to eeprom */
cal2_bits = SELECT_EEPROM_BIT | DESELECT_REF_DAC_BIT | DUMMY_BIT;
/* deactivate caldacs (one caldac for every two channels) */
for (i = 0; i < max_num_caldacs; i++)
cal2_bits |= DESELECT_CALDAC_BIT(i);
outw_p(cal2_bits, devpriv->daqio + DACALIBRATION2);
/* tell eeprom we want to read */
cb_pcidda_serial_out(dev, read_instruction, instruction_length);
/* send address we want to read from */
cb_pcidda_serial_out(dev, address, address_length);
value = cb_pcidda_serial_in(dev);
/* deactivate eeprom */
cal2_bits &= ~SELECT_EEPROM_BIT;
outw_p(cal2_bits, devpriv->daqio + DACALIBRATION2);
return value;
}
/* writes to 8 bit calibration dacs */
static void cb_pcidda_write_caldac(struct comedi_device *dev,
unsigned int caldac, unsigned int channel,
unsigned int value)
{
struct cb_pcidda_private *devpriv = dev->private;
unsigned int cal2_bits;
unsigned int i;
/* caldacs use 3 bit channel specification */
const int num_channel_bits = 3;
const int num_caldac_bits = 8; /* 8 bit calibration dacs */
/* one caldac for every two dac channels */
const int max_num_caldacs = 4;
/* write 3 bit channel */
cb_pcidda_serial_out(dev, channel, num_channel_bits);
/* write 8 bit caldac value */
cb_pcidda_serial_out(dev, value, num_caldac_bits);
/*
* Latch the stream into the appropriate caldac and deselect the
* reference dac.
*/
cal2_bits = DESELECT_REF_DAC_BIT | DUMMY_BIT;
/* deactivate caldacs (one caldac for every two channels) */
for (i = 0; i < max_num_caldacs; i++)
cal2_bits |= DESELECT_CALDAC_BIT(i);
/* activate the caldac we want */
cal2_bits &= ~DESELECT_CALDAC_BIT(caldac);
outw_p(cal2_bits, devpriv->daqio + DACALIBRATION2);
/* deactivate caldac */
cal2_bits |= DESELECT_CALDAC_BIT(caldac);
outw_p(cal2_bits, devpriv->daqio + DACALIBRATION2);
}
/* set caldacs to eeprom values for given channel and range */
static void cb_pcidda_calibrate(struct comedi_device *dev, unsigned int channel,
unsigned int range)
{
struct cb_pcidda_private *devpriv = dev->private;
unsigned int caldac = channel / 2; /* two dac channels per caldac */
unsigned int chan = 4 * (channel % 2); /* caldac channel base */
unsigned int index = 2 * range + 12 * channel;
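/*
* Calibration words are laid out from eeprom word 0x7 in
* (offset, gain) pairs, six pairs (one per range) per channel:
* e.g. channel 1, range 0 gives index 12, so the offset is read
* from word 0x13 and the gain from word 0x14.
*/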
unsigned int offset;
unsigned int gain;
/* save range so we can tell when we need to readjust calibration */
devpriv->ao_range[channel] = range;
/* get values from eeprom data */
offset = devpriv->eeprom_data[0x7 + index];
gain = devpriv->eeprom_data[0x8 + index];
/* set caldacs */
cb_pcidda_write_caldac(dev, caldac, chan + CB_DDA_CALDAC_COURSE_OFFSET,
(offset >> 8) & 0xff);
cb_pcidda_write_caldac(dev, caldac, chan + CB_DDA_CALDAC_FINE_OFFSET,
offset & 0xff);
cb_pcidda_write_caldac(dev, caldac, chan + CB_DDA_CALDAC_COURSE_GAIN,
(gain >> 8) & 0xff);
cb_pcidda_write_caldac(dev, caldac, chan + CB_DDA_CALDAC_FINE_GAIN,
gain & 0xff);
}
static int cb_pcidda_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct cb_pcidda_private *devpriv = dev->private;
unsigned int channel = CR_CHAN(insn->chanspec);
unsigned int range = CR_RANGE(insn->chanspec);
unsigned int ctrl;
unsigned int i;
if (range != devpriv->ao_range[channel])
cb_pcidda_calibrate(dev, channel, range);
ctrl = CB_DDA_DA_CTRL_EN | CB_DDA_DA_CTRL_DAC(channel);
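/*
* Ranges 0..2 are the bipolar 10V/5V/2.5V entries of cb_pcidda_ranges
* and 3..5 the matching unipolar ones, so the span bits repeat and
* only CB_DDA_DA_CTRL_UNIP differs.
*/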
switch (range) {
case 0:
case 3:
ctrl |= CB_DDA_DA_CTRL_RANGE10V;
break;
case 1:
case 4:
ctrl |= CB_DDA_DA_CTRL_RANGE5V;
break;
case 2:
case 5:
ctrl |= CB_DDA_DA_CTRL_RANGE2V5;
break;
}
if (range > 2)
ctrl |= CB_DDA_DA_CTRL_UNIP;
outw(ctrl, devpriv->daqio + CB_DDA_DA_CTRL_REG);
for (i = 0; i < insn->n; i++)
outw(data[i], devpriv->daqio + CB_DDA_DA_DATA_REG(channel));
return insn->n;
}
static int cb_pcidda_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct cb_pcidda_board *board = NULL;
struct cb_pcidda_private *devpriv;
struct comedi_subdevice *s;
int i;
int ret;
if (context < ARRAY_SIZE(cb_pcidda_boards))
board = &cb_pcidda_boards[context];
if (!board)
return -ENODEV;
dev->board_ptr = board;
dev->board_name = board->name;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
ret = comedi_pci_enable(dev);
if (ret)
return ret;
dev->iobase = pci_resource_start(pcidev, 2);
devpriv->daqio = pci_resource_start(pcidev, 3);
ret = comedi_alloc_subdevices(dev, 3);
if (ret)
return ret;
s = &dev->subdevices[0];
/* analog output subdevice */
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = board->ao_chans;
s->maxdata = (1 << board->ao_bits) - 1;
s->range_table = &cb_pcidda_ranges;
s->insn_write = cb_pcidda_ao_insn_write;
/* two 8255 digital io subdevices */
for (i = 0; i < 2; i++) {
s = &dev->subdevices[1 + i];
ret = subdev_8255_init(dev, s, NULL, i * I8255_SIZE);
if (ret)
return ret;
}
/* Read the caldac eeprom data */
for (i = 0; i < EEPROM_SIZE; i++)
devpriv->eeprom_data[i] = cb_pcidda_read_eeprom(dev, i);
/* set calibrations dacs */
for (i = 0; i < board->ao_chans; i++)
cb_pcidda_calibrate(dev, i, devpriv->ao_range[i]);
return 0;
}
static struct comedi_driver cb_pcidda_driver = {
.driver_name = "cb_pcidda",
.module = THIS_MODULE,
.auto_attach = cb_pcidda_auto_attach,
.detach = comedi_pci_detach,
};
static int cb_pcidda_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
return comedi_pci_auto_config(dev, &cb_pcidda_driver,
id->driver_data);
}
static const struct pci_device_id cb_pcidda_pci_table[] = {
{ PCI_VDEVICE(CB, 0x0020), BOARD_DDA02_12 },
{ PCI_VDEVICE(CB, 0x0021), BOARD_DDA04_12 },
{ PCI_VDEVICE(CB, 0x0022), BOARD_DDA08_12 },
{ PCI_VDEVICE(CB, 0x0023), BOARD_DDA02_16 },
{ PCI_VDEVICE(CB, 0x0024), BOARD_DDA04_16 },
{ PCI_VDEVICE(CB, 0x0025), BOARD_DDA08_16 },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, cb_pcidda_pci_table);
static struct pci_driver cb_pcidda_pci_driver = {
.name = "cb_pcidda",
.id_table = cb_pcidda_pci_table,
.probe = cb_pcidda_pci_probe,
.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(cb_pcidda_driver, cb_pcidda_pci_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/cb_pcidda.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi_bond.c
* A Comedi driver to 'bond' or merge multiple drivers and devices as one.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <[email protected]>
* Copyright (C) 2005 Calin A. Culianu <[email protected]>
*/
/*
* Driver: comedi_bond
* Description: A driver to 'bond' (merge) multiple subdevices from multiple
* devices together as one.
* Devices:
* Author: ds
* Updated: Mon, 10 Oct 00:18:25 -0500
* Status: works
*
* This driver allows you to 'bond' (merge) multiple comedi subdevices
* (coming from possibly difference boards and/or drivers) together. For
* example, if you had a board with 2 different DIO subdevices, and
* another with 1 DIO subdevice, you could 'bond' them with this driver
* so that they look like one big fat DIO subdevice. This makes writing
* applications slightly easier as you don't have to worry about managing
* different subdevices in the application -- you just worry about
* indexing one linear array of channel id's.
*
* Right now only DIO subdevices are supported as that's the personal itch
* I am scratching with this driver. If you want to add support for AI and AO
* subdevs, go right on ahead and do so!
*
* Commands aren't supported -- although it would be cool if they were.
*
* Configuration Options:
* List of comedi-minors to bond. All subdevices of the same type
* within each minor will be concatenated together in the order given here.
*/
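/*
* Illustrative attach (user-space sketch, assuming the comedi_config
* utility; the minors are examples):
*
*	comedi_config /dev/comedi2 comedi_bond 0,1
*
* bonds the DIO subdevices of minors 0 and 1 into one big subdevice
* on /dev/comedi2.
*/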
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/comedi.h>
#include <linux/comedi/comedilib.h>
#include <linux/comedi/comedidev.h>
struct bonded_device {
struct comedi_device *dev;
unsigned int minor;
unsigned int subdev;
unsigned int nchans;
};
struct comedi_bond_private {
char name[256];
struct bonded_device **devs;
unsigned int ndevs;
unsigned int nchans;
};
static int bonding_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct comedi_bond_private *devpriv = dev->private;
unsigned int n_left, n_done, base_chan;
unsigned int write_mask, data_bits;
struct bonded_device **devs;
write_mask = data[0];
data_bits = data[1];
base_chan = CR_CHAN(insn->chanspec);
/* do a maximum of 32 channels, starting from base_chan. */
n_left = devpriv->nchans - base_chan;
if (n_left > 32)
n_left = 32;
n_done = 0;
devs = devpriv->devs;
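/*
* Walk the bonded devices in order. E.g. with a 24-channel and an
* 8-channel subdevice bonded, a request at base_chan 20 handles
* channels 20..23 of the first device on the first pass and
* channels 0..7 of the second device on the next.
*/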
do {
struct bonded_device *bdev = *devs++;
if (base_chan < bdev->nchans) {
/* base channel falls within bonded device */
unsigned int b_chans, b_mask, b_write_mask, b_data_bits;
int ret;
/*
* Get num channels to do for bonded device and set
* up mask and data bits for bonded device.
*/
b_chans = bdev->nchans - base_chan;
if (b_chans > n_left)
b_chans = n_left;
b_mask = (b_chans < 32) ? ((1 << b_chans) - 1)
: 0xffffffff;
b_write_mask = (write_mask >> n_done) & b_mask;
b_data_bits = (data_bits >> n_done) & b_mask;
/* Read/Write the new digital lines. */
ret = comedi_dio_bitfield2(bdev->dev, bdev->subdev,
b_write_mask, &b_data_bits,
base_chan);
if (ret < 0)
return ret;
/* Place read bits into data[1]. */
data[1] &= ~(b_mask << n_done);
data[1] |= (b_data_bits & b_mask) << n_done;
/*
* Set up for following bonded device (if still have
* channels to read/write).
*/
base_chan = 0;
n_done += b_chans;
n_left -= b_chans;
} else {
/* Skip bonded devices before base channel. */
base_chan -= bdev->nchans;
}
} while (n_left);
return insn->n;
}
static int bonding_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct comedi_bond_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
int ret;
struct bonded_device *bdev;
struct bonded_device **devs;
/*
* Locate bonded subdevice and adjust channel.
*/
devs = devpriv->devs;
for (bdev = *devs++; chan >= bdev->nchans; bdev = *devs++)
chan -= bdev->nchans;
/*
* The input or output configuration of each digital line is
* configured by a special insn_config instruction. chanspec
* contains the channel to be changed, and data[0] contains the
* configuration instruction INSN_CONFIG_DIO_OUTPUT,
* INSN_CONFIG_DIO_INPUT or INSN_CONFIG_DIO_QUERY.
*
* Note that INSN_CONFIG_DIO_OUTPUT == COMEDI_OUTPUT,
* and INSN_CONFIG_DIO_INPUT == COMEDI_INPUT. This is deliberate ;)
*/
switch (data[0]) {
case INSN_CONFIG_DIO_OUTPUT:
case INSN_CONFIG_DIO_INPUT:
ret = comedi_dio_config(bdev->dev, bdev->subdev, chan, data[0]);
break;
case INSN_CONFIG_DIO_QUERY:
ret = comedi_dio_get_config(bdev->dev, bdev->subdev, chan,
&data[1]);
break;
default:
ret = -EINVAL;
break;
}
if (ret >= 0)
ret = insn->n;
return ret;
}
static int do_dev_config(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct comedi_bond_private *devpriv = dev->private;
DECLARE_BITMAP(devs_opened, COMEDI_NUM_BOARD_MINORS);
int i;
memset(&devs_opened, 0, sizeof(devs_opened));
devpriv->name[0] = 0;
/*
* Loop through all comedi devices specified on the command-line,
* building our device list.
*/
for (i = 0; i < COMEDI_NDEVCONFOPTS && (!i || it->options[i]); ++i) {
char file[sizeof("/dev/comediXXXXXX")];
int minor = it->options[i];
struct comedi_device *d;
int sdev = -1, nchans;
struct bonded_device *bdev;
struct bonded_device **devs;
if (minor < 0 || minor >= COMEDI_NUM_BOARD_MINORS) {
dev_err(dev->class_dev,
"Minor %d is invalid!\n", minor);
return -EINVAL;
}
if (minor == dev->minor) {
dev_err(dev->class_dev,
"Cannot bond this driver to itself!\n");
return -EINVAL;
}
if (test_and_set_bit(minor, devs_opened)) {
dev_err(dev->class_dev,
"Minor %d specified more than once!\n", minor);
return -EINVAL;
}
snprintf(file, sizeof(file), "/dev/comedi%d", minor);
file[sizeof(file) - 1] = 0;
d = comedi_open(file);
if (!d) {
dev_err(dev->class_dev,
"Minor %u could not be opened\n", minor);
return -ENODEV;
}
/* Do DIO, as that's all we support now.. */
while ((sdev = comedi_find_subdevice_by_type(d, COMEDI_SUBD_DIO,
sdev + 1)) > -1) {
nchans = comedi_get_n_channels(d, sdev);
if (nchans <= 0) {
dev_err(dev->class_dev,
"comedi_get_n_channels() returned %d on minor %u subdev %d!\n",
nchans, minor, sdev);
return -EINVAL;
}
bdev = kmalloc(sizeof(*bdev), GFP_KERNEL);
if (!bdev)
return -ENOMEM;
bdev->dev = d;
bdev->minor = minor;
bdev->subdev = sdev;
bdev->nchans = nchans;
devpriv->nchans += nchans;
/*
* Now put bdev pointer at end of devpriv->devs array
* list..
*/
/* ergh.. ugly.. we need to realloc :( */
devs = krealloc(devpriv->devs,
(devpriv->ndevs + 1) * sizeof(*devs),
GFP_KERNEL);
if (!devs) {
dev_err(dev->class_dev,
"Could not allocate memory. Out of memory?\n");
kfree(bdev);
return -ENOMEM;
}
devpriv->devs = devs;
devpriv->devs[devpriv->ndevs++] = bdev;
{
/* Append dev:subdev to devpriv->name */
char buf[20];
snprintf(buf, sizeof(buf), "%u:%u ",
bdev->minor, bdev->subdev);
strlcat(devpriv->name, buf,
sizeof(devpriv->name));
}
}
}
if (!devpriv->nchans) {
dev_err(dev->class_dev, "No channels found!\n");
return -EINVAL;
}
return 0;
}
static int bonding_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
struct comedi_bond_private *devpriv;
struct comedi_subdevice *s;
int ret;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
/*
* Set up our bonding from the config params; this fills in our
* private struct.
*/
ret = do_dev_config(dev, it);
if (ret)
return ret;
dev->board_name = devpriv->name;
ret = comedi_alloc_subdevices(dev, 1);
if (ret)
return ret;
s = &dev->subdevices[0];
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = devpriv->nchans;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = bonding_dio_insn_bits;
s->insn_config = bonding_dio_insn_config;
dev_info(dev->class_dev,
"%s: %s attached, %u channels from %u devices\n",
dev->driver->driver_name, dev->board_name,
devpriv->nchans, devpriv->ndevs);
return 0;
}
static void bonding_detach(struct comedi_device *dev)
{
struct comedi_bond_private *devpriv = dev->private;
if (devpriv && devpriv->devs) {
DECLARE_BITMAP(devs_closed, COMEDI_NUM_BOARD_MINORS);
memset(&devs_closed, 0, sizeof(devs_closed));
while (devpriv->ndevs--) {
struct bonded_device *bdev;
bdev = devpriv->devs[devpriv->ndevs];
if (!bdev)
continue;
if (!test_and_set_bit(bdev->minor, devs_closed))
comedi_close(bdev->dev);
kfree(bdev);
}
kfree(devpriv->devs);
devpriv->devs = NULL;
}
}
static struct comedi_driver bonding_driver = {
.driver_name = "comedi_bond",
.module = THIS_MODULE,
.attach = bonding_attach,
.detach = bonding_detach,
};
module_comedi_driver(bonding_driver);
MODULE_AUTHOR("Calin A. Culianu");
MODULE_DESCRIPTION("comedi_bond: A driver for COMEDI to bond multiple COMEDI devices together as one.");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/comedi_bond.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/das800.c
* Driver for Keithley das800 series boards and compatibles
* Copyright (C) 2000 Frank Mori Hess <[email protected]>
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <[email protected]>
*/
/*
* Driver: das800
* Description: Keithley Metrabyte DAS800 (& compatibles)
* Author: Frank Mori Hess <[email protected]>
* Devices: [Keithley Metrabyte] DAS-800 (das-800), DAS-801 (das-801),
* DAS-802 (das-802),
* [Measurement Computing] CIO-DAS800 (cio-das800),
* CIO-DAS801 (cio-das801), CIO-DAS802 (cio-das802),
* CIO-DAS802/16 (cio-das802/16)
* Status: works, cio-das802/16 untested - email me if you have tested it
*
* Configuration options:
* [0] - I/O port base address
* [1] - IRQ (optional, required for timed or externally triggered conversions)
*
* Notes:
* IRQ can be omitted, although the cmd interface will not work without it.
*
* All entries in the channel/gain list must use the same gain and be
* consecutive channels counting upwards in channel number (these are
* hardware limitations.)
*
* I've never tested the gain setting stuff since I only have a
* DAS-800 board with fixed gain.
*
* The cio-das802/16 does not have a fifo-empty status bit! Therefore
* only fifo-half-full transfers are possible with this card.
*
* cmd triggers supported:
* start_src: TRIG_NOW | TRIG_EXT
* scan_begin_src: TRIG_FOLLOW
* scan_end_src: TRIG_COUNT
* convert_src: TRIG_TIMER | TRIG_EXT
* stop_src: TRIG_NONE | TRIG_COUNT
*/
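/*
* Illustrative cmd setup (user-space sketch, assuming comedilib; the
* 40000 ns convert period, i.e. 25 kHz, and the chanlist_len stand-in
* are examples only, and the period must not be below the board's
* minimum conversion time):
*
*	cmd.start_src = TRIG_NOW;
*	cmd.scan_begin_src = TRIG_FOLLOW;
*	cmd.convert_src = TRIG_TIMER;
*	cmd.convert_arg = 40000;
*	cmd.scan_end_src = TRIG_COUNT;
*	cmd.scan_end_arg = chanlist_len;
*	cmd.stop_src = TRIG_NONE;
*/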
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/comedi/comedidev.h>
#include <linux/comedi/comedi_8254.h>
#define N_CHAN_AI 8 /* number of analog input channels */
/* Registers for the das800 */
#define DAS800_LSB 0
#define FIFO_EMPTY 0x1
#define FIFO_OVF 0x2
#define DAS800_MSB 1
#define DAS800_CONTROL1 2
#define CONTROL1_INTE 0x8
#define DAS800_CONV_CONTROL 2
#define ITE 0x1
#define CASC 0x2
#define DTEN 0x4
#define IEOC 0x8
#define EACS 0x10
#define CONV_HCEN 0x80
#define DAS800_SCAN_LIMITS 2
#define DAS800_STATUS 2
#define IRQ 0x8
#define BUSY 0x80
#define DAS800_GAIN 3
#define CIO_FFOV 0x8 /* cio-das802/16 fifo overflow */
#define CIO_ENHF 0x90 /* cio-das802/16 fifo half full int ena */
#define CONTROL1 0x80
#define CONV_CONTROL 0xa0
#define SCAN_LIMITS 0xc0
#define ID 0xe0
#define DAS800_8254 4
#define DAS800_STATUS2 7
#define STATUS2_HCEN 0x80
#define STATUS2_INTE 0x20
#define DAS800_ID 7
#define DAS802_16_HALF_FIFO_SZ 128
struct das800_board {
const char *name;
int ai_speed;
const struct comedi_lrange *ai_range;
int resolution;
};
static const struct comedi_lrange range_das801_ai = {
9, {
BIP_RANGE(5),
BIP_RANGE(10),
UNI_RANGE(10),
BIP_RANGE(0.5),
UNI_RANGE(1),
BIP_RANGE(0.05),
UNI_RANGE(0.1),
BIP_RANGE(0.01),
UNI_RANGE(0.02)
}
};
static const struct comedi_lrange range_cio_das801_ai = {
9, {
BIP_RANGE(5),
BIP_RANGE(10),
UNI_RANGE(10),
BIP_RANGE(0.5),
UNI_RANGE(1),
BIP_RANGE(0.05),
UNI_RANGE(0.1),
BIP_RANGE(0.005),
UNI_RANGE(0.01)
}
};
static const struct comedi_lrange range_das802_ai = {
9, {
BIP_RANGE(5),
BIP_RANGE(10),
UNI_RANGE(10),
BIP_RANGE(2.5),
UNI_RANGE(5),
BIP_RANGE(1.25),
UNI_RANGE(2.5),
BIP_RANGE(0.625),
UNI_RANGE(1.25)
}
};
static const struct comedi_lrange range_das80216_ai = {
8, {
BIP_RANGE(10),
UNI_RANGE(10),
BIP_RANGE(5),
UNI_RANGE(5),
BIP_RANGE(2.5),
UNI_RANGE(2.5),
BIP_RANGE(1.25),
UNI_RANGE(1.25)
}
};
enum das800_boardinfo {
BOARD_DAS800,
BOARD_CIODAS800,
BOARD_DAS801,
BOARD_CIODAS801,
BOARD_DAS802,
BOARD_CIODAS802,
BOARD_CIODAS80216,
};
static const struct das800_board das800_boards[] = {
[BOARD_DAS800] = {
.name = "das-800",
.ai_speed = 25000,
.ai_range = &range_bipolar5,
.resolution = 12,
},
[BOARD_CIODAS800] = {
.name = "cio-das800",
.ai_speed = 20000,
.ai_range = &range_bipolar5,
.resolution = 12,
},
[BOARD_DAS801] = {
.name = "das-801",
.ai_speed = 25000,
.ai_range = &range_das801_ai,
.resolution = 12,
},
[BOARD_CIODAS801] = {
.name = "cio-das801",
.ai_speed = 20000,
.ai_range = &range_cio_das801_ai,
.resolution = 12,
},
[BOARD_DAS802] = {
.name = "das-802",
.ai_speed = 25000,
.ai_range = &range_das802_ai,
.resolution = 12,
},
[BOARD_CIODAS802] = {
.name = "cio-das802",
.ai_speed = 20000,
.ai_range = &range_das802_ai,
.resolution = 12,
},
[BOARD_CIODAS80216] = {
.name = "cio-das802/16",
.ai_speed = 10000,
.ai_range = &range_das80216_ai,
.resolution = 16,
},
};
struct das800_private {
unsigned int do_bits; /* digital output bits */
};
static void das800_ind_write(struct comedi_device *dev,
unsigned int val, unsigned int reg)
{
/*
* Select the register that dev->iobase + 2 refers to,
* then write to that register.
*/
outb(reg, dev->iobase + DAS800_GAIN);
outb(val, dev->iobase + 2);
}
static unsigned int das800_ind_read(struct comedi_device *dev, unsigned int reg)
{
/*
* Select the register that dev->iobase + 7 refers to,
* then read from that register.
*/
outb(reg, dev->iobase + DAS800_GAIN);
return inb(dev->iobase + 7);
}
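/*
* Note: the register select latched via DAS800_GAIN is shared state,
* so callers serialize paired das800_ind_write()/das800_ind_read()
* accesses with dev->spinlock (see das800_interrupt() below).
*/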
static void das800_enable(struct comedi_device *dev)
{
const struct das800_board *board = dev->board_ptr;
struct das800_private *devpriv = dev->private;
unsigned long irq_flags;
spin_lock_irqsave(&dev->spinlock, irq_flags);
/* enable fifo-half full interrupts for cio-das802/16 */
if (board->resolution == 16)
outb(CIO_ENHF, dev->iobase + DAS800_GAIN);
/* enable hardware triggering */
das800_ind_write(dev, CONV_HCEN, CONV_CONTROL);
/* enable card's interrupt */
das800_ind_write(dev, CONTROL1_INTE | devpriv->do_bits, CONTROL1);
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
}
static void das800_disable(struct comedi_device *dev)
{
unsigned long irq_flags;
spin_lock_irqsave(&dev->spinlock, irq_flags);
/* disable hardware triggering of conversions */
das800_ind_write(dev, 0x0, CONV_CONTROL);
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
}
static int das800_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
das800_disable(dev);
return 0;
}
static int das800_ai_check_chanlist(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
unsigned int chan0 = CR_CHAN(cmd->chanlist[0]);
unsigned int range0 = CR_RANGE(cmd->chanlist[0]);
int i;
for (i = 1; i < cmd->chanlist_len; i++) {
unsigned int chan = CR_CHAN(cmd->chanlist[i]);
unsigned int range = CR_RANGE(cmd->chanlist[i]);
if (chan != (chan0 + i) % s->n_chan) {
dev_dbg(dev->class_dev,
"chanlist must be consecutive, counting upwards\n");
return -EINVAL;
}
if (range != range0) {
dev_dbg(dev->class_dev,
"chanlist must all have the same gain\n");
return -EINVAL;
}
}
return 0;
}
static int das800_ai_do_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
const struct das800_board *board = dev->board_ptr;
int err = 0;
/* Step 1 : check if triggers are trivially valid */
err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_FOLLOW);
err |= comedi_check_trigger_src(&cmd->convert_src,
TRIG_TIMER | TRIG_EXT);
err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
if (err)
return 1;
/* Step 2a : make sure trigger sources are unique */
err |= comedi_check_trigger_is_unique(cmd->start_src);
err |= comedi_check_trigger_is_unique(cmd->convert_src);
err |= comedi_check_trigger_is_unique(cmd->stop_src);
/* Step 2b : and mutually compatible */
if (err)
return 2;
/* Step 3: check if arguments are trivially valid */
err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
if (cmd->convert_src == TRIG_TIMER) {
err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
board->ai_speed);
}
err |= comedi_check_trigger_arg_min(&cmd->chanlist_len, 1);
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
if (cmd->stop_src == TRIG_COUNT)
err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
else /* TRIG_NONE */
err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
if (err)
return 3;
/* step 4: fix up any arguments */
if (cmd->convert_src == TRIG_TIMER) {
unsigned int arg = cmd->convert_arg;
comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
}
if (err)
return 4;
/* Step 5: check channel list if it exists */
if (cmd->chanlist && cmd->chanlist_len > 0)
err |= das800_ai_check_chanlist(dev, s, cmd);
if (err)
return 5;
return 0;
}
static int das800_ai_do_cmd(struct comedi_device *dev,
struct comedi_subdevice *s)
{
const struct das800_board *board = dev->board_ptr;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
unsigned int gain = CR_RANGE(cmd->chanlist[0]);
unsigned int start_chan = CR_CHAN(cmd->chanlist[0]);
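/*
* The scan limits register packs (end_chan << 3) | start_chan;
* e.g. a 4-channel scan starting at channel 2 wraps to end_chan 5
* and packs to 0x2a.
*/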
unsigned int end_chan = (start_chan + cmd->chanlist_len - 1) % 8;
unsigned int scan_chans = (end_chan << 3) | start_chan;
int conv_bits;
unsigned long irq_flags;
das800_disable(dev);
spin_lock_irqsave(&dev->spinlock, irq_flags);
/* set scan limits */
das800_ind_write(dev, scan_chans, SCAN_LIMITS);
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
/* set gain */
if (board->resolution == 12 && gain > 0)
gain += 0x7;
gain &= 0xf;
outb(gain, dev->iobase + DAS800_GAIN);
/*
* Enable auto channel scan, send interrupts on end of conversion,
* and set clock source to internal or external.
*/
conv_bits = 0;
conv_bits |= EACS | IEOC;
if (cmd->start_src == TRIG_EXT)
conv_bits |= DTEN;
if (cmd->convert_src == TRIG_TIMER) {
conv_bits |= CASC | ITE;
comedi_8254_update_divisors(dev->pacer);
comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
}
spin_lock_irqsave(&dev->spinlock, irq_flags);
das800_ind_write(dev, conv_bits, CONV_CONTROL);
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
das800_enable(dev);
return 0;
}
static unsigned int das800_ai_get_sample(struct comedi_device *dev)
{
unsigned int lsb = inb(dev->iobase + DAS800_LSB);
unsigned int msb = inb(dev->iobase + DAS800_MSB);
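/*
* On the 12-bit boards the low nibble of this raw value carries the
* fifo status flags and the 12-bit sample occupies the upper bits;
* the callers shift and mask accordingly.
*/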
return (msb << 8) | lsb;
}
static irqreturn_t das800_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct das800_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async;
struct comedi_cmd *cmd;
unsigned long irq_flags;
unsigned int status;
unsigned short val;
bool fifo_empty;
bool fifo_overflow;
int i;
status = inb(dev->iobase + DAS800_STATUS);
if (!(status & IRQ))
return IRQ_NONE;
if (!dev->attached)
return IRQ_HANDLED;
async = s->async;
cmd = &async->cmd;
spin_lock_irqsave(&dev->spinlock, irq_flags);
status = das800_ind_read(dev, CONTROL1) & STATUS2_HCEN;
/*
* Don't release spinlock yet since we want to make sure
* no one else disables hardware conversions.
*/
/* if hardware conversions are not enabled, then quit */
if (status == 0) {
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
return IRQ_HANDLED;
}
for (i = 0; i < DAS802_16_HALF_FIFO_SZ; i++) {
val = das800_ai_get_sample(dev);
if (s->maxdata == 0x0fff) {
fifo_empty = !!(val & FIFO_EMPTY);
fifo_overflow = !!(val & FIFO_OVF);
} else {
/* cio-das802/16 has no fifo empty status bit */
fifo_empty = false;
fifo_overflow = !!(inb(dev->iobase + DAS800_GAIN) &
CIO_FFOV);
}
if (fifo_empty || fifo_overflow)
break;
if (s->maxdata == 0x0fff)
val >>= 4; /* 12-bit sample */
val &= s->maxdata;
comedi_buf_write_samples(s, &val, 1);
if (cmd->stop_src == TRIG_COUNT &&
async->scans_done >= cmd->stop_arg) {
async->events |= COMEDI_CB_EOA;
break;
}
}
if (fifo_overflow) {
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
async->events |= COMEDI_CB_ERROR;
comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
if (!(async->events & COMEDI_CB_CANCEL_MASK)) {
/*
* Re-enable card's interrupt.
* We already have spinlock, so indirect addressing is safe
*/
das800_ind_write(dev, CONTROL1_INTE | devpriv->do_bits,
CONTROL1);
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
} else {
/* otherwise, stop taking data */
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
das800_disable(dev);
}
comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
static int das800_ai_eoc(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context)
{
unsigned int status;
status = inb(dev->iobase + DAS800_STATUS);
if ((status & BUSY) == 0)
return 0;
return -EBUSY;
}
static int das800_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct das800_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int range = CR_RANGE(insn->chanspec);
unsigned long irq_flags;
unsigned int val;
int ret;
int i;
das800_disable(dev);
/* set multiplexer */
spin_lock_irqsave(&dev->spinlock, irq_flags);
das800_ind_write(dev, chan | devpriv->do_bits, CONTROL1);
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
/* set gain / range */
if (s->maxdata == 0x0fff && range)
range += 0x7;
range &= 0xf;
outb(range, dev->iobase + DAS800_GAIN);
udelay(5);
for (i = 0; i < insn->n; i++) {
/* trigger conversion */
outb_p(0, dev->iobase + DAS800_MSB);
ret = comedi_timeout(dev, s, insn, das800_ai_eoc, 0);
if (ret)
return ret;
val = das800_ai_get_sample(dev);
if (s->maxdata == 0x0fff)
val >>= 4; /* 12-bit sample */
data[i] = val & s->maxdata;
}
return insn->n;
}
static int das800_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
data[1] = (inb(dev->iobase + DAS800_STATUS) >> 4) & 0x7;
return insn->n;
}
static int das800_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct das800_private *devpriv = dev->private;
unsigned long irq_flags;
if (comedi_dio_update_state(s, data)) {
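/* the DO bits occupy the upper nibble of the CONTROL1 register */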
devpriv->do_bits = s->state << 4;
spin_lock_irqsave(&dev->spinlock, irq_flags);
das800_ind_write(dev, CONTROL1_INTE | devpriv->do_bits,
CONTROL1);
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
}
data[1] = s->state;
return insn->n;
}
static const struct das800_board *das800_probe(struct comedi_device *dev)
{
const struct das800_board *board = dev->board_ptr;
int index = board ? board - das800_boards : -EINVAL;
int id_bits;
unsigned long irq_flags;
/*
* The dev->board_ptr will be set by comedi_device_attach() if the
* board name provided by the user matches a board->name in this
* driver. If so, this function sanity checks the id_bits to verify
* that the board is correct.
*
* If the dev->board_ptr is not set, the user is trying to attach
* an unspecified board to this driver. In this case the id_bits
* are used to 'probe' for the correct dev->board_ptr.
*/
spin_lock_irqsave(&dev->spinlock, irq_flags);
id_bits = das800_ind_read(dev, ID) & 0x3;
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
switch (id_bits) {
case 0x0:
if (index == BOARD_DAS800 || index == BOARD_CIODAS800)
return board;
index = BOARD_DAS800;
break;
case 0x2:
if (index == BOARD_DAS801 || index == BOARD_CIODAS801)
return board;
index = BOARD_DAS801;
break;
case 0x3:
if (index == BOARD_DAS802 || index == BOARD_CIODAS802 ||
index == BOARD_CIODAS80216)
return board;
index = BOARD_DAS802;
break;
default:
dev_dbg(dev->class_dev, "Board model: 0x%x (unknown)\n",
id_bits);
return NULL;
}
dev_dbg(dev->class_dev, "Board model (probed): %s series\n",
das800_boards[index].name);
return &das800_boards[index];
}
static int das800_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct das800_board *board;
struct das800_private *devpriv;
struct comedi_subdevice *s;
unsigned int irq = it->options[1];
unsigned long irq_flags;
int ret;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
ret = comedi_request_region(dev, it->options[0], 0x8);
if (ret)
return ret;
board = das800_probe(dev);
if (!board)
return -ENODEV;
dev->board_ptr = board;
dev->board_name = board->name;
if (irq > 1 && irq <= 7) {
ret = request_irq(irq, das800_interrupt, 0, "das800",
dev);
if (ret == 0)
dev->irq = irq;
}
dev->pacer = comedi_8254_init(dev->iobase + DAS800_8254,
I8254_OSC_BASE_1MHZ, I8254_IO8, 0);
if (!dev->pacer)
return -ENOMEM;
ret = comedi_alloc_subdevices(dev, 3);
if (ret)
return ret;
/* Analog Input subdevice */
s = &dev->subdevices[0];
dev->read_subdev = s;
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_GROUND;
s->n_chan = 8;
s->maxdata = (1 << board->resolution) - 1;
s->range_table = board->ai_range;
s->insn_read = das800_ai_insn_read;
if (dev->irq) {
s->subdev_flags |= SDF_CMD_READ;
s->len_chanlist = 8;
s->do_cmdtest = das800_ai_do_cmdtest;
s->do_cmd = das800_ai_do_cmd;
s->cancel = das800_cancel;
}
/* Digital Input subdevice */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = 3;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = das800_di_insn_bits;
/* Digital Output subdevice */
s = &dev->subdevices[2];
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 4;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = das800_do_insn_bits;
das800_disable(dev);
/* initialize digital out channels */
spin_lock_irqsave(&dev->spinlock, irq_flags);
das800_ind_write(dev, CONTROL1_INTE | devpriv->do_bits, CONTROL1);
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
return 0;
}
static struct comedi_driver driver_das800 = {
.driver_name = "das800",
.module = THIS_MODULE,
.attach = das800_attach,
.detach = comedi_legacy_detach,
.num_names = ARRAY_SIZE(das800_boards),
.board_name = &das800_boards[0].name,
.offset = sizeof(struct das800_board),
};
module_comedi_driver(driver_das800);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/das800.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Comedi driver for CIO-DAS16/M1
* Author: Frank Mori Hess, based on code from the das16 driver.
* Copyright (C) 2001 Frank Mori Hess <[email protected]>
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <[email protected]>
*/
/*
* Driver: das16m1
* Description: CIO-DAS16/M1
* Author: Frank Mori Hess <[email protected]>
* Devices: [Measurement Computing] CIO-DAS16/M1 (das16m1)
* Status: works
*
* This driver supports a single board - the CIO-DAS16/M1. As far as I know,
* there are no other boards that have the same register layout. Even the
* CIO-DAS16/M1/16 is significantly different.
*
* I was _barely_ able to reach the full 1 MHz capability of this board, using
* a hard real-time interrupt (set the TRIG_RT flag in your struct comedi_cmd
* and use rtlinux or RTAI). The board can't do DMA, so the bottleneck is
* pulling the data across the ISA bus. I timed the interrupt handler, and it
* took my computer ~470 microseconds to pull 512 samples from the board. So
* at 1 MHz sampling rate, expect your CPU to be spending almost all of its
* time in the interrupt handler.
*
* This board has some unusual restrictions for its channel/gain list. If the
* list has 2 or more channels in it, then two conditions must be satisfied:
* (1) - even/odd channels must appear at even/odd indices in the list
* (2) - the list must have an even number of entries.
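*
* For example (illustrative), the 4-entry list {0, 1, 2, 3} satisfies
* both conditions, while {0, 2} does not: channel 2 is even but sits at
* odd index 1.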
*
* Configuration options:
* [0] - base io address
* [1] - irq (optional, but you probably want it)
*
* irq can be omitted, although the cmd interface will not work without it.
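*
* Example attach command (illustrative; the address and irq values are
* assumptions, not defaults):
*
* comedi_config /dev/comedi0 das16m1 0x300,10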
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/comedi/comedidev.h>
#include <linux/comedi/comedi_8255.h>
#include <linux/comedi/comedi_8254.h>
/*
* Register map (dev->iobase)
*/
#define DAS16M1_AI_REG 0x00 /* 16-bit register */
#define DAS16M1_AI_TO_CHAN(x) (((x) >> 0) & 0xf)
#define DAS16M1_AI_TO_SAMPLE(x) (((x) >> 4) & 0xfff)
#define DAS16M1_CS_REG 0x02
#define DAS16M1_CS_EXT_TRIG BIT(0)
#define DAS16M1_CS_OVRUN BIT(5)
#define DAS16M1_CS_IRQDATA BIT(7)
#define DAS16M1_DI_REG 0x03
#define DAS16M1_DO_REG 0x03
#define DAS16M1_CLR_INTR_REG 0x04
#define DAS16M1_INTR_CTRL_REG 0x05
#define DAS16M1_INTR_CTRL_PACER(x) (((x) & 0x3) << 0)
#define DAS16M1_INTR_CTRL_PACER_EXT DAS16M1_INTR_CTRL_PACER(2)
#define DAS16M1_INTR_CTRL_PACER_INT DAS16M1_INTR_CTRL_PACER(3)
#define DAS16M1_INTR_CTRL_PACER_MASK DAS16M1_INTR_CTRL_PACER(3)
#define DAS16M1_INTR_CTRL_IRQ(x) (((x) & 0x7) << 4)
#define DAS16M1_INTR_CTRL_INTE BIT(7)
#define DAS16M1_Q_ADDR_REG 0x06
#define DAS16M1_Q_REG 0x07
#define DAS16M1_Q_CHAN(x) (((x) & 0x7) << 0)
#define DAS16M1_Q_RANGE(x) (((x) & 0xf) << 4)
#define DAS16M1_8254_IOBASE1 0x08
#define DAS16M1_8254_IOBASE2 0x0c
#define DAS16M1_8255_IOBASE 0x400
#define DAS16M1_8254_IOBASE3 0x404
#define DAS16M1_SIZE2 0x08
#define DAS16M1_AI_FIFO_SZ 1024 /* # samples */
static const struct comedi_lrange range_das16m1 = {
9, {
BIP_RANGE(5),
BIP_RANGE(2.5),
BIP_RANGE(1.25),
BIP_RANGE(0.625),
UNI_RANGE(10),
UNI_RANGE(5),
UNI_RANGE(2.5),
UNI_RANGE(1.25),
BIP_RANGE(10)
}
};
struct das16m1_private {
struct comedi_8254 *counter;
unsigned int intr_ctrl;
unsigned int adc_count;
u16 initial_hw_count;
unsigned short ai_buffer[DAS16M1_AI_FIFO_SZ];
unsigned long extra_iobase;
};
static void das16m1_ai_set_queue(struct comedi_device *dev,
unsigned int *chanspec, unsigned int len)
{
unsigned int i;
for (i = 0; i < len; i++) {
unsigned int chan = CR_CHAN(chanspec[i]);
unsigned int range = CR_RANGE(chanspec[i]);
outb(i, dev->iobase + DAS16M1_Q_ADDR_REG);
outb(DAS16M1_Q_CHAN(chan) | DAS16M1_Q_RANGE(range),
dev->iobase + DAS16M1_Q_REG);
}
}
static void das16m1_ai_munge(struct comedi_device *dev,
struct comedi_subdevice *s,
void *data, unsigned int num_bytes,
unsigned int start_chan_index)
{
unsigned short *array = data;
unsigned int nsamples = comedi_bytes_to_samples(s, num_bytes);
unsigned int i;
/*
* The fifo values have the channel number in the lower 4-bits and
* the sample in the upper 12-bits. This just shifts the values
* to remove the channel numbers.
*/
for (i = 0; i < nsamples; i++)
array[i] = DAS16M1_AI_TO_SAMPLE(array[i]);
}
static int das16m1_ai_check_chanlist(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
int i;
if (cmd->chanlist_len == 1)
return 0;
if ((cmd->chanlist_len % 2) != 0) {
dev_dbg(dev->class_dev,
"chanlist must be of even length or length 1\n");
return -EINVAL;
}
for (i = 0; i < cmd->chanlist_len; i++) {
unsigned int chan = CR_CHAN(cmd->chanlist[i]);
if ((i % 2) != (chan % 2)) {
dev_dbg(dev->class_dev,
"even/odd channels must go have even/odd chanlist indices\n");
return -EINVAL;
}
}
return 0;
}
static int das16m1_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
int err = 0;
/* Step 1 : check if triggers are trivially valid */
err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_FOLLOW);
err |= comedi_check_trigger_src(&cmd->convert_src,
TRIG_TIMER | TRIG_EXT);
err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
if (err)
return 1;
/* Step 2a : make sure trigger sources are unique */
err |= comedi_check_trigger_is_unique(cmd->start_src);
err |= comedi_check_trigger_is_unique(cmd->convert_src);
err |= comedi_check_trigger_is_unique(cmd->stop_src);
/* Step 2b : and mutually compatible */
if (err)
return 2;
/* Step 3: check if arguments are trivially valid */
err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
if (cmd->scan_begin_src == TRIG_FOLLOW) /* internal trigger */
err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
if (cmd->convert_src == TRIG_TIMER)
err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 1000);
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
if (cmd->stop_src == TRIG_COUNT)
err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
else /* TRIG_NONE */
err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
if (err)
return 3;
/* step 4: fix up arguments */
if (cmd->convert_src == TRIG_TIMER) {
unsigned int arg = cmd->convert_arg;
comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
}
if (err)
return 4;
/* Step 5: check channel list if it exists */
if (cmd->chanlist && cmd->chanlist_len > 0)
err |= das16m1_ai_check_chanlist(dev, s, cmd);
if (err)
return 5;
return 0;
}
static int das16m1_ai_cmd(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct das16m1_private *devpriv = dev->private;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
unsigned int byte;
/* set software count */
devpriv->adc_count = 0;
/*
* Initialize lower half of hardware counter, used to determine how
* many samples are in fifo. Value doesn't actually load into counter
* until counter's next clock (the next a/d conversion).
*/
comedi_8254_set_mode(devpriv->counter, 1, I8254_MODE2 | I8254_BINARY);
comedi_8254_write(devpriv->counter, 1, 0);
/*
* Remember current reading of counter so we know when counter has
* actually been loaded.
*/
devpriv->initial_hw_count = comedi_8254_read(devpriv->counter, 1);
das16m1_ai_set_queue(dev, cmd->chanlist, cmd->chanlist_len);
/* enable interrupts and set internal pacer counter mode and counts */
devpriv->intr_ctrl &= ~DAS16M1_INTR_CTRL_PACER_MASK;
if (cmd->convert_src == TRIG_TIMER) {
comedi_8254_update_divisors(dev->pacer);
comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
devpriv->intr_ctrl |= DAS16M1_INTR_CTRL_PACER_INT;
} else { /* TRIG_EXT */
devpriv->intr_ctrl |= DAS16M1_INTR_CTRL_PACER_EXT;
}
/* set control & status register */
byte = 0;
/*
* Enable the external start trigger only when conversions are not
* also externally triggered (the board dislikes having both the
* start and conversion triggers external simultaneously).
*/
if (cmd->start_src == TRIG_EXT && cmd->convert_src != TRIG_EXT)
byte |= DAS16M1_CS_EXT_TRIG;
outb(byte, dev->iobase + DAS16M1_CS_REG);
/* clear interrupt */
outb(0, dev->iobase + DAS16M1_CLR_INTR_REG);
devpriv->intr_ctrl |= DAS16M1_INTR_CTRL_INTE;
outb(devpriv->intr_ctrl, dev->iobase + DAS16M1_INTR_CTRL_REG);
return 0;
}
static int das16m1_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct das16m1_private *devpriv = dev->private;
/* disable interrupts and pacer */
devpriv->intr_ctrl &= ~(DAS16M1_INTR_CTRL_INTE |
DAS16M1_INTR_CTRL_PACER_MASK);
outb(devpriv->intr_ctrl, dev->iobase + DAS16M1_INTR_CTRL_REG);
return 0;
}
static int das16m1_ai_eoc(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context)
{
unsigned int status;
status = inb(dev->iobase + DAS16M1_CS_REG);
if (status & DAS16M1_CS_IRQDATA)
return 0;
return -EBUSY;
}
static int das16m1_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
int ret;
int i;
das16m1_ai_set_queue(dev, &insn->chanspec, 1);
for (i = 0; i < insn->n; i++) {
unsigned short val;
/* clear interrupt */
outb(0, dev->iobase + DAS16M1_CLR_INTR_REG);
/* trigger conversion */
outb(0, dev->iobase + DAS16M1_AI_REG);
ret = comedi_timeout(dev, s, insn, das16m1_ai_eoc, 0);
if (ret)
return ret;
val = inw(dev->iobase + DAS16M1_AI_REG);
data[i] = DAS16M1_AI_TO_SAMPLE(val);
}
return insn->n;
}
static int das16m1_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
data[1] = inb(dev->iobase + DAS16M1_DI_REG) & 0xf;
return insn->n;
}
static int das16m1_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
if (comedi_dio_update_state(s, data))
outb(s->state, dev->iobase + DAS16M1_DO_REG);
data[1] = s->state;
return insn->n;
}
static void das16m1_handler(struct comedi_device *dev, unsigned int status)
{
struct das16m1_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
u16 num_samples;
u16 hw_counter;
/* figure out how many samples are in fifo */
hw_counter = comedi_8254_read(devpriv->counter, 1);
/*
* Make sure hardware counter reading is not bogus due to initial
* value not having been loaded yet.
*/
if (devpriv->adc_count == 0 &&
hw_counter == devpriv->initial_hw_count) {
num_samples = 0;
} else {
/*
* The calculation of num_samples looks odd, but it uses the
* following facts. 16 bit hardware counter is initialized with
* value of zero (which really means 0x1000). The counter
* decrements by one on each conversion (when the counter
* decrements from zero it goes to 0xffff). num_samples is a
* 16 bit variable, so it will roll over in a similar fashion
* to the hardware counter. Work it out, and this is what you
* get.
*/
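/*
* Worked example (illustrative): after five conversions the counter
* has stepped 0x0000 -> 0xffff -> ... -> 0xfffb, so in 16-bit
* arithmetic -hw_counter == 0x0005. With adc_count still 0 this
* yields num_samples = 5; the unsigned wrap-around keeps the result
* correct as both values roll over.
*/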
num_samples = -hw_counter - devpriv->adc_count;
}
/* check if we only need some of the points */
if (cmd->stop_src == TRIG_COUNT) {
if (num_samples > cmd->stop_arg * cmd->chanlist_len)
num_samples = cmd->stop_arg * cmd->chanlist_len;
}
/* make sure we don't try to get too many points if fifo has overrun */
if (num_samples > DAS16M1_AI_FIFO_SZ)
num_samples = DAS16M1_AI_FIFO_SZ;
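/* drain the samples from the fifo (AI data register at offset 0x00) */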
insw(dev->iobase, devpriv->ai_buffer, num_samples);
comedi_buf_write_samples(s, devpriv->ai_buffer, num_samples);
devpriv->adc_count += num_samples;
if (cmd->stop_src == TRIG_COUNT) {
if (devpriv->adc_count >= cmd->stop_arg * cmd->chanlist_len) {
/* end of acquisition */
async->events |= COMEDI_CB_EOA;
}
}
/*
* This probably won't catch overruns since the card doesn't generate
* overrun interrupts, but we might as well try.
*/
if (status & DAS16M1_CS_OVRUN) {
async->events |= COMEDI_CB_ERROR;
dev_err(dev->class_dev, "fifo overflow\n");
}
comedi_handle_events(dev, s);
}
static int das16m1_ai_poll(struct comedi_device *dev,
struct comedi_subdevice *s)
{
unsigned long flags;
unsigned int status;
/* prevent race with interrupt handler */
spin_lock_irqsave(&dev->spinlock, flags);
status = inb(dev->iobase + DAS16M1_CS_REG);
das16m1_handler(dev, status);
spin_unlock_irqrestore(&dev->spinlock, flags);
return comedi_buf_n_bytes_ready(s);
}
static irqreturn_t das16m1_interrupt(int irq, void *d)
{
int status;
struct comedi_device *dev = d;
if (!dev->attached) {
dev_err(dev->class_dev, "premature interrupt\n");
return IRQ_HANDLED;
}
/* prevent race with comedi_poll() */
spin_lock(&dev->spinlock);
status = inb(dev->iobase + DAS16M1_CS_REG);
if ((status & (DAS16M1_CS_IRQDATA | DAS16M1_CS_OVRUN)) == 0) {
dev_err(dev->class_dev, "spurious interrupt\n");
spin_unlock(&dev->spinlock);
return IRQ_NONE;
}
das16m1_handler(dev, status);
/* clear interrupt */
outb(0, dev->iobase + DAS16M1_CLR_INTR_REG);
spin_unlock(&dev->spinlock);
return IRQ_HANDLED;
}
static int das16m1_irq_bits(unsigned int irq)
{
switch (irq) {
case 10:
return 0x0;
case 11:
return 0x1;
case 12:
return 0x2;
case 15:
return 0x3;
case 2:
return 0x4;
case 3:
return 0x5;
case 5:
return 0x6;
case 7:
return 0x7;
default:
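/*
* Note: irqs 4, 6 and 14 pass the validity mask in das16m1_attach
* but have no mapping here, so they fall through to the IRQ 10
* select bits (a possible latent mismatch).
*/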
return 0x0;
}
}
static int das16m1_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
struct das16m1_private *devpriv;
struct comedi_subdevice *s;
int ret;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
ret = comedi_request_region(dev, it->options[0], 0x10);
if (ret)
return ret;
/* Request an additional region for the 8255 and 3rd 8254 */
ret = __comedi_request_region(dev, dev->iobase + DAS16M1_8255_IOBASE,
DAS16M1_SIZE2);
if (ret)
return ret;
devpriv->extra_iobase = dev->iobase + DAS16M1_8255_IOBASE;
/* only irqs 2, 3, 4, 5, 6, 7, 10, 11, 12, 14, and 15 are valid */
if ((1 << it->options[1]) & 0xdcfc) {
ret = request_irq(it->options[1], das16m1_interrupt, 0,
dev->board_name, dev);
if (ret == 0)
dev->irq = it->options[1];
}
dev->pacer = comedi_8254_init(dev->iobase + DAS16M1_8254_IOBASE2,
I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
if (!dev->pacer)
return -ENOMEM;
devpriv->counter = comedi_8254_init(dev->iobase + DAS16M1_8254_IOBASE1,
0, I8254_IO8, 0);
if (!devpriv->counter)
return -ENOMEM;
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
/* Analog Input subdevice */
s = &dev->subdevices[0];
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_DIFF;
s->n_chan = 8;
s->maxdata = 0x0fff;
s->range_table = &range_das16m1;
s->insn_read = das16m1_ai_insn_read;
if (dev->irq) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
s->len_chanlist = 256;
s->do_cmdtest = das16m1_ai_cmdtest;
s->do_cmd = das16m1_ai_cmd;
s->cancel = das16m1_ai_cancel;
s->poll = das16m1_ai_poll;
s->munge = das16m1_ai_munge;
}
/* Digital Input subdevice */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = 4;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = das16m1_di_insn_bits;
/* Digital Output subdevice */
s = &dev->subdevices[2];
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 4;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = das16m1_do_insn_bits;
/* Digital I/O subdevice (8255) */
s = &dev->subdevices[3];
ret = subdev_8255_init(dev, s, NULL, DAS16M1_8255_IOBASE);
if (ret)
return ret;
/* initialize digital output lines */
outb(0, dev->iobase + DAS16M1_DO_REG);
/* set the interrupt level */
devpriv->intr_ctrl = DAS16M1_INTR_CTRL_IRQ(das16m1_irq_bits(dev->irq));
outb(devpriv->intr_ctrl, dev->iobase + DAS16M1_INTR_CTRL_REG);
return 0;
}
static void das16m1_detach(struct comedi_device *dev)
{
struct das16m1_private *devpriv = dev->private;
if (devpriv) {
if (devpriv->extra_iobase)
release_region(devpriv->extra_iobase, DAS16M1_SIZE2);
kfree(devpriv->counter);
}
comedi_legacy_detach(dev);
}
static struct comedi_driver das16m1_driver = {
.driver_name = "das16m1",
.module = THIS_MODULE,
.attach = das16m1_attach,
.detach = das16m1_detach,
};
module_comedi_driver(das16m1_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi driver for CIO-DAS16/M1 ISA cards");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/das16m1.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/8255.c
* Driver for 8255
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998 David A. Schleef <[email protected]>
*/
/*
* Driver: 8255
* Description: generic 8255 support
* Devices: [standard] 8255 (8255)
* Author: ds
* Status: works
* Updated: Fri, 7 Jun 2002 12:56:45 -0700
*
* The classic in digital I/O. The 8255 appears in Comedi as a single
* digital I/O subdevice with 24 channels. Channel 0 corresponds
* to the 8255's port A, bit 0; channel 23 corresponds to port C, bit
* 7. Direction configuration is done in blocks, with channels 0-7,
* 8-15, 16-19, and 20-23 making up the 4 blocks. The only 8255 mode
* supported is mode 0.
*
* You should enable compilation of this driver if you plan to use a board
* that has an 8255 chip. For multifunction boards, the main driver will
* configure the 8255 subdevice automatically.
*
* This driver also works independently with ISA and PCI cards that
* directly map the 8255 registers to I/O ports, including cards with
* multiple 8255 chips. To configure the driver for such a card, the
* option list should be a list of the I/O port bases for each of the
* 8255 chips. For example,
*
* comedi_config /dev/comedi0 8255 0x200,0x204,0x208,0x20c
*
* Note that most PCI 8255 boards do NOT work with this driver, and
* need a separate driver as a wrapper. For those that do work, the
* I/O port base address can be found in the output of 'lspci -v'.
*/
#include <linux/module.h>
#include <linux/comedi/comedidev.h>
#include <linux/comedi/comedi_8255.h>
static int dev_8255_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
struct comedi_subdevice *s;
unsigned long iobase;
int ret;
int i;
for (i = 0; i < COMEDI_NDEVCONFOPTS; i++) {
iobase = it->options[i];
if (!iobase)
break;
}
if (i == 0) {
dev_warn(dev->class_dev, "no devices specified\n");
return -EINVAL;
}
ret = comedi_alloc_subdevices(dev, i);
if (ret)
return ret;
for (i = 0; i < dev->n_subdevices; i++) {
s = &dev->subdevices[i];
iobase = it->options[i];
/*
* __comedi_request_region() does not set dev->iobase.
*
* For 8255 devices that are manually attached using
* comedi_config, the 'iobase' is the actual I/O port
* base address of the chip.
*/
ret = __comedi_request_region(dev, iobase, I8255_SIZE);
if (ret) {
s->type = COMEDI_SUBD_UNUSED;
} else {
ret = subdev_8255_init(dev, s, NULL, iobase);
if (ret) {
/*
* Release the I/O port region here, as the
* "detach" handler cannot find it.
*/
release_region(iobase, I8255_SIZE);
s->type = COMEDI_SUBD_UNUSED;
return ret;
}
}
}
return 0;
}
static void dev_8255_detach(struct comedi_device *dev)
{
struct comedi_subdevice *s;
int i;
for (i = 0; i < dev->n_subdevices; i++) {
s = &dev->subdevices[i];
if (s->type != COMEDI_SUBD_UNUSED) {
unsigned long regbase = subdev_8255_regbase(s);
release_region(regbase, I8255_SIZE);
}
}
}
static struct comedi_driver dev_8255_driver = {
.driver_name = "8255",
.module = THIS_MODULE,
.attach = dev_8255_attach,
.detach = dev_8255_detach,
};
module_comedi_driver(dev_8255_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi driver for standalone 8255 devices");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/8255.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* multiq3.c
* Hardware driver for Quanser Consulting MultiQ-3 board
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1999 Anders Blomdell <[email protected]>
*/
/*
* Driver: multiq3
* Description: Quanser Consulting MultiQ-3
* Devices: [Quanser Consulting] MultiQ-3 (multiq3)
* Author: Anders Blomdell <[email protected]>
* Status: works
*
* Configuration Options:
* [0] - I/O port base address
* [1] - IRQ (not used)
* [2] - Number of optional encoder chips installed on board
* 0 = none
* 1 = 2 inputs (Model -2E)
* 2 = 4 inputs (Model -4E)
* 3 = 6 inputs (Model -6E)
* 4 = 8 inputs (Model -8E)
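*
* Example attach command (illustrative; the values are assumptions):
*
* comedi_config /dev/comedi0 multiq3 0x320,0,2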
*/
#include <linux/module.h>
#include <linux/comedi/comedidev.h>
/*
* Register map
*/
#define MULTIQ3_DI_REG 0x00
#define MULTIQ3_DO_REG 0x00
#define MULTIQ3_AO_REG 0x02
#define MULTIQ3_AI_REG 0x04
#define MULTIQ3_AI_CONV_REG 0x04
#define MULTIQ3_STATUS_REG 0x06
#define MULTIQ3_STATUS_EOC BIT(3)
#define MULTIQ3_STATUS_EOC_I BIT(4)
#define MULTIQ3_CTRL_REG 0x06
#define MULTIQ3_CTRL_AO_CHAN(x) (((x) & 0x7) << 0)
#define MULTIQ3_CTRL_RC(x) (((x) & 0x3) << 0)
#define MULTIQ3_CTRL_AI_CHAN(x) (((x) & 0x7) << 3)
#define MULTIQ3_CTRL_E_CHAN(x) (((x) & 0x7) << 3)
#define MULTIQ3_CTRL_EN BIT(6)
#define MULTIQ3_CTRL_AZ BIT(7)
#define MULTIQ3_CTRL_CAL BIT(8)
#define MULTIQ3_CTRL_SH BIT(9)
#define MULTIQ3_CTRL_CLK BIT(10)
#define MULTIQ3_CTRL_LD (3 << 11)
#define MULTIQ3_CLK_REG 0x08
#define MULTIQ3_ENC_DATA_REG 0x0c
#define MULTIQ3_ENC_CTRL_REG 0x0e
/*
* Encoder chip commands (from the programming manual)
*/
#define MULTIQ3_CLOCK_DATA 0x00 /* FCK frequency divider */
#define MULTIQ3_CLOCK_SETUP 0x18 /* xfer PR0 to PSC */
#define MULTIQ3_INPUT_SETUP 0x41 /* enable inputs A and B */
#define MULTIQ3_QUAD_X4 0x38 /* quadrature */
#define MULTIQ3_BP_RESET 0x01 /* reset byte pointer */
#define MULTIQ3_CNTR_RESET 0x02 /* reset counter */
#define MULTIQ3_TRSFRPR_CTR 0x08 /* xfer preset reg to counter */
#define MULTIQ3_TRSFRCNTR_OL 0x10 /* xfer CNTR to OL (x and y) */
#define MULTIQ3_EFLAG_RESET 0x06 /* reset E bit of flag reg */
static void multiq3_set_ctrl(struct comedi_device *dev, unsigned int bits)
{
/*
* According to the programming manual, the SH and CLK bits should
* be kept high at all times.
*/
outw(MULTIQ3_CTRL_SH | MULTIQ3_CTRL_CLK | bits,
dev->iobase + MULTIQ3_CTRL_REG);
}
static int multiq3_ai_status(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context)
{
unsigned int status;
status = inw(dev->iobase + MULTIQ3_STATUS_REG);
if (status & context)
return 0;
return -EBUSY;
}
static int multiq3_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int val;
int ret;
int i;
multiq3_set_ctrl(dev, MULTIQ3_CTRL_EN | MULTIQ3_CTRL_AI_CHAN(chan));
ret = comedi_timeout(dev, s, insn, multiq3_ai_status,
MULTIQ3_STATUS_EOC);
if (ret)
return ret;
for (i = 0; i < insn->n; i++) {
outw(0, dev->iobase + MULTIQ3_AI_CONV_REG);
ret = comedi_timeout(dev, s, insn, multiq3_ai_status,
MULTIQ3_STATUS_EOC_I);
if (ret)
return ret;
/* get a 16-bit sample; mask it to the subdevice resolution */
val = inb(dev->iobase + MULTIQ3_AI_REG) << 8;
val |= inb(dev->iobase + MULTIQ3_AI_REG);
val &= s->maxdata;
/* munge the 2's complement value to offset binary */
data[i] = comedi_offset_munge(s, val);
}
return insn->n;
}
static int multiq3_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int val = s->readback[chan];
int i;
for (i = 0; i < insn->n; i++) {
val = data[i];
multiq3_set_ctrl(dev, MULTIQ3_CTRL_LD |
MULTIQ3_CTRL_AO_CHAN(chan));
outw(val, dev->iobase + MULTIQ3_AO_REG);
multiq3_set_ctrl(dev, 0);
}
s->readback[chan] = val;
return insn->n;
}
static int multiq3_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
data[1] = inw(dev->iobase + MULTIQ3_DI_REG);
return insn->n;
}
static int multiq3_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
if (comedi_dio_update_state(s, data))
outw(s->state, dev->iobase + MULTIQ3_DO_REG);
data[1] = s->state;
return insn->n;
}
static int multiq3_encoder_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int val;
int i;
for (i = 0; i < insn->n; i++) {
/* select encoder channel */
multiq3_set_ctrl(dev, MULTIQ3_CTRL_EN |
MULTIQ3_CTRL_E_CHAN(chan));
/* reset the byte pointer */
outb(MULTIQ3_BP_RESET, dev->iobase + MULTIQ3_ENC_CTRL_REG);
/* latch the data */
outb(MULTIQ3_TRSFRCNTR_OL, dev->iobase + MULTIQ3_ENC_CTRL_REG);
/* read the 24-bit encoder data (lsb/mid/msb) */
val = inb(dev->iobase + MULTIQ3_ENC_DATA_REG);
val |= (inb(dev->iobase + MULTIQ3_ENC_DATA_REG) << 8);
val |= (inb(dev->iobase + MULTIQ3_ENC_DATA_REG) << 16);
/*
* Munge the data so that the reset value is in the middle
* of the maxdata range, i.e.:
*
* real value comedi value
* 0xffffff 0x7fffff 1 negative count
* 0x000000 0x800000 reset value
* 0x000001 0x800001 1 positive count
*
* It's possible for the 24-bit counter to overflow but it
* would normally take _quite_ a few turns. A 2000 line
* encoder in quadrature results in 8000 counts/rev. So about
* 1048 turns in either direction can be measured without
* an overflow.
*/
data[i] = (val + ((s->maxdata + 1) >> 1)) & s->maxdata;
}
return insn->n;
}
static void multiq3_encoder_reset(struct comedi_device *dev,
unsigned int chan)
{
multiq3_set_ctrl(dev, MULTIQ3_CTRL_EN | MULTIQ3_CTRL_E_CHAN(chan));
outb(MULTIQ3_EFLAG_RESET, dev->iobase + MULTIQ3_ENC_CTRL_REG);
outb(MULTIQ3_BP_RESET, dev->iobase + MULTIQ3_ENC_CTRL_REG);
outb(MULTIQ3_CLOCK_DATA, dev->iobase + MULTIQ3_ENC_DATA_REG);
outb(MULTIQ3_CLOCK_SETUP, dev->iobase + MULTIQ3_ENC_CTRL_REG);
outb(MULTIQ3_INPUT_SETUP, dev->iobase + MULTIQ3_ENC_CTRL_REG);
outb(MULTIQ3_QUAD_X4, dev->iobase + MULTIQ3_ENC_CTRL_REG);
outb(MULTIQ3_CNTR_RESET, dev->iobase + MULTIQ3_ENC_CTRL_REG);
}
static int multiq3_encoder_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
switch (data[0]) {
case INSN_CONFIG_RESET:
multiq3_encoder_reset(dev, chan);
break;
default:
return -EINVAL;
}
return insn->n;
}
static int multiq3_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
struct comedi_subdevice *s;
int ret;
int i;
ret = comedi_request_region(dev, it->options[0], 0x10);
if (ret)
return ret;
ret = comedi_alloc_subdevices(dev, 5);
if (ret)
return ret;
/* Analog Input subdevice */
s = &dev->subdevices[0];
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_GROUND;
s->n_chan = 8;
s->maxdata = 0x1fff;
s->range_table = &range_bipolar5;
s->insn_read = multiq3_ai_insn_read;
/* Analog Output subdevice */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 8;
s->maxdata = 0x0fff;
s->range_table = &range_bipolar5;
s->insn_write = multiq3_ao_insn_write;
ret = comedi_alloc_subdev_readback(s);
if (ret)
return ret;
/* Digital Input subdevice */
s = &dev->subdevices[2];
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = multiq3_di_insn_bits;
/* Digital Output subdevice */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = multiq3_do_insn_bits;
/* Encoder (Counter) subdevice */
s = &dev->subdevices[4];
s->type = COMEDI_SUBD_COUNTER;
s->subdev_flags = SDF_READABLE | SDF_LSAMPL;
s->n_chan = it->options[2] * 2;
s->maxdata = 0x00ffffff;
s->range_table = &range_unknown;
s->insn_read = multiq3_encoder_insn_read;
s->insn_config = multiq3_encoder_insn_config;
for (i = 0; i < s->n_chan; i++)
multiq3_encoder_reset(dev, i);
return 0;
}
static struct comedi_driver multiq3_driver = {
.driver_name = "multiq3",
.module = THIS_MODULE,
.attach = multiq3_attach,
.detach = comedi_legacy_detach,
};
module_comedi_driver(multiq3_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi driver for Quanser Consulting MultiQ-3 board");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/multiq3.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* addi_apci_3120.c
* Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
*
* ADDI-DATA GmbH
* Dieselstrasse 3
* D-77833 Ottersweier
* Tel: +49(0)7223/9493-0
* Fax: +49(0)7223/9493-92
* http://www.addi-data.com
* [email protected]
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/comedi/comedi_pci.h>
#include "amcc_s5933.h"
/*
* PCI BAR 0 register map (devpriv->amcc)
* see amcc_s5933.h for register and bit defines
*/
#define APCI3120_FIFO_ADVANCE_ON_BYTE_2 BIT(29)
/*
* PCI BAR 1 register map (dev->iobase)
*/
#define APCI3120_AI_FIFO_REG 0x00
#define APCI3120_CTRL_REG 0x00
#define APCI3120_CTRL_EXT_TRIG BIT(15)
#define APCI3120_CTRL_GATE(x) BIT(12 + (x))
#define APCI3120_CTRL_PR(x) (((x) & 0xf) << 8)
#define APCI3120_CTRL_PA(x) (((x) & 0xf) << 0)
#define APCI3120_AI_SOFTTRIG_REG 0x02
#define APCI3120_STATUS_REG 0x02
#define APCI3120_STATUS_EOC_INT BIT(15)
#define APCI3120_STATUS_AMCC_INT BIT(14)
#define APCI3120_STATUS_EOS_INT BIT(13)
#define APCI3120_STATUS_TIMER2_INT BIT(12)
#define APCI3120_STATUS_INT_MASK (0xf << 12)
#define APCI3120_STATUS_TO_DI_BITS(x) (((x) >> 8) & 0xf)
#define APCI3120_STATUS_TO_VERSION(x) (((x) >> 4) & 0xf)
#define APCI3120_STATUS_FIFO_FULL BIT(2)
#define APCI3120_STATUS_FIFO_EMPTY BIT(1)
#define APCI3120_STATUS_DA_READY BIT(0)
#define APCI3120_TIMER_REG 0x04
#define APCI3120_CHANLIST_REG 0x06
#define APCI3120_CHANLIST_INDEX(x) (((x) & 0xf) << 8)
#define APCI3120_CHANLIST_UNIPOLAR BIT(7)
#define APCI3120_CHANLIST_GAIN(x) (((x) & 0x3) << 4)
#define APCI3120_CHANLIST_MUX(x) (((x) & 0xf) << 0)
#define APCI3120_AO_REG(x) (0x08 + (((x) / 4) * 2))
#define APCI3120_AO_MUX(x) (((x) & 0x3) << 14)
#define APCI3120_AO_DATA(x) ((x) << 0)
#define APCI3120_TIMER_MODE_REG 0x0c
#define APCI3120_TIMER_MODE(_t, _m) ((_m) << ((_t) * 2))
#define APCI3120_TIMER_MODE0 0 /* I8254_MODE0 */
#define APCI3120_TIMER_MODE2 1 /* I8254_MODE2 */
#define APCI3120_TIMER_MODE4 2 /* I8254_MODE4 */
#define APCI3120_TIMER_MODE5 3 /* I8254_MODE5 */
#define APCI3120_TIMER_MODE_MASK(_t) (3 << ((_t) * 2))
#define APCI3120_CTR0_REG 0x0d
#define APCI3120_CTR0_DO_BITS(x) ((x) << 4)
#define APCI3120_CTR0_TIMER_SEL(x) ((x) << 0)
#define APCI3120_MODE_REG 0x0e
#define APCI3120_MODE_TIMER2_CLK(x) (((x) & 0x3) << 6)
#define APCI3120_MODE_TIMER2_CLK_OSC APCI3120_MODE_TIMER2_CLK(0)
#define APCI3120_MODE_TIMER2_CLK_OUT1 APCI3120_MODE_TIMER2_CLK(1)
#define APCI3120_MODE_TIMER2_CLK_EOC APCI3120_MODE_TIMER2_CLK(2)
#define APCI3120_MODE_TIMER2_CLK_EOS APCI3120_MODE_TIMER2_CLK(3)
#define APCI3120_MODE_TIMER2_CLK_MASK APCI3120_MODE_TIMER2_CLK(3)
#define APCI3120_MODE_TIMER2_AS(x) (((x) & 0x3) << 4)
#define APCI3120_MODE_TIMER2_AS_TIMER APCI3120_MODE_TIMER2_AS(0)
#define APCI3120_MODE_TIMER2_AS_COUNTER APCI3120_MODE_TIMER2_AS(1)
#define APCI3120_MODE_TIMER2_AS_WDOG APCI3120_MODE_TIMER2_AS(2)
#define APCI3120_MODE_TIMER2_AS_MASK APCI3120_MODE_TIMER2_AS(3)
#define APCI3120_MODE_SCAN_ENA BIT(3)
#define APCI3120_MODE_TIMER2_IRQ_ENA BIT(2)
#define APCI3120_MODE_EOS_IRQ_ENA BIT(1)
#define APCI3120_MODE_EOC_IRQ_ENA BIT(0)
/*
* PCI BAR 2 register map (devpriv->addon)
*/
#define APCI3120_ADDON_ADDR_REG 0x00
#define APCI3120_ADDON_DATA_REG 0x02
#define APCI3120_ADDON_CTRL_REG 0x04
#define APCI3120_ADDON_CTRL_AMWEN_ENA BIT(1)
#define APCI3120_ADDON_CTRL_A2P_FIFO_ENA BIT(0)
/*
* Board revisions
*/
#define APCI3120_REVA 0xa
#define APCI3120_REVB 0xb
#define APCI3120_REVA_OSC_BASE 70 /* 70ns = 14.29MHz */
#define APCI3120_REVB_OSC_BASE 50 /* 50ns = 20MHz */
static const struct comedi_lrange apci3120_ai_range = {
8, {
BIP_RANGE(10),
BIP_RANGE(5),
BIP_RANGE(2),
BIP_RANGE(1),
UNI_RANGE(10),
UNI_RANGE(5),
UNI_RANGE(2),
UNI_RANGE(1)
}
};
enum apci3120_boardid {
BOARD_APCI3120,
BOARD_APCI3001,
};
struct apci3120_board {
const char *name;
unsigned int ai_is_16bit:1;
unsigned int has_ao:1;
};
static const struct apci3120_board apci3120_boardtypes[] = {
[BOARD_APCI3120] = {
.name = "apci3120",
.ai_is_16bit = 1,
.has_ao = 1,
},
[BOARD_APCI3001] = {
.name = "apci3001",
},
};
struct apci3120_dmabuf {
unsigned short *virt;
dma_addr_t hw;
unsigned int size;
unsigned int use_size;
};
struct apci3120_private {
unsigned long amcc;
unsigned long addon;
unsigned int osc_base;
unsigned int use_dma:1;
unsigned int use_double_buffer:1;
unsigned int cur_dmabuf:1;
struct apci3120_dmabuf dmabuf[2];
unsigned char do_bits;
unsigned char timer_mode;
unsigned char mode;
unsigned short ctrl;
};
static void apci3120_addon_write(struct comedi_device *dev,
unsigned int val, unsigned int reg)
{
struct apci3120_private *devpriv = dev->private;
/* 16-bit interface for AMCC add-on registers */
outw(reg, devpriv->addon + APCI3120_ADDON_ADDR_REG);
outw(val & 0xffff, devpriv->addon + APCI3120_ADDON_DATA_REG);
outw(reg + 2, devpriv->addon + APCI3120_ADDON_ADDR_REG);
outw((val >> 16) & 0xffff, devpriv->addon + APCI3120_ADDON_DATA_REG);
}
static void apci3120_init_dma(struct comedi_device *dev,
struct apci3120_dmabuf *dmabuf)
{
struct apci3120_private *devpriv = dev->private;
/* AMCC - enable transfer count and reset A2P FIFO */
outl(AGCSTS_TC_ENABLE | AGCSTS_RESET_A2P_FIFO,
devpriv->amcc + AMCC_OP_REG_AGCSTS);
/* Add-On - enable transfer count and reset A2P FIFO */
apci3120_addon_write(dev, AGCSTS_TC_ENABLE | AGCSTS_RESET_A2P_FIFO,
AMCC_OP_REG_AGCSTS);
/* AMCC - enable transfers and reset A2P flags */
outl(RESET_A2P_FLAGS | EN_A2P_TRANSFERS,
devpriv->amcc + AMCC_OP_REG_MCSR);
/* Add-On - DMA start address */
apci3120_addon_write(dev, dmabuf->hw, AMCC_OP_REG_AMWAR);
/* Add-On - Number of acquisitions */
apci3120_addon_write(dev, dmabuf->use_size, AMCC_OP_REG_AMWTC);
/* AMCC - enable write complete (DMA) and set FIFO advance */
outl(APCI3120_FIFO_ADVANCE_ON_BYTE_2 | AINT_WRITE_COMPL,
devpriv->amcc + AMCC_OP_REG_INTCSR);
/* Add-On - enable DMA */
outw(APCI3120_ADDON_CTRL_AMWEN_ENA | APCI3120_ADDON_CTRL_A2P_FIFO_ENA,
devpriv->addon + APCI3120_ADDON_CTRL_REG);
}
static void apci3120_setup_dma(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct apci3120_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
struct apci3120_dmabuf *dmabuf0 = &devpriv->dmabuf[0];
struct apci3120_dmabuf *dmabuf1 = &devpriv->dmabuf[1];
unsigned int dmalen0 = dmabuf0->size;
unsigned int dmalen1 = dmabuf1->size;
unsigned int scan_bytes;
scan_bytes = comedi_samples_to_bytes(s, cmd->scan_end_arg);
if (cmd->stop_src == TRIG_COUNT) {
/*
* Clamp the transfers so we never collect more than stop_arg
* scans: limit the first buffer to the total byte count, and
* the second to whatever remains after the first.
*/
if (dmalen0 > (cmd->stop_arg * scan_bytes))
dmalen0 = cmd->stop_arg * scan_bytes;
else if (dmalen1 > (cmd->stop_arg * scan_bytes - dmalen0))
dmalen1 = cmd->stop_arg * scan_bytes - dmalen0;
}
if (cmd->flags & CMDF_WAKE_EOS) {
/* wake up on every scan: limit each DMA buffer to one scan */
if (dmalen0 > scan_bytes) {
dmalen0 = scan_bytes;
if (cmd->scan_end_arg & 1)
dmalen0 += 2;
}
if (dmalen1 > scan_bytes) {
dmalen1 = scan_bytes;
if (cmd->scan_end_arg & 1)
dmalen1 -= 2;
if (dmalen1 < 4)
dmalen1 = 4;
}
} else {
/* don't let a DMA buffer exceed the comedi output buffer */
if (dmalen0 > s->async->prealloc_bufsz)
dmalen0 = s->async->prealloc_bufsz;
if (dmalen1 > s->async->prealloc_bufsz)
dmalen1 = s->async->prealloc_bufsz;
}
dmabuf0->use_size = dmalen0;
dmabuf1->use_size = dmalen1;
apci3120_init_dma(dev, dmabuf0);
}
/*
* There are three timers on the board. They all use the same base
* clock with a fixed prescaler for each timer. The base clock used
* depends on the board version and type.
*
* APCI-3120 Rev A boards OSC = 14.29MHz base clock (~70ns)
* APCI-3120 Rev B boards OSC = 20MHz base clock (50ns)
* APCI-3001 boards OSC = 20MHz base clock (50ns)
*
* The prescalers for each timer are:
* Timer 0 CLK = OSC/10
* Timer 1 CLK = OSC/1000
* Timer 2 CLK = OSC/1000
*/
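/*
* Worked example (illustrative): on a Rev B board osc_base = 50ns, so
* timer 0 ticks every 50ns * 10 = 500ns and a requested conversion
* time of 10us (10000ns) maps to a divisor of 10000 / 500 = 20.
*/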
static unsigned int apci3120_ns_to_timer(struct comedi_device *dev,
unsigned int timer,
unsigned int ns,
unsigned int flags)
{
struct apci3120_private *devpriv = dev->private;
unsigned int prescale = (timer == 0) ? 10 : 1000;
unsigned int timer_base = devpriv->osc_base * prescale;
unsigned int divisor;
switch (flags & CMDF_ROUND_MASK) {
case CMDF_ROUND_UP:
divisor = DIV_ROUND_UP(ns, timer_base);
break;
case CMDF_ROUND_DOWN:
divisor = ns / timer_base;
break;
case CMDF_ROUND_NEAREST:
default:
divisor = DIV_ROUND_CLOSEST(ns, timer_base);
break;
}
if (timer == 2) {
/* timer 2 is 24-bits */
if (divisor > 0x00ffffff)
divisor = 0x00ffffff;
} else {
/* timers 0 and 1 are 16-bits */
if (divisor > 0xffff)
divisor = 0xffff;
}
/* the timers require a minimum divisor of 2 */
if (divisor < 2)
divisor = 2;
return divisor;
}
static void apci3120_clr_timer2_interrupt(struct comedi_device *dev)
{
/* a dummy read of APCI3120_CTR0_REG clears the timer 2 interrupt */
inb(dev->iobase + APCI3120_CTR0_REG);
}
static void apci3120_timer_write(struct comedi_device *dev,
unsigned int timer, unsigned int val)
{
struct apci3120_private *devpriv = dev->private;
/* write 16-bit value to timer (lower 16-bits of timer 2) */
outb(APCI3120_CTR0_DO_BITS(devpriv->do_bits) |
APCI3120_CTR0_TIMER_SEL(timer),
dev->iobase + APCI3120_CTR0_REG);
outw(val & 0xffff, dev->iobase + APCI3120_TIMER_REG);
if (timer == 2) {
/* write upper 16-bits to timer 2 */
outb(APCI3120_CTR0_DO_BITS(devpriv->do_bits) |
APCI3120_CTR0_TIMER_SEL(timer + 1),
dev->iobase + APCI3120_CTR0_REG);
outw((val >> 16) & 0xffff, dev->iobase + APCI3120_TIMER_REG);
}
}
static unsigned int apci3120_timer_read(struct comedi_device *dev,
unsigned int timer)
{
struct apci3120_private *devpriv = dev->private;
unsigned int val;
/* read 16-bit value from timer (lower 16-bits of timer 2) */
outb(APCI3120_CTR0_DO_BITS(devpriv->do_bits) |
APCI3120_CTR0_TIMER_SEL(timer),
dev->iobase + APCI3120_CTR0_REG);
val = inw(dev->iobase + APCI3120_TIMER_REG);
if (timer == 2) {
/* read upper 16-bits from timer 2 */
outb(APCI3120_CTR0_DO_BITS(devpriv->do_bits) |
APCI3120_CTR0_TIMER_SEL(timer + 1),
dev->iobase + APCI3120_CTR0_REG);
val |= (inw(dev->iobase + APCI3120_TIMER_REG) << 16);
}
return val;
}
static void apci3120_timer_set_mode(struct comedi_device *dev,
unsigned int timer, unsigned int mode)
{
struct apci3120_private *devpriv = dev->private;
devpriv->timer_mode &= ~APCI3120_TIMER_MODE_MASK(timer);
devpriv->timer_mode |= APCI3120_TIMER_MODE(timer, mode);
outb(devpriv->timer_mode, dev->iobase + APCI3120_TIMER_MODE_REG);
}
static void apci3120_timer_enable(struct comedi_device *dev,
unsigned int timer, bool enable)
{
struct apci3120_private *devpriv = dev->private;
if (enable)
devpriv->ctrl |= APCI3120_CTRL_GATE(timer);
else
devpriv->ctrl &= ~APCI3120_CTRL_GATE(timer);
outw(devpriv->ctrl, dev->iobase + APCI3120_CTRL_REG);
}
static void apci3120_exttrig_enable(struct comedi_device *dev, bool enable)
{
struct apci3120_private *devpriv = dev->private;
if (enable)
devpriv->ctrl |= APCI3120_CTRL_EXT_TRIG;
else
devpriv->ctrl &= ~APCI3120_CTRL_EXT_TRIG;
outw(devpriv->ctrl, dev->iobase + APCI3120_CTRL_REG);
}
static void apci3120_set_chanlist(struct comedi_device *dev,
struct comedi_subdevice *s,
int n_chan, unsigned int *chanlist)
{
struct apci3120_private *devpriv = dev->private;
int i;
/* set chanlist for scan */
for (i = 0; i < n_chan; i++) {
unsigned int chan = CR_CHAN(chanlist[i]);
unsigned int range = CR_RANGE(chanlist[i]);
unsigned int val;
val = APCI3120_CHANLIST_MUX(chan) |
APCI3120_CHANLIST_GAIN(range) |
APCI3120_CHANLIST_INDEX(i);
if (comedi_range_is_unipolar(s, range))
val |= APCI3120_CHANLIST_UNIPOLAR;
outw(val, dev->iobase + APCI3120_CHANLIST_REG);
}
/* a dummy read of APCI3120_TIMER_MODE_REG resets the ai FIFO */
inw(dev->iobase + APCI3120_TIMER_MODE_REG);
/* set scan length (PR) and scan start (PA) */
devpriv->ctrl = APCI3120_CTRL_PR(n_chan - 1) | APCI3120_CTRL_PA(0);
outw(devpriv->ctrl, dev->iobase + APCI3120_CTRL_REG);
/* enable chanlist scanning if necessary */
if (n_chan > 1)
devpriv->mode |= APCI3120_MODE_SCAN_ENA;
}
static void apci3120_interrupt_dma(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct apci3120_private *devpriv = dev->private;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
struct apci3120_dmabuf *dmabuf;
unsigned int nbytes;
unsigned int nsamples;
dmabuf = &devpriv->dmabuf[devpriv->cur_dmabuf];
nbytes = dmabuf->use_size - inl(devpriv->amcc + AMCC_OP_REG_MWTC);
if (nbytes < dmabuf->use_size)
dev_err(dev->class_dev, "Interrupted DMA transfer!\n");
if (nbytes & 1) {
dev_err(dev->class_dev, "Odd count of bytes in DMA ring!\n");
async->events |= COMEDI_CB_ERROR;
return;
}
nsamples = comedi_bytes_to_samples(s, nbytes);
if (nsamples) {
comedi_buf_write_samples(s, dmabuf->virt, nsamples);
if (!(cmd->flags & CMDF_WAKE_EOS))
async->events |= COMEDI_CB_EOS;
}
if ((async->events & COMEDI_CB_CANCEL_MASK) ||
(cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg))
return;
if (devpriv->use_double_buffer) {
/* switch DMA buffers for next interrupt */
devpriv->cur_dmabuf = !devpriv->cur_dmabuf;
dmabuf = &devpriv->dmabuf[devpriv->cur_dmabuf];
apci3120_init_dma(dev, dmabuf);
} else {
/* restart DMA if not using double buffering */
apci3120_init_dma(dev, dmabuf);
}
}
static irqreturn_t apci3120_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct apci3120_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
unsigned int status;
unsigned int int_amcc;
status = inw(dev->iobase + APCI3120_STATUS_REG);
int_amcc = inl(devpriv->amcc + AMCC_OP_REG_INTCSR);
if (!(status & APCI3120_STATUS_INT_MASK) &&
!(int_amcc & ANY_S593X_INT)) {
dev_err(dev->class_dev, "IRQ from unknown source\n");
return IRQ_NONE;
}
outl(int_amcc | AINT_INT_MASK, devpriv->amcc + AMCC_OP_REG_INTCSR);
if (devpriv->ctrl & APCI3120_CTRL_EXT_TRIG)
apci3120_exttrig_enable(dev, false);
if (int_amcc & MASTER_ABORT_INT)
dev_err(dev->class_dev, "AMCC IRQ - MASTER DMA ABORT!\n");
if (int_amcc & TARGET_ABORT_INT)
dev_err(dev->class_dev, "AMCC IRQ - TARGET DMA ABORT!\n");
if ((status & APCI3120_STATUS_EOS_INT) &&
(devpriv->mode & APCI3120_MODE_EOS_IRQ_ENA)) {
unsigned short val;
int i;
for (i = 0; i < cmd->chanlist_len; i++) {
val = inw(dev->iobase + APCI3120_AI_FIFO_REG);
comedi_buf_write_samples(s, &val, 1);
}
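/*
* Re-arm the EOS interrupt: the enable bit is already set (checked
* above), and rewriting the mode register appears to acknowledge
* and re-enable it.
*/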
devpriv->mode |= APCI3120_MODE_EOS_IRQ_ENA;
outb(devpriv->mode, dev->iobase + APCI3120_MODE_REG);
}
if (status & APCI3120_STATUS_TIMER2_INT) {
/*
* for safety...
* timer2 interrupts are not enabled in the driver
*/
apci3120_clr_timer2_interrupt(dev);
}
if (status & APCI3120_STATUS_AMCC_INT) {
/* AMCC- Clear write complete interrupt (DMA) */
outl(AINT_WT_COMPLETE, devpriv->amcc + AMCC_OP_REG_INTCSR);
/* do some data transfer */
apci3120_interrupt_dma(dev, s);
}
if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
async->events |= COMEDI_CB_EOA;
comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
static int apci3120_ai_cmd(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct apci3120_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
unsigned int divisor;
/* set default mode bits */
devpriv->mode = APCI3120_MODE_TIMER2_CLK_OSC |
APCI3120_MODE_TIMER2_AS_TIMER;
/* AMCC- Clear write complete interrupt (DMA) */
outl(AINT_WT_COMPLETE, devpriv->amcc + AMCC_OP_REG_INTCSR);
devpriv->cur_dmabuf = 0;
/* load chanlist for command scan */
apci3120_set_chanlist(dev, s, cmd->chanlist_len, cmd->chanlist);
if (cmd->start_src == TRIG_EXT)
apci3120_exttrig_enable(dev, true);
if (cmd->scan_begin_src == TRIG_TIMER) {
/*
* Timer 1 is used in MODE2 (rate generator) to set the
* start time for each scan.
*/
divisor = apci3120_ns_to_timer(dev, 1, cmd->scan_begin_arg,
cmd->flags);
apci3120_timer_set_mode(dev, 1, APCI3120_TIMER_MODE2);
apci3120_timer_write(dev, 1, divisor);
}
/*
* Timer 0 is used in MODE2 (rate generator) to set the conversion
* time for each acquisition.
*/
divisor = apci3120_ns_to_timer(dev, 0, cmd->convert_arg, cmd->flags);
apci3120_timer_set_mode(dev, 0, APCI3120_TIMER_MODE2);
apci3120_timer_write(dev, 0, divisor);
if (devpriv->use_dma)
apci3120_setup_dma(dev, s);
else
devpriv->mode |= APCI3120_MODE_EOS_IRQ_ENA;
/* set mode to enable acquisition */
outb(devpriv->mode, dev->iobase + APCI3120_MODE_REG);
if (cmd->scan_begin_src == TRIG_TIMER)
apci3120_timer_enable(dev, 1, true);
apci3120_timer_enable(dev, 0, true);
return 0;
}
static int apci3120_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
unsigned int arg;
int err = 0;
/* Step 1 : check if triggers are trivially valid */
err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
err |= comedi_check_trigger_src(&cmd->scan_begin_src,
TRIG_TIMER | TRIG_FOLLOW);
err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
if (err)
return 1;
/* Step 2a : make sure trigger sources are unique */
err |= comedi_check_trigger_is_unique(cmd->start_src);
err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
err |= comedi_check_trigger_is_unique(cmd->stop_src);
/* Step 2b : and mutually compatible */
if (err)
return 2;
/* Step 3: check if arguments are trivially valid */
err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
if (cmd->scan_begin_src == TRIG_TIMER) { /* Test Delay timing */
err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
100000);
}
/* minimum conversion time per sample is 10us */
err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 10000);
err |= comedi_check_trigger_arg_min(&cmd->chanlist_len, 1);
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
if (cmd->stop_src == TRIG_COUNT)
err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
else /* TRIG_NONE */
err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
if (err)
return 3;
/* Step 4: fix up any arguments */
if (cmd->scan_begin_src == TRIG_TIMER) {
/* scan begin must be larger than the scan time */
arg = cmd->convert_arg * cmd->scan_end_arg;
err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, arg);
}
if (err)
return 4;
/* Step 5: check channel list if it exists */
return 0;
}
static int apci3120_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct apci3120_private *devpriv = dev->private;
/* Add-On - disable DMA */
outw(0, devpriv->addon + 4);
/* Add-On - disable bus master */
apci3120_addon_write(dev, 0, AMCC_OP_REG_AGCSTS);
/* AMCC - disable bus master */
outl(0, devpriv->amcc + AMCC_OP_REG_MCSR);
/* disable all counters, ext trigger, and reset scan */
devpriv->ctrl = 0;
outw(devpriv->ctrl, dev->iobase + APCI3120_CTRL_REG);
/* DISABLE_ALL_INTERRUPT */
devpriv->mode = 0;
outb(devpriv->mode, dev->iobase + APCI3120_MODE_REG);
inw(dev->iobase + APCI3120_STATUS_REG);
devpriv->cur_dmabuf = 0;
return 0;
}
static int apci3120_ai_eoc(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context)
{
unsigned int status;
status = inw(dev->iobase + APCI3120_STATUS_REG);
if ((status & APCI3120_STATUS_EOC_INT) == 0)
return 0;
return -EBUSY;
}
static int apci3120_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct apci3120_private *devpriv = dev->private;
unsigned int divisor;
int ret;
int i;
/* set mode for A/D conversions by software trigger with timer 0 */
devpriv->mode = APCI3120_MODE_TIMER2_CLK_OSC |
APCI3120_MODE_TIMER2_AS_TIMER;
outb(devpriv->mode, dev->iobase + APCI3120_MODE_REG);
/* load chanlist for single channel scan */
apci3120_set_chanlist(dev, s, 1, &insn->chanspec);
/*
* Timer 0 is used in MODE4 (software triggered strobe) to set the
* conversion time for each acquisition. Each conversion is triggered
* when the divisor is written to the timer. The conversion is done
* when the EOC bit in the status register is '0'.
*/
apci3120_timer_set_mode(dev, 0, APCI3120_TIMER_MODE4);
apci3120_timer_enable(dev, 0, true);
/* fixed conversion time of 10 us */
divisor = apci3120_ns_to_timer(dev, 0, 10000, CMDF_ROUND_NEAREST);
for (i = 0; i < insn->n; i++) {
/* trigger conversion */
apci3120_timer_write(dev, 0, divisor);
ret = comedi_timeout(dev, s, insn, apci3120_ai_eoc, 0);
if (ret)
return ret;
data[i] = inw(dev->iobase + APCI3120_AI_FIFO_REG);
}
return insn->n;
}
static int apci3120_ao_ready(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context)
{
unsigned int status;
status = inw(dev->iobase + APCI3120_STATUS_REG);
if (status & APCI3120_STATUS_DA_READY)
return 0;
return -EBUSY;
}
static int apci3120_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
int i;
for (i = 0; i < insn->n; i++) {
unsigned int val = data[i];
int ret;
ret = comedi_timeout(dev, s, insn, apci3120_ao_ready, 0);
if (ret)
return ret;
outw(APCI3120_AO_MUX(chan) | APCI3120_AO_DATA(val),
dev->iobase + APCI3120_AO_REG(chan));
s->readback[chan] = val;
}
return insn->n;
}
static int apci3120_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int status;
status = inw(dev->iobase + APCI3120_STATUS_REG);
data[1] = APCI3120_STATUS_TO_DI_BITS(status);
return insn->n;
}
static int apci3120_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct apci3120_private *devpriv = dev->private;
if (comedi_dio_update_state(s, data)) {
devpriv->do_bits = s->state;
outb(APCI3120_CTR0_DO_BITS(devpriv->do_bits),
dev->iobase + APCI3120_CTR0_REG);
}
data[1] = s->state;
return insn->n;
}
static int apci3120_timer_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct apci3120_private *devpriv = dev->private;
unsigned int divisor;
unsigned int status;
unsigned int mode;
unsigned int timer_mode;
switch (data[0]) {
case INSN_CONFIG_ARM:
apci3120_clr_timer2_interrupt(dev);
divisor = apci3120_ns_to_timer(dev, 2, data[1],
CMDF_ROUND_DOWN);
apci3120_timer_write(dev, 2, divisor);
apci3120_timer_enable(dev, 2, true);
break;
case INSN_CONFIG_DISARM:
apci3120_timer_enable(dev, 2, false);
apci3120_clr_timer2_interrupt(dev);
break;
case INSN_CONFIG_GET_COUNTER_STATUS:
data[1] = 0;
data[2] = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING |
COMEDI_COUNTER_TERMINAL_COUNT;
if (devpriv->ctrl & APCI3120_CTRL_GATE(2)) {
data[1] |= COMEDI_COUNTER_ARMED;
data[1] |= COMEDI_COUNTER_COUNTING;
}
status = inw(dev->iobase + APCI3120_STATUS_REG);
if (status & APCI3120_STATUS_TIMER2_INT) {
data[1] &= ~COMEDI_COUNTER_COUNTING;
data[1] |= COMEDI_COUNTER_TERMINAL_COUNT;
}
break;
case INSN_CONFIG_SET_COUNTER_MODE:
switch (data[1]) {
case I8254_MODE0:
mode = APCI3120_MODE_TIMER2_AS_COUNTER;
timer_mode = APCI3120_TIMER_MODE0;
break;
case I8254_MODE2:
mode = APCI3120_MODE_TIMER2_AS_TIMER;
timer_mode = APCI3120_TIMER_MODE2;
break;
case I8254_MODE4:
mode = APCI3120_MODE_TIMER2_AS_TIMER;
timer_mode = APCI3120_TIMER_MODE4;
break;
case I8254_MODE5:
mode = APCI3120_MODE_TIMER2_AS_WDOG;
timer_mode = APCI3120_TIMER_MODE5;
break;
default:
return -EINVAL;
}
apci3120_timer_enable(dev, 2, false);
apci3120_clr_timer2_interrupt(dev);
apci3120_timer_set_mode(dev, 2, timer_mode);
devpriv->mode &= ~APCI3120_MODE_TIMER2_AS_MASK;
devpriv->mode |= mode;
outb(devpriv->mode, dev->iobase + APCI3120_MODE_REG);
break;
default:
return -EINVAL;
}
return insn->n;
}
static int apci3120_timer_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
int i;
for (i = 0; i < insn->n; i++)
data[i] = apci3120_timer_read(dev, 2);
return insn->n;
}
static void apci3120_dma_alloc(struct comedi_device *dev)
{
struct apci3120_private *devpriv = dev->private;
struct apci3120_dmabuf *dmabuf;
int order;
int i;
for (i = 0; i < 2; i++) {
dmabuf = &devpriv->dmabuf[i];
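		/*
		 * Try progressively smaller coherent buffers, starting
		 * at four pages (order 2) and falling back to a single
		 * page (order 0).
		 */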
for (order = 2; order >= 0; order--) {
dmabuf->virt = dma_alloc_coherent(dev->hw_dev,
PAGE_SIZE << order,
&dmabuf->hw,
GFP_KERNEL);
if (dmabuf->virt)
break;
}
if (!dmabuf->virt)
break;
dmabuf->size = PAGE_SIZE << order;
if (i == 0)
devpriv->use_dma = 1;
if (i == 1)
devpriv->use_double_buffer = 1;
}
}
static void apci3120_dma_free(struct comedi_device *dev)
{
struct apci3120_private *devpriv = dev->private;
struct apci3120_dmabuf *dmabuf;
int i;
if (!devpriv)
return;
for (i = 0; i < 2; i++) {
dmabuf = &devpriv->dmabuf[i];
if (dmabuf->virt) {
dma_free_coherent(dev->hw_dev, dmabuf->size,
dmabuf->virt, dmabuf->hw);
}
}
}
static void apci3120_reset(struct comedi_device *dev)
{
/* disable all interrupt sources */
outb(0, dev->iobase + APCI3120_MODE_REG);
/* disable all counters, ext trigger, and reset scan */
outw(0, dev->iobase + APCI3120_CTRL_REG);
/* clear interrupt status */
inw(dev->iobase + APCI3120_STATUS_REG);
}
static int apci3120_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct apci3120_board *board = NULL;
struct apci3120_private *devpriv;
struct comedi_subdevice *s;
unsigned int status;
int ret;
if (context < ARRAY_SIZE(apci3120_boardtypes))
board = &apci3120_boardtypes[context];
if (!board)
return -ENODEV;
dev->board_ptr = board;
dev->board_name = board->name;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
ret = comedi_pci_enable(dev);
if (ret)
return ret;
pci_set_master(pcidev);
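	/* BAR 0: AMCC controller, BAR 1: main registers, BAR 2: add-on */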
dev->iobase = pci_resource_start(pcidev, 1);
devpriv->amcc = pci_resource_start(pcidev, 0);
devpriv->addon = pci_resource_start(pcidev, 2);
apci3120_reset(dev);
if (pcidev->irq > 0) {
ret = request_irq(pcidev->irq, apci3120_interrupt, IRQF_SHARED,
dev->board_name, dev);
if (ret == 0) {
dev->irq = pcidev->irq;
apci3120_dma_alloc(dev);
}
}
status = inw(dev->iobase + APCI3120_STATUS_REG);
if (APCI3120_STATUS_TO_VERSION(status) == APCI3120_REVB ||
context == BOARD_APCI3001)
devpriv->osc_base = APCI3120_REVB_OSC_BASE;
else
devpriv->osc_base = APCI3120_REVA_OSC_BASE;
ret = comedi_alloc_subdevices(dev, 5);
if (ret)
return ret;
/* Analog Input subdevice */
s = &dev->subdevices[0];
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND | SDF_DIFF;
s->n_chan = 16;
s->maxdata = board->ai_is_16bit ? 0xffff : 0x0fff;
s->range_table = &apci3120_ai_range;
s->insn_read = apci3120_ai_insn_read;
if (dev->irq) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
s->len_chanlist = s->n_chan;
s->do_cmdtest = apci3120_ai_cmdtest;
s->do_cmd = apci3120_ai_cmd;
s->cancel = apci3120_cancel;
}
/* Analog Output subdevice */
s = &dev->subdevices[1];
if (board->has_ao) {
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
s->n_chan = 8;
s->maxdata = 0x3fff;
s->range_table = &range_bipolar10;
s->insn_write = apci3120_ao_insn_write;
ret = comedi_alloc_subdev_readback(s);
if (ret)
return ret;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
/* Digital Input subdevice */
s = &dev->subdevices[2];
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = 4;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = apci3120_di_insn_bits;
/* Digital Output subdevice */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 4;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = apci3120_do_insn_bits;
/* Timer subdevice */
s = &dev->subdevices[4];
s->type = COMEDI_SUBD_TIMER;
s->subdev_flags = SDF_READABLE;
s->n_chan = 1;
s->maxdata = 0x00ffffff;
s->insn_config = apci3120_timer_insn_config;
s->insn_read = apci3120_timer_insn_read;
return 0;
}
static void apci3120_detach(struct comedi_device *dev)
{
comedi_pci_detach(dev);
apci3120_dma_free(dev);
}
static struct comedi_driver apci3120_driver = {
.driver_name = "addi_apci_3120",
.module = THIS_MODULE,
.auto_attach = apci3120_auto_attach,
.detach = apci3120_detach,
};
static int apci3120_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
return comedi_pci_auto_config(dev, &apci3120_driver, id->driver_data);
}
static const struct pci_device_id apci3120_pci_table[] = {
{ PCI_VDEVICE(AMCC, 0x818d), BOARD_APCI3120 },
{ PCI_VDEVICE(AMCC, 0x828d), BOARD_APCI3001 },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, apci3120_pci_table);
static struct pci_driver apci3120_pci_driver = {
.name = "addi_apci_3120",
.id_table = apci3120_pci_table,
.probe = apci3120_pci_probe,
.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(apci3120_driver, apci3120_pci_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("ADDI-DATA APCI-3120, Analog input board");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/addi_apci_3120.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* adl_pci6208.c
* Comedi driver for ADLink 6208 series cards
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <[email protected]>
*/
/*
* Driver: adl_pci6208
* Description: ADLink PCI-6208/6216 Series Multi-channel Analog Output Cards
* Devices: [ADLink] PCI-6208 (adl_pci6208), PCI-6216
* Author: nsyeow <[email protected]>
* Updated: Wed, 11 Feb 2015 11:37:18 +0000
* Status: untested
*
* Configuration Options: not applicable, uses PCI auto config
*
* All supported devices share the same PCI device ID and are treated as a
* PCI-6216 with 16 analog output channels. On a PCI-6208, the upper 8
* channels exist in registers, but don't go to DAC chips.
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/comedi/comedi_pci.h>
/*
* PCI-6208/6216-GL register map
*/
#define PCI6208_AO_CONTROL(x) (0x00 + (2 * (x)))
#define PCI6208_AO_STATUS 0x00
#define PCI6208_AO_STATUS_DATA_SEND BIT(0)
#define PCI6208_DIO 0x40
#define PCI6208_DIO_DO_MASK (0x0f)
#define PCI6208_DIO_DO_SHIFT (0)
#define PCI6208_DIO_DI_MASK (0xf0)
#define PCI6208_DIO_DI_SHIFT (4)
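/*
 * comedi_timeout() callback: returns 0 once the DAC has finished the
 * previous transfer, or -EBUSY to keep polling until the timeout.
 */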
static int pci6208_ao_eoc(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context)
{
unsigned int status;
status = inw(dev->iobase + PCI6208_AO_STATUS);
if ((status & PCI6208_AO_STATUS_DATA_SEND) == 0)
return 0;
return -EBUSY;
}
static int pci6208_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
int ret;
int i;
for (i = 0; i < insn->n; i++) {
unsigned int val = data[i];
		/* D/A transfer time is 2.2 us */
ret = comedi_timeout(dev, s, insn, pci6208_ao_eoc, 0);
if (ret)
return ret;
/* the hardware expects two's complement values */
outw(comedi_offset_munge(s, val),
dev->iobase + PCI6208_AO_CONTROL(chan));
s->readback[chan] = val;
}
return insn->n;
}
static int pci6208_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int val;
val = inw(dev->iobase + PCI6208_DIO);
val = (val & PCI6208_DIO_DI_MASK) >> PCI6208_DIO_DI_SHIFT;
data[1] = val;
return insn->n;
}
static int pci6208_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
if (comedi_dio_update_state(s, data))
outw(s->state, dev->iobase + PCI6208_DIO);
data[1] = s->state;
return insn->n;
}
static int pci6208_auto_attach(struct comedi_device *dev,
unsigned long context_unused)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct comedi_subdevice *s;
unsigned int val;
int ret;
ret = comedi_pci_enable(dev);
if (ret)
return ret;
dev->iobase = pci_resource_start(pcidev, 2);
ret = comedi_alloc_subdevices(dev, 3);
if (ret)
return ret;
s = &dev->subdevices[0];
/* analog output subdevice */
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 16; /* Only 8 usable on PCI-6208 */
s->maxdata = 0xffff;
s->range_table = &range_bipolar10;
s->insn_write = pci6208_ao_insn_write;
ret = comedi_alloc_subdev_readback(s);
if (ret)
return ret;
s = &dev->subdevices[1];
/* digital input subdevice */
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = 4;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = pci6208_di_insn_bits;
s = &dev->subdevices[2];
/* digital output subdevice */
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 4;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = pci6208_do_insn_bits;
/*
	 * Read back the current state of the digital outputs
	 * and save it as the initial state for the subdevice.
*/
val = inw(dev->iobase + PCI6208_DIO);
val = (val & PCI6208_DIO_DO_MASK) >> PCI6208_DIO_DO_SHIFT;
s->state = val;
return 0;
}
static struct comedi_driver adl_pci6208_driver = {
.driver_name = "adl_pci6208",
.module = THIS_MODULE,
.auto_attach = pci6208_auto_attach,
.detach = comedi_pci_detach,
};
static int adl_pci6208_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
return comedi_pci_auto_config(dev, &adl_pci6208_driver,
id->driver_data);
}
static const struct pci_device_id adl_pci6208_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADLINK, 0x6208) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
0x9999, 0x6208) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, adl_pci6208_pci_table);
static struct pci_driver adl_pci6208_pci_driver = {
.name = "adl_pci6208",
.id_table = adl_pci6208_pci_table,
.probe = adl_pci6208_pci_probe,
.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(adl_pci6208_driver, adl_pci6208_pci_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi driver for ADLink 6208 series cards");
MODULE_LICENSE("GPL");
| linux-master | drivers/comedi/drivers/adl_pci6208.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/tests/ni_routes_test.c
* Unit tests for NI routes (ni_routes.c module).
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include "../ni_stc.h"
#include "../ni_routes.h"
#include "unittest.h"
#define RVI(table, src, dest) ((table)[(dest) * NI_NUM_NAMES + (src)])
#define O(x) ((x) + NI_NAMES_BASE)
#define B(x) ((x) - NI_NAMES_BASE)
#define V(x) ((x) | 0x80)
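/*
 * Helper macros for the fake data below: RVI() indexes the flattened
 * route-values table as table[dest][src]; O() maps a small test index
 * into comedi's global NI signal-name space; B() maps a global name
 * back to its index; V() sets bit 7 to tag a register value as valid
 * in the RV table below.
 */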
/* *** BEGIN fake board data *** */
static const char *pci_6070e = "pci-6070e";
static const char *pci_6220 = "pci-6220";
static const char *pci_fake = "pci-fake";
static const char *ni_eseries = "ni_eseries";
static const char *ni_mseries = "ni_mseries";
static struct ni_board_struct board = {
.name = NULL,
};
static struct ni_private private = {
.is_m_series = 0,
};
static const int bad_dest = O(8), dest0 = O(0), desti = O(5);
static const int ith_dest_index = 2;
static const int no_val_dest = O(7), no_val_index = 4;
/* These have to be #defines so they can be used in the initializers below */
#define rgout0_src0 (O(100))
#define rgout0_src1 (O(101))
#define brd0_src0 (O(110))
#define brd0_src1 (O(111))
#define brd1_src0 (O(120))
#define brd1_src1 (O(121))
#define brd2_src0 (O(130))
#define brd2_src1 (O(131))
#define brd3_src0 (O(140))
#define brd3_src1 (O(141))
/* I1 and I2 should not call O(...). Mostly here to shut checkpatch.pl up */
#define I1(x1) \
((int[]){ \
(x1), 0 \
})
#define I2(x1, x2) \
((int[]){ \
(x1), (x2), 0 \
})
#define I3(x1, x2, x3) \
((int[]){ \
(x1), (x2), (x3), 0 \
})
/* O9 is built to call O(...) for each arg */
#define O9(x1, x2, x3, x4, x5, x6, x7, x8, x9) \
((int[]){ \
O(x1), O(x2), O(x3), O(x4), O(x5), O(x6), O(x7), O(x8), O(x9), \
0 \
})
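/*
 * For example, O9(1, 2, 3, 4, 5, 6, 7, 8, 9) expands to the
 * zero-terminated source list { O(1), O(2), ..., O(9), 0 }.
 */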
static struct ni_device_routes DR = {
.device = "testdev",
.routes = (struct ni_route_set[]){
{.dest = O(0), .src = O9(/**/1, 2, 3, 4, 5, 6, 7, 8, 9)},
{.dest = O(1), .src = O9(0, /**/2, 3, 4, 5, 6, 7, 8, 9)},
/* ith route_set */
{.dest = O(5), .src = O9(0, 1, 2, 3, 4,/**/ 6, 7, 8, 9)},
{.dest = O(6), .src = O9(0, 1, 2, 3, 4, 5,/**/ 7, 8, 9)},
/* next one will not have valid reg values */
{.dest = O(7), .src = O9(0, 1, 2, 3, 4, 5, 6,/**/ 8, 9)},
{.dest = O(9), .src = O9(0, 1, 2, 3, 4, 5, 6, 7, 8/**/)},
/* indirect routes done through muxes */
{.dest = TRIGGER_LINE(0), .src = I1(rgout0_src0)},
{.dest = TRIGGER_LINE(1), .src = I3(rgout0_src0,
brd3_src0,
brd3_src1)},
{.dest = TRIGGER_LINE(2), .src = I3(rgout0_src1,
brd2_src0,
brd2_src1)},
{.dest = TRIGGER_LINE(3), .src = I3(rgout0_src1,
brd1_src0,
brd1_src1)},
{.dest = TRIGGER_LINE(4), .src = I2(brd0_src0,
brd0_src1)},
{.dest = 0},
},
};
#undef I1
#undef I2
#undef O9
#define RV9(x1, x2, x3, x4, x5, x6, x7, x8, x9) \
[x1] = V(x1), [x2] = V(x2), [x3] = V(x3), [x4] = V(x4), \
[x5] = V(x5), [x6] = V(x6), [x7] = V(x7), [x8] = V(x8), \
[x9] = V(x9),
/* This table is indexed as RV[destination][source] */
static const u8 RV[NI_NUM_NAMES][NI_NUM_NAMES] = {
[0] = {RV9(/**/1, 2, 3, 4, 5, 6, 7, 8, 9)},
[1] = {RV9(0,/**/ 2, 3, 4, 5, 6, 7, 8, 9)},
[2] = {RV9(0, 1,/**/3, 4, 5, 6, 7, 8, 9)},
[3] = {RV9(0, 1, 2,/**/4, 5, 6, 7, 8, 9)},
[4] = {RV9(0, 1, 2, 3,/**/5, 6, 7, 8, 9)},
[5] = {RV9(0, 1, 2, 3, 4,/**/6, 7, 8, 9)},
[6] = {RV9(0, 1, 2, 3, 4, 5,/**/7, 8, 9)},
	/* [7] is intentionally left absent to test invalid routes */
[8] = {RV9(0, 1, 2, 3, 4, 5, 6, 7,/**/9)},
[9] = {RV9(0, 1, 2, 3, 4, 5, 6, 7, 8/**/)},
/* some tests for needing extra muxes */
[B(NI_RGOUT0)] = {[B(rgout0_src0)] = V(0),
[B(rgout0_src1)] = V(1)},
[B(NI_RTSI_BRD(0))] = {[B(brd0_src0)] = V(0),
[B(brd0_src1)] = V(1)},
[B(NI_RTSI_BRD(1))] = {[B(brd1_src0)] = V(0),
[B(brd1_src1)] = V(1)},
[B(NI_RTSI_BRD(2))] = {[B(brd2_src0)] = V(0),
[B(brd2_src1)] = V(1)},
[B(NI_RTSI_BRD(3))] = {[B(brd3_src0)] = V(0),
[B(brd3_src1)] = V(1)},
};
#undef RV9
/* *** END fake board data *** */
/* *** BEGIN board data initializers *** */
static void init_private(void)
{
memset(&private, 0, sizeof(struct ni_private));
}
static void init_pci_6070e(void)
{
board.name = pci_6070e;
init_private();
private.is_m_series = 0;
}
static void init_pci_6220(void)
{
board.name = pci_6220;
init_private();
private.is_m_series = 1;
}
static void init_pci_fake(void)
{
board.name = pci_fake;
init_private();
private.routing_tables.route_values = &RV[0][0];
private.routing_tables.valid_routes = &DR;
}
/* *** END board data initializers *** */
/* Tests that route_sets are in order of the signal destination. */
static bool route_set_dests_in_order(const struct ni_device_routes *devroutes)
{
int i;
int last = NI_NAMES_BASE - 1;
for (i = 0; i < devroutes->n_route_sets; ++i) {
if (last >= devroutes->routes[i].dest)
return false;
last = devroutes->routes[i].dest;
}
return true;
}
/* Tests that all route_set->src are in order of the signal source. */
static bool route_set_sources_in_order(const struct ni_device_routes *devroutes)
{
int i;
for (i = 0; i < devroutes->n_route_sets; ++i) {
int j;
int last = NI_NAMES_BASE - 1;
for (j = 0; j < devroutes->routes[i].n_src; ++j) {
if (last >= devroutes->routes[i].src[j])
return false;
last = devroutes->routes[i].src[j];
}
}
return true;
}
static void test_ni_assign_device_routes(void)
{
const struct ni_device_routes *devroutes;
const u8 *table, *oldtable;
init_pci_6070e();
ni_assign_device_routes(ni_eseries, pci_6070e, NULL,
&private.routing_tables);
devroutes = private.routing_tables.valid_routes;
table = private.routing_tables.route_values;
unittest(strncmp(devroutes->device, pci_6070e, 10) == 0,
"find device pci-6070e\n");
unittest(devroutes->n_route_sets == 37,
"number of pci-6070e route_sets == 37\n");
unittest(devroutes->routes->dest == NI_PFI(0),
"first pci-6070e route_set is for NI_PFI(0)\n");
unittest(devroutes->routes->n_src == 1,
"first pci-6070e route_set length == 1\n");
unittest(devroutes->routes->src[0] == NI_AI_StartTrigger,
"first pci-6070e route_set src. == NI_AI_StartTrigger\n");
unittest(devroutes->routes[10].dest == TRIGGER_LINE(0),
"10th pci-6070e route_set is for TRIGGER_LINE(0)\n");
unittest(devroutes->routes[10].n_src == 10,
"10th pci-6070e route_set length == 10\n");
unittest(devroutes->routes[10].src[0] == NI_CtrSource(0),
"10th pci-6070e route_set src. == NI_CtrSource(0)\n");
unittest(route_set_dests_in_order(devroutes),
"all pci-6070e route_sets in order of signal destination\n");
unittest(route_set_sources_in_order(devroutes),
"all pci-6070e route_set->src's in order of signal source\n");
unittest(RVI(table, B(PXI_Star), B(NI_AI_SampleClock)) == V(17) &&
RVI(table, B(NI_10MHzRefClock), B(TRIGGER_LINE(0))) == 0 &&
RVI(table, B(NI_AI_ConvertClock), B(NI_PFI(0))) == 0 &&
RVI(table, B(NI_AI_ConvertClock), B(NI_PFI(2))) == V(NI_PFI_OUTPUT_AI_CONVERT),
"pci-6070e finds e-series route_values table\n");
oldtable = table;
init_pci_6220();
ni_assign_device_routes(ni_mseries, pci_6220, NULL,
&private.routing_tables);
devroutes = private.routing_tables.valid_routes;
table = private.routing_tables.route_values;
unittest(strncmp(devroutes->device, pci_6220, 10) == 0,
"find device pci-6220\n");
unittest(oldtable != table, "pci-6220 find other route_values table\n");
unittest(RVI(table, B(PXI_Star), B(NI_AI_SampleClock)) == V(20) &&
RVI(table, B(NI_10MHzRefClock), B(TRIGGER_LINE(0))) == V(12) &&
RVI(table, B(NI_AI_ConvertClock), B(NI_PFI(0))) == V(3) &&
RVI(table, B(NI_AI_ConvertClock), B(NI_PFI(2))) == V(3),
"pci-6220 finds m-series route_values table\n");
}
static void test_ni_sort_device_routes(void)
{
/* We begin by sorting the device routes for use in later tests */
ni_sort_device_routes(&DR);
/* now we test that sorting. */
unittest(route_set_dests_in_order(&DR),
"all route_sets of fake data in order of sig. destination\n");
unittest(route_set_sources_in_order(&DR),
"all route_set->src's of fake data in order of sig. source\n");
}
static void test_ni_find_route_set(void)
{
unittest(!ni_find_route_set(bad_dest, &DR),
"check for nonexistent route_set\n");
unittest(ni_find_route_set(dest0, &DR) == &DR.routes[0],
"find first route_set\n");
unittest(ni_find_route_set(desti, &DR) == &DR.routes[ith_dest_index],
"find ith route_set\n");
unittest(ni_find_route_set(no_val_dest, &DR) ==
&DR.routes[no_val_index],
"find no_val route_set in spite of missing values\n");
unittest(ni_find_route_set(DR.routes[DR.n_route_sets - 1].dest, &DR) ==
&DR.routes[DR.n_route_sets - 1],
"find last route_set\n");
}
static void test_ni_route_set_has_source(void)
{
unittest(!ni_route_set_has_source(&DR.routes[0], O(0)),
"check for bad source\n");
unittest(ni_route_set_has_source(&DR.routes[0], O(1)),
"find first source\n");
unittest(ni_route_set_has_source(&DR.routes[0], O(5)),
"find fifth source\n");
unittest(ni_route_set_has_source(&DR.routes[0], O(9)),
"find last source\n");
}
static void test_ni_route_to_register(void)
{
const struct ni_route_tables *T = &private.routing_tables;
init_pci_fake();
unittest(ni_route_to_register(O(0), O(0), T) < 0,
"check for bad route 0-->0\n");
unittest(ni_route_to_register(O(1), O(0), T) == 1,
"validate first destination\n");
unittest(ni_route_to_register(O(6), O(5), T) == 6,
"validate middle destination\n");
unittest(ni_route_to_register(O(8), O(9), T) == 8,
"validate last destination\n");
/* choice of trigger line in the following is somewhat random */
unittest(ni_route_to_register(rgout0_src0, TRIGGER_LINE(0), T) == 0,
"validate indirect route through rgout0 to TRIGGER_LINE(0)\n");
unittest(ni_route_to_register(rgout0_src0, TRIGGER_LINE(1), T) == 0,
"validate indirect route through rgout0 to TRIGGER_LINE(1)\n");
unittest(ni_route_to_register(rgout0_src1, TRIGGER_LINE(2), T) == 1,
"validate indirect route through rgout0 to TRIGGER_LINE(2)\n");
unittest(ni_route_to_register(rgout0_src1, TRIGGER_LINE(3), T) == 1,
"validate indirect route through rgout0 to TRIGGER_LINE(3)\n");
unittest(ni_route_to_register(brd0_src0, TRIGGER_LINE(4), T) ==
BIT(6),
"validate indirect route through brd0 to TRIGGER_LINE(4)\n");
unittest(ni_route_to_register(brd0_src1, TRIGGER_LINE(4), T) ==
BIT(6),
"validate indirect route through brd0 to TRIGGER_LINE(4)\n");
unittest(ni_route_to_register(brd1_src0, TRIGGER_LINE(3), T) ==
BIT(6),
"validate indirect route through brd1 to TRIGGER_LINE(3)\n");
unittest(ni_route_to_register(brd1_src1, TRIGGER_LINE(3), T) ==
BIT(6),
"validate indirect route through brd1 to TRIGGER_LINE(3)\n");
unittest(ni_route_to_register(brd2_src0, TRIGGER_LINE(2), T) ==
BIT(6),
"validate indirect route through brd2 to TRIGGER_LINE(2)\n");
unittest(ni_route_to_register(brd2_src1, TRIGGER_LINE(2), T) ==
BIT(6),
"validate indirect route through brd2 to TRIGGER_LINE(2)\n");
unittest(ni_route_to_register(brd3_src0, TRIGGER_LINE(1), T) ==
BIT(6),
"validate indirect route through brd3 to TRIGGER_LINE(1)\n");
unittest(ni_route_to_register(brd3_src1, TRIGGER_LINE(1), T) ==
BIT(6),
"validate indirect route through brd3 to TRIGGER_LINE(1)\n");
}
static void test_ni_lookup_route_register(void)
{
const struct ni_route_tables *T = &private.routing_tables;
init_pci_fake();
unittest(ni_lookup_route_register(O(0), O(0), T) == -EINVAL,
"check for bad route 0-->0\n");
unittest(ni_lookup_route_register(O(1), O(0), T) == 1,
"validate first destination\n");
unittest(ni_lookup_route_register(O(6), O(5), T) == 6,
"validate middle destination\n");
unittest(ni_lookup_route_register(O(8), O(9), T) == 8,
"validate last destination\n");
unittest(ni_lookup_route_register(O(10), O(9), T) == -EINVAL,
"lookup invalid destination\n");
unittest(ni_lookup_route_register(rgout0_src0, TRIGGER_LINE(0), T) ==
-EINVAL,
"rgout0_src0: no direct lookup of indirect route\n");
unittest(ni_lookup_route_register(rgout0_src0, NI_RGOUT0, T) == 0,
"rgout0_src0: lookup indirect route register\n");
unittest(ni_lookup_route_register(rgout0_src1, TRIGGER_LINE(2), T) ==
-EINVAL,
"rgout0_src1: no direct lookup of indirect route\n");
unittest(ni_lookup_route_register(rgout0_src1, NI_RGOUT0, T) == 1,
"rgout0_src1: lookup indirect route register\n");
unittest(ni_lookup_route_register(brd0_src0, TRIGGER_LINE(4), T) ==
-EINVAL,
"brd0_src0: no direct lookup of indirect route\n");
unittest(ni_lookup_route_register(brd0_src0, NI_RTSI_BRD(0), T) == 0,
"brd0_src0: lookup indirect route register\n");
unittest(ni_lookup_route_register(brd0_src1, TRIGGER_LINE(4), T) ==
-EINVAL,
"brd0_src1: no direct lookup of indirect route\n");
unittest(ni_lookup_route_register(brd0_src1, NI_RTSI_BRD(0), T) == 1,
"brd0_src1: lookup indirect route register\n");
}
static void test_route_is_valid(void)
{
const struct ni_route_tables *T = &private.routing_tables;
init_pci_fake();
unittest(!route_is_valid(O(0), O(0), T),
"check for bad route 0-->0\n");
unittest(route_is_valid(O(0), O(1), T),
"validate first destination\n");
unittest(route_is_valid(O(5), O(6), T),
"validate middle destination\n");
unittest(route_is_valid(O(8), O(9), T),
"validate last destination\n");
}
static void test_ni_is_cmd_dest(void)
{
init_pci_fake();
unittest(ni_is_cmd_dest(NI_AI_SampleClock),
"check that AI/SampleClock is cmd destination\n");
unittest(ni_is_cmd_dest(NI_AI_StartTrigger),
"check that AI/StartTrigger is cmd destination\n");
unittest(ni_is_cmd_dest(NI_AI_ConvertClock),
"check that AI/ConvertClock is cmd destination\n");
unittest(ni_is_cmd_dest(NI_AO_SampleClock),
"check that AO/SampleClock is cmd destination\n");
unittest(ni_is_cmd_dest(NI_DO_SampleClock),
"check that DO/SampleClock is cmd destination\n");
unittest(!ni_is_cmd_dest(NI_AO_SampleClockTimebase),
"check that AO/SampleClockTimebase _not_ cmd destination\n");
}
static void test_channel_is_pfi(void)
{
init_pci_fake();
unittest(channel_is_pfi(NI_PFI(0)), "check First pfi channel\n");
unittest(channel_is_pfi(NI_PFI(10)), "check 10th pfi channel\n");
unittest(channel_is_pfi(NI_PFI(-1)), "check last pfi channel\n");
unittest(!channel_is_pfi(NI_PFI(-1) + 1),
"check first non pfi channel\n");
}
static void test_channel_is_rtsi(void)
{
init_pci_fake();
unittest(channel_is_rtsi(TRIGGER_LINE(0)),
"check First rtsi channel\n");
unittest(channel_is_rtsi(TRIGGER_LINE(3)),
"check 3rd rtsi channel\n");
unittest(channel_is_rtsi(TRIGGER_LINE(-1)),
"check last rtsi channel\n");
unittest(!channel_is_rtsi(TRIGGER_LINE(-1) + 1),
"check first non rtsi channel\n");
}
static void test_ni_count_valid_routes(void)
{
const struct ni_route_tables *T = &private.routing_tables;
init_pci_fake();
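	/*
	 * The fake tables define 45 direct routes with register values
	 * (5 destinations of 9 sources each; destination O(7) has none)
	 * plus 12 indirect routes through the RGOUT0/RTSI_BRD muxes.
	 */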
unittest(ni_count_valid_routes(T) == 57, "count all valid routes\n");
}
static void test_ni_get_valid_routes(void)
{
const struct ni_route_tables *T = &private.routing_tables;
unsigned int pair_data[2];
init_pci_fake();
unittest(ni_get_valid_routes(T, 0, NULL) == 57,
"count all valid routes through ni_get_valid_routes\n");
unittest(ni_get_valid_routes(T, 1, pair_data) == 1,
"copied first valid route from ni_get_valid_routes\n");
unittest(pair_data[0] == O(1),
"source of first valid pair from ni_get_valid_routes\n");
unittest(pair_data[1] == O(0),
"destination of first valid pair from ni_get_valid_routes\n");
}
static void test_ni_find_route_source(void)
{
const struct ni_route_tables *T = &private.routing_tables;
init_pci_fake();
unittest(ni_find_route_source(4, O(4), T) == -EINVAL,
"check for bad source 4-->4\n");
unittest(ni_find_route_source(0, O(1), T) == O(0),
"find first source\n");
unittest(ni_find_route_source(4, O(6), T) == O(4),
"find middle source\n");
unittest(ni_find_route_source(9, O(8), T) == O(9),
"find last source");
unittest(ni_find_route_source(8, O(9), T) == O(8),
"find invalid source (without checking device routes)\n");
}
static void test_route_register_is_valid(void)
{
const struct ni_route_tables *T = &private.routing_tables;
init_pci_fake();
unittest(!route_register_is_valid(4, O(4), T),
"check for bad source 4-->4\n");
unittest(route_register_is_valid(0, O(1), T),
"find first source\n");
unittest(route_register_is_valid(4, O(6), T),
"find middle source\n");
unittest(route_register_is_valid(9, O(8), T),
"find last source");
}
static void test_ni_check_trigger_arg(void)
{
const struct ni_route_tables *T = &private.routing_tables;
init_pci_fake();
unittest(ni_check_trigger_arg(0, O(0), T) == -EINVAL,
"check bad direct trigger arg for first reg->dest\n");
unittest(ni_check_trigger_arg(0, O(1), T) == 0,
"check direct trigger arg for first reg->dest\n");
unittest(ni_check_trigger_arg(4, O(6), T) == 0,
"check direct trigger arg for middle reg->dest\n");
unittest(ni_check_trigger_arg(9, O(8), T) == 0,
"check direct trigger arg for last reg->dest\n");
unittest(ni_check_trigger_arg_roffs(-1, O(0), T, 1) == -EINVAL,
"check bad direct trigger arg for first reg->dest w/offs\n");
unittest(ni_check_trigger_arg_roffs(0, O(1), T, 0) == 0,
"check direct trigger arg for first reg->dest w/offs\n");
unittest(ni_check_trigger_arg_roffs(3, O(6), T, 1) == 0,
"check direct trigger arg for middle reg->dest w/offs\n");
unittest(ni_check_trigger_arg_roffs(7, O(8), T, 2) == 0,
"check direct trigger arg for last reg->dest w/offs\n");
unittest(ni_check_trigger_arg(O(0), O(0), T) == -EINVAL,
"check bad trigger arg for first src->dest\n");
unittest(ni_check_trigger_arg(O(0), O(1), T) == 0,
"check trigger arg for first src->dest\n");
unittest(ni_check_trigger_arg(O(5), O(6), T) == 0,
"check trigger arg for middle src->dest\n");
unittest(ni_check_trigger_arg(O(8), O(9), T) == 0,
"check trigger arg for last src->dest\n");
}
static void test_ni_get_reg_value(void)
{
const struct ni_route_tables *T = &private.routing_tables;
init_pci_fake();
unittest(ni_get_reg_value(0, O(0), T) == -1,
"check bad direct trigger arg for first reg->dest\n");
unittest(ni_get_reg_value(0, O(1), T) == 0,
"check direct trigger arg for first reg->dest\n");
unittest(ni_get_reg_value(4, O(6), T) == 4,
"check direct trigger arg for middle reg->dest\n");
unittest(ni_get_reg_value(9, O(8), T) == 9,
"check direct trigger arg for last reg->dest\n");
unittest(ni_get_reg_value_roffs(-1, O(0), T, 1) == -1,
"check bad direct trigger arg for first reg->dest w/offs\n");
unittest(ni_get_reg_value_roffs(0, O(1), T, 0) == 0,
"check direct trigger arg for first reg->dest w/offs\n");
unittest(ni_get_reg_value_roffs(3, O(6), T, 1) == 4,
"check direct trigger arg for middle reg->dest w/offs\n");
unittest(ni_get_reg_value_roffs(7, O(8), T, 2) == 9,
"check direct trigger arg for last reg->dest w/offs\n");
unittest(ni_get_reg_value(O(0), O(0), T) == -1,
"check bad trigger arg for first src->dest\n");
unittest(ni_get_reg_value(O(0), O(1), T) == 0,
"check trigger arg for first src->dest\n");
unittest(ni_get_reg_value(O(5), O(6), T) == 5,
"check trigger arg for middle src->dest\n");
unittest(ni_get_reg_value(O(8), O(9), T) == 8,
"check trigger arg for last src->dest\n");
}
/* **** BEGIN simple module entry/exit functions **** */
static int __init ni_routes_unittest(void)
{
static const unittest_fptr unit_tests[] = {
test_ni_assign_device_routes,
test_ni_sort_device_routes,
test_ni_find_route_set,
test_ni_route_set_has_source,
test_ni_route_to_register,
test_ni_lookup_route_register,
test_route_is_valid,
test_ni_is_cmd_dest,
test_channel_is_pfi,
test_channel_is_rtsi,
test_ni_count_valid_routes,
test_ni_get_valid_routes,
test_ni_find_route_source,
test_route_register_is_valid,
test_ni_check_trigger_arg,
test_ni_get_reg_value,
NULL,
};
exec_unittests("ni_routes", unit_tests);
return 0;
}
static void __exit ni_routes_unittest_exit(void) { }
module_init(ni_routes_unittest);
module_exit(ni_routes_unittest_exit);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi unit-tests for ni_routes module");
MODULE_LICENSE("GPL");
/* **** END simple module entry/exit functions **** */
| linux-master | drivers/comedi/drivers/tests/ni_routes_test.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/tests/comedi_example_test.c
* Example set of unit tests.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include "unittest.h"
/* *** BEGIN fake board data *** */
struct comedi_device {
const char *board_name;
int item;
};
static struct comedi_device dev = {
.board_name = "fake_device",
};
/* *** END fake board data *** */
/* *** BEGIN fake data init *** */
static void init_fake(void)
{
dev.item = 10;
}
/* *** END fake data init *** */
static void test0(void)
{
init_fake();
unittest(dev.item != 11, "negative result\n");
unittest(dev.item == 10, "positive result\n");
}
/* **** BEGIN simple module entry/exit functions **** */
static int __init unittest_enter(void)
{
static const unittest_fptr unit_tests[] = {
test0,
NULL,
};
exec_unittests("example", unit_tests);
return 0;
}
static void __exit unittest_exit(void) { }
module_init(unittest_enter);
module_exit(unittest_exit);
MODULE_AUTHOR("Spencer Olson <[email protected]>");
MODULE_DESCRIPTION("Comedi unit-tests example");
MODULE_LICENSE("GPL");
/* **** END simple module entry/exit functions **** */
| linux-master | drivers/comedi/drivers/tests/comedi_example_test.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_route_values.c
* Route information for NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* This file includes the tables that are a list of all the values of various
* signals routes available on NI hardware. In many cases, one does not
* explicitly make these routes, rather one might indicate that something is
* used as the source of one particular trigger or another (using
* *_src=TRIG_EXT).
*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
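/*
 * For example (an illustrative sketch, not part of the generated
 * tables): a user requesting an externally routed start trigger might
 * set
 *
 *	cmd->start_src = TRIG_EXT;
 *	cmd->start_arg = NI_PFI(0);
 *
 * and the driver consults these tables to translate the global signal
 * name into its board-specific register value.
 */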
#include "ni_route_values.h"
#include "ni_route_values/all.h"
const struct family_route_values *const ni_all_route_values[] = {
&ni_660x_route_values,
&ni_eseries_route_values,
&ni_mseries_route_values,
NULL,
};
| linux-master | drivers/comedi/drivers/ni_routing/ni_route_values.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_device_routes.c
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#include "ni_device_routes.h"
#include "ni_device_routes/all.h"
struct ni_device_routes *const ni_device_routes_list[] = {
&ni_pxi_6030e_device_routes,
&ni_pci_6070e_device_routes,
&ni_pci_6220_device_routes,
&ni_pci_6221_device_routes,
&ni_pxi_6224_device_routes,
&ni_pxi_6225_device_routes,
&ni_pci_6229_device_routes,
&ni_pci_6251_device_routes,
&ni_pxi_6251_device_routes,
&ni_pxie_6251_device_routes,
&ni_pci_6254_device_routes,
&ni_pci_6259_device_routes,
&ni_pci_6534_device_routes,
&ni_pci_6602_device_routes,
&ni_pci_6713_device_routes,
&ni_pci_6723_device_routes,
&ni_pci_6733_device_routes,
&ni_pxi_6733_device_routes,
NULL,
};
| linux-master | drivers/comedi/drivers/ni_routing/ni_device_routes.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6070e.c
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#include "../ni_device_routes.h"
#include "all.h"
struct ni_device_routes ni_pci_6070e_device_routes = {
.device = "pci-6070e",
.routes = (struct ni_route_set[]){
{
.dest = NI_PFI(0),
.src = (int[]){
NI_AI_StartTrigger,
0, /* Termination */
}
},
{
.dest = NI_PFI(1),
.src = (int[]){
NI_AI_ReferenceTrigger,
0, /* Termination */
}
},
{
.dest = NI_PFI(2),
.src = (int[]){
NI_AI_ConvertClock,
0, /* Termination */
}
},
{
.dest = NI_PFI(3),
.src = (int[]){
NI_CtrSource(1),
0, /* Termination */
}
},
{
.dest = NI_PFI(4),
.src = (int[]){
NI_CtrGate(1),
0, /* Termination */
}
},
{
.dest = NI_PFI(5),
.src = (int[]){
NI_AO_SampleClock,
0, /* Termination */
}
},
{
.dest = NI_PFI(6),
.src = (int[]){
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = NI_PFI(7),
.src = (int[]){
NI_AI_SampleClock,
0, /* Termination */
}
},
{
.dest = NI_PFI(8),
.src = (int[]){
NI_CtrSource(0),
0, /* Termination */
}
},
{
.dest = NI_PFI(9),
.src = (int[]){
NI_CtrGate(0),
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(0),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(1),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(2),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(3),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(4),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(5),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(6),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(7),
.src = (int[]){
NI_20MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_MasterTimebase,
NI_20MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_MasterTimebase,
NI_20MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrOut(0),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_CtrInternalOutput(0),
0, /* Termination */
}
},
{
.dest = NI_CtrOut(1),
.src = (int[]){
NI_CtrInternalOutput(1),
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_CtrInternalOutput(0),
NI_AI_SampleClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_MasterTimebase,
NI_20MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_CtrInternalOutput(0),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ReferenceTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_CtrInternalOutput(0),
NI_AI_ConvertClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClockTimebase,
.src = (int[]){
TRIGGER_LINE(7),
NI_AI_SampleClockTimebase,
NI_MasterTimebase,
NI_20MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_AI_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_HoldComplete,
.src = (int[]){
NI_AI_HoldCompleteEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_CtrInternalOutput(1),
NI_AO_SampleClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_MasterTimebase,
NI_20MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_AI_StartTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_MasterTimebase,
.src = (int[]){
TRIGGER_LINE(7),
NI_20MHzTimebase,
0, /* Termination */
}
},
{ /* Termination of list */
.dest = 0,
},
},
};
| linux-master | drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6070e.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_device_routes/pxie-6738.c
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#include "../ni_device_routes.h"
#include "all.h"
struct ni_device_routes ni_pxie_6738_device_routes = {
.device = "pxie-6738",
.routes = (struct ni_route_set[]){
{
.dest = NI_PFI(0),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(1),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(2),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(3),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(4),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(5),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(6),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(7),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrZ(0),
NI_CtrZ(1),
NI_CtrZ(2),
NI_CtrZ(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrZ(0),
NI_CtrZ(1),
NI_CtrZ(2),
NI_CtrZ(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrZ(0),
NI_CtrZ(1),
NI_CtrZ(2),
NI_CtrZ(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrZ(0),
NI_CtrZ(1),
NI_CtrZ(2),
NI_CtrZ(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(4),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrZ(0),
NI_CtrZ(1),
NI_CtrZ(2),
NI_CtrZ(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(5),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrZ(0),
NI_CtrZ(1),
NI_CtrZ(2),
NI_CtrZ(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(6),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrZ(0),
NI_CtrZ(1),
NI_CtrZ(2),
NI_CtrZ(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(7),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrZ(0),
NI_CtrZ(1),
NI_CtrZ(2),
NI_CtrZ(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
PXI_Clk10,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_20MHzTimebase,
NI_100MHzTimebase,
NI_100kHzTimebase,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
PXI_Clk10,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_20MHzTimebase,
NI_100MHzTimebase,
NI_100kHzTimebase,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(3),
PXI_Clk10,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_20MHzTimebase,
NI_100MHzTimebase,
NI_100kHzTimebase,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
PXI_Clk10,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_20MHzTimebase,
NI_100MHzTimebase,
NI_100kHzTimebase,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrA(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrA(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrA(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrA(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrB(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrB(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrB(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrB(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrZ(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrZ(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrZ(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrZ(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSampleClock(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSampleClock(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSampleClock(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSampleClock(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClockTimebase,
NI_DI_SampleClock,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Clk10,
NI_20MHzTimebase,
NI_100MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_AO_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_DI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_DI_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Clk10,
NI_DI_SampleClockTimebase,
NI_20MHzTimebase,
NI_100MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_DI_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_DI_ReferenceTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_DI_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_DO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_DO_SampleClockTimebase,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_DO_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Clk10,
NI_20MHzTimebase,
NI_100MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_DO_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_DO_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrArmStartTrigger(0),
NI_CtrArmStartTrigger(1),
NI_CtrArmStartTrigger(2),
NI_CtrArmStartTrigger(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrSampleClock(0),
NI_CtrSampleClock(1),
NI_CtrSampleClock(2),
NI_CtrSampleClock(3),
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_PauseTrigger,
NI_10MHzRefClock,
NI_ChangeDetectionEvent,
NI_WatchdogExpiredEvent,
0, /* Termination */
}
},
{
.dest = NI_WatchdogExpirationTrigger,
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{ /* Termination of list */
.dest = 0,
},
},
};
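/*
 * Editor's note: a minimal sketch (not part of the generated table
 * above) of how a consumer might test whether a direct route exists.
 * It relies only on conventions visible in this file: each .src array
 * ends with a 0 entry ("Termination") and the .routes list ends with a
 * route set whose .dest is 0 ("Termination of list").  The helper name
 * and its existence are hypothetical; this is not an existing kernel
 * API.  'bool' is assumed available via the headers already included.
 */
static bool ni_route_set_has_route(const struct ni_device_routes *dr,
				   int src, int dest)
{
	const struct ni_route_set *rs;
	const int *s;

	/* Scan route sets until the .dest == 0 terminator. */
	for (rs = dr->routes; rs->dest != 0; rs++) {
		if (rs->dest != dest)
			continue;
		/* Scan the source list until its 0 terminator. */
		for (s = rs->src; *s != 0; s++) {
			if (*s == src)
				return true;
		}
	}
	return false;
}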
| linux-master | drivers/comedi/drivers/ni_routing/ni_device_routes/pxie-6738.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6251.c
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#include "../ni_device_routes.h"
#include "all.h"
struct ni_device_routes ni_pci_6251_device_routes = {
.device = "pci-6251",
.routes = (struct ni_route_set[]){
{
.dest = NI_PFI(0),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(1),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(2),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(3),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(4),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(5),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(6),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(7),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(8),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(9),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(10),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(11),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(12),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(13),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(14),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(15),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(4),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(5),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(6),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(7),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrGate(1),
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrGate(0),
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrA(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrA(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrB(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrB(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrZ(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrZ(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_20MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ReferenceTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_ConvertClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClockTimebase,
.src = (int[]){
NI_AI_SampleClockTimebase,
NI_20MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_AI_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AO_SampleClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_20MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AI_StartTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_DI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_DO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{ /* Termination of list */
.dest = 0,
},
},
};
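/*
 * Illustrative sketch (not part of the generated table above, and not in
 * the upstream file): one way a consumer could look up the valid source
 * list for a destination.  It relies only on the conventions visible in
 * this file: the routes array ends with an entry whose .dest is 0, and
 * each .src array ends with a 0 element.  The helper name is hypothetical.
 */
static const int *ni_pci_6251_find_sources(const struct ni_device_routes *dr,
					   int dest)
{
	const struct ni_route_set *rs;

	for (rs = dr->routes; rs->dest != 0; ++rs) {
		if (rs->dest == dest)
			return rs->src;	/* 0-terminated list of valid sources */
	}
	return NULL;	/* no route set for this destination */
}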
| linux-master | drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6251.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6534.c
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#include "../ni_device_routes.h"
#include "all.h"
struct ni_device_routes ni_pci_6534_device_routes = {
.device = "pci-6534",
.routes = (struct ni_route_set[]){
{
.dest = NI_PFI(0),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = NI_PFI(1),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = NI_PFI(2),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = NI_PFI(3),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = NI_PFI(4),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = NI_PFI(5),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = NI_PFI(6),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = NI_PFI(7),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
TRIGGER_LINE(0),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(4),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(5),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(6),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(7),
.src = (int[]){
NI_20MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_MasterTimebase,
.src = (int[]){
TRIGGER_LINE(7),
NI_20MHzTimebase,
0, /* Termination */
}
},
{ /* Termination of list */
.dest = 0,
},
},
};
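/*
 * Illustrative sketch (hypothetical helper, not part of the generated
 * data): check whether a particular src -> dest route is listed in a
 * table such as the one above, by scanning the 0-terminated arrays.
 * For example, with &ni_pci_6534_device_routes this returns 1 for
 * src = NI_PFI(3), dest = TRIGGER_LINE(0), per the entries above.
 */
static int ni_route_listed(const struct ni_device_routes *dr, int src, int dest)
{
	const struct ni_route_set *rs;
	const int *s;

	for (rs = dr->routes; rs->dest != 0; ++rs) {
		if (rs->dest != dest)
			continue;
		for (s = rs->src; *s != 0; ++s) {
			if (*s == src)
				return 1;
		}
	}
	return 0;
}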
| linux-master | drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6534.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6220.c
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#include "../ni_device_routes.h"
#include "all.h"
struct ni_device_routes ni_pci_6220_device_routes = {
.device = "pci-6220",
.routes = (struct ni_route_set[]){
{
.dest = NI_PFI(0),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(1),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(2),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(3),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(4),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(5),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(6),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(7),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(8),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(9),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(10),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(11),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(12),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(13),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(14),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(15),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(4),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(5),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(6),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(7),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrGate(1),
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrGate(0),
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
0, /* Termination */
}
},
{
.dest = NI_CtrA(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_CtrA(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_CtrB(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_CtrB(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_CtrZ(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_CtrZ(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClockTimebase,
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_20MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_AI_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
0, /* Termination */
}
},
{
.dest = NI_AI_ReferenceTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_ConvertClockTimebase,
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClockTimebase,
.src = (int[]){
NI_AI_SampleClockTimebase,
NI_20MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_AI_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_DI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_ConvertClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_DO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_ConvertClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{ /* Termination of list */
.dest = 0,
},
},
};
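/*
 * Illustrative sketch (not from the upstream file): tally the number of
 * distinct (source, destination) pairs a board's table describes, again
 * walking the sentinel-terminated arrays shown above.  A count like this
 * is only meant as a sanity check when regenerating these tables with the
 * tools in comedi/drivers/ni_routing/tools.
 */
static unsigned int ni_count_route_pairs(const struct ni_device_routes *dr)
{
	const struct ni_route_set *rs;
	const int *s;
	unsigned int n = 0;

	for (rs = dr->routes; rs->dest != 0; ++rs)
		for (s = rs->src; *s != 0; ++s)
			++n;
	return n;
}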
| linux-master | drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6220.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6602.c
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#include "../ni_device_routes.h"
#include "all.h"
struct ni_device_routes ni_pci_6602_device_routes = {
.device = "pci-6602",
.routes = (struct ni_route_set[]){
{
.dest = NI_PFI(2),
.src = (int[]){
NI_80MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_PFI(3),
.src = (int[]){
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_PFI(4),
.src = (int[]){
NI_20MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_PFI(6),
.src = (int[]){
NI_80MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_PFI(7),
.src = (int[]){
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_PFI(8),
.src = (int[]){
NI_PFI(7),
NI_PFI(15),
NI_PFI(23),
NI_PFI(31),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_PFI(9),
.src = (int[]){
NI_PFI(7),
NI_PFI(15),
NI_PFI(23),
NI_PFI(31),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_PFI(10),
.src = (int[]){
NI_CtrGate(7),
NI_LogicLow,
0, /* Termination */
}
},
{
.dest = NI_PFI(11),
.src = (int[]){
NI_CtrSource(7),
NI_LogicLow,
0, /* Termination */
}
},
{
.dest = NI_PFI(12),
.src = (int[]){
NI_PFI(6),
NI_PFI(14),
NI_PFI(22),
NI_PFI(30),
NI_PFI(38),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_PFI(13),
.src = (int[]){
NI_PFI(6),
NI_PFI(14),
NI_PFI(22),
NI_PFI(30),
NI_PFI(38),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_PFI(14),
.src = (int[]){
NI_CtrGate(6),
NI_LogicLow,
0, /* Termination */
}
},
{
.dest = NI_PFI(15),
.src = (int[]){
NI_CtrSource(6),
NI_LogicLow,
0, /* Termination */
}
},
{
.dest = NI_PFI(16),
.src = (int[]){
NI_PFI(5),
NI_PFI(13),
NI_PFI(21),
NI_PFI(29),
NI_PFI(37),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_PFI(17),
.src = (int[]){
NI_PFI(5),
NI_PFI(13),
NI_PFI(21),
NI_PFI(29),
NI_PFI(37),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_PFI(18),
.src = (int[]){
NI_CtrGate(5),
NI_LogicLow,
0, /* Termination */
}
},
{
.dest = NI_PFI(19),
.src = (int[]){
NI_CtrSource(5),
0, /* Termination */
}
},
{
.dest = NI_PFI(20),
.src = (int[]){
NI_PFI(4),
NI_PFI(12),
NI_PFI(28),
NI_PFI(36),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_PFI(21),
.src = (int[]){
NI_PFI(4),
NI_PFI(12),
NI_PFI(20),
NI_PFI(28),
NI_PFI(36),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_PFI(22),
.src = (int[]){
NI_CtrGate(4),
0, /* Termination */
}
},
{
.dest = NI_PFI(23),
.src = (int[]){
NI_CtrSource(4),
NI_LogicLow,
0, /* Termination */
}
},
{
.dest = NI_PFI(24),
.src = (int[]){
NI_PFI(3),
NI_PFI(11),
NI_PFI(19),
NI_PFI(27),
NI_PFI(35),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(3),
NI_CtrSource(7),
NI_CtrGate(3),
NI_CtrGate(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_PFI(25),
.src = (int[]){
NI_PFI(3),
NI_PFI(11),
NI_PFI(19),
NI_PFI(27),
NI_PFI(35),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(3),
NI_CtrSource(7),
NI_CtrGate(3),
NI_CtrGate(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_PFI(26),
.src = (int[]){
NI_CtrGate(3),
0, /* Termination */
}
},
{
.dest = NI_PFI(27),
.src = (int[]){
NI_CtrSource(3),
0, /* Termination */
}
},
{
.dest = NI_PFI(28),
.src = (int[]){
NI_PFI(2),
NI_PFI(10),
NI_PFI(18),
NI_PFI(26),
NI_PFI(34),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(2),
NI_CtrSource(6),
NI_CtrGate(2),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_PFI(29),
.src = (int[]){
NI_PFI(2),
NI_PFI(10),
NI_PFI(18),
NI_PFI(26),
NI_PFI(34),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(2),
NI_CtrSource(6),
NI_CtrGate(2),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_PFI(30),
.src = (int[]){
NI_CtrGate(2),
0, /* Termination */
}
},
{
.dest = NI_PFI(31),
.src = (int[]){
NI_CtrSource(2),
0, /* Termination */
}
},
{
.dest = NI_PFI(32),
.src = (int[]){
NI_PFI(1),
NI_PFI(9),
NI_PFI(17),
NI_PFI(25),
NI_PFI(33),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrSource(5),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_PFI(33),
.src = (int[]){
NI_PFI(1),
NI_PFI(9),
NI_PFI(17),
NI_PFI(25),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrSource(5),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_PFI(34),
.src = (int[]){
NI_CtrGate(1),
0, /* Termination */
}
},
{
.dest = NI_PFI(35),
.src = (int[]){
NI_CtrSource(1),
0, /* Termination */
}
},
{
.dest = NI_PFI(36),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(5),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_PFI(37),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(5),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_PFI(38),
.src = (int[]){
NI_CtrGate(0),
0, /* Termination */
}
},
{
.dest = NI_PFI(39),
.src = (int[]){
NI_CtrSource(0),
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrSource(4),
NI_CtrSource(5),
NI_CtrSource(6),
NI_CtrSource(7),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrGate(4),
NI_CtrGate(5),
NI_CtrGate(6),
NI_CtrGate(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrSource(4),
NI_CtrSource(5),
NI_CtrSource(6),
NI_CtrSource(7),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrGate(4),
NI_CtrGate(5),
NI_CtrGate(6),
NI_CtrGate(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrSource(4),
NI_CtrSource(5),
NI_CtrSource(6),
NI_CtrSource(7),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrGate(4),
NI_CtrGate(5),
NI_CtrGate(6),
NI_CtrGate(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrSource(4),
NI_CtrSource(5),
NI_CtrSource(6),
NI_CtrSource(7),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrGate(4),
NI_CtrGate(5),
NI_CtrGate(6),
NI_CtrGate(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(4),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrSource(4),
NI_CtrSource(5),
NI_CtrSource(6),
NI_CtrSource(7),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrGate(4),
NI_CtrGate(5),
NI_CtrGate(6),
NI_CtrGate(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(5),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrSource(4),
NI_CtrSource(5),
NI_CtrSource(6),
NI_CtrSource(7),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrGate(4),
NI_CtrGate(5),
NI_CtrGate(6),
NI_CtrGate(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(6),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrSource(4),
NI_CtrSource(5),
NI_CtrSource(6),
NI_CtrSource(7),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrGate(4),
NI_CtrGate(5),
NI_CtrGate(6),
NI_CtrGate(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(7),
.src = (int[]){
NI_20MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(3),
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(4),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(5),
NI_CtrSource(6),
NI_CtrSource(7),
NI_CtrGate(5),
NI_CtrGate(6),
NI_CtrGate(7),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(5),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(4),
NI_CtrSource(6),
NI_CtrSource(7),
NI_CtrGate(4),
NI_CtrGate(6),
NI_CtrGate(7),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(6),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(4),
NI_CtrSource(5),
NI_CtrSource(7),
NI_CtrGate(4),
NI_CtrGate(5),
NI_CtrGate(7),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(7),
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(7),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(4),
NI_CtrSource(5),
NI_CtrSource(6),
NI_CtrGate(4),
NI_CtrGate(5),
NI_CtrGate(6),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(3),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(4),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(5),
NI_CtrSource(6),
NI_CtrSource(7),
NI_CtrGate(5),
NI_CtrGate(6),
NI_CtrGate(7),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(5),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(4),
NI_CtrSource(6),
NI_CtrSource(7),
NI_CtrGate(4),
NI_CtrGate(6),
NI_CtrGate(7),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(6),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(4),
NI_CtrSource(5),
NI_CtrSource(7),
NI_CtrGate(4),
NI_CtrGate(5),
NI_CtrGate(7),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(7),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(4),
NI_CtrSource(5),
NI_CtrSource(6),
NI_CtrGate(4),
NI_CtrGate(5),
NI_CtrGate(6),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(3),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(4),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(5),
NI_CtrSource(6),
NI_CtrSource(7),
NI_CtrGate(4),
NI_CtrGate(5),
NI_CtrGate(6),
NI_CtrGate(7),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(5),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(4),
NI_CtrSource(6),
NI_CtrSource(7),
NI_CtrGate(4),
NI_CtrGate(5),
NI_CtrGate(6),
NI_CtrGate(7),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(6),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(4),
NI_CtrSource(5),
NI_CtrSource(7),
NI_CtrGate(4),
NI_CtrGate(5),
NI_CtrGate(6),
NI_CtrGate(7),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(7),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(4),
NI_CtrSource(5),
NI_CtrSource(6),
NI_CtrGate(4),
NI_CtrGate(5),
NI_CtrGate(6),
NI_CtrGate(7),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(2),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(2),
NI_CtrGate(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(2),
NI_CtrInternalOutput(3),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(3),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(3),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(3),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrSource(2),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrGate(2),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_CtrInternalOutput(2),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(4),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(5),
NI_CtrSource(6),
NI_CtrSource(7),
NI_CtrGate(5),
NI_CtrGate(6),
NI_CtrGate(7),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(5),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(4),
NI_CtrSource(6),
NI_CtrSource(7),
NI_CtrGate(4),
NI_CtrGate(6),
NI_CtrGate(7),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(6),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(6),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(4),
NI_CtrSource(5),
NI_CtrSource(7),
NI_CtrGate(4),
NI_CtrGate(5),
NI_CtrGate(7),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(7),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(7),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
NI_PFI(16),
NI_PFI(17),
NI_PFI(18),
NI_PFI(19),
NI_PFI(20),
NI_PFI(21),
NI_PFI(22),
NI_PFI(23),
NI_PFI(24),
NI_PFI(25),
NI_PFI(26),
NI_PFI(27),
NI_PFI(28),
NI_PFI(29),
NI_PFI(30),
NI_PFI(31),
NI_PFI(32),
NI_PFI(33),
NI_PFI(34),
NI_PFI(35),
NI_PFI(36),
NI_PFI(37),
NI_PFI(38),
NI_PFI(39),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(4),
NI_CtrSource(5),
NI_CtrSource(6),
NI_CtrGate(4),
NI_CtrGate(5),
NI_CtrGate(6),
NI_CtrInternalOutput(4),
NI_CtrInternalOutput(5),
NI_CtrInternalOutput(6),
NI_LogicLow,
NI_LogicHigh,
0, /* Termination */
}
},
{
.dest = NI_MasterTimebase,
.src = (int[]){
TRIGGER_LINE(7),
NI_20MHzTimebase,
0, /* Termination */
}
},
{ /* Termination of list */
.dest = 0,
},
},
};
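/*
 * Illustrative sketch only -- not part of the generated table and not the
 * driver's real helper API: one way a caller could test whether a
 * src -> dest route is listed in a table like the one above.  It assumes
 * nothing beyond what is visible in these files: .routes is terminated by
 * a set whose .dest is 0, and each .src array ends with a 0 entry (the
 * "Termination" sentinel).  The __maybe_unused attribute is only there so
 * the sketch would compile cleanly if nothing referenced it.
 */
static int __maybe_unused route_is_listed(const struct ni_device_routes *dr,
					  int src, int dest)
{
	const struct ni_route_set *rs;
	const int *s;

	for (rs = dr->routes; rs->dest != 0; rs++) {
		if (rs->dest != dest)
			continue;
		for (s = rs->src; *s != 0; s++) {
			if (*s == src)
				return 1;
		}
		return 0;	/* dest found, but src is not a valid source */
	}
	return 0;		/* dest is not routable on this board */
}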
| linux-master | drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6602.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6723.c
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#include "../ni_device_routes.h"
#include "all.h"
struct ni_device_routes ni_pci_6723_device_routes = {
.device = "pci-6723",
.routes = (struct ni_route_set[]){
{
.dest = NI_PFI(3),
.src = (int[]){
NI_CtrSource(1),
0, /* Termination */
}
},
{
.dest = NI_PFI(4),
.src = (int[]){
NI_CtrGate(1),
0, /* Termination */
}
},
{
.dest = NI_PFI(5),
.src = (int[]){
NI_AO_SampleClock,
0, /* Termination */
}
},
{
.dest = NI_PFI(6),
.src = (int[]){
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = NI_PFI(8),
.src = (int[]){
NI_CtrSource(0),
0, /* Termination */
}
},
{
.dest = NI_PFI(9),
.src = (int[]){
NI_CtrGate(0),
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(0),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(1),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(2),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(3),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(4),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(5),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(6),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(7),
.src = (int[]){
NI_20MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_MasterTimebase,
NI_20MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_MasterTimebase,
NI_20MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_CtrInternalOutput(1),
0, /* Termination */
}
},
{
.dest = NI_CtrGate(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_CtrInternalOutput(0),
0, /* Termination */
}
},
{
.dest = NI_CtrOut(0),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_CtrInternalOutput(0),
0, /* Termination */
}
},
{
.dest = NI_CtrOut(1),
.src = (int[]){
NI_CtrInternalOutput(1),
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_CtrInternalOutput(1),
NI_AO_SampleClockTimebase,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_MasterTimebase,
NI_20MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_AO_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = NI_AO_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = NI_MasterTimebase,
.src = (int[]){
TRIGGER_LINE(7),
NI_20MHzTimebase,
0, /* Termination */
}
},
{ /* Termination of list */
.dest = 0,
},
},
};
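/*
 * Hedged illustration, not kernel API: counting the entries of one
 * 0-terminated .src array, as the tables above are laid out.  Only the
 * .src field visible in the initializers is assumed; the real header may
 * carry a precomputed count instead.
 */
static unsigned int __maybe_unused
route_set_n_sources(const struct ni_route_set *rs)
{
	unsigned int n = 0;

	while (rs->src[n] != 0)	/* 0 marks "Termination" in these tables */
		n++;
	return n;
}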
| linux-master | drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6723.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_device_routes/pxie-6535.c
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#include "../ni_device_routes.h"
#include "all.h"
struct ni_device_routes ni_pxie_6535_device_routes = {
.device = "pxie-6535",
.routes = (struct ni_route_set[]){
{
.dest = NI_PFI(0),
.src = (int[]){
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_InputBufferFull,
NI_DI_ReadyForStartEvent,
NI_DI_ReadyForTransferEventBurst,
NI_DI_ReadyForTransferEventPipelined,
NI_DO_StartTrigger,
NI_DO_OutputBufferFull,
NI_DO_DataActiveEvent,
NI_DO_ReadyForStartEvent,
NI_DO_ReadyForTransferEvent,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_InputBufferFull,
NI_DI_ReadyForStartEvent,
NI_DI_ReadyForTransferEventBurst,
NI_DI_ReadyForTransferEventPipelined,
NI_DO_StartTrigger,
NI_DO_OutputBufferFull,
NI_DO_DataActiveEvent,
NI_DO_ReadyForStartEvent,
NI_DO_ReadyForTransferEvent,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_InputBufferFull,
NI_DI_ReadyForStartEvent,
NI_DI_ReadyForTransferEventBurst,
NI_DI_ReadyForTransferEventPipelined,
NI_DO_StartTrigger,
NI_DO_OutputBufferFull,
NI_DO_DataActiveEvent,
NI_DO_ReadyForStartEvent,
NI_DO_ReadyForTransferEvent,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(4),
NI_PFI(5),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_InputBufferFull,
NI_DI_ReadyForStartEvent,
NI_DI_ReadyForTransferEventBurst,
NI_DI_ReadyForTransferEventPipelined,
NI_DO_StartTrigger,
NI_DO_OutputBufferFull,
NI_DO_DataActiveEvent,
NI_DO_ReadyForStartEvent,
NI_DO_ReadyForTransferEvent,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(4),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(5),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_InputBufferFull,
NI_DI_ReadyForStartEvent,
NI_DI_ReadyForTransferEventBurst,
NI_DI_ReadyForTransferEventPipelined,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_OutputBufferFull,
NI_DO_DataActiveEvent,
NI_DO_ReadyForStartEvent,
NI_DO_ReadyForTransferEvent,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(5),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_DI_SampleClock,
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_InputBufferFull,
NI_DI_ReadyForStartEvent,
NI_DI_ReadyForTransferEventBurst,
NI_DI_ReadyForTransferEventPipelined,
NI_DO_StartTrigger,
NI_DO_OutputBufferFull,
NI_DO_DataActiveEvent,
NI_DO_ReadyForStartEvent,
NI_DO_ReadyForTransferEvent,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_InputBufferFull,
NI_DI_ReadyForStartEvent,
NI_DI_ReadyForTransferEventBurst,
NI_DI_ReadyForTransferEventPipelined,
NI_DO_StartTrigger,
NI_DO_OutputBufferFull,
NI_DO_DataActiveEvent,
NI_DO_ReadyForStartEvent,
NI_DO_ReadyForTransferEvent,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
TRIGGER_LINE(0),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_InputBufferFull,
NI_DI_ReadyForStartEvent,
NI_DI_ReadyForTransferEventBurst,
NI_DI_ReadyForTransferEventPipelined,
NI_DO_StartTrigger,
NI_DO_OutputBufferFull,
NI_DO_DataActiveEvent,
NI_DO_ReadyForStartEvent,
NI_DO_ReadyForTransferEvent,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_InputBufferFull,
NI_DI_ReadyForStartEvent,
NI_DI_ReadyForTransferEventBurst,
NI_DI_ReadyForTransferEventPipelined,
NI_DO_StartTrigger,
NI_DO_OutputBufferFull,
NI_DO_DataActiveEvent,
NI_DO_ReadyForStartEvent,
NI_DO_ReadyForTransferEvent,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_InputBufferFull,
NI_DI_ReadyForStartEvent,
NI_DI_ReadyForTransferEventBurst,
NI_DI_ReadyForTransferEventPipelined,
NI_DO_StartTrigger,
NI_DO_OutputBufferFull,
NI_DO_DataActiveEvent,
NI_DO_ReadyForStartEvent,
NI_DO_ReadyForTransferEvent,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(4),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_InputBufferFull,
NI_DI_ReadyForStartEvent,
NI_DI_ReadyForTransferEventBurst,
NI_DI_ReadyForTransferEventPipelined,
NI_DO_StartTrigger,
NI_DO_OutputBufferFull,
NI_DO_DataActiveEvent,
NI_DO_ReadyForStartEvent,
NI_DO_ReadyForTransferEvent,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(5),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(6),
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_InputBufferFull,
NI_DI_ReadyForStartEvent,
NI_DI_ReadyForTransferEventBurst,
NI_DI_ReadyForTransferEventPipelined,
NI_DO_StartTrigger,
NI_DO_OutputBufferFull,
NI_DO_DataActiveEvent,
NI_DO_ReadyForStartEvent,
NI_DO_ReadyForTransferEvent,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(6),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_InputBufferFull,
NI_DI_ReadyForStartEvent,
NI_DI_ReadyForTransferEventBurst,
NI_DI_ReadyForTransferEventPipelined,
NI_DO_StartTrigger,
NI_DO_OutputBufferFull,
NI_DO_DataActiveEvent,
NI_DO_ReadyForStartEvent,
NI_DO_ReadyForTransferEvent,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(7),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_DI_StartTrigger,
NI_DI_ReferenceTrigger,
NI_DI_InputBufferFull,
NI_DI_ReadyForStartEvent,
NI_DI_ReadyForTransferEventBurst,
NI_DI_ReadyForTransferEventPipelined,
NI_DO_SampleClock,
NI_DO_StartTrigger,
NI_DO_OutputBufferFull,
NI_DO_DataActiveEvent,
NI_DO_ReadyForStartEvent,
NI_DO_ReadyForTransferEvent,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_DI_SampleClock,
.src = (int[]){
NI_PFI(5),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_DI_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = NI_DI_ReferenceTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = NI_DI_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = NI_DO_SampleClock,
.src = (int[]){
NI_PFI(4),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_DO_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = NI_DO_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{ /* Termination of list */
.dest = 0,
},
},
};
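/*
 * Sketch of an inverse lookup (an assumption of this illustration, not a
 * helper the driver is known to provide): counting how many destinations
 * in a table can be fed from a given source, by scanning the same
 * sentinel-terminated layout used above.
 */
static unsigned int __maybe_unused
count_dests_for_src(const struct ni_device_routes *dr, int src)
{
	const struct ni_route_set *rs;
	const int *s;
	unsigned int n = 0;

	for (rs = dr->routes; rs->dest != 0; rs++) {
		for (s = rs->src; *s != 0; s++) {
			if (*s == src) {
				n++;
				break;	/* count each dest at most once */
			}
		}
	}
	return n;
}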
| linux-master | drivers/comedi/drivers/ni_routing/ni_device_routes/pxie-6535.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6733.c
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#include "../ni_device_routes.h"
#include "all.h"
struct ni_device_routes ni_pci_6733_device_routes = {
.device = "pci-6733",
.routes = (struct ni_route_set[]){
{
.dest = NI_PFI(3),
.src = (int[]){
NI_CtrSource(1),
0, /* Termination */
}
},
{
.dest = NI_PFI(4),
.src = (int[]){
NI_CtrGate(1),
0, /* Termination */
}
},
{
.dest = NI_PFI(5),
.src = (int[]){
NI_AO_SampleClock,
0, /* Termination */
}
},
{
.dest = NI_PFI(6),
.src = (int[]){
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = NI_PFI(8),
.src = (int[]){
NI_CtrSource(0),
0, /* Termination */
}
},
{
.dest = NI_PFI(9),
.src = (int[]){
NI_CtrGate(0),
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(0),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(1),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(2),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(3),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(4),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(5),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(6),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(7),
.src = (int[]){
NI_20MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_MasterTimebase,
NI_20MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_MasterTimebase,
NI_20MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_CtrInternalOutput(1),
0, /* Termination */
}
},
{
.dest = NI_CtrGate(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_CtrInternalOutput(0),
0, /* Termination */
}
},
{
.dest = NI_CtrOut(0),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_CtrInternalOutput(0),
0, /* Termination */
}
},
{
.dest = NI_CtrOut(1),
.src = (int[]){
NI_CtrInternalOutput(1),
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_CtrInternalOutput(1),
NI_AO_SampleClockTimebase,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_MasterTimebase,
NI_20MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_AO_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = NI_AO_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = NI_DI_SampleClock,
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_AO_SampleClock,
0, /* Termination */
}
},
{
.dest = NI_DO_SampleClock,
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_AO_SampleClock,
0, /* Termination */
}
},
{
.dest = NI_MasterTimebase,
.src = (int[]){
TRIGGER_LINE(7),
NI_20MHzTimebase,
0, /* Termination */
}
},
{ /* Termination of list */
.dest = 0,
},
},
};
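/*
 * Sketch under stated assumptions: each table carries a .device string
 * (e.g. "pci-6733"), so a NULL-terminated array of table pointers could
 * be searched by name.  The "tables" parameter is hypothetical -- the
 * real registry lives elsewhere in ni_routing and may be organized
 * differently -- and strcmp() is assumed reachable via <linux/string.h>.
 */
static const struct ni_device_routes *__maybe_unused
lookup_device_routes(const struct ni_device_routes *const *tables,
		     const char *device)
{
	for (; *tables; tables++) {
		if (strcmp((*tables)->device, device) == 0)
			return *tables;
	}
	return NULL;
}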
| linux-master | drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6733.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6229.c
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#include "../ni_device_routes.h"
#include "all.h"
struct ni_device_routes ni_pci_6229_device_routes = {
.device = "pci-6229",
.routes = (struct ni_route_set[]){
{
.dest = NI_PFI(0),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(1),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(2),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(3),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(4),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(5),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(6),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(7),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(8),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(9),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(10),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(11),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(12),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(13),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(14),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(15),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(4),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(5),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(6),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(7),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrGate(1),
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrGate(0),
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
0, /* Termination */
}
},
{
.dest = NI_CtrA(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_CtrA(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_CtrB(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_CtrB(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_CtrZ(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_CtrZ(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClockTimebase,
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_20MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_AI_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
0, /* Termination */
}
},
{
.dest = NI_AI_ReferenceTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_ConvertClockTimebase,
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClockTimebase,
.src = (int[]){
NI_AI_SampleClockTimebase,
NI_20MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_AI_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AO_SampleClockTimebase,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_20MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_AO_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AI_StartTrigger,
0, /* Termination */
}
},
{
.dest = NI_AO_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_DI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_DO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{ /* Termination of list */
.dest = 0,
},
},
};
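/*
 * Editor's sketch, not generated output: finding the route set for a
 * given destination signal in the table above.  It relies only on the
 * .dest == 0 terminator visible at the end of the list;
 * ni_pci_6229_find_route_set() is a hypothetical name, not an exported
 * kernel API, and NULL is assumed available via the included headers.
 */
static inline const struct ni_route_set *ni_pci_6229_find_route_set(int dest)
{
	const struct ni_route_set *rs = ni_pci_6229_device_routes.routes;

	for (; rs->dest; ++rs)		/* list ends at .dest == 0 */
		if (rs->dest == dest)
			return rs;	/* e.g. dest == NI_PFI(0) matches
					 * the first entry above */
	return NULL;
}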
| linux-master | drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6229.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_device_routes/pxi-6225.c
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#include "../ni_device_routes.h"
#include "all.h"
struct ni_device_routes ni_pxi_6225_device_routes = {
.device = "pxi-6225",
.routes = (struct ni_route_set[]){
{
.dest = NI_PFI(0),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(1),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(2),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(3),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(4),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(5),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(6),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(7),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(8),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(9),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(10),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(11),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(12),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(13),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(14),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(15),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(4),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(5),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(6),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(7),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Clk10,
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrGate(0),
PXI_Clk10,
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrA(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrA(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrB(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrB(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrZ(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrZ(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Clk10,
NI_20MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ReferenceTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_ConvertClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClockTimebase,
.src = (int[]){
NI_AI_SampleClockTimebase,
NI_20MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_AI_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AO_SampleClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Clk10,
NI_20MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AI_StartTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_DI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_DO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{ /* Termination of list */
.dest = 0,
},
},
};
| linux-master | drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6225.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_device_routes/pxi-6251.c
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#include "../ni_device_routes.h"
#include "all.h"
struct ni_device_routes ni_pxi_6251_device_routes = {
.device = "pxi-6251",
.routes = (struct ni_route_set[]){
{
.dest = NI_PFI(0),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(1),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(2),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(3),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(4),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(5),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(6),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(7),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(8),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(9),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(10),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(11),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(12),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(13),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(14),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(15),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(4),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(5),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(6),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(7),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Star,
PXI_Clk10,
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrGate(0),
PXI_Star,
PXI_Clk10,
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrInternalOutput(0),
PXI_Star,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
PXI_Star,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrA(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Star,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrA(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Star,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrB(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Star,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrB(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Star,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrZ(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Star,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrZ(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Star,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
PXI_Star,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_SampleClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Star,
PXI_Clk10,
NI_20MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ReferenceTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Star,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_ConvertClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClockTimebase,
.src = (int[]){
NI_AI_SampleClockTimebase,
NI_20MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_AI_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Star,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AO_SampleClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Star,
PXI_Clk10,
NI_20MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Star,
NI_AI_StartTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Star,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_DI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_SampleClock,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_DO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AI_SampleClock,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{ /* Termination of list */
.dest = 0,
},
},
};
| linux-master | drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6251.c |
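/*
 * Editor's note (not part of the kernel sources): the route tables above
 * are plain sentinel-terminated arrays -- the route-set list ends at
 * .dest == 0 and each .src list ends at 0 -- so checking whether a signal
 * may be routed to a destination is just a linear scan. Below is a
 * minimal sketch, assuming only the struct fields visibly used in these
 * files (.device, .routes, .dest, .src) as declared by
 * ../ni_device_routes.h; it is illustrative, not the driver's actual
 * validation path.
 */
#include <stdbool.h>
#include "../ni_device_routes.h"

/* Return true if 'src' is a listed source for 'dest' on this device. */
static bool route_is_listed(const struct ni_device_routes *dr,
			    int src, int dest)
{
	const struct ni_route_set *rs;
	const int *s;

	for (rs = dr->routes; rs->dest; rs++) {	/* list ends at .dest == 0 */
		if (rs->dest != dest)
			continue;
		for (s = rs->src; *s; s++)	/* src list is 0-terminated */
			if (*s == src)
				return true;
	}
	return false;
}

/*
 * Usage sketch: route_is_listed(&ni_pxi_6251_device_routes,
 * TRIGGER_LINE(0), NI_PFI(0)) would return true per the table above.
 */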
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6259.c
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#include "../ni_device_routes.h"
#include "all.h"
struct ni_device_routes ni_pci_6259_device_routes = {
.device = "pci-6259",
.routes = (struct ni_route_set[]){
{
.dest = NI_PFI(0),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(1),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(2),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(3),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(4),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(5),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(6),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(7),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(8),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(9),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(10),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(11),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(12),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(13),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(14),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(15),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(4),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(5),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(6),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(7),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrGate(1),
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrGate(0),
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrA(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrA(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrB(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrB(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrZ(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrZ(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_20MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ReferenceTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_ConvertClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClockTimebase,
.src = (int[]){
NI_AI_SampleClockTimebase,
NI_20MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_AI_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AO_SampleClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_20MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AI_StartTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_DI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_DO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{ /* Termination of list */
.dest = 0,
},
},
};
| linux-master | drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6259.c |
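/*
 * Editor's illustrative sketch (not part of the kernel sources): each
 * board gets exactly one ni_device_routes table keyed by its .device
 * string (e.g. "pci-6259" above), so resolving the table for a given
 * board name is a strcmp() scan over whatever collection "all.h" gathers.
 * The ni_example_route_list symbol below is hypothetical, standing in
 * for that collection; the real aggregate may be named differently.
 */
#include <stddef.h>
#include <string.h>
#include "../ni_device_routes.h"

/* hypothetical NULL-terminated collection of all per-board tables */
extern struct ni_device_routes *const ni_example_route_list[];

static struct ni_device_routes *find_board_routes(const char *device)
{
	size_t i;

	for (i = 0; ni_example_route_list[i]; i++)
		if (strcmp(ni_example_route_list[i]->device, device) == 0)
			return ni_example_route_list[i];
	return NULL;	/* unknown board */
}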
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_device_routes/pxie-6251.c
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#include "../ni_device_routes.h"
#include "all.h"
struct ni_device_routes ni_pxie_6251_device_routes = {
.device = "pxie-6251",
.routes = (struct ni_route_set[]){
{
.dest = NI_PFI(0),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(1),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(2),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(3),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(4),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(5),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(6),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(7),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(8),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(9),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(10),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(11),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(12),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(13),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(14),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(15),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(4),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(5),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(6),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(7),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrGate(1),
PXI_Clk10,
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrGate(0),
PXI_Clk10,
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrA(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrA(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrB(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrB(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrZ(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrZ(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Clk10,
NI_20MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ReferenceTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_ConvertClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClockTimebase,
.src = (int[]){
NI_AI_SampleClockTimebase,
NI_20MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_AI_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AO_SampleClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Clk10,
NI_20MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AI_StartTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_DI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_DO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{ /* Termination of list */
.dest = 0,
},
},
};
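
/*
 * Illustrative sketch (not part of the generated table above): given the
 * conventions visible in this file -- the .routes array ends with an entry
 * whose .dest is 0, and every .src array ends with a 0 sentinel marked
 * "Termination" -- a route can be validated with a simple two-level scan.
 * The helper name and its placement here are hypothetical; it assumes only
 * the struct fields actually used in the initializers above (.routes,
 * .dest, .src).
 */
static int example_route_is_valid(const struct ni_device_routes *dr,
				  int src, int dest)
{
	const struct ni_route_set *rs;
	int i;

	for (rs = dr->routes; rs->dest != 0; rs++) {	/* list terminator */
		if (rs->dest != dest)
			continue;
		for (i = 0; rs->src[i] != 0; i++)	/* src terminator */
			if (rs->src[i] == src)
				return 1;
	}
	return 0;
}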
| linux-master | drivers/comedi/drivers/ni_routing/ni_device_routes/pxie-6251.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6713.c
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
 */

/*
 * The contents of this file are generated using the tools in
 * comedi/drivers/ni_routing/tools
 *
 * Please use those tools to help maintain the contents of this file.
 */

#include "../ni_device_routes.h"
#include "all.h"

struct ni_device_routes ni_pci_6713_device_routes = {
.device = "pci-6713",
.routes = (struct ni_route_set[]){
{
.dest = NI_PFI(3),
.src = (int[]){
NI_CtrSource(1),
0, /* Termination */
}
},
{
.dest = NI_PFI(4),
.src = (int[]){
NI_CtrGate(1),
0, /* Termination */
}
},
{
.dest = NI_PFI(5),
.src = (int[]){
NI_AO_SampleClock,
0, /* Termination */
}
},
{
.dest = NI_PFI(6),
.src = (int[]){
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = NI_PFI(8),
.src = (int[]){
NI_CtrSource(0),
0, /* Termination */
}
},
{
.dest = NI_PFI(9),
.src = (int[]){
NI_CtrGate(0),
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(0),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(1),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(2),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(3),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(4),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(5),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(6),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(7),
.src = (int[]){
NI_20MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_MasterTimebase,
NI_20MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_MasterTimebase,
NI_20MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_CtrInternalOutput(1),
0, /* Termination */
}
},
{
.dest = NI_CtrGate(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_CtrInternalOutput(0),
0, /* Termination */
}
},
{
.dest = NI_CtrOut(0),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_CtrInternalOutput(0),
0, /* Termination */
}
},
{
.dest = NI_CtrOut(1),
.src = (int[]){
NI_CtrInternalOutput(1),
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
NI_CtrInternalOutput(1),
NI_AO_SampleClockTimebase,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_MasterTimebase,
NI_20MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_AO_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = NI_AO_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
0, /* Termination */
}
},
{
.dest = NI_MasterTimebase,
.src = (int[]){
TRIGGER_LINE(7),
NI_20MHzTimebase,
0, /* Termination */
}
},
{ /* Termination of list */
.dest = 0,
},
},
};
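
/*
 * Illustrative sketch only: enumerating one route set from the table
 * above, e.g. every signal that may be routed to NI_AO_StartTrigger on
 * this board.  It relies solely on the 0-sentinel conventions marked
 * "Termination" in the initializers; the function name is hypothetical,
 * and pr_info() is assumed to be available via the usual kernel headers.
 */
static void example_print_sources(const struct ni_device_routes *dr,
				  int dest)
{
	const struct ni_route_set *rs;
	int i;

	for (rs = dr->routes; rs->dest != 0; rs++) {
		if (rs->dest != dest)
			continue;
		for (i = 0; rs->src[i] != 0; i++)
			pr_info("%s: valid src %d\n", dr->device, rs->src[i]);
		return;
	}
}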
| linux-master | drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6713.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_device_routes/pxi-6224.c
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
 */

/*
 * The contents of this file are generated using the tools in
 * comedi/drivers/ni_routing/tools
 *
 * Please use those tools to help maintain the contents of this file.
 */

#include "../ni_device_routes.h"
#include "all.h"

struct ni_device_routes ni_pxi_6224_device_routes = {
.device = "pxi-6224",
.routes = (struct ni_route_set[]){
{
.dest = NI_PFI(0),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(1),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(2),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(3),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(4),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(5),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(6),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(7),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(8),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(9),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(10),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(11),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(12),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(13),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(14),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(15),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(4),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(5),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(6),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(7),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Clk10,
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrGate(0),
PXI_Clk10,
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrA(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrA(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrB(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrB(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrZ(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrZ(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
PXI_Clk10,
NI_20MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ReferenceTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_ConvertClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClockTimebase,
.src = (int[]){
NI_AI_SampleClockTimebase,
NI_20MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_AI_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_DI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_ConvertClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_DO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_ConvertClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{ /* Termination of list */
.dest = 0,
},
},
};
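
/*
 * Illustrative sketch only: because each .src array above ends in a 0
 * sentinel, the number of sources in a route set can be recovered at
 * runtime instead of being stored in the table.  A minimal, hypothetical
 * helper:
 */
static unsigned int example_n_src(const struct ni_route_set *rs)
{
	unsigned int n = 0;

	while (rs->src[n] != 0)
		n++;
	return n;
}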
| linux-master | drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6224.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6254.c
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
 */

/*
 * The contents of this file are generated using the tools in
 * comedi/drivers/ni_routing/tools
 *
 * Please use those tools to help maintain the contents of this file.
 */

#include "../ni_device_routes.h"
#include "all.h"

struct ni_device_routes ni_pci_6254_device_routes = {
.device = "pci-6254",
.routes = (struct ni_route_set[]){
{
.dest = NI_PFI(0),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(1),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(2),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(3),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(4),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(5),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(6),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(7),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(8),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(9),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(10),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(11),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(12),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(13),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(14),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(15),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(4),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(5),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(6),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(7),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrGate(1),
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrGate(0),
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrA(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrA(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrB(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrB(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrZ(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrZ(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_20MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ReferenceTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_ConvertClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClockTimebase,
.src = (int[]){
NI_AI_SampleClockTimebase,
NI_20MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_AI_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_DI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_ConvertClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_DO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_ConvertClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{ /* Termination of list */
.dest = 0,
},
},
};
| linux-master | drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6254.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_device_routes/pxi-6030e.c
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#include "../ni_device_routes.h"
#include "all.h"
struct ni_device_routes ni_pxi_6030e_device_routes = {
.device = "pxi-6030e",
.routes = (struct ni_route_set[]){
{
.dest = NI_PFI(0),
.src = (int[]){
NI_AI_StartTrigger,
0, /* Termination */
}
},
{
.dest = NI_PFI(1),
.src = (int[]){
NI_AI_ReferenceTrigger,
0, /* Termination */
}
},
{
.dest = NI_PFI(2),
.src = (int[]){
NI_AI_ConvertClock,
0, /* Termination */
}
},
{
.dest = NI_PFI(3),
.src = (int[]){
NI_CtrSource(1),
0, /* Termination */
}
},
{
.dest = NI_PFI(4),
.src = (int[]){
NI_CtrGate(1),
0, /* Termination */
}
},
{
.dest = NI_PFI(5),
.src = (int[]){
NI_AO_SampleClock,
0, /* Termination */
}
},
{
.dest = NI_PFI(6),
.src = (int[]){
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = NI_PFI(7),
.src = (int[]){
NI_AI_SampleClock,
0, /* Termination */
}
},
{
.dest = NI_PFI(8),
.src = (int[]){
NI_CtrSource(0),
0, /* Termination */
}
},
{
.dest = NI_PFI(9),
.src = (int[]){
NI_CtrGate(0),
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(0),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(1),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(2),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(3),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(4),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(5),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(7),
.src = (int[]){
NI_20MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(7),
NI_MasterTimebase,
NI_20MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(7),
NI_MasterTimebase,
NI_20MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrOut(0),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
NI_CtrInternalOutput(0),
0, /* Termination */
}
},
{
.dest = NI_CtrOut(1),
.src = (int[]){
NI_CtrInternalOutput(1),
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
NI_CtrInternalOutput(0),
NI_AI_SampleClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(7),
NI_MasterTimebase,
NI_20MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
NI_CtrInternalOutput(0),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ReferenceTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
NI_CtrInternalOutput(0),
NI_AI_ConvertClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClockTimebase,
.src = (int[]){
TRIGGER_LINE(7),
NI_AI_SampleClockTimebase,
NI_MasterTimebase,
NI_20MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_AI_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AI_HoldComplete,
.src = (int[]){
NI_AI_HoldCompleteEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
NI_CtrInternalOutput(1),
NI_AO_SampleClockTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(7),
NI_MasterTimebase,
NI_20MHzTimebase,
NI_100kHzTimebase,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
NI_AI_StartTrigger,
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_AO_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
NI_AnalogComparisonEvent,
0, /* Termination */
}
},
{
.dest = NI_MasterTimebase,
.src = (int[]){
TRIGGER_LINE(7),
NI_20MHzTimebase,
0, /* Termination */
}
},
{ /* Termination of list */
.dest = 0,
},
},
};
| linux-master | drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6030e.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6221.c
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#include "../ni_device_routes.h"
#include "all.h"
struct ni_device_routes ni_pci_6221_device_routes = {
.device = "pci-6221",
.routes = (struct ni_route_set[]){
{
.dest = NI_PFI(0),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(1),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(2),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(3),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(4),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(5),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(6),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(7),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(8),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(9),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(10),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(11),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(12),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(13),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(14),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_PFI(15),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_DI_SampleClock,
NI_DO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(2),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(3),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(4),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(5),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(6),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(7),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_CtrSource(0),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
NI_AI_ConvertClock,
NI_AI_PauseTrigger,
NI_AO_SampleClock,
NI_AO_StartTrigger,
NI_AO_PauseTrigger,
NI_10MHzRefClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrGate(1),
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrGate(0),
NI_20MHzTimebase,
NI_80MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(1),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
0, /* Termination */
}
},
{
.dest = NI_CtrAux(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrGate(1),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
0, /* Termination */
}
},
{
.dest = NI_CtrA(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_CtrA(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_CtrB(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_CtrB(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_CtrZ(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_CtrZ(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(1),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
0, /* Termination */
}
},
{
.dest = NI_CtrArmStartTrigger(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_AI_StartTrigger,
NI_AI_ReferenceTrigger,
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClockTimebase,
0, /* Termination */
}
},
{
.dest = NI_AI_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_20MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_AI_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
0, /* Termination */
}
},
{
.dest = NI_AI_ReferenceTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_ConvertClockTimebase,
0, /* Termination */
}
},
{
.dest = NI_AI_ConvertClockTimebase,
.src = (int[]){
NI_AI_SampleClockTimebase,
NI_20MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_AI_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AO_SampleClockTimebase,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_20MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_AO_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_AI_StartTrigger,
0, /* Termination */
}
},
{
.dest = NI_AO_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
0, /* Termination */
}
},
{
.dest = NI_DI_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{
.dest = NI_DO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
NI_PFI(10),
NI_PFI(11),
NI_PFI(12),
NI_PFI(13),
NI_PFI(14),
NI_PFI(15),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(6),
TRIGGER_LINE(7),
NI_CtrInternalOutput(0),
NI_CtrInternalOutput(1),
NI_AI_SampleClock,
NI_AI_ConvertClock,
NI_AO_SampleClock,
NI_FrequencyOutput,
NI_ChangeDetectionEvent,
0, /* Termination */
}
},
{ /* Termination of list */
.dest = 0,
},
},
};
| linux-master | drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6221.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_device_routes/pxi-6733.c
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#include "../ni_device_routes.h"
#include "all.h"
struct ni_device_routes ni_pxi_6733_device_routes = {
.device = "pxi-6733",
.routes = (struct ni_route_set[]){
{
.dest = NI_PFI(3),
.src = (int[]){
NI_CtrSource(1),
0, /* Termination */
}
},
{
.dest = NI_PFI(4),
.src = (int[]){
NI_CtrGate(1),
0, /* Termination */
}
},
{
.dest = NI_PFI(5),
.src = (int[]){
NI_AO_SampleClock,
0, /* Termination */
}
},
{
.dest = NI_PFI(6),
.src = (int[]){
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = NI_PFI(8),
.src = (int[]){
NI_CtrSource(0),
0, /* Termination */
}
},
{
.dest = NI_PFI(9),
.src = (int[]){
NI_CtrGate(0),
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(0),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(1),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(2),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(3),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(4),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(5),
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = TRIGGER_LINE(7),
.src = (int[]){
NI_20MHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(7),
PXI_Star,
NI_MasterTimebase,
NI_20MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_CtrSource(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(7),
PXI_Star,
NI_MasterTimebase,
NI_20MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(0),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
NI_CtrInternalOutput(1),
PXI_Star,
0, /* Termination */
}
},
{
.dest = NI_CtrGate(1),
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
NI_CtrInternalOutput(0),
PXI_Star,
0, /* Termination */
}
},
{
.dest = NI_CtrOut(0),
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
NI_CtrInternalOutput(0),
PXI_Star,
0, /* Termination */
}
},
{
.dest = NI_CtrOut(1),
.src = (int[]){
NI_CtrInternalOutput(1),
0, /* Termination */
}
},
{
.dest = PXI_Star,
.src = (int[]){
NI_CtrSource(0),
NI_CtrGate(0),
NI_CtrInternalOutput(0),
NI_CtrOut(0),
NI_AO_SampleClock,
NI_AO_StartTrigger,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClock,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
NI_CtrInternalOutput(1),
PXI_Star,
NI_AO_SampleClockTimebase,
0, /* Termination */
}
},
{
.dest = NI_AO_SampleClockTimebase,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
TRIGGER_LINE(7),
PXI_Star,
NI_MasterTimebase,
NI_20MHzTimebase,
NI_100kHzTimebase,
0, /* Termination */
}
},
{
.dest = NI_AO_StartTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
PXI_Star,
0, /* Termination */
}
},
{
.dest = NI_AO_PauseTrigger,
.src = (int[]){
NI_PFI(0),
NI_PFI(1),
NI_PFI(2),
NI_PFI(3),
NI_PFI(4),
NI_PFI(5),
NI_PFI(6),
NI_PFI(7),
NI_PFI(8),
NI_PFI(9),
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
PXI_Star,
0, /* Termination */
}
},
{
.dest = NI_DI_SampleClock,
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
PXI_Star,
NI_AO_SampleClock,
0, /* Termination */
}
},
{
.dest = NI_DO_SampleClock,
.src = (int[]){
TRIGGER_LINE(0),
TRIGGER_LINE(1),
TRIGGER_LINE(2),
TRIGGER_LINE(3),
TRIGGER_LINE(4),
TRIGGER_LINE(5),
PXI_Star,
NI_AO_SampleClock,
0, /* Termination */
}
},
{
.dest = NI_MasterTimebase,
.src = (int[]){
TRIGGER_LINE(7),
NI_20MHzTimebase,
0, /* Termination */
}
},
{ /* Termination of list */
.dest = 0,
},
},
};
| linux-master | drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6733.c |
// SPDX-License-Identifier: GPL-2.0+
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include <errno.h>
#include <stdlib.h>
typedef uint8_t u8;
typedef uint16_t u16;
typedef int8_t s8;
#define __user
#define BIT(x) (1UL << (x))
#define NI_ROUTE_VALUE_EXTERNAL_CONVERSION 1
#include "../ni_route_values.c"
#include "../ni_device_routes.c"
#include "all_cfiles.c"
#include <stdio.h>
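/*
 * Note the argument order: RVij() takes (src, dest), but the underlying
 * table is indexed as register_values[dest][src].
 */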
#define RVij(rv, src, dest) ((rv)->register_values[(dest)][(src)])
/*
 * write out
 * {
 *   "family" : "<family-name>",
 *   "register_values": {
 *     <destination0> : {src0: val0, src1: val1, ...},
 *     <destination1> : {src0: val0, src1: val1, ...},
 *     ...
 *   }
 * }
 */
void family_write(const struct family_route_values *rv, FILE *fp)
{
fprintf(fp,
" \"%s\" : {\n"
" # dest -> {src0:val0, src1:val1, ...}\n"
, rv->family);
for (unsigned int dest = NI_NAMES_BASE;
dest < (NI_NAMES_BASE + NI_NUM_NAMES);
++dest) {
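		/* probe for any source with a nonzero register value */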
unsigned int src = NI_NAMES_BASE;
for (; src < (NI_NAMES_BASE + NI_NUM_NAMES) &&
RVij(rv, B(src), B(dest)) == 0; ++src)
;
if (src >= (NI_NAMES_BASE + NI_NUM_NAMES))
continue; /* no data here */
fprintf(fp, " %u : {\n", dest);
for (src = NI_NAMES_BASE; src < (NI_NAMES_BASE + NI_NUM_NAMES);
++src) {
register_type r = RVij(rv, B(src), B(dest));
const char *M;
if (r == 0) {
continue;
} else if (MARKED_V(r)) {
M = "V";
} else if (MARKED_I(r)) {
M = "I";
} else if (MARKED_U(r)) {
M = "U";
} else {
fprintf(stderr,
"Invalid register marking %s[%u][%u] = %u\n",
rv->family, dest, src, r);
exit(1);
}
fprintf(fp, " %u : \"%s(%u)\",\n",
src, M, UNMARK(r));
}
fprintf(fp, " },\n");
}
fprintf(fp, " },\n\n");
}
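/*
 * For reference, a single family entry in the generated ni_values.py looks
 * like the following (the numeric signal values here are illustrative
 * placeholders, not output from a real run):
 *
 *   "ni_eseries" : {
 *     # dest -> {src0:val0, src1:val1, ...}
 *     1234 : {
 *       5678 : "I(7)",
 *     },
 *   },
 */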
bool is_valid_ni_sig(unsigned int sig)
{
return (sig >= NI_NAMES_BASE) && (sig < (NI_NAMES_BASE + NI_NUM_NAMES));
}
/*
 * write out
 * {
 *   "device" : "<device-name>",
 *   "routes": {
 *     <destination0> : [src0, src1, ...],
 *     <destination1> : [src0, src1, ...],
 *     ...
 *   }
 * }
 */
void device_write(const struct ni_device_routes *dR, FILE *fp)
{
fprintf(fp,
" \"%s\" : {\n"
" # dest -> [src0, src1, ...]\n"
, dR->device);
unsigned int i = 0;
while (dR->routes[i].dest != 0) {
if (!is_valid_ni_sig(dR->routes[i].dest)) {
fprintf(stderr,
"Invalid NI signal value [%u] for destination %s.[%u]\n",
dR->routes[i].dest, dR->device, i);
exit(1);
}
fprintf(fp, " %u : [", dR->routes[i].dest);
unsigned int j = 0;
while (dR->routes[i].src[j] != 0) {
if (!is_valid_ni_sig(dR->routes[i].src[j])) {
fprintf(stderr,
"Invalid NI signal value [%u] for source %s.[%u].[%u]\n",
dR->routes[i].src[j], dR->device, i, j);
exit(1);
}
fprintf(fp, "%u,", dR->routes[i].src[j]);
++j;
}
fprintf(fp, "],\n");
++i;
}
fprintf(fp, " },\n\n");
}
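/*
 * A device entry in ni_values.py takes the same shape, with lists of valid
 * sources instead of dicts, e.g. (signal values again illustrative):
 *
 *   "pci-6221" : {
 *     # dest -> [src0, src1, ...]
 *     1234 : [5678,5679,],
 *   },
 */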
int main(void)
{
FILE *fp = fopen("ni_values.py", "w");
/* write route register values */
fprintf(fp, "ni_route_values = {\n");
for (int i = 0; ni_all_route_values[i]; ++i)
family_write(ni_all_route_values[i], fp);
fprintf(fp, "}\n\n");
/* write valid device routes */
fprintf(fp, "ni_device_routes = {\n");
for (int i = 0; ni_device_routes_list[i]; ++i)
device_write(ni_device_routes_list[i], fp);
fprintf(fp, "}\n");
/* finish; close file */
fclose(fp);
return 0;
}
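/*
 * Usage sketch (an assumption, not documented here): build and run from
 * this tools directory, after the generated all_cfiles.c exists, e.g.
 *
 *   cc -o convert_c_to_py convert_c_to_py.c && ./convert_c_to_py
 *
 * which writes the tables above into ni_values.py for further processing.
 */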
| linux-master | drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_route_values/ni_eseries.c
* Route information for NI_ESERIES boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
 * This file includes a list of all the values of the various signal routes
 * available on NI e-series hardware. In many cases, one does not explicitly
 * make these routes; rather, one might indicate that something is used as
 * the source of one particular trigger or another (using *_src=TRIG_EXT).
*
* The contents of this file can be generated using the tools in
* comedi/drivers/ni_routing/tools. This file also contains specific notes to
* this family of devices.
*
* Please use those tools to help maintain the contents of this file, but be
 * mindful not to lose the notes already made in this file, since these notes
 * are critical to a complete understanding of the register values of this
* family.
*/
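/*
 * Illustrative sketch (not taken from this file): a caller requesting an
 * externally started AI command might set
 *
 *   cmd.start_src = TRIG_EXT;
 *   cmd.start_arg = CR_EDGE | NI_PFI(0);
 *
 * and the driver then consults tables like the one below to translate that
 * named signal into this family's register value.
 */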
#include "../ni_route_values.h"
#include "all.h"
/*
* Note that for e-series devices, the backplane TRIGGER_LINE(6) is generally
* not connected to RTSI(6).
*/
const struct family_route_values ni_eseries_route_values = {
.family = "ni_eseries",
.register_values = {
/*
* destination = {
* source = register value,
* ...
* }
*/
[B(NI_PFI(0))] = {
[B(NI_AI_StartTrigger)] = I(NI_PFI_OUTPUT_AI_START1),
},
[B(NI_PFI(1))] = {
[B(NI_AI_ReferenceTrigger)] = I(NI_PFI_OUTPUT_AI_START2),
},
[B(NI_PFI(2))] = {
[B(NI_AI_ConvertClock)] = I(NI_PFI_OUTPUT_AI_CONVERT),
},
[B(NI_PFI(3))] = {
[B(NI_CtrSource(1))] = I(NI_PFI_OUTPUT_G_SRC1),
},
[B(NI_PFI(4))] = {
[B(NI_CtrGate(1))] = I(NI_PFI_OUTPUT_G_GATE1),
},
[B(NI_PFI(5))] = {
[B(NI_AO_SampleClock)] = I(NI_PFI_OUTPUT_AO_UPDATE_N),
},
[B(NI_PFI(6))] = {
[B(NI_AO_StartTrigger)] = I(NI_PFI_OUTPUT_AO_START1),
},
[B(NI_PFI(7))] = {
[B(NI_AI_SampleClock)] = I(NI_PFI_OUTPUT_AI_START_PULSE),
},
[B(NI_PFI(8))] = {
[B(NI_CtrSource(0))] = I(NI_PFI_OUTPUT_G_SRC0),
},
[B(NI_PFI(9))] = {
[B(NI_CtrGate(0))] = I(NI_PFI_OUTPUT_G_GATE0),
},
[B(TRIGGER_LINE(0))] = {
[B(NI_RTSI_BRD(0))] = I(8),
[B(NI_RTSI_BRD(1))] = I(9),
[B(NI_RTSI_BRD(2))] = I(10),
[B(NI_RTSI_BRD(3))] = I(11),
[B(NI_CtrSource(0))] = I(5),
[B(NI_CtrGate(0))] = I(6),
[B(NI_AI_StartTrigger)] = I(0),
[B(NI_AI_ReferenceTrigger)] = I(1),
[B(NI_AI_ConvertClock)] = I(2),
[B(NI_AO_SampleClock)] = I(3),
[B(NI_AO_StartTrigger)] = I(4),
[B(NI_RGOUT0)] = I(7),
},
[B(TRIGGER_LINE(1))] = {
[B(NI_RTSI_BRD(0))] = I(8),
[B(NI_RTSI_BRD(1))] = I(9),
[B(NI_RTSI_BRD(2))] = I(10),
[B(NI_RTSI_BRD(3))] = I(11),
[B(NI_CtrSource(0))] = I(5),
[B(NI_CtrGate(0))] = I(6),
[B(NI_AI_StartTrigger)] = I(0),
[B(NI_AI_ReferenceTrigger)] = I(1),
[B(NI_AI_ConvertClock)] = I(2),
[B(NI_AO_SampleClock)] = I(3),
[B(NI_AO_StartTrigger)] = I(4),
[B(NI_RGOUT0)] = I(7),
},
[B(TRIGGER_LINE(2))] = {
[B(NI_RTSI_BRD(0))] = I(8),
[B(NI_RTSI_BRD(1))] = I(9),
[B(NI_RTSI_BRD(2))] = I(10),
[B(NI_RTSI_BRD(3))] = I(11),
[B(NI_CtrSource(0))] = I(5),
[B(NI_CtrGate(0))] = I(6),
[B(NI_AI_StartTrigger)] = I(0),
[B(NI_AI_ReferenceTrigger)] = I(1),
[B(NI_AI_ConvertClock)] = I(2),
[B(NI_AO_SampleClock)] = I(3),
[B(NI_AO_StartTrigger)] = I(4),
[B(NI_RGOUT0)] = I(7),
},
[B(TRIGGER_LINE(3))] = {
[B(NI_RTSI_BRD(0))] = I(8),
[B(NI_RTSI_BRD(1))] = I(9),
[B(NI_RTSI_BRD(2))] = I(10),
[B(NI_RTSI_BRD(3))] = I(11),
[B(NI_CtrSource(0))] = I(5),
[B(NI_CtrGate(0))] = I(6),
[B(NI_AI_StartTrigger)] = I(0),
[B(NI_AI_ReferenceTrigger)] = I(1),
[B(NI_AI_ConvertClock)] = I(2),
[B(NI_AO_SampleClock)] = I(3),
[B(NI_AO_StartTrigger)] = I(4),
[B(NI_RGOUT0)] = I(7),
},
[B(TRIGGER_LINE(4))] = {
[B(NI_RTSI_BRD(0))] = I(8),
[B(NI_RTSI_BRD(1))] = I(9),
[B(NI_RTSI_BRD(2))] = I(10),
[B(NI_RTSI_BRD(3))] = I(11),
[B(NI_CtrSource(0))] = I(5),
[B(NI_CtrGate(0))] = I(6),
[B(NI_AI_StartTrigger)] = I(0),
[B(NI_AI_ReferenceTrigger)] = I(1),
[B(NI_AI_ConvertClock)] = I(2),
[B(NI_AO_SampleClock)] = I(3),
[B(NI_AO_StartTrigger)] = I(4),
[B(NI_RGOUT0)] = I(7),
},
[B(TRIGGER_LINE(5))] = {
[B(NI_RTSI_BRD(0))] = I(8),
[B(NI_RTSI_BRD(1))] = I(9),
[B(NI_RTSI_BRD(2))] = I(10),
[B(NI_RTSI_BRD(3))] = I(11),
[B(NI_CtrSource(0))] = I(5),
[B(NI_CtrGate(0))] = I(6),
[B(NI_AI_StartTrigger)] = I(0),
[B(NI_AI_ReferenceTrigger)] = I(1),
[B(NI_AI_ConvertClock)] = I(2),
[B(NI_AO_SampleClock)] = I(3),
[B(NI_AO_StartTrigger)] = I(4),
[B(NI_RGOUT0)] = I(7),
},
[B(TRIGGER_LINE(6))] = {
[B(NI_RTSI_BRD(0))] = I(8),
[B(NI_RTSI_BRD(1))] = I(9),
[B(NI_RTSI_BRD(2))] = I(10),
[B(NI_RTSI_BRD(3))] = I(11),
[B(NI_CtrSource(0))] = I(5),
[B(NI_CtrGate(0))] = I(6),
[B(NI_AI_StartTrigger)] = I(0),
[B(NI_AI_ReferenceTrigger)] = I(1),
[B(NI_AI_ConvertClock)] = I(2),
[B(NI_AO_SampleClock)] = I(3),
[B(NI_AO_StartTrigger)] = I(4),
[B(NI_RGOUT0)] = I(7),
},
[B(TRIGGER_LINE(7))] = {
[B(NI_20MHzTimebase)] = I(NI_RTSI_OUTPUT_RTSI_OSC),
},
[B(NI_RTSI_BRD(0))] = {
[B(TRIGGER_LINE(0))] = I(0),
[B(TRIGGER_LINE(1))] = I(1),
[B(TRIGGER_LINE(2))] = I(2),
[B(TRIGGER_LINE(3))] = I(3),
[B(TRIGGER_LINE(4))] = I(4),
[B(TRIGGER_LINE(5))] = I(5),
[B(TRIGGER_LINE(6))] = I(6),
[B(PXI_Star)] = I(6),
[B(NI_AI_STOP)] = I(7),
},
[B(NI_RTSI_BRD(1))] = {
[B(TRIGGER_LINE(0))] = I(0),
[B(TRIGGER_LINE(1))] = I(1),
[B(TRIGGER_LINE(2))] = I(2),
[B(TRIGGER_LINE(3))] = I(3),
[B(TRIGGER_LINE(4))] = I(4),
[B(TRIGGER_LINE(5))] = I(5),
[B(TRIGGER_LINE(6))] = I(6),
[B(PXI_Star)] = I(6),
[B(NI_AI_STOP)] = I(7),
},
[B(NI_RTSI_BRD(2))] = {
[B(TRIGGER_LINE(0))] = I(0),
[B(TRIGGER_LINE(1))] = I(1),
[B(TRIGGER_LINE(2))] = I(2),
[B(TRIGGER_LINE(3))] = I(3),
[B(TRIGGER_LINE(4))] = I(4),
[B(TRIGGER_LINE(5))] = I(5),
[B(TRIGGER_LINE(6))] = I(6),
[B(PXI_Star)] = I(6),
[B(NI_AI_SampleClock)] = I(7),
},
[B(NI_RTSI_BRD(3))] = {
[B(TRIGGER_LINE(0))] = I(0),
[B(TRIGGER_LINE(1))] = I(1),
[B(TRIGGER_LINE(2))] = I(2),
[B(TRIGGER_LINE(3))] = I(3),
[B(TRIGGER_LINE(4))] = I(4),
[B(TRIGGER_LINE(5))] = I(5),
[B(TRIGGER_LINE(6))] = I(6),
[B(PXI_Star)] = I(6),
[B(NI_AI_SampleClock)] = I(7),
},
[B(NI_CtrSource(0))] = {
/* These are not currently implemented in ni modules */
[B(NI_PFI(0))] = U(1),
[B(NI_PFI(1))] = U(2),
[B(NI_PFI(2))] = U(3),
[B(NI_PFI(3))] = U(4),
[B(NI_PFI(4))] = U(5),
[B(NI_PFI(5))] = U(6),
[B(NI_PFI(6))] = U(7),
[B(NI_PFI(7))] = U(8),
[B(NI_PFI(8))] = U(9),
[B(NI_PFI(9))] = U(10),
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(NI_CtrInternalOutput(1))] = U(19),
[B(PXI_Star)] = U(17),
[B(NI_20MHzTimebase)] = U(0),
[B(NI_100kHzTimebase)] = U(18),
[B(NI_LogicLow)] = U(31),
},
[B(NI_CtrSource(1))] = {
/* These are not currently implemented in ni modules */
[B(NI_PFI(0))] = U(1),
[B(NI_PFI(1))] = U(2),
[B(NI_PFI(2))] = U(3),
[B(NI_PFI(3))] = U(4),
[B(NI_PFI(4))] = U(5),
[B(NI_PFI(5))] = U(6),
[B(NI_PFI(6))] = U(7),
[B(NI_PFI(7))] = U(8),
[B(NI_PFI(8))] = U(9),
[B(NI_PFI(9))] = U(10),
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(NI_CtrInternalOutput(0))] = U(19),
[B(PXI_Star)] = U(17),
[B(NI_20MHzTimebase)] = U(0),
[B(NI_100kHzTimebase)] = U(18),
[B(NI_LogicLow)] = U(31),
},
[B(NI_CtrGate(0))] = {
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(NI_CtrInternalOutput(1))] = I(20),
[B(PXI_Star)] = I(17),
[B(NI_AI_StartTrigger)] = I(21),
[B(NI_AI_ReferenceTrigger)] = I(18),
[B(NI_LogicLow)] = I(31),
},
[B(NI_CtrGate(1))] = {
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(NI_CtrInternalOutput(0))] = I(20),
[B(PXI_Star)] = I(17),
[B(NI_AI_StartTrigger)] = I(21),
[B(NI_AI_ReferenceTrigger)] = I(18),
[B(NI_LogicLow)] = I(31),
},
[B(NI_CtrOut(0))] = {
[B(TRIGGER_LINE(0))] = I(1),
[B(TRIGGER_LINE(1))] = I(2),
[B(TRIGGER_LINE(2))] = I(3),
[B(TRIGGER_LINE(3))] = I(4),
[B(TRIGGER_LINE(4))] = I(5),
[B(TRIGGER_LINE(5))] = I(6),
[B(TRIGGER_LINE(6))] = I(7),
[B(NI_CtrInternalOutput(0))] = I(0),
[B(PXI_Star)] = I(7),
},
[B(NI_CtrOut(1))] = {
[B(NI_CtrInternalOutput(1))] = I(0),
},
[B(NI_AI_SampleClock)] = {
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(NI_CtrInternalOutput(0))] = I(19),
[B(PXI_Star)] = I(17),
[B(NI_AI_SampleClockTimebase)] = I(0),
[B(NI_LogicLow)] = I(31),
},
[B(NI_AI_SampleClockTimebase)] = {
/* These are not currently implemented in ni modules */
[B(NI_PFI(0))] = U(1),
[B(NI_PFI(1))] = U(2),
[B(NI_PFI(2))] = U(3),
[B(NI_PFI(3))] = U(4),
[B(NI_PFI(4))] = U(5),
[B(NI_PFI(5))] = U(6),
[B(NI_PFI(6))] = U(7),
[B(NI_PFI(7))] = U(8),
[B(NI_PFI(8))] = U(9),
[B(NI_PFI(9))] = U(10),
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(PXI_Star)] = U(17),
[B(NI_20MHzTimebase)] = U(0),
[B(NI_100kHzTimebase)] = U(19),
[B(NI_LogicLow)] = U(31),
},
[B(NI_AI_StartTrigger)] = {
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(NI_CtrInternalOutput(0))] = I(18),
[B(PXI_Star)] = I(17),
[B(NI_LogicLow)] = I(31),
},
[B(NI_AI_ReferenceTrigger)] = {
/* These are not currently implemented in ni modules */
[B(NI_PFI(0))] = U(1),
[B(NI_PFI(1))] = U(2),
[B(NI_PFI(2))] = U(3),
[B(NI_PFI(3))] = U(4),
[B(NI_PFI(4))] = U(5),
[B(NI_PFI(5))] = U(6),
[B(NI_PFI(6))] = U(7),
[B(NI_PFI(7))] = U(8),
[B(NI_PFI(8))] = U(9),
[B(NI_PFI(9))] = U(10),
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(PXI_Star)] = U(17),
[B(NI_LogicLow)] = U(31),
},
[B(NI_AI_ConvertClock)] = {
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(NI_CtrInternalOutput(0))] = I(19),
[B(PXI_Star)] = I(17),
[B(NI_AI_ConvertClockTimebase)] = I(0),
[B(NI_LogicLow)] = I(31),
},
[B(NI_AI_ConvertClockTimebase)] = {
/* These are not currently implemented in ni modules */
[B(NI_AI_SampleClockTimebase)] = U(0),
[B(NI_20MHzTimebase)] = U(1),
},
[B(NI_AI_PauseTrigger)] = {
/* These are not currently implemented in ni modules */
[B(NI_PFI(0))] = U(1),
[B(NI_PFI(1))] = U(2),
[B(NI_PFI(2))] = U(3),
[B(NI_PFI(3))] = U(4),
[B(NI_PFI(4))] = U(5),
[B(NI_PFI(5))] = U(6),
[B(NI_PFI(6))] = U(7),
[B(NI_PFI(7))] = U(8),
[B(NI_PFI(8))] = U(9),
[B(NI_PFI(9))] = U(10),
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(PXI_Star)] = U(17),
[B(NI_LogicLow)] = U(31),
},
[B(NI_AO_SampleClock)] = {
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(NI_CtrInternalOutput(1))] = I(19),
[B(PXI_Star)] = I(17),
[B(NI_AO_SampleClockTimebase)] = I(0),
[B(NI_LogicLow)] = I(31),
},
[B(NI_AO_SampleClockTimebase)] = {
/* These are not currently implemented in ni modules */
[B(NI_PFI(0))] = U(1),
[B(NI_PFI(1))] = U(2),
[B(NI_PFI(2))] = U(3),
[B(NI_PFI(3))] = U(4),
[B(NI_PFI(4))] = U(5),
[B(NI_PFI(5))] = U(6),
[B(NI_PFI(6))] = U(7),
[B(NI_PFI(7))] = U(8),
[B(NI_PFI(8))] = U(9),
[B(NI_PFI(9))] = U(10),
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(PXI_Star)] = U(17),
[B(NI_20MHzTimebase)] = U(0),
[B(NI_100kHzTimebase)] = U(19),
[B(NI_LogicLow)] = U(31),
},
[B(NI_AO_StartTrigger)] = {
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(PXI_Star)] = I(17),
/*
* For the signal route
* (NI_AI_StartTrigger->NI_AO_StartTrigger), the MHDDK uses
* register value 18 while the DAQ-STC documents 19.
* We assume the MHDDK is correct, since it is a "working"
* example.
*/
[B(NI_AI_StartTrigger)] = I(18),
[B(NI_LogicLow)] = I(31),
},
[B(NI_AO_PauseTrigger)] = {
/* These are not currently implemented in ni modules */
[B(NI_PFI(0))] = U(1),
[B(NI_PFI(1))] = U(2),
[B(NI_PFI(2))] = U(3),
[B(NI_PFI(3))] = U(4),
[B(NI_PFI(4))] = U(5),
[B(NI_PFI(5))] = U(6),
[B(NI_PFI(6))] = U(7),
[B(NI_PFI(7))] = U(8),
[B(NI_PFI(8))] = U(9),
[B(NI_PFI(9))] = U(10),
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(PXI_Star)] = U(17),
[B(NI_LogicLow)] = U(31),
},
[B(NI_MasterTimebase)] = {
/* These are not currently implemented in ni modules */
[B(TRIGGER_LINE(7))] = U(1),
[B(PXI_Star)] = U(2),
[B(PXI_Clk10)] = U(3),
[B(NI_10MHzRefClock)] = U(0),
},
/*
* This symbol is not defined and nothing for it is implemented;
* it is included only because register data for it was found in
* the NI-STC documentation (exact location not recorded).
* [B(NI_FrequencyOutTimebase)] = {
* ** These are not currently implemented in ni modules **
* [B(NI_20MHzTimebase)] = U(0),
* [B(NI_100kHzTimebase)] = U(1),
* },
*/
[B(NI_RGOUT0)] = {
[B(NI_CtrInternalOutput(0))] = I(0),
[B(NI_CtrOut(0))] = I(1),
},
},
};
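/*
* Usage sketch (hypothetical, illustration only; kept under "#if 0" so it
* is never compiled): a consumer-side validity check built on this table.
* The helper name is invented here; the real consumers live in the comedi
* ni_routes code. This assumes B() from ../ni_route_values.h and that an
* absent route leaves its table entry zero-initialized.
*/
#if 0
static bool eseries_route_is_valid(int src, int dest)
{
	/* the outer index is the destination, the inner one the source */
	return ni_eseries_route_values.register_values[B(dest)][B(src)] != 0;
}
#endif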
| linux-master | drivers/comedi/drivers/ni_routing/ni_route_values/ni_eseries.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_route_values/ni_mseries.c
* Route information for NI_MSERIES boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* This file includes a list of all the values of the various signal routes
* available on NI m-series hardware. In many cases, one does not explicitly
* make these routes; rather, one might indicate that something is used as the
* source of one particular trigger or another (using *_src=TRIG_EXT).
*
* The contents of this file can be generated using the tools in
* comedi/drivers/ni_routing/tools. This file also contains notes specific
* to this family of devices.
*
* Please use those tools to help maintain the contents of this file, but be
* mindful to not lose the notes already made in this file, since these notes
* are critical to a complete understanding of the register values of this
* family.
*/
#include "../ni_route_values.h"
#include "all.h"
/*
* GATE SELECT NOTE:
* CtrAux and CtrArmStartTrigger register values are not documented in the
* DAQ-STC. There is some evidence that using CtrGate values is valid (see
* comedi.h). Some information and hints exist in the M-Series user manual
* (ni-62xx user manual, 371022K-01).
*/
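/*
* For example, under the CtrGate assumption above, the NI_CtrAux(0) entry
* below carries the same register value for NI_PFI(0), I(1), as the
* NI_CtrGate(0) entry does.
*/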
const struct family_route_values ni_mseries_route_values = {
.family = "ni_mseries",
.register_values = {
/*
* destination = {
* source = register value,
* ...
* }
*/
[B(NI_PFI(0))] = {
[B(TRIGGER_LINE(0))] = I(18),
[B(TRIGGER_LINE(1))] = I(19),
[B(TRIGGER_LINE(2))] = I(20),
[B(TRIGGER_LINE(3))] = I(21),
[B(TRIGGER_LINE(4))] = I(22),
[B(TRIGGER_LINE(5))] = I(23),
[B(TRIGGER_LINE(6))] = I(24),
[B(TRIGGER_LINE(7))] = I(25),
[B(NI_CtrSource(0))] = I(9),
[B(NI_CtrSource(1))] = I(4),
[B(NI_CtrGate(0))] = I(10),
[B(NI_CtrGate(1))] = I(5),
[B(NI_CtrInternalOutput(0))] = I(13),
[B(NI_CtrInternalOutput(1))] = I(14),
[B(PXI_Star)] = I(26),
[B(NI_AI_SampleClock)] = I(8),
[B(NI_AI_StartTrigger)] = I(1),
[B(NI_AI_ReferenceTrigger)] = I(2),
[B(NI_AI_ConvertClock)] = I(3),
[B(NI_AI_ExternalMUXClock)] = I(12),
[B(NI_AO_SampleClock)] = I(6),
[B(NI_AO_StartTrigger)] = I(7),
[B(NI_DI_SampleClock)] = I(29),
[B(NI_DO_SampleClock)] = I(30),
[B(NI_FrequencyOutput)] = I(15),
[B(NI_ChangeDetectionEvent)] = I(28),
[B(NI_AnalogComparisonEvent)] = I(17),
[B(NI_SCXI_Trig1)] = I(27),
[B(NI_ExternalStrobe)] = I(11),
[B(NI_PFI_DO)] = I(16),
},
[B(NI_PFI(1))] = {
[B(TRIGGER_LINE(0))] = I(18),
[B(TRIGGER_LINE(1))] = I(19),
[B(TRIGGER_LINE(2))] = I(20),
[B(TRIGGER_LINE(3))] = I(21),
[B(TRIGGER_LINE(4))] = I(22),
[B(TRIGGER_LINE(5))] = I(23),
[B(TRIGGER_LINE(6))] = I(24),
[B(TRIGGER_LINE(7))] = I(25),
[B(NI_CtrSource(0))] = I(9),
[B(NI_CtrSource(1))] = I(4),
[B(NI_CtrGate(0))] = I(10),
[B(NI_CtrGate(1))] = I(5),
[B(NI_CtrInternalOutput(0))] = I(13),
[B(NI_CtrInternalOutput(1))] = I(14),
[B(PXI_Star)] = I(26),
[B(NI_AI_SampleClock)] = I(8),
[B(NI_AI_StartTrigger)] = I(1),
[B(NI_AI_ReferenceTrigger)] = I(2),
[B(NI_AI_ConvertClock)] = I(3),
[B(NI_AI_ExternalMUXClock)] = I(12),
[B(NI_AO_SampleClock)] = I(6),
[B(NI_AO_StartTrigger)] = I(7),
[B(NI_DI_SampleClock)] = I(29),
[B(NI_DO_SampleClock)] = I(30),
[B(NI_FrequencyOutput)] = I(15),
[B(NI_ChangeDetectionEvent)] = I(28),
[B(NI_AnalogComparisonEvent)] = I(17),
[B(NI_SCXI_Trig1)] = I(27),
[B(NI_ExternalStrobe)] = I(11),
[B(NI_PFI_DO)] = I(16),
},
[B(NI_PFI(2))] = {
[B(TRIGGER_LINE(0))] = I(18),
[B(TRIGGER_LINE(1))] = I(19),
[B(TRIGGER_LINE(2))] = I(20),
[B(TRIGGER_LINE(3))] = I(21),
[B(TRIGGER_LINE(4))] = I(22),
[B(TRIGGER_LINE(5))] = I(23),
[B(TRIGGER_LINE(6))] = I(24),
[B(TRIGGER_LINE(7))] = I(25),
[B(NI_CtrSource(0))] = I(9),
[B(NI_CtrSource(1))] = I(4),
[B(NI_CtrGate(0))] = I(10),
[B(NI_CtrGate(1))] = I(5),
[B(NI_CtrInternalOutput(0))] = I(13),
[B(NI_CtrInternalOutput(1))] = I(14),
[B(PXI_Star)] = I(26),
[B(NI_AI_SampleClock)] = I(8),
[B(NI_AI_StartTrigger)] = I(1),
[B(NI_AI_ReferenceTrigger)] = I(2),
[B(NI_AI_ConvertClock)] = I(3),
[B(NI_AI_ExternalMUXClock)] = I(12),
[B(NI_AO_SampleClock)] = I(6),
[B(NI_AO_StartTrigger)] = I(7),
[B(NI_DI_SampleClock)] = I(29),
[B(NI_DO_SampleClock)] = I(30),
[B(NI_FrequencyOutput)] = I(15),
[B(NI_ChangeDetectionEvent)] = I(28),
[B(NI_AnalogComparisonEvent)] = I(17),
[B(NI_SCXI_Trig1)] = I(27),
[B(NI_ExternalStrobe)] = I(11),
[B(NI_PFI_DO)] = I(16),
},
[B(NI_PFI(3))] = {
[B(TRIGGER_LINE(0))] = I(18),
[B(TRIGGER_LINE(1))] = I(19),
[B(TRIGGER_LINE(2))] = I(20),
[B(TRIGGER_LINE(3))] = I(21),
[B(TRIGGER_LINE(4))] = I(22),
[B(TRIGGER_LINE(5))] = I(23),
[B(TRIGGER_LINE(6))] = I(24),
[B(TRIGGER_LINE(7))] = I(25),
[B(NI_CtrSource(0))] = I(9),
[B(NI_CtrSource(1))] = I(4),
[B(NI_CtrGate(0))] = I(10),
[B(NI_CtrGate(1))] = I(5),
[B(NI_CtrInternalOutput(0))] = I(13),
[B(NI_CtrInternalOutput(1))] = I(14),
[B(PXI_Star)] = I(26),
[B(NI_AI_SampleClock)] = I(8),
[B(NI_AI_StartTrigger)] = I(1),
[B(NI_AI_ReferenceTrigger)] = I(2),
[B(NI_AI_ConvertClock)] = I(3),
[B(NI_AI_ExternalMUXClock)] = I(12),
[B(NI_AO_SampleClock)] = I(6),
[B(NI_AO_StartTrigger)] = I(7),
[B(NI_DI_SampleClock)] = I(29),
[B(NI_DO_SampleClock)] = I(30),
[B(NI_FrequencyOutput)] = I(15),
[B(NI_ChangeDetectionEvent)] = I(28),
[B(NI_AnalogComparisonEvent)] = I(17),
[B(NI_SCXI_Trig1)] = I(27),
[B(NI_ExternalStrobe)] = I(11),
[B(NI_PFI_DO)] = I(16),
},
[B(NI_PFI(4))] = {
[B(TRIGGER_LINE(0))] = I(18),
[B(TRIGGER_LINE(1))] = I(19),
[B(TRIGGER_LINE(2))] = I(20),
[B(TRIGGER_LINE(3))] = I(21),
[B(TRIGGER_LINE(4))] = I(22),
[B(TRIGGER_LINE(5))] = I(23),
[B(TRIGGER_LINE(6))] = I(24),
[B(TRIGGER_LINE(7))] = I(25),
[B(NI_CtrSource(0))] = I(9),
[B(NI_CtrSource(1))] = I(4),
[B(NI_CtrGate(0))] = I(10),
[B(NI_CtrGate(1))] = I(5),
[B(NI_CtrInternalOutput(0))] = I(13),
[B(NI_CtrInternalOutput(1))] = I(14),
[B(PXI_Star)] = I(26),
[B(NI_AI_SampleClock)] = I(8),
[B(NI_AI_StartTrigger)] = I(1),
[B(NI_AI_ReferenceTrigger)] = I(2),
[B(NI_AI_ConvertClock)] = I(3),
[B(NI_AI_ExternalMUXClock)] = I(12),
[B(NI_AO_SampleClock)] = I(6),
[B(NI_AO_StartTrigger)] = I(7),
[B(NI_DI_SampleClock)] = I(29),
[B(NI_DO_SampleClock)] = I(30),
[B(NI_FrequencyOutput)] = I(15),
[B(NI_ChangeDetectionEvent)] = I(28),
[B(NI_AnalogComparisonEvent)] = I(17),
[B(NI_SCXI_Trig1)] = I(27),
[B(NI_ExternalStrobe)] = I(11),
[B(NI_PFI_DO)] = I(16),
},
[B(NI_PFI(5))] = {
[B(TRIGGER_LINE(0))] = I(18),
[B(TRIGGER_LINE(1))] = I(19),
[B(TRIGGER_LINE(2))] = I(20),
[B(TRIGGER_LINE(3))] = I(21),
[B(TRIGGER_LINE(4))] = I(22),
[B(TRIGGER_LINE(5))] = I(23),
[B(TRIGGER_LINE(6))] = I(24),
[B(TRIGGER_LINE(7))] = I(25),
[B(NI_CtrSource(0))] = I(9),
[B(NI_CtrSource(1))] = I(4),
[B(NI_CtrGate(0))] = I(10),
[B(NI_CtrGate(1))] = I(5),
[B(NI_CtrInternalOutput(0))] = I(13),
[B(NI_CtrInternalOutput(1))] = I(14),
[B(PXI_Star)] = I(26),
[B(NI_AI_SampleClock)] = I(8),
[B(NI_AI_StartTrigger)] = I(1),
[B(NI_AI_ReferenceTrigger)] = I(2),
[B(NI_AI_ConvertClock)] = I(3),
[B(NI_AI_ExternalMUXClock)] = I(12),
[B(NI_AO_SampleClock)] = I(6),
[B(NI_AO_StartTrigger)] = I(7),
[B(NI_DI_SampleClock)] = I(29),
[B(NI_DO_SampleClock)] = I(30),
[B(NI_FrequencyOutput)] = I(15),
[B(NI_ChangeDetectionEvent)] = I(28),
[B(NI_AnalogComparisonEvent)] = I(17),
[B(NI_SCXI_Trig1)] = I(27),
[B(NI_ExternalStrobe)] = I(11),
[B(NI_PFI_DO)] = I(16),
},
[B(NI_PFI(6))] = {
[B(TRIGGER_LINE(0))] = I(18),
[B(TRIGGER_LINE(1))] = I(19),
[B(TRIGGER_LINE(2))] = I(20),
[B(TRIGGER_LINE(3))] = I(21),
[B(TRIGGER_LINE(4))] = I(22),
[B(TRIGGER_LINE(5))] = I(23),
[B(TRIGGER_LINE(6))] = I(24),
[B(TRIGGER_LINE(7))] = I(25),
[B(NI_CtrSource(0))] = I(9),
[B(NI_CtrSource(1))] = I(4),
[B(NI_CtrGate(0))] = I(10),
[B(NI_CtrGate(1))] = I(5),
[B(NI_CtrInternalOutput(0))] = I(13),
[B(NI_CtrInternalOutput(1))] = I(14),
[B(PXI_Star)] = I(26),
[B(NI_AI_SampleClock)] = I(8),
[B(NI_AI_StartTrigger)] = I(1),
[B(NI_AI_ReferenceTrigger)] = I(2),
[B(NI_AI_ConvertClock)] = I(3),
[B(NI_AI_ExternalMUXClock)] = I(12),
[B(NI_AO_SampleClock)] = I(6),
[B(NI_AO_StartTrigger)] = I(7),
[B(NI_DI_SampleClock)] = I(29),
[B(NI_DO_SampleClock)] = I(30),
[B(NI_FrequencyOutput)] = I(15),
[B(NI_ChangeDetectionEvent)] = I(28),
[B(NI_AnalogComparisonEvent)] = I(17),
[B(NI_SCXI_Trig1)] = I(27),
[B(NI_ExternalStrobe)] = I(11),
[B(NI_PFI_DO)] = I(16),
},
[B(NI_PFI(7))] = {
[B(TRIGGER_LINE(0))] = I(18),
[B(TRIGGER_LINE(1))] = I(19),
[B(TRIGGER_LINE(2))] = I(20),
[B(TRIGGER_LINE(3))] = I(21),
[B(TRIGGER_LINE(4))] = I(22),
[B(TRIGGER_LINE(5))] = I(23),
[B(TRIGGER_LINE(6))] = I(24),
[B(TRIGGER_LINE(7))] = I(25),
[B(NI_CtrSource(0))] = I(9),
[B(NI_CtrSource(1))] = I(4),
[B(NI_CtrGate(0))] = I(10),
[B(NI_CtrGate(1))] = I(5),
[B(NI_CtrInternalOutput(0))] = I(13),
[B(NI_CtrInternalOutput(1))] = I(14),
[B(PXI_Star)] = I(26),
[B(NI_AI_SampleClock)] = I(8),
[B(NI_AI_StartTrigger)] = I(1),
[B(NI_AI_ReferenceTrigger)] = I(2),
[B(NI_AI_ConvertClock)] = I(3),
[B(NI_AI_ExternalMUXClock)] = I(12),
[B(NI_AO_SampleClock)] = I(6),
[B(NI_AO_StartTrigger)] = I(7),
[B(NI_DI_SampleClock)] = I(29),
[B(NI_DO_SampleClock)] = I(30),
[B(NI_FrequencyOutput)] = I(15),
[B(NI_ChangeDetectionEvent)] = I(28),
[B(NI_AnalogComparisonEvent)] = I(17),
[B(NI_SCXI_Trig1)] = I(27),
[B(NI_ExternalStrobe)] = I(11),
[B(NI_PFI_DO)] = I(16),
},
[B(NI_PFI(8))] = {
[B(TRIGGER_LINE(0))] = I(18),
[B(TRIGGER_LINE(1))] = I(19),
[B(TRIGGER_LINE(2))] = I(20),
[B(TRIGGER_LINE(3))] = I(21),
[B(TRIGGER_LINE(4))] = I(22),
[B(TRIGGER_LINE(5))] = I(23),
[B(TRIGGER_LINE(6))] = I(24),
[B(TRIGGER_LINE(7))] = I(25),
[B(NI_CtrSource(0))] = I(9),
[B(NI_CtrSource(1))] = I(4),
[B(NI_CtrGate(0))] = I(10),
[B(NI_CtrGate(1))] = I(5),
[B(NI_CtrInternalOutput(0))] = I(13),
[B(NI_CtrInternalOutput(1))] = I(14),
[B(PXI_Star)] = I(26),
[B(NI_AI_SampleClock)] = I(8),
[B(NI_AI_StartTrigger)] = I(1),
[B(NI_AI_ReferenceTrigger)] = I(2),
[B(NI_AI_ConvertClock)] = I(3),
[B(NI_AI_ExternalMUXClock)] = I(12),
[B(NI_AO_SampleClock)] = I(6),
[B(NI_AO_StartTrigger)] = I(7),
[B(NI_DI_SampleClock)] = I(29),
[B(NI_DO_SampleClock)] = I(30),
[B(NI_FrequencyOutput)] = I(15),
[B(NI_ChangeDetectionEvent)] = I(28),
[B(NI_AnalogComparisonEvent)] = I(17),
[B(NI_SCXI_Trig1)] = I(27),
[B(NI_ExternalStrobe)] = I(11),
[B(NI_PFI_DO)] = I(16),
},
[B(NI_PFI(9))] = {
[B(TRIGGER_LINE(0))] = I(18),
[B(TRIGGER_LINE(1))] = I(19),
[B(TRIGGER_LINE(2))] = I(20),
[B(TRIGGER_LINE(3))] = I(21),
[B(TRIGGER_LINE(4))] = I(22),
[B(TRIGGER_LINE(5))] = I(23),
[B(TRIGGER_LINE(6))] = I(24),
[B(TRIGGER_LINE(7))] = I(25),
[B(NI_CtrSource(0))] = I(9),
[B(NI_CtrSource(1))] = I(4),
[B(NI_CtrGate(0))] = I(10),
[B(NI_CtrGate(1))] = I(5),
[B(NI_CtrInternalOutput(0))] = I(13),
[B(NI_CtrInternalOutput(1))] = I(14),
[B(PXI_Star)] = I(26),
[B(NI_AI_SampleClock)] = I(8),
[B(NI_AI_StartTrigger)] = I(1),
[B(NI_AI_ReferenceTrigger)] = I(2),
[B(NI_AI_ConvertClock)] = I(3),
[B(NI_AI_ExternalMUXClock)] = I(12),
[B(NI_AO_SampleClock)] = I(6),
[B(NI_AO_StartTrigger)] = I(7),
[B(NI_DI_SampleClock)] = I(29),
[B(NI_DO_SampleClock)] = I(30),
[B(NI_FrequencyOutput)] = I(15),
[B(NI_ChangeDetectionEvent)] = I(28),
[B(NI_AnalogComparisonEvent)] = I(17),
[B(NI_SCXI_Trig1)] = I(27),
[B(NI_ExternalStrobe)] = I(11),
[B(NI_PFI_DO)] = I(16),
},
[B(NI_PFI(10))] = {
[B(TRIGGER_LINE(0))] = I(18),
[B(TRIGGER_LINE(1))] = I(19),
[B(TRIGGER_LINE(2))] = I(20),
[B(TRIGGER_LINE(3))] = I(21),
[B(TRIGGER_LINE(4))] = I(22),
[B(TRIGGER_LINE(5))] = I(23),
[B(TRIGGER_LINE(6))] = I(24),
[B(TRIGGER_LINE(7))] = I(25),
[B(NI_CtrSource(0))] = I(9),
[B(NI_CtrSource(1))] = I(4),
[B(NI_CtrGate(0))] = I(10),
[B(NI_CtrGate(1))] = I(5),
[B(NI_CtrInternalOutput(0))] = I(13),
[B(NI_CtrInternalOutput(1))] = I(14),
[B(PXI_Star)] = I(26),
[B(NI_AI_SampleClock)] = I(8),
[B(NI_AI_StartTrigger)] = I(1),
[B(NI_AI_ReferenceTrigger)] = I(2),
[B(NI_AI_ConvertClock)] = I(3),
[B(NI_AI_ExternalMUXClock)] = I(12),
[B(NI_AO_SampleClock)] = I(6),
[B(NI_AO_StartTrigger)] = I(7),
[B(NI_DI_SampleClock)] = I(29),
[B(NI_DO_SampleClock)] = I(30),
[B(NI_FrequencyOutput)] = I(15),
[B(NI_ChangeDetectionEvent)] = I(28),
[B(NI_AnalogComparisonEvent)] = I(17),
[B(NI_SCXI_Trig1)] = I(27),
[B(NI_ExternalStrobe)] = I(11),
[B(NI_PFI_DO)] = I(16),
},
[B(NI_PFI(11))] = {
[B(TRIGGER_LINE(0))] = I(18),
[B(TRIGGER_LINE(1))] = I(19),
[B(TRIGGER_LINE(2))] = I(20),
[B(TRIGGER_LINE(3))] = I(21),
[B(TRIGGER_LINE(4))] = I(22),
[B(TRIGGER_LINE(5))] = I(23),
[B(TRIGGER_LINE(6))] = I(24),
[B(TRIGGER_LINE(7))] = I(25),
[B(NI_CtrSource(0))] = I(9),
[B(NI_CtrSource(1))] = I(4),
[B(NI_CtrGate(0))] = I(10),
[B(NI_CtrGate(1))] = I(5),
[B(NI_CtrInternalOutput(0))] = I(13),
[B(NI_CtrInternalOutput(1))] = I(14),
[B(PXI_Star)] = I(26),
[B(NI_AI_SampleClock)] = I(8),
[B(NI_AI_StartTrigger)] = I(1),
[B(NI_AI_ReferenceTrigger)] = I(2),
[B(NI_AI_ConvertClock)] = I(3),
[B(NI_AI_ExternalMUXClock)] = I(12),
[B(NI_AO_SampleClock)] = I(6),
[B(NI_AO_StartTrigger)] = I(7),
[B(NI_DI_SampleClock)] = I(29),
[B(NI_DO_SampleClock)] = I(30),
[B(NI_FrequencyOutput)] = I(15),
[B(NI_ChangeDetectionEvent)] = I(28),
[B(NI_AnalogComparisonEvent)] = I(17),
[B(NI_SCXI_Trig1)] = I(27),
[B(NI_ExternalStrobe)] = I(11),
[B(NI_PFI_DO)] = I(16),
},
[B(NI_PFI(12))] = {
[B(TRIGGER_LINE(0))] = I(18),
[B(TRIGGER_LINE(1))] = I(19),
[B(TRIGGER_LINE(2))] = I(20),
[B(TRIGGER_LINE(3))] = I(21),
[B(TRIGGER_LINE(4))] = I(22),
[B(TRIGGER_LINE(5))] = I(23),
[B(TRIGGER_LINE(6))] = I(24),
[B(TRIGGER_LINE(7))] = I(25),
[B(NI_CtrSource(0))] = I(9),
[B(NI_CtrSource(1))] = I(4),
[B(NI_CtrGate(0))] = I(10),
[B(NI_CtrGate(1))] = I(5),
[B(NI_CtrInternalOutput(0))] = I(13),
[B(NI_CtrInternalOutput(1))] = I(14),
[B(PXI_Star)] = I(26),
[B(NI_AI_SampleClock)] = I(8),
[B(NI_AI_StartTrigger)] = I(1),
[B(NI_AI_ReferenceTrigger)] = I(2),
[B(NI_AI_ConvertClock)] = I(3),
[B(NI_AI_ExternalMUXClock)] = I(12),
[B(NI_AO_SampleClock)] = I(6),
[B(NI_AO_StartTrigger)] = I(7),
[B(NI_DI_SampleClock)] = I(29),
[B(NI_DO_SampleClock)] = I(30),
[B(NI_FrequencyOutput)] = I(15),
[B(NI_ChangeDetectionEvent)] = I(28),
[B(NI_AnalogComparisonEvent)] = I(17),
[B(NI_SCXI_Trig1)] = I(27),
[B(NI_ExternalStrobe)] = I(11),
[B(NI_PFI_DO)] = I(16),
},
[B(NI_PFI(13))] = {
[B(TRIGGER_LINE(0))] = I(18),
[B(TRIGGER_LINE(1))] = I(19),
[B(TRIGGER_LINE(2))] = I(20),
[B(TRIGGER_LINE(3))] = I(21),
[B(TRIGGER_LINE(4))] = I(22),
[B(TRIGGER_LINE(5))] = I(23),
[B(TRIGGER_LINE(6))] = I(24),
[B(TRIGGER_LINE(7))] = I(25),
[B(NI_CtrSource(0))] = I(9),
[B(NI_CtrSource(1))] = I(4),
[B(NI_CtrGate(0))] = I(10),
[B(NI_CtrGate(1))] = I(5),
[B(NI_CtrInternalOutput(0))] = I(13),
[B(NI_CtrInternalOutput(1))] = I(14),
[B(PXI_Star)] = I(26),
[B(NI_AI_SampleClock)] = I(8),
[B(NI_AI_StartTrigger)] = I(1),
[B(NI_AI_ReferenceTrigger)] = I(2),
[B(NI_AI_ConvertClock)] = I(3),
[B(NI_AI_ExternalMUXClock)] = I(12),
[B(NI_AO_SampleClock)] = I(6),
[B(NI_AO_StartTrigger)] = I(7),
[B(NI_DI_SampleClock)] = I(29),
[B(NI_DO_SampleClock)] = I(30),
[B(NI_FrequencyOutput)] = I(15),
[B(NI_ChangeDetectionEvent)] = I(28),
[B(NI_AnalogComparisonEvent)] = I(17),
[B(NI_SCXI_Trig1)] = I(27),
[B(NI_ExternalStrobe)] = I(11),
[B(NI_PFI_DO)] = I(16),
},
[B(NI_PFI(14))] = {
[B(TRIGGER_LINE(0))] = I(18),
[B(TRIGGER_LINE(1))] = I(19),
[B(TRIGGER_LINE(2))] = I(20),
[B(TRIGGER_LINE(3))] = I(21),
[B(TRIGGER_LINE(4))] = I(22),
[B(TRIGGER_LINE(5))] = I(23),
[B(TRIGGER_LINE(6))] = I(24),
[B(TRIGGER_LINE(7))] = I(25),
[B(NI_CtrSource(0))] = I(9),
[B(NI_CtrSource(1))] = I(4),
[B(NI_CtrGate(0))] = I(10),
[B(NI_CtrGate(1))] = I(5),
[B(NI_CtrInternalOutput(0))] = I(13),
[B(NI_CtrInternalOutput(1))] = I(14),
[B(PXI_Star)] = I(26),
[B(NI_AI_SampleClock)] = I(8),
[B(NI_AI_StartTrigger)] = I(1),
[B(NI_AI_ReferenceTrigger)] = I(2),
[B(NI_AI_ConvertClock)] = I(3),
[B(NI_AI_ExternalMUXClock)] = I(12),
[B(NI_AO_SampleClock)] = I(6),
[B(NI_AO_StartTrigger)] = I(7),
[B(NI_DI_SampleClock)] = I(29),
[B(NI_DO_SampleClock)] = I(30),
[B(NI_FrequencyOutput)] = I(15),
[B(NI_ChangeDetectionEvent)] = I(28),
[B(NI_AnalogComparisonEvent)] = I(17),
[B(NI_SCXI_Trig1)] = I(27),
[B(NI_ExternalStrobe)] = I(11),
[B(NI_PFI_DO)] = I(16),
},
[B(NI_PFI(15))] = {
[B(TRIGGER_LINE(0))] = I(18),
[B(TRIGGER_LINE(1))] = I(19),
[B(TRIGGER_LINE(2))] = I(20),
[B(TRIGGER_LINE(3))] = I(21),
[B(TRIGGER_LINE(4))] = I(22),
[B(TRIGGER_LINE(5))] = I(23),
[B(TRIGGER_LINE(6))] = I(24),
[B(TRIGGER_LINE(7))] = I(25),
[B(NI_CtrSource(0))] = I(9),
[B(NI_CtrSource(1))] = I(4),
[B(NI_CtrGate(0))] = I(10),
[B(NI_CtrGate(1))] = I(5),
[B(NI_CtrInternalOutput(0))] = I(13),
[B(NI_CtrInternalOutput(1))] = I(14),
[B(PXI_Star)] = I(26),
[B(NI_AI_SampleClock)] = I(8),
[B(NI_AI_StartTrigger)] = I(1),
[B(NI_AI_ReferenceTrigger)] = I(2),
[B(NI_AI_ConvertClock)] = I(3),
[B(NI_AI_ExternalMUXClock)] = I(12),
[B(NI_AO_SampleClock)] = I(6),
[B(NI_AO_StartTrigger)] = I(7),
[B(NI_DI_SampleClock)] = I(29),
[B(NI_DO_SampleClock)] = I(30),
[B(NI_FrequencyOutput)] = I(15),
[B(NI_ChangeDetectionEvent)] = I(28),
[B(NI_AnalogComparisonEvent)] = I(17),
[B(NI_SCXI_Trig1)] = I(27),
[B(NI_ExternalStrobe)] = I(11),
[B(NI_PFI_DO)] = I(16),
},
[B(TRIGGER_LINE(0))] = {
[B(NI_RTSI_BRD(0))] = I(8),
[B(NI_RTSI_BRD(1))] = I(9),
[B(NI_RTSI_BRD(2))] = I(10),
[B(NI_RTSI_BRD(3))] = I(11),
[B(NI_CtrSource(0))] = I(5),
[B(NI_CtrGate(0))] = I(6),
[B(NI_AI_StartTrigger)] = I(0),
[B(NI_AI_ReferenceTrigger)] = I(1),
[B(NI_AI_ConvertClock)] = I(2),
[B(NI_AO_SampleClock)] = I(3),
[B(NI_AO_StartTrigger)] = I(4),
/*
* For the (*->TRIGGER_LINE(*)) MUX, a value of 12 should
* select RTSI_OSC according to the MHDDK m-series source.
* Hints in comedi suggest that this is actually a 20MHz
* source on 628x cards(?)
*/
[B(NI_10MHzRefClock)] = I(12),
[B(NI_RGOUT0)] = I(7),
},
[B(TRIGGER_LINE(1))] = {
[B(NI_RTSI_BRD(0))] = I(8),
[B(NI_RTSI_BRD(1))] = I(9),
[B(NI_RTSI_BRD(2))] = I(10),
[B(NI_RTSI_BRD(3))] = I(11),
[B(NI_CtrSource(0))] = I(5),
[B(NI_CtrGate(0))] = I(6),
[B(NI_AI_StartTrigger)] = I(0),
[B(NI_AI_ReferenceTrigger)] = I(1),
[B(NI_AI_ConvertClock)] = I(2),
[B(NI_AO_SampleClock)] = I(3),
[B(NI_AO_StartTrigger)] = I(4),
/*
* For the (*->TRIGGER_LINE(*)) MUX, a value of 12 should
* select RTSI_OSC according to the MHDDK m-series source.
* Hints in comedi suggest that this is actually a 20MHz
* source on 628x cards(?)
*/
[B(NI_10MHzRefClock)] = I(12),
[B(NI_RGOUT0)] = I(7),
},
[B(TRIGGER_LINE(2))] = {
[B(NI_RTSI_BRD(0))] = I(8),
[B(NI_RTSI_BRD(1))] = I(9),
[B(NI_RTSI_BRD(2))] = I(10),
[B(NI_RTSI_BRD(3))] = I(11),
[B(NI_CtrSource(0))] = I(5),
[B(NI_CtrGate(0))] = I(6),
[B(NI_AI_StartTrigger)] = I(0),
[B(NI_AI_ReferenceTrigger)] = I(1),
[B(NI_AI_ConvertClock)] = I(2),
[B(NI_AO_SampleClock)] = I(3),
[B(NI_AO_StartTrigger)] = I(4),
/*
* For the (*->TRIGGER_LINE(*)) MUX, a value of 12 should
* select RTSI_OSC according to the MHDDK m-series source.
* Hints in comedi suggest that this is actually a 20MHz
* source on 628x cards(?)
*/
[B(NI_10MHzRefClock)] = I(12),
[B(NI_RGOUT0)] = I(7),
},
[B(TRIGGER_LINE(3))] = {
[B(NI_RTSI_BRD(0))] = I(8),
[B(NI_RTSI_BRD(1))] = I(9),
[B(NI_RTSI_BRD(2))] = I(10),
[B(NI_RTSI_BRD(3))] = I(11),
[B(NI_CtrSource(0))] = I(5),
[B(NI_CtrGate(0))] = I(6),
[B(NI_AI_StartTrigger)] = I(0),
[B(NI_AI_ReferenceTrigger)] = I(1),
[B(NI_AI_ConvertClock)] = I(2),
[B(NI_AO_SampleClock)] = I(3),
[B(NI_AO_StartTrigger)] = I(4),
/*
* For the (*->TRIGGER_LINE(*)) MUX, a value of 12 should
* select RTSI_OSC according to the MHDDK m-series source.
* Hints in comedi suggest that this is actually a 20MHz
* source on 628x cards(?)
*/
[B(NI_10MHzRefClock)] = I(12),
[B(NI_RGOUT0)] = I(7),
},
[B(TRIGGER_LINE(4))] = {
[B(NI_RTSI_BRD(0))] = I(8),
[B(NI_RTSI_BRD(1))] = I(9),
[B(NI_RTSI_BRD(2))] = I(10),
[B(NI_RTSI_BRD(3))] = I(11),
[B(NI_CtrSource(0))] = I(5),
[B(NI_CtrGate(0))] = I(6),
[B(NI_AI_StartTrigger)] = I(0),
[B(NI_AI_ReferenceTrigger)] = I(1),
[B(NI_AI_ConvertClock)] = I(2),
[B(NI_AO_SampleClock)] = I(3),
[B(NI_AO_StartTrigger)] = I(4),
/*
* For the (*->TRIGGER_LINE(*)) MUX, a value of 12 should
* select RTSI_OSC according to the MHDDK m-series source.
* Hints in comedi suggest that this is actually a 20MHz
* source on 628x cards(?)
*/
[B(NI_10MHzRefClock)] = I(12),
[B(NI_RGOUT0)] = I(7),
},
[B(TRIGGER_LINE(5))] = {
[B(NI_RTSI_BRD(0))] = I(8),
[B(NI_RTSI_BRD(1))] = I(9),
[B(NI_RTSI_BRD(2))] = I(10),
[B(NI_RTSI_BRD(3))] = I(11),
[B(NI_CtrSource(0))] = I(5),
[B(NI_CtrGate(0))] = I(6),
[B(NI_AI_StartTrigger)] = I(0),
[B(NI_AI_ReferenceTrigger)] = I(1),
[B(NI_AI_ConvertClock)] = I(2),
[B(NI_AO_SampleClock)] = I(3),
[B(NI_AO_StartTrigger)] = I(4),
/*
* For the (*->TRIGGER_LINE(*)) MUX, a value of 12 should
* select RTSI_OSC according to the MHDDK m-series source.
* Hints in comedi suggest that this is actually a 20MHz
* source on 628x cards(?)
*/
[B(NI_10MHzRefClock)] = I(12),
[B(NI_RGOUT0)] = I(7),
},
[B(TRIGGER_LINE(6))] = {
[B(NI_RTSI_BRD(0))] = I(8),
[B(NI_RTSI_BRD(1))] = I(9),
[B(NI_RTSI_BRD(2))] = I(10),
[B(NI_RTSI_BRD(3))] = I(11),
[B(NI_CtrSource(0))] = I(5),
[B(NI_CtrGate(0))] = I(6),
[B(NI_AI_StartTrigger)] = I(0),
[B(NI_AI_ReferenceTrigger)] = I(1),
[B(NI_AI_ConvertClock)] = I(2),
[B(NI_AO_SampleClock)] = I(3),
[B(NI_AO_StartTrigger)] = I(4),
/*
* For the (*->TRIGGER_LINE(*)) MUX, a value of 12 should
* select RTSI_OSC according to the MHDDK m-series source.
* Hints in comedi suggest that this is actually a 20MHz
* source on 628x cards(?)
*/
[B(NI_10MHzRefClock)] = I(12),
[B(NI_RGOUT0)] = I(7),
},
[B(TRIGGER_LINE(7))] = {
[B(NI_RTSI_BRD(0))] = I(8),
[B(NI_RTSI_BRD(1))] = I(9),
[B(NI_RTSI_BRD(2))] = I(10),
[B(NI_RTSI_BRD(3))] = I(11),
[B(NI_CtrSource(0))] = I(5),
[B(NI_CtrGate(0))] = I(6),
[B(NI_AI_StartTrigger)] = I(0),
[B(NI_AI_ReferenceTrigger)] = I(1),
[B(NI_AI_ConvertClock)] = I(2),
[B(NI_AO_SampleClock)] = I(3),
[B(NI_AO_StartTrigger)] = I(4),
/*
* For the (*->TRIGGER_LINE(*)) MUX, a value of 12 should
* select RTSI_OSC according to the MHDDK m-series source.
* Hints in comedi suggest that this is actually a 20MHz
* source on 628x cards(?)
*/
[B(NI_10MHzRefClock)] = I(12),
[B(NI_RGOUT0)] = I(7),
},
[B(NI_RTSI_BRD(0))] = {
[B(NI_PFI(0))] = I(0),
[B(NI_PFI(1))] = I(1),
[B(NI_PFI(2))] = I(2),
[B(NI_PFI(3))] = I(3),
[B(NI_PFI(4))] = I(4),
[B(NI_PFI(5))] = I(5),
[B(NI_CtrSource(1))] = I(11),
[B(NI_CtrGate(1))] = I(10),
[B(NI_CtrZ(0))] = I(13),
[B(NI_CtrZ(1))] = I(12),
[B(NI_CtrOut(1))] = I(9),
[B(NI_AI_SampleClock)] = I(15),
[B(NI_AI_PauseTrigger)] = I(7),
[B(NI_AO_PauseTrigger)] = I(6),
[B(NI_FrequencyOutput)] = I(8),
[B(NI_AnalogComparisonEvent)] = I(14),
},
[B(NI_RTSI_BRD(1))] = {
[B(NI_PFI(0))] = I(0),
[B(NI_PFI(1))] = I(1),
[B(NI_PFI(2))] = I(2),
[B(NI_PFI(3))] = I(3),
[B(NI_PFI(4))] = I(4),
[B(NI_PFI(5))] = I(5),
[B(NI_CtrSource(1))] = I(11),
[B(NI_CtrGate(1))] = I(10),
[B(NI_CtrZ(0))] = I(13),
[B(NI_CtrZ(1))] = I(12),
[B(NI_CtrOut(1))] = I(9),
[B(NI_AI_SampleClock)] = I(15),
[B(NI_AI_PauseTrigger)] = I(7),
[B(NI_AO_PauseTrigger)] = I(6),
[B(NI_FrequencyOutput)] = I(8),
[B(NI_AnalogComparisonEvent)] = I(14),
},
[B(NI_RTSI_BRD(2))] = {
[B(NI_PFI(0))] = I(0),
[B(NI_PFI(1))] = I(1),
[B(NI_PFI(2))] = I(2),
[B(NI_PFI(3))] = I(3),
[B(NI_PFI(4))] = I(4),
[B(NI_PFI(5))] = I(5),
[B(NI_CtrSource(1))] = I(11),
[B(NI_CtrGate(1))] = I(10),
[B(NI_CtrZ(0))] = I(13),
[B(NI_CtrZ(1))] = I(12),
[B(NI_CtrOut(1))] = I(9),
[B(NI_AI_SampleClock)] = I(15),
[B(NI_AI_PauseTrigger)] = I(7),
[B(NI_AO_PauseTrigger)] = I(6),
[B(NI_FrequencyOutput)] = I(8),
[B(NI_AnalogComparisonEvent)] = I(14),
},
[B(NI_RTSI_BRD(3))] = {
[B(NI_PFI(0))] = I(0),
[B(NI_PFI(1))] = I(1),
[B(NI_PFI(2))] = I(2),
[B(NI_PFI(3))] = I(3),
[B(NI_PFI(4))] = I(4),
[B(NI_PFI(5))] = I(5),
[B(NI_CtrSource(1))] = I(11),
[B(NI_CtrGate(1))] = I(10),
[B(NI_CtrZ(0))] = I(13),
[B(NI_CtrZ(1))] = I(12),
[B(NI_CtrOut(1))] = I(9),
[B(NI_AI_SampleClock)] = I(15),
[B(NI_AI_PauseTrigger)] = I(7),
[B(NI_AO_PauseTrigger)] = I(6),
[B(NI_FrequencyOutput)] = I(8),
[B(NI_AnalogComparisonEvent)] = I(14),
},
[B(NI_CtrSource(0))] = {
/* These are not currently implemented in ni modules */
[B(NI_PFI(0))] = U(1),
[B(NI_PFI(1))] = U(2),
[B(NI_PFI(2))] = U(3),
[B(NI_PFI(3))] = U(4),
[B(NI_PFI(4))] = U(5),
[B(NI_PFI(5))] = U(6),
[B(NI_PFI(6))] = U(7),
[B(NI_PFI(7))] = U(8),
[B(NI_PFI(8))] = U(9),
[B(NI_PFI(9))] = U(10),
[B(NI_PFI(10))] = U(21),
[B(NI_PFI(11))] = U(22),
[B(NI_PFI(12))] = U(23),
[B(NI_PFI(13))] = U(24),
[B(NI_PFI(14))] = U(25),
[B(NI_PFI(15))] = U(26),
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(TRIGGER_LINE(7))] = U(27),
[B(NI_CtrGate(1))] = U(Gi_SRC(20, 0)),
[B(NI_CtrInternalOutput(1))] = U(19),
[B(PXI_Star)] = U(Gi_SRC(20, 1)),
[B(PXI_Clk10)] = U(29),
[B(NI_20MHzTimebase)] = U(0),
[B(NI_80MHzTimebase)] = U(Gi_SRC(30, 0)),
[B(NI_100kHzTimebase)] = U(18),
[B(NI_AnalogComparisonEvent)] = U(Gi_SRC(30, 1)),
[B(NI_LogicLow)] = U(31),
},
[B(NI_CtrSource(1))] = {
/* These are not currently implemented in ni modules */
[B(NI_PFI(0))] = U(1),
[B(NI_PFI(1))] = U(2),
[B(NI_PFI(2))] = U(3),
[B(NI_PFI(3))] = U(4),
[B(NI_PFI(4))] = U(5),
[B(NI_PFI(5))] = U(6),
[B(NI_PFI(6))] = U(7),
[B(NI_PFI(7))] = U(8),
[B(NI_PFI(8))] = U(9),
[B(NI_PFI(9))] = U(10),
[B(NI_PFI(10))] = U(21),
[B(NI_PFI(11))] = U(22),
[B(NI_PFI(12))] = U(23),
[B(NI_PFI(13))] = U(24),
[B(NI_PFI(14))] = U(25),
[B(NI_PFI(15))] = U(26),
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(TRIGGER_LINE(7))] = U(27),
[B(NI_CtrGate(0))] = U(Gi_SRC(20, 0)),
[B(NI_CtrInternalOutput(0))] = U(19),
[B(PXI_Star)] = U(Gi_SRC(20, 1)),
[B(PXI_Clk10)] = U(29),
[B(NI_20MHzTimebase)] = U(0),
[B(NI_80MHzTimebase)] = U(Gi_SRC(30, 0)),
[B(NI_100kHzTimebase)] = U(18),
[B(NI_AnalogComparisonEvent)] = U(Gi_SRC(30, 1)),
[B(NI_LogicLow)] = U(31),
},
[B(NI_CtrGate(0))] = {
[B(NI_PFI(0))] = I(1 /* source: mhddk examples */),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(NI_PFI(10))] = I(21),
[B(NI_PFI(11))] = I(22),
[B(NI_PFI(12))] = I(23),
[B(NI_PFI(13))] = I(24),
[B(NI_PFI(14))] = I(25),
[B(NI_PFI(15))] = I(26),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(TRIGGER_LINE(7))] = I(27),
[B(NI_CtrSource(1))] = I(29),
/* source for following line: mhddk GP examples */
[B(NI_CtrInternalOutput(1))] = I(20),
[B(PXI_Star)] = I(19),
[B(NI_AI_StartTrigger)] = I(28),
[B(NI_AI_ReferenceTrigger)] = I(18),
[B(NI_AnalogComparisonEvent)] = I(30),
[B(NI_LogicLow)] = I(31),
},
[B(NI_CtrGate(1))] = {
/* source for following line: mhddk examples */
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(NI_PFI(10))] = I(21),
[B(NI_PFI(11))] = I(22),
[B(NI_PFI(12))] = I(23),
[B(NI_PFI(13))] = I(24),
[B(NI_PFI(14))] = I(25),
[B(NI_PFI(15))] = I(26),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(TRIGGER_LINE(7))] = I(27),
[B(NI_CtrSource(0))] = I(29),
/* source for following line: mhddk GP examples */
[B(NI_CtrInternalOutput(0))] = I(20),
[B(PXI_Star)] = I(19),
[B(NI_AI_StartTrigger)] = I(28),
[B(NI_AI_ReferenceTrigger)] = I(18),
[B(NI_AnalogComparisonEvent)] = I(30),
[B(NI_LogicLow)] = I(31),
},
[B(NI_CtrAux(0))] = {
/* these are just a guess; see GATE SELECT NOTE */
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(NI_PFI(10))] = I(21),
[B(NI_PFI(11))] = I(22),
[B(NI_PFI(12))] = I(23),
[B(NI_PFI(13))] = I(24),
[B(NI_PFI(14))] = I(25),
[B(NI_PFI(15))] = I(26),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(TRIGGER_LINE(7))] = I(27),
[B(NI_CtrSource(1))] = I(29),
/* source for following line: mhddk GP examples */
[B(NI_CtrInternalOutput(1))] = I(20),
[B(PXI_Star)] = I(19),
[B(NI_AI_StartTrigger)] = I(28),
[B(NI_AI_ReferenceTrigger)] = I(18),
[B(NI_AnalogComparisonEvent)] = I(30),
[B(NI_LogicLow)] = I(31),
},
[B(NI_CtrAux(1))] = {
/* these are just a guess; see GATE SELECT NOTE */
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(NI_PFI(10))] = I(21),
[B(NI_PFI(11))] = I(22),
[B(NI_PFI(12))] = I(23),
[B(NI_PFI(13))] = I(24),
[B(NI_PFI(14))] = I(25),
[B(NI_PFI(15))] = I(26),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(TRIGGER_LINE(7))] = I(27),
[B(NI_CtrSource(0))] = I(29),
/* source for following line: mhddk GP examples */
[B(NI_CtrInternalOutput(0))] = I(20),
[B(PXI_Star)] = I(19),
[B(NI_AI_StartTrigger)] = I(28),
[B(NI_AI_ReferenceTrigger)] = I(18),
[B(NI_AnalogComparisonEvent)] = I(30),
[B(NI_LogicLow)] = I(31),
},
[B(NI_CtrA(0))] = {
/*
* See nimseries/Examples for the outputs; the inputs are a
* guess based on device routes shown in NI-MAX.
* See the M-Series user manual (371022K-01).
*/
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(NI_PFI(10))] = I(21),
[B(NI_PFI(11))] = I(22),
[B(NI_PFI(12))] = I(23),
[B(NI_PFI(13))] = I(24),
[B(NI_PFI(14))] = I(25),
[B(NI_PFI(15))] = I(26),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(TRIGGER_LINE(7))] = I(27),
[B(PXI_Star)] = I(20),
[B(NI_AnalogComparisonEvent)] = I(30),
[B(NI_LogicLow)] = I(31),
},
[B(NI_CtrA(1))] = {
/*
* See nimseries/Examples for the outputs; the inputs are a
* guess based on device routes shown in NI-MAX.
* See the M-Series user manual (371022K-01).
*/
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(NI_PFI(10))] = I(21),
[B(NI_PFI(11))] = I(22),
[B(NI_PFI(12))] = I(23),
[B(NI_PFI(13))] = I(24),
[B(NI_PFI(14))] = I(25),
[B(NI_PFI(15))] = I(26),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(TRIGGER_LINE(7))] = I(27),
[B(PXI_Star)] = I(20),
[B(NI_AnalogComparisonEvent)] = I(30),
[B(NI_LogicLow)] = I(31),
},
[B(NI_CtrB(0))] = {
/*
* See nimseries/Examples for the outputs; the inputs are a
* guess based on device routes shown in NI-MAX.
* See the M-Series user manual (371022K-01).
*/
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(NI_PFI(10))] = I(21),
[B(NI_PFI(11))] = I(22),
[B(NI_PFI(12))] = I(23),
[B(NI_PFI(13))] = I(24),
[B(NI_PFI(14))] = I(25),
[B(NI_PFI(15))] = I(26),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(TRIGGER_LINE(7))] = I(27),
[B(PXI_Star)] = I(20),
[B(NI_AnalogComparisonEvent)] = I(30),
[B(NI_LogicLow)] = I(31),
},
[B(NI_CtrB(1))] = {
/*
* See nimseries/Examples for the outputs; the inputs are a
* guess based on device routes shown in NI-MAX.
* See the M-Series user manual (371022K-01).
*/
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(NI_PFI(10))] = I(21),
[B(NI_PFI(11))] = I(22),
[B(NI_PFI(12))] = I(23),
[B(NI_PFI(13))] = I(24),
[B(NI_PFI(14))] = I(25),
[B(NI_PFI(15))] = I(26),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(TRIGGER_LINE(7))] = I(27),
[B(PXI_Star)] = I(20),
[B(NI_AnalogComparisonEvent)] = I(30),
[B(NI_LogicLow)] = I(31),
},
[B(NI_CtrZ(0))] = {
/*
* See nimseries/Examples for the outputs; the inputs are a
* guess based on device routes shown in NI-MAX.
* See the M-Series user manual (371022K-01).
*/
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(NI_PFI(10))] = I(21),
[B(NI_PFI(11))] = I(22),
[B(NI_PFI(12))] = I(23),
[B(NI_PFI(13))] = I(24),
[B(NI_PFI(14))] = I(25),
[B(NI_PFI(15))] = I(26),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(TRIGGER_LINE(7))] = I(27),
[B(PXI_Star)] = I(20),
[B(NI_AnalogComparisonEvent)] = I(30),
[B(NI_LogicLow)] = I(31),
},
[B(NI_CtrZ(1))] = {
/*
* See nimseries/Examples for the outputs; the inputs are a
* guess based on device routes shown in NI-MAX.
* See the M-Series user manual (371022K-01).
*/
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(NI_PFI(10))] = I(21),
[B(NI_PFI(11))] = I(22),
[B(NI_PFI(12))] = I(23),
[B(NI_PFI(13))] = I(24),
[B(NI_PFI(14))] = I(25),
[B(NI_PFI(15))] = I(26),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(TRIGGER_LINE(7))] = I(27),
[B(PXI_Star)] = I(20),
[B(NI_AnalogComparisonEvent)] = I(30),
[B(NI_LogicLow)] = I(31),
},
[B(NI_CtrArmStartTrigger(0))] = {
/* these are just a guess; see GATE SELECT NOTE */
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(NI_PFI(10))] = I(21),
[B(NI_PFI(11))] = I(22),
[B(NI_PFI(12))] = I(23),
[B(NI_PFI(13))] = I(24),
[B(NI_PFI(14))] = I(25),
[B(NI_PFI(15))] = I(26),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(TRIGGER_LINE(7))] = I(27),
[B(NI_CtrSource(1))] = I(29),
/* source for following line: mhddk GP examples */
[B(NI_CtrInternalOutput(1))] = I(20),
[B(PXI_Star)] = I(19),
[B(NI_AI_StartTrigger)] = I(28),
[B(NI_AI_ReferenceTrigger)] = I(18),
[B(NI_AnalogComparisonEvent)] = I(30),
[B(NI_LogicLow)] = I(31),
},
[B(NI_CtrArmStartTrigger(1))] = {
/* these are just a guess; see GATE SELECT NOTE */
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(NI_PFI(10))] = I(21),
[B(NI_PFI(11))] = I(22),
[B(NI_PFI(12))] = I(23),
[B(NI_PFI(13))] = I(24),
[B(NI_PFI(14))] = I(25),
[B(NI_PFI(15))] = I(26),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(TRIGGER_LINE(7))] = I(27),
[B(NI_CtrSource(0))] = I(29),
/* source for following line: mhddk GP examples */
[B(NI_CtrInternalOutput(0))] = I(20),
[B(PXI_Star)] = I(19),
[B(NI_AI_StartTrigger)] = I(28),
[B(NI_AI_ReferenceTrigger)] = I(18),
[B(NI_AnalogComparisonEvent)] = I(30),
[B(NI_LogicLow)] = I(31),
},
[B(NI_CtrOut(0))] = {
[B(TRIGGER_LINE(0))] = I(1),
[B(TRIGGER_LINE(1))] = I(2),
[B(TRIGGER_LINE(2))] = I(3),
[B(TRIGGER_LINE(3))] = I(4),
[B(TRIGGER_LINE(4))] = I(5),
[B(TRIGGER_LINE(5))] = I(6),
[B(TRIGGER_LINE(6))] = I(7),
[B(NI_CtrInternalOutput(0))] = I(0),
},
[B(NI_CtrOut(1))] = {
[B(NI_CtrInternalOutput(1))] = I(0),
},
[B(NI_AI_SampleClock)] = {
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(NI_PFI(10))] = I(21),
[B(NI_PFI(11))] = I(22),
[B(NI_PFI(12))] = I(23),
[B(NI_PFI(13))] = I(24),
[B(NI_PFI(14))] = I(25),
[B(NI_PFI(15))] = I(26),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(TRIGGER_LINE(7))] = I(27),
[B(NI_CtrInternalOutput(0))] = I(19),
[B(NI_CtrInternalOutput(1))] = I(28),
[B(PXI_Star)] = I(20),
[B(NI_AI_SampleClockTimebase)] = I(0),
[B(NI_AnalogComparisonEvent)] = I(30),
[B(NI_SCXI_Trig1)] = I(29),
[B(NI_LogicLow)] = I(31),
},
[B(NI_AI_SampleClockTimebase)] = {
/* These are not currently implemented in ni modules */
[B(NI_PFI(0))] = U(1),
[B(NI_PFI(1))] = U(2),
[B(NI_PFI(2))] = U(3),
[B(NI_PFI(3))] = U(4),
[B(NI_PFI(4))] = U(5),
[B(NI_PFI(5))] = U(6),
[B(NI_PFI(6))] = U(7),
[B(NI_PFI(7))] = U(8),
[B(NI_PFI(8))] = U(9),
[B(NI_PFI(9))] = U(10),
[B(NI_PFI(10))] = U(21),
[B(NI_PFI(11))] = U(22),
[B(NI_PFI(12))] = U(23),
[B(NI_PFI(13))] = U(24),
[B(NI_PFI(14))] = U(25),
[B(NI_PFI(15))] = U(26),
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(TRIGGER_LINE(7))] = U(27),
[B(PXI_Star)] = U(20),
[B(PXI_Clk10)] = U(29),
/*
* For the routes (*->NI_AI_SampleClockTimebase) and
* (*->NI_AO_SampleClockTimebase), tMSeries.h of the MHDDK
* shows the value 0 selecting ground (case ground?) and the
* value 28 selecting TIMEBASE 1.
*/
[B(NI_20MHzTimebase)] = U(28),
[B(NI_100kHzTimebase)] = U(19),
[B(NI_AnalogComparisonEvent)] = U(30),
[B(NI_LogicLow)] = U(31),
[B(NI_CaseGround)] = U(0),
},
[B(NI_AI_StartTrigger)] = {
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(NI_PFI(10))] = I(21),
[B(NI_PFI(11))] = I(22),
[B(NI_PFI(12))] = I(23),
[B(NI_PFI(13))] = I(24),
[B(NI_PFI(14))] = I(25),
[B(NI_PFI(15))] = I(26),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(TRIGGER_LINE(7))] = I(27),
[B(NI_CtrInternalOutput(0))] = I(18),
[B(NI_CtrInternalOutput(1))] = I(19),
[B(PXI_Star)] = I(20),
[B(NI_AnalogComparisonEvent)] = I(30),
[B(NI_LogicLow)] = I(31),
},
[B(NI_AI_ReferenceTrigger)] = {
/* These are not currently implemented in ni modules */
[B(NI_PFI(0))] = U(1),
[B(NI_PFI(1))] = U(2),
[B(NI_PFI(2))] = U(3),
[B(NI_PFI(3))] = U(4),
[B(NI_PFI(4))] = U(5),
[B(NI_PFI(5))] = U(6),
[B(NI_PFI(6))] = U(7),
[B(NI_PFI(7))] = U(8),
[B(NI_PFI(8))] = U(9),
[B(NI_PFI(9))] = U(10),
[B(NI_PFI(10))] = U(21),
[B(NI_PFI(11))] = U(22),
[B(NI_PFI(12))] = U(23),
[B(NI_PFI(13))] = U(24),
[B(NI_PFI(14))] = U(25),
[B(NI_PFI(15))] = U(26),
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(TRIGGER_LINE(7))] = U(27),
[B(PXI_Star)] = U(20),
[B(NI_AnalogComparisonEvent)] = U(30),
[B(NI_LogicLow)] = U(31),
},
[B(NI_AI_ConvertClock)] = {
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(NI_PFI(10))] = I(21),
[B(NI_PFI(11))] = I(22),
[B(NI_PFI(12))] = I(23),
[B(NI_PFI(13))] = I(24),
[B(NI_PFI(14))] = I(25),
[B(NI_PFI(15))] = I(26),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(TRIGGER_LINE(7))] = I(27),
/* source for following line: mhddk example headers */
[B(NI_CtrInternalOutput(0))] = I(19),
/* source for following line: mhddk example headers */
[B(NI_CtrInternalOutput(1))] = I(18),
[B(PXI_Star)] = I(20),
[B(NI_AI_ConvertClockTimebase)] = I(0),
[B(NI_AnalogComparisonEvent)] = I(30),
[B(NI_LogicLow)] = I(31),
},
[B(NI_AI_ConvertClockTimebase)] = {
/* These are not currently implemented in ni modules */
[B(NI_AI_SampleClockTimebase)] = U(0),
[B(NI_20MHzTimebase)] = U(1),
},
[B(NI_AI_PauseTrigger)] = {
/* These are not currently implemented in ni modules */
[B(NI_PFI(0))] = U(1),
[B(NI_PFI(1))] = U(2),
[B(NI_PFI(2))] = U(3),
[B(NI_PFI(3))] = U(4),
[B(NI_PFI(4))] = U(5),
[B(NI_PFI(5))] = U(6),
[B(NI_PFI(6))] = U(7),
[B(NI_PFI(7))] = U(8),
[B(NI_PFI(8))] = U(9),
[B(NI_PFI(9))] = U(10),
[B(NI_PFI(10))] = U(21),
[B(NI_PFI(11))] = U(22),
[B(NI_PFI(12))] = U(23),
[B(NI_PFI(13))] = U(24),
[B(NI_PFI(14))] = U(25),
[B(NI_PFI(15))] = U(26),
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(TRIGGER_LINE(7))] = U(27),
[B(PXI_Star)] = U(20),
[B(NI_AnalogComparisonEvent)] = U(30),
[B(NI_LogicLow)] = U(31),
},
[B(NI_AO_SampleClock)] = {
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(NI_PFI(10))] = I(21),
[B(NI_PFI(11))] = I(22),
[B(NI_PFI(12))] = I(23),
[B(NI_PFI(13))] = I(24),
[B(NI_PFI(14))] = I(25),
[B(NI_PFI(15))] = I(26),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(TRIGGER_LINE(7))] = I(27),
[B(NI_CtrInternalOutput(0))] = I(18),
[B(NI_CtrInternalOutput(1))] = I(19),
[B(PXI_Star)] = I(20),
[B(NI_AO_SampleClockTimebase)] = I(0),
[B(NI_AnalogComparisonEvent)] = I(30),
[B(NI_LogicLow)] = I(31),
},
[B(NI_AO_SampleClockTimebase)] = {
/* These are not currently implemented in ni modules */
[B(NI_PFI(0))] = U(1),
[B(NI_PFI(1))] = U(2),
[B(NI_PFI(2))] = U(3),
[B(NI_PFI(3))] = U(4),
[B(NI_PFI(4))] = U(5),
[B(NI_PFI(5))] = U(6),
[B(NI_PFI(6))] = U(7),
[B(NI_PFI(7))] = U(8),
[B(NI_PFI(8))] = U(9),
[B(NI_PFI(9))] = U(10),
[B(NI_PFI(10))] = U(21),
[B(NI_PFI(11))] = U(22),
[B(NI_PFI(12))] = U(23),
[B(NI_PFI(13))] = U(24),
[B(NI_PFI(14))] = U(25),
[B(NI_PFI(15))] = U(26),
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(TRIGGER_LINE(7))] = U(27),
[B(PXI_Star)] = U(20),
[B(PXI_Clk10)] = U(29),
/*
* For routes (*->NI_AI_SampleClockTimebase) and
* (*->NI_AO_SampleClockTimebase), tMSeries.h of MHDDK
 * shows the value 0 selecting ground (case ground?) and
 * the value 28 selecting TIMEBASE 1.
*/
[B(NI_20MHzTimebase)] = U(28),
[B(NI_100kHzTimebase)] = U(19),
[B(NI_AnalogComparisonEvent)] = U(30),
[B(NI_LogicLow)] = U(31),
[B(NI_CaseGround)] = U(0),
},
[B(NI_AO_StartTrigger)] = {
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(NI_PFI(10))] = I(21),
[B(NI_PFI(11))] = I(22),
[B(NI_PFI(12))] = I(23),
[B(NI_PFI(13))] = I(24),
[B(NI_PFI(14))] = I(25),
[B(NI_PFI(15))] = I(26),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(TRIGGER_LINE(7))] = I(27),
[B(PXI_Star)] = I(20),
/*
* for the signal route
* (NI_AI_StartTrigger->NI_AO_StartTrigger), DAQ-STC &
* MHDDK disagreed for e-series. MHDDK for m-series
* agrees with DAQ-STC description and uses the value 18
* for the route
* (NI_AI_ReferenceTrigger->NI_AO_StartTrigger). The
* m-series devices are supposed to have DAQ-STC2.
* There are no DAQ-STC2 docs to compare with.
*/
[B(NI_AI_StartTrigger)] = I(19),
[B(NI_AI_ReferenceTrigger)] = I(18),
[B(NI_AnalogComparisonEvent)] = I(30),
[B(NI_LogicLow)] = I(31),
},
[B(NI_AO_PauseTrigger)] = {
/* These are not currently implemented in ni modules */
[B(NI_PFI(0))] = U(1),
[B(NI_PFI(1))] = U(2),
[B(NI_PFI(2))] = U(3),
[B(NI_PFI(3))] = U(4),
[B(NI_PFI(4))] = U(5),
[B(NI_PFI(5))] = U(6),
[B(NI_PFI(6))] = U(7),
[B(NI_PFI(7))] = U(8),
[B(NI_PFI(8))] = U(9),
[B(NI_PFI(9))] = U(10),
[B(NI_PFI(10))] = U(21),
[B(NI_PFI(11))] = U(22),
[B(NI_PFI(12))] = U(23),
[B(NI_PFI(13))] = U(24),
[B(NI_PFI(14))] = U(25),
[B(NI_PFI(15))] = U(26),
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(TRIGGER_LINE(7))] = U(27),
[B(PXI_Star)] = U(20),
[B(NI_AnalogComparisonEvent)] = U(30),
[B(NI_LogicLow)] = U(31),
},
[B(NI_DI_SampleClock)] = {
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(NI_PFI(10))] = I(21),
[B(NI_PFI(11))] = I(22),
[B(NI_PFI(12))] = I(23),
[B(NI_PFI(13))] = I(24),
[B(NI_PFI(14))] = I(25),
[B(NI_PFI(15))] = I(26),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(TRIGGER_LINE(7))] = I(27),
[B(NI_CtrInternalOutput(0))] = I(28),
[B(NI_CtrInternalOutput(1))] = I(29),
[B(PXI_Star)] = I(20),
[B(NI_AI_SampleClock)] = I(18),
[B(NI_AI_ConvertClock)] = I(19),
[B(NI_AO_SampleClock)] = I(31),
[B(NI_FrequencyOutput)] = I(32),
[B(NI_ChangeDetectionEvent)] = I(33),
[B(NI_CaseGround)] = I(0),
},
[B(NI_DO_SampleClock)] = {
[B(NI_PFI(0))] = I(1),
[B(NI_PFI(1))] = I(2),
[B(NI_PFI(2))] = I(3),
[B(NI_PFI(3))] = I(4),
[B(NI_PFI(4))] = I(5),
[B(NI_PFI(5))] = I(6),
[B(NI_PFI(6))] = I(7),
[B(NI_PFI(7))] = I(8),
[B(NI_PFI(8))] = I(9),
[B(NI_PFI(9))] = I(10),
[B(NI_PFI(10))] = I(21),
[B(NI_PFI(11))] = I(22),
[B(NI_PFI(12))] = I(23),
[B(NI_PFI(13))] = I(24),
[B(NI_PFI(14))] = I(25),
[B(NI_PFI(15))] = I(26),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(TRIGGER_LINE(7))] = I(27),
[B(NI_CtrInternalOutput(0))] = I(28),
[B(NI_CtrInternalOutput(1))] = I(29),
[B(PXI_Star)] = I(20),
[B(NI_AI_SampleClock)] = I(18),
[B(NI_AI_ConvertClock)] = I(19),
[B(NI_AO_SampleClock)] = I(31),
[B(NI_FrequencyOutput)] = I(32),
[B(NI_ChangeDetectionEvent)] = I(33),
[B(NI_CaseGround)] = I(0),
},
[B(NI_MasterTimebase)] = {
/* These are not currently implemented in ni modules */
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(TRIGGER_LINE(7))] = U(27),
[B(PXI_Star)] = U(20),
[B(PXI_Clk10)] = U(29),
[B(NI_10MHzRefClock)] = U(0),
},
/*
 * This symbol is not defined and nothing for it is
 * implemented; it is included here only because data for it was
 * found in the NI-STC documentation (exact location not recorded).
* [B(NI_FrequencyOutTimebase)] = {
* ** These are not currently implemented in ni modules **
* [B(NI_20MHzTimebase)] = U(0),
* [B(NI_100kHzTimebase)] = U(1),
* },
*/
[B(NI_RGOUT0)] = {
[B(NI_CtrInternalOutput(0))] = I(0),
[B(NI_CtrOut(0))] = I(1),
},
},
};
| linux-master | drivers/comedi/drivers/ni_routing/ni_route_values/ni_mseries.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/ni_route_values/ni_660x.c
* Route information for NI_660X boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
 * This file includes a list of all the values of various signal routes
* available on NI 660x hardware. In many cases, one does not explicitly make
* these routes, rather one might indicate that something is used as the source
* of one particular trigger or another (using *_src=TRIG_EXT).
*
* The contents of this file can be generated using the tools in
* comedi/drivers/ni_routing/tools. This file also contains specific notes to
* this family of devices.
*
 * Please use those tools to help maintain the contents of this file, but be
 * mindful not to lose the notes already made in this file, since these notes
 * are critical to a complete understanding of the register values of this
 * family.
*/
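/*
 * Quick reading guide (informal): the table below is indexed as
 * register_values[B(destination)][B(source)].  Values wrapped in I() are
 * taken as valid from NI documentation; values wrapped in U() are
 * unverified and not yet used by the ni modules.  See ../ni_route_values.h
 * for the exact encoding of these markers.
 */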
#include "../ni_route_values.h"
#include "all.h"
const struct family_route_values ni_660x_route_values = {
.family = "ni_660x",
.register_values = {
/*
* destination = {
* source = register value,
* ...
* }
*/
[B(NI_PFI(8))] = {
[B(NI_CtrInternalOutput(7))] = I(1),
},
[B(NI_PFI(10))] = {
[B(NI_CtrGate(7))] = I(1),
},
[B(NI_PFI(11))] = {
[B(NI_CtrSource(7))] = I(1),
},
[B(NI_PFI(12))] = {
[B(NI_CtrInternalOutput(6))] = I(1),
},
[B(NI_PFI(14))] = {
[B(NI_CtrGate(6))] = I(1),
},
[B(NI_PFI(15))] = {
[B(NI_CtrSource(6))] = I(1),
},
[B(NI_PFI(16))] = {
[B(NI_CtrInternalOutput(5))] = I(1),
},
[B(NI_PFI(18))] = {
[B(NI_CtrGate(5))] = I(1),
},
[B(NI_PFI(19))] = {
[B(NI_CtrSource(5))] = I(1),
},
[B(NI_PFI(20))] = {
[B(NI_CtrInternalOutput(4))] = I(1),
},
[B(NI_PFI(22))] = {
[B(NI_CtrGate(4))] = I(1),
},
[B(NI_PFI(23))] = {
[B(NI_CtrSource(4))] = I(1),
},
[B(NI_PFI(24))] = {
[B(NI_CtrInternalOutput(3))] = I(1),
},
[B(NI_PFI(26))] = {
[B(NI_CtrGate(3))] = I(1),
},
[B(NI_PFI(27))] = {
[B(NI_CtrSource(3))] = I(1),
},
[B(NI_PFI(28))] = {
[B(NI_CtrInternalOutput(2))] = I(1),
},
[B(NI_PFI(30))] = {
[B(NI_CtrGate(2))] = I(1),
},
[B(NI_PFI(31))] = {
[B(NI_CtrSource(2))] = I(1),
},
[B(NI_PFI(32))] = {
[B(NI_CtrInternalOutput(1))] = I(1),
},
[B(NI_PFI(34))] = {
[B(NI_CtrGate(1))] = I(1),
},
[B(NI_PFI(35))] = {
[B(NI_CtrSource(1))] = I(1),
},
[B(NI_PFI(36))] = {
[B(NI_CtrInternalOutput(0))] = I(1),
},
[B(NI_PFI(38))] = {
[B(NI_CtrGate(0))] = I(1),
},
[B(NI_PFI(39))] = {
[B(NI_CtrSource(0))] = I(1),
},
[B(NI_CtrSource(0))] = {
/* These are not currently implemented in ni modules */
[B(NI_PFI(11))] = U(9),
[B(NI_PFI(15))] = U(8),
[B(NI_PFI(19))] = U(7),
[B(NI_PFI(23))] = U(6),
[B(NI_PFI(27))] = U(5),
[B(NI_PFI(31))] = U(4),
[B(NI_PFI(35))] = U(3),
[B(NI_PFI(39))] = U(2 /* or 1 */),
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(NI_CtrGate(1))] = U(10),
[B(NI_20MHzTimebase)] = U(0),
[B(NI_80MHzTimebase)] = U(30),
[B(NI_100kHzTimebase)] = U(18),
[B(NI_LogicLow)] = U(31),
},
[B(NI_CtrSource(1))] = {
/* These are not currently implemented in ni modules */
[B(NI_PFI(11))] = U(9),
[B(NI_PFI(15))] = U(8),
[B(NI_PFI(19))] = U(7),
[B(NI_PFI(23))] = U(6),
[B(NI_PFI(27))] = U(5),
[B(NI_PFI(31))] = U(4),
[B(NI_PFI(35))] = U(3 /* or 1 */),
[B(NI_PFI(39))] = U(2),
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(NI_CtrGate(2))] = U(10),
[B(NI_20MHzTimebase)] = U(0),
[B(NI_80MHzTimebase)] = U(30),
[B(NI_100kHzTimebase)] = U(18),
[B(NI_LogicLow)] = U(31),
},
[B(NI_CtrSource(2))] = {
/* These are not currently implemented in ni modules */
[B(NI_PFI(11))] = U(9),
[B(NI_PFI(15))] = U(8),
[B(NI_PFI(19))] = U(7),
[B(NI_PFI(23))] = U(6),
[B(NI_PFI(27))] = U(5),
[B(NI_PFI(31))] = U(4 /* or 1 */),
[B(NI_PFI(35))] = U(3),
[B(NI_PFI(39))] = U(2),
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(NI_CtrGate(3))] = U(10),
[B(NI_20MHzTimebase)] = U(0),
[B(NI_80MHzTimebase)] = U(30),
[B(NI_100kHzTimebase)] = U(18),
[B(NI_LogicLow)] = U(31),
},
[B(NI_CtrSource(3))] = {
/* These are not currently implemented in ni modules */
[B(NI_PFI(11))] = U(9),
[B(NI_PFI(15))] = U(8),
[B(NI_PFI(19))] = U(7),
[B(NI_PFI(23))] = U(6),
[B(NI_PFI(27))] = U(5 /* or 1 */),
[B(NI_PFI(31))] = U(4),
[B(NI_PFI(35))] = U(3),
[B(NI_PFI(39))] = U(2),
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(NI_CtrGate(4))] = U(10),
[B(NI_20MHzTimebase)] = U(0),
[B(NI_80MHzTimebase)] = U(30),
[B(NI_100kHzTimebase)] = U(18),
[B(NI_LogicLow)] = U(31),
},
[B(NI_CtrSource(4))] = {
/* These are not currently implemented in ni modules */
[B(NI_PFI(11))] = U(9),
[B(NI_PFI(15))] = U(8),
[B(NI_PFI(19))] = U(7),
[B(NI_PFI(23))] = U(6 /* or 1 */),
[B(NI_PFI(27))] = U(5),
[B(NI_PFI(31))] = U(4),
[B(NI_PFI(35))] = U(3),
[B(NI_PFI(39))] = U(2),
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(NI_CtrGate(5))] = U(10),
[B(NI_20MHzTimebase)] = U(0),
[B(NI_80MHzTimebase)] = U(30),
[B(NI_100kHzTimebase)] = U(18),
[B(NI_LogicLow)] = U(31),
},
[B(NI_CtrSource(5))] = {
/* These are not currently implemented in ni modules */
[B(NI_PFI(11))] = U(9),
[B(NI_PFI(15))] = U(8),
[B(NI_PFI(19))] = U(7 /* or 1 */),
[B(NI_PFI(23))] = U(6),
[B(NI_PFI(27))] = U(5),
[B(NI_PFI(31))] = U(4),
[B(NI_PFI(35))] = U(3),
[B(NI_PFI(39))] = U(2),
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(NI_CtrGate(6))] = U(10),
[B(NI_20MHzTimebase)] = U(0),
[B(NI_80MHzTimebase)] = U(30),
[B(NI_100kHzTimebase)] = U(18),
[B(NI_LogicLow)] = U(31),
},
[B(NI_CtrSource(6))] = {
/* These are not currently implemented in ni modules */
[B(NI_PFI(11))] = U(9),
[B(NI_PFI(15))] = U(8 /* or 1 */),
[B(NI_PFI(19))] = U(7),
[B(NI_PFI(23))] = U(6),
[B(NI_PFI(27))] = U(5),
[B(NI_PFI(31))] = U(4),
[B(NI_PFI(35))] = U(3),
[B(NI_PFI(39))] = U(2),
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(NI_CtrGate(7))] = U(10),
[B(NI_20MHzTimebase)] = U(0),
[B(NI_80MHzTimebase)] = U(30),
[B(NI_100kHzTimebase)] = U(18),
[B(NI_LogicLow)] = U(31),
},
[B(NI_CtrSource(7))] = {
/* These are not currently implemented in ni modules */
[B(NI_PFI(11))] = U(9 /* or 1 */),
[B(NI_PFI(15))] = U(8),
[B(NI_PFI(19))] = U(7),
[B(NI_PFI(23))] = U(6),
[B(NI_PFI(27))] = U(5),
[B(NI_PFI(31))] = U(4),
[B(NI_PFI(35))] = U(3),
[B(NI_PFI(39))] = U(2),
[B(TRIGGER_LINE(0))] = U(11),
[B(TRIGGER_LINE(1))] = U(12),
[B(TRIGGER_LINE(2))] = U(13),
[B(TRIGGER_LINE(3))] = U(14),
[B(TRIGGER_LINE(4))] = U(15),
[B(TRIGGER_LINE(5))] = U(16),
[B(TRIGGER_LINE(6))] = U(17),
[B(NI_CtrGate(0))] = U(10),
[B(NI_20MHzTimebase)] = U(0),
[B(NI_80MHzTimebase)] = U(30),
[B(NI_100kHzTimebase)] = U(18),
[B(NI_LogicLow)] = U(31),
},
[B(NI_CtrGate(0))] = {
[B(NI_PFI(10))] = I(9),
[B(NI_PFI(14))] = I(8),
[B(NI_PFI(18))] = I(7),
[B(NI_PFI(22))] = I(6),
[B(NI_PFI(26))] = I(5),
[B(NI_PFI(30))] = I(4),
[B(NI_PFI(34))] = I(3),
[B(NI_PFI(38))] = I(2 /* or 1 */),
[B(NI_PFI(39))] = I(0),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(NI_CtrSource(1))] = I(10),
[B(NI_CtrInternalOutput(1))] = I(20),
[B(NI_LogicLow)] = I(31 /* or 30 */),
},
[B(NI_CtrGate(1))] = {
[B(NI_PFI(10))] = I(9),
[B(NI_PFI(14))] = I(8),
[B(NI_PFI(18))] = I(7),
[B(NI_PFI(22))] = I(6),
[B(NI_PFI(26))] = I(5),
[B(NI_PFI(30))] = I(4),
[B(NI_PFI(34))] = I(3 /* or 1 */),
[B(NI_PFI(35))] = I(0),
[B(NI_PFI(38))] = I(2),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(NI_CtrSource(2))] = I(10),
[B(NI_CtrInternalOutput(2))] = I(20),
[B(NI_LogicLow)] = I(31 /* or 30 */),
},
[B(NI_CtrGate(2))] = {
[B(NI_PFI(10))] = I(9),
[B(NI_PFI(14))] = I(8),
[B(NI_PFI(18))] = I(7),
[B(NI_PFI(22))] = I(6),
[B(NI_PFI(26))] = I(5),
[B(NI_PFI(30))] = I(4 /* or 1 */),
[B(NI_PFI(31))] = I(0),
[B(NI_PFI(34))] = I(3),
[B(NI_PFI(38))] = I(2),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(NI_CtrSource(3))] = I(10),
[B(NI_CtrInternalOutput(3))] = I(20),
[B(NI_LogicLow)] = I(31 /* or 30 */),
},
[B(NI_CtrGate(3))] = {
[B(NI_PFI(10))] = I(9),
[B(NI_PFI(14))] = I(8),
[B(NI_PFI(18))] = I(7),
[B(NI_PFI(22))] = I(6),
[B(NI_PFI(26))] = I(5 /* or 1 */),
[B(NI_PFI(27))] = I(0),
[B(NI_PFI(30))] = I(4),
[B(NI_PFI(34))] = I(3),
[B(NI_PFI(38))] = I(2),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(NI_CtrSource(4))] = I(10),
[B(NI_CtrInternalOutput(4))] = I(20),
[B(NI_LogicLow)] = I(31 /* or 30 */),
},
[B(NI_CtrGate(4))] = {
[B(NI_PFI(10))] = I(9),
[B(NI_PFI(14))] = I(8),
[B(NI_PFI(18))] = I(7),
[B(NI_PFI(22))] = I(6 /* or 1 */),
[B(NI_PFI(23))] = I(0),
[B(NI_PFI(26))] = I(5),
[B(NI_PFI(30))] = I(4),
[B(NI_PFI(34))] = I(3),
[B(NI_PFI(38))] = I(2),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(NI_CtrSource(5))] = I(10),
[B(NI_CtrInternalOutput(5))] = I(20),
[B(NI_LogicLow)] = I(31 /* or 30 */),
},
[B(NI_CtrGate(5))] = {
[B(NI_PFI(10))] = I(9),
[B(NI_PFI(14))] = I(8),
[B(NI_PFI(18))] = I(7 /* or 1 */),
[B(NI_PFI(19))] = I(0),
[B(NI_PFI(22))] = I(6),
[B(NI_PFI(26))] = I(5),
[B(NI_PFI(30))] = I(4),
[B(NI_PFI(34))] = I(3),
[B(NI_PFI(38))] = I(2),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(NI_CtrSource(6))] = I(10),
[B(NI_CtrInternalOutput(6))] = I(20),
[B(NI_LogicLow)] = I(31 /* or 30 */),
},
[B(NI_CtrGate(6))] = {
[B(NI_PFI(10))] = I(9),
[B(NI_PFI(14))] = I(8 /* or 1 */),
[B(NI_PFI(15))] = I(0),
[B(NI_PFI(18))] = I(7),
[B(NI_PFI(22))] = I(6),
[B(NI_PFI(26))] = I(5),
[B(NI_PFI(30))] = I(4),
[B(NI_PFI(34))] = I(3),
[B(NI_PFI(38))] = I(2),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(NI_CtrSource(7))] = I(10),
[B(NI_CtrInternalOutput(7))] = I(20),
[B(NI_LogicLow)] = I(31 /* or 30 */),
},
[B(NI_CtrGate(7))] = {
[B(NI_PFI(10))] = I(9 /* or 1 */),
[B(NI_PFI(11))] = I(0),
[B(NI_PFI(14))] = I(8),
[B(NI_PFI(18))] = I(7),
[B(NI_PFI(22))] = I(6),
[B(NI_PFI(26))] = I(5),
[B(NI_PFI(30))] = I(4),
[B(NI_PFI(34))] = I(3),
[B(NI_PFI(38))] = I(2),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(NI_CtrSource(0))] = I(10),
[B(NI_CtrInternalOutput(0))] = I(20),
[B(NI_LogicLow)] = I(31 /* or 30 */),
},
[B(NI_CtrAux(0))] = {
[B(NI_PFI(9))] = I(9),
[B(NI_PFI(13))] = I(8),
[B(NI_PFI(17))] = I(7),
[B(NI_PFI(21))] = I(6),
[B(NI_PFI(25))] = I(5),
[B(NI_PFI(29))] = I(4),
[B(NI_PFI(33))] = I(3),
[B(NI_PFI(37))] = I(2 /* or 1 */),
[B(NI_PFI(39))] = I(0),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(NI_CtrSource(1))] = I(10),
[B(NI_CtrGate(1))] = I(30),
[B(NI_CtrInternalOutput(1))] = I(20),
[B(NI_LogicLow)] = I(31),
},
[B(NI_CtrAux(1))] = {
[B(NI_PFI(9))] = I(9),
[B(NI_PFI(13))] = I(8),
[B(NI_PFI(17))] = I(7),
[B(NI_PFI(21))] = I(6),
[B(NI_PFI(25))] = I(5),
[B(NI_PFI(29))] = I(4),
[B(NI_PFI(33))] = I(3 /* or 1 */),
[B(NI_PFI(35))] = I(0),
[B(NI_PFI(37))] = I(2),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(NI_CtrSource(2))] = I(10),
[B(NI_CtrGate(2))] = I(30),
[B(NI_CtrInternalOutput(2))] = I(20),
[B(NI_LogicLow)] = I(31),
},
[B(NI_CtrAux(2))] = {
[B(NI_PFI(9))] = I(9),
[B(NI_PFI(13))] = I(8),
[B(NI_PFI(17))] = I(7),
[B(NI_PFI(21))] = I(6),
[B(NI_PFI(25))] = I(5),
[B(NI_PFI(29))] = I(4 /* or 1 */),
[B(NI_PFI(31))] = I(0),
[B(NI_PFI(33))] = I(3),
[B(NI_PFI(37))] = I(2),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(NI_CtrSource(3))] = I(10),
[B(NI_CtrGate(3))] = I(30),
[B(NI_CtrInternalOutput(3))] = I(20),
[B(NI_LogicLow)] = I(31),
},
[B(NI_CtrAux(3))] = {
[B(NI_PFI(9))] = I(9),
[B(NI_PFI(13))] = I(8),
[B(NI_PFI(17))] = I(7),
[B(NI_PFI(21))] = I(6),
[B(NI_PFI(25))] = I(5 /* or 1 */),
[B(NI_PFI(27))] = I(0),
[B(NI_PFI(29))] = I(4),
[B(NI_PFI(33))] = I(3),
[B(NI_PFI(37))] = I(2),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(NI_CtrSource(4))] = I(10),
[B(NI_CtrGate(4))] = I(30),
[B(NI_CtrInternalOutput(4))] = I(20),
[B(NI_LogicLow)] = I(31),
},
[B(NI_CtrAux(4))] = {
[B(NI_PFI(9))] = I(9),
[B(NI_PFI(13))] = I(8),
[B(NI_PFI(17))] = I(7),
[B(NI_PFI(21))] = I(6 /* or 1 */),
[B(NI_PFI(23))] = I(0),
[B(NI_PFI(25))] = I(5),
[B(NI_PFI(29))] = I(4),
[B(NI_PFI(33))] = I(3),
[B(NI_PFI(37))] = I(2),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(NI_CtrSource(5))] = I(10),
[B(NI_CtrGate(5))] = I(30),
[B(NI_CtrInternalOutput(5))] = I(20),
[B(NI_LogicLow)] = I(31),
},
[B(NI_CtrAux(5))] = {
[B(NI_PFI(9))] = I(9),
[B(NI_PFI(13))] = I(8),
[B(NI_PFI(17))] = I(7 /* or 1 */),
[B(NI_PFI(19))] = I(0),
[B(NI_PFI(21))] = I(6),
[B(NI_PFI(25))] = I(5),
[B(NI_PFI(29))] = I(4),
[B(NI_PFI(33))] = I(3),
[B(NI_PFI(37))] = I(2),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(NI_CtrSource(6))] = I(10),
[B(NI_CtrGate(6))] = I(30),
[B(NI_CtrInternalOutput(6))] = I(20),
[B(NI_LogicLow)] = I(31),
},
[B(NI_CtrAux(6))] = {
[B(NI_PFI(9))] = I(9),
[B(NI_PFI(13))] = I(8 /* or 1 */),
[B(NI_PFI(15))] = I(0),
[B(NI_PFI(17))] = I(7),
[B(NI_PFI(21))] = I(6),
[B(NI_PFI(25))] = I(5),
[B(NI_PFI(29))] = I(4),
[B(NI_PFI(33))] = I(3),
[B(NI_PFI(37))] = I(2),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(NI_CtrSource(7))] = I(10),
[B(NI_CtrGate(7))] = I(30),
[B(NI_CtrInternalOutput(7))] = I(20),
[B(NI_LogicLow)] = I(31),
},
[B(NI_CtrAux(7))] = {
[B(NI_PFI(9))] = I(9 /* or 1 */),
[B(NI_PFI(11))] = I(0),
[B(NI_PFI(13))] = I(8),
[B(NI_PFI(17))] = I(7),
[B(NI_PFI(21))] = I(6),
[B(NI_PFI(25))] = I(5),
[B(NI_PFI(29))] = I(4),
[B(NI_PFI(33))] = I(3),
[B(NI_PFI(37))] = I(2),
[B(TRIGGER_LINE(0))] = I(11),
[B(TRIGGER_LINE(1))] = I(12),
[B(TRIGGER_LINE(2))] = I(13),
[B(TRIGGER_LINE(3))] = I(14),
[B(TRIGGER_LINE(4))] = I(15),
[B(TRIGGER_LINE(5))] = I(16),
[B(TRIGGER_LINE(6))] = I(17),
[B(NI_CtrSource(0))] = I(10),
[B(NI_CtrGate(0))] = I(30),
[B(NI_CtrInternalOutput(0))] = I(20),
[B(NI_LogicLow)] = I(31),
},
},
};
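/*
 * Illustrative sketch only (kept disabled; the function name is
 * hypothetical): given the table above, the marked register value for a
 * (source -> destination) pair can be read out directly.  The routing core
 * performs this lookup through its own helpers and strips the I()/U()
 * marker bits before use.
 */
#if 0
static u8 example_route_register(unsigned int src, unsigned int dest)
{
	/* the returned value still carries the I()/U() marker bits */
	return ni_660x_route_values.register_values[B(dest)][B(src)];
}
#endif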
| linux-master | drivers/comedi/drivers/ni_routing/ni_route_values/ni_660x.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* kcomedilib/kcomedilib.c
 * a comedilib interface for kernel modules
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2000 David A. Schleef <[email protected]>
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/comedi.h>
#include <linux/comedi/comedidev.h>
#include <linux/comedi/comedilib.h>
MODULE_AUTHOR("David Schleef <[email protected]>");
MODULE_DESCRIPTION("Comedi kernel library");
MODULE_LICENSE("GPL");
struct comedi_device *comedi_open(const char *filename)
{
struct comedi_device *dev, *retval = NULL;
unsigned int minor;
if (strncmp(filename, "/dev/comedi", 11) != 0)
return NULL;
if (kstrtouint(filename + 11, 0, &minor))
return NULL;
if (minor >= COMEDI_NUM_BOARD_MINORS)
return NULL;
dev = comedi_dev_get_from_minor(minor);
if (!dev)
return NULL;
down_read(&dev->attach_lock);
if (dev->attached)
retval = dev;
else
retval = NULL;
up_read(&dev->attach_lock);
if (!retval)
comedi_dev_put(dev);
return retval;
}
EXPORT_SYMBOL_GPL(comedi_open);
int comedi_close(struct comedi_device *dev)
{
comedi_dev_put(dev);
return 0;
}
EXPORT_SYMBOL_GPL(comedi_close);
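/*
 * Illustrative usage from another kernel module (a sketch only; the device
 * path and the surrounding error handling are assumptions, not mandated by
 * this API):
 */
#if 0
static int example_open_close(void)
{
	struct comedi_device *dev = comedi_open("/dev/comedi0");

	if (!dev)
		return -ENODEV;
	/* ... use the DIO helpers exported below ... */
	comedi_close(dev);
	return 0;
}
#endif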
static int comedi_do_insn(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
struct comedi_subdevice *s;
int ret;
mutex_lock(&dev->mutex);
if (!dev->attached) {
ret = -EINVAL;
goto error;
}
/* a subdevice instruction */
if (insn->subdev >= dev->n_subdevices) {
ret = -EINVAL;
goto error;
}
s = &dev->subdevices[insn->subdev];
if (s->type == COMEDI_SUBD_UNUSED) {
dev_err(dev->class_dev,
"%d not usable subdevice\n", insn->subdev);
ret = -EIO;
goto error;
}
/* XXX check lock */
ret = comedi_check_chanlist(s, 1, &insn->chanspec);
if (ret < 0) {
dev_err(dev->class_dev, "bad chanspec\n");
ret = -EINVAL;
goto error;
}
if (s->busy) {
ret = -EBUSY;
goto error;
}
s->busy = dev;
switch (insn->insn) {
case INSN_BITS:
ret = s->insn_bits(dev, s, insn, data);
break;
case INSN_CONFIG:
/* XXX should check instruction length */
ret = s->insn_config(dev, s, insn, data);
break;
default:
ret = -EINVAL;
break;
}
s->busy = NULL;
error:
mutex_unlock(&dev->mutex);
return ret;
}
int comedi_dio_get_config(struct comedi_device *dev, unsigned int subdev,
unsigned int chan, unsigned int *io)
{
struct comedi_insn insn;
unsigned int data[2];
int ret;
memset(&insn, 0, sizeof(insn));
insn.insn = INSN_CONFIG;
insn.n = 2;
insn.subdev = subdev;
insn.chanspec = CR_PACK(chan, 0, 0);
data[0] = INSN_CONFIG_DIO_QUERY;
data[1] = 0;
ret = comedi_do_insn(dev, &insn, data);
if (ret >= 0)
*io = data[1];
return ret;
}
EXPORT_SYMBOL_GPL(comedi_dio_get_config);
int comedi_dio_config(struct comedi_device *dev, unsigned int subdev,
unsigned int chan, unsigned int io)
{
struct comedi_insn insn;
memset(&insn, 0, sizeof(insn));
insn.insn = INSN_CONFIG;
insn.n = 1;
insn.subdev = subdev;
insn.chanspec = CR_PACK(chan, 0, 0);
return comedi_do_insn(dev, &insn, &io);
}
EXPORT_SYMBOL_GPL(comedi_dio_config);
int comedi_dio_bitfield2(struct comedi_device *dev, unsigned int subdev,
unsigned int mask, unsigned int *bits,
unsigned int base_channel)
{
struct comedi_insn insn;
unsigned int data[2];
unsigned int n_chan;
unsigned int shift;
int ret;
base_channel = CR_CHAN(base_channel);
n_chan = comedi_get_n_channels(dev, subdev);
if (base_channel >= n_chan)
return -EINVAL;
memset(&insn, 0, sizeof(insn));
insn.insn = INSN_BITS;
insn.chanspec = base_channel;
insn.n = 2;
insn.subdev = subdev;
data[0] = mask;
data[1] = *bits;
/*
* Most drivers ignore the base channel in insn->chanspec.
* Fix this here if the subdevice has <= 32 channels.
*/
if (n_chan <= 32) {
shift = base_channel;
if (shift) {
insn.chanspec = 0;
data[0] <<= shift;
data[1] <<= shift;
}
} else {
shift = 0;
}
ret = comedi_do_insn(dev, &insn, data);
*bits = data[1] >> shift;
return ret;
}
EXPORT_SYMBOL_GPL(comedi_dio_bitfield2);
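/*
 * Sketch combining the DIO helpers above (illustrative only: the subdevice
 * index 2 and the channel numbers are assumptions, not fixed by this API).
 * Channel 0 is configured as an output and driven high; the mask argument
 * leaves all other channels untouched.
 */
#if 0
static int example_dio(struct comedi_device *dev)
{
	unsigned int bits = 0x1;	/* desired state of channel 0 */
	int ret;

	ret = comedi_dio_config(dev, 2, 0, COMEDI_OUTPUT);
	if (ret < 0)
		return ret;
	return comedi_dio_bitfield2(dev, 2, 0x1, &bits, 0);
}
#endif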
int comedi_find_subdevice_by_type(struct comedi_device *dev, int type,
unsigned int subd)
{
struct comedi_subdevice *s;
int ret = -ENODEV;
down_read(&dev->attach_lock);
if (dev->attached)
for (; subd < dev->n_subdevices; subd++) {
s = &dev->subdevices[subd];
if (s->type == type) {
ret = subd;
break;
}
}
up_read(&dev->attach_lock);
return ret;
}
EXPORT_SYMBOL_GPL(comedi_find_subdevice_by_type);
int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice)
{
int n;
down_read(&dev->attach_lock);
if (!dev->attached || subdevice >= dev->n_subdevices)
n = 0;
else
n = dev->subdevices[subdevice].n_chan;
up_read(&dev->attach_lock);
return n;
}
EXPORT_SYMBOL_GPL(comedi_get_n_channels);
static int __init kcomedilib_module_init(void)
{
return 0;
}
static void __exit kcomedilib_module_exit(void)
{
}
module_init(kcomedilib_module_init);
module_exit(kcomedilib_module_exit);
| linux-master | drivers/comedi/kcomedilib/kcomedilib_main.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ps3vram - Use extra PS3 video ram as block device.
*
* Copyright 2009 Sony Corporation
*
* Based on the MTD ps3vram driver, which is
* Copyright (c) 2007-2008 Jim Paris <[email protected]>
 * RSX DMA support added by Vivien Chappelier <[email protected]>
*/
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/cell-regs.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#include <asm/ps3.h>
#include <asm/ps3gpu.h>
#define DEVICE_NAME "ps3vram"
#define XDR_BUF_SIZE (2 * 1024 * 1024) /* XDR buffer (must be 1MiB aligned) */
#define XDR_IOIF 0x0c000000
#define FIFO_BASE XDR_IOIF
#define FIFO_SIZE (64 * 1024)
#define DMA_PAGE_SIZE (4 * 1024)
#define CACHE_PAGE_SIZE (256 * 1024)
#define CACHE_PAGE_COUNT ((XDR_BUF_SIZE - FIFO_SIZE) / CACHE_PAGE_SIZE)
#define CACHE_OFFSET CACHE_PAGE_SIZE
#define FIFO_OFFSET 0
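/*
 * Resulting XDR buffer layout (XDR_BUF_SIZE = 2 MiB total):
 *   [0, 64 KiB)        GPU FIFO (FIFO_OFFSET, FIFO_SIZE)
 *   [64 KiB, 256 KiB)  unused gap up to CACHE_OFFSET
 *   [256 KiB, 2 MiB)   CACHE_PAGE_COUNT = 7 cache pages of 256 KiB each
 */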
#define CTRL_PUT 0x10
#define CTRL_GET 0x11
#define CTRL_TOP 0x15
#define UPLOAD_SUBCH 1
#define DOWNLOAD_SUBCH 2
#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104
#define CACHE_PAGE_PRESENT 1
#define CACHE_PAGE_DIRTY 2
struct ps3vram_tag {
unsigned int address;
unsigned int flags;
};
struct ps3vram_cache {
unsigned int page_count;
unsigned int page_size;
struct ps3vram_tag *tags;
unsigned int hit;
unsigned int miss;
};
struct ps3vram_priv {
struct gendisk *gendisk;
u64 size;
u64 memory_handle;
u64 context_handle;
u32 __iomem *ctrl;
void __iomem *reports;
u8 *xdr_buf;
u32 *fifo_base;
u32 *fifo_ptr;
struct ps3vram_cache cache;
spinlock_t lock; /* protecting list of bios */
struct bio_list list;
};
static int ps3vram_major;
#define DMA_NOTIFIER_HANDLE_BASE 0x66604200 /* first DMA notifier handle */
#define DMA_NOTIFIER_OFFSET_BASE 0x1000 /* first DMA notifier offset */
#define DMA_NOTIFIER_SIZE 0x40
#define NOTIFIER 7 /* notifier used for completion report */
static char *size = "256M";
module_param(size, charp, 0);
MODULE_PARM_DESC(size, "memory size");
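/*
 * "size" is parsed with memparse(), so suffixes such as "256M" work; the
 * probe routine rounds it up to a 1 MiB multiple and then shrinks it in
 * 1 MiB steps until the hypervisor grants the allocation.
 */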
static u32 __iomem *ps3vram_get_notifier(void __iomem *reports, int notifier)
{
return reports + DMA_NOTIFIER_OFFSET_BASE +
DMA_NOTIFIER_SIZE * notifier;
}
static void ps3vram_notifier_reset(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
u32 __iomem *notify = ps3vram_get_notifier(priv->reports, NOTIFIER);
int i;
for (i = 0; i < 4; i++)
iowrite32be(0xffffffff, notify + i);
}
static int ps3vram_notifier_wait(struct ps3_system_bus_device *dev,
unsigned int timeout_ms)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
u32 __iomem *notify = ps3vram_get_notifier(priv->reports, NOTIFIER);
unsigned long timeout;
for (timeout = 20; timeout; timeout--) {
if (!ioread32be(notify + 3))
return 0;
udelay(10);
}
timeout = jiffies + msecs_to_jiffies(timeout_ms);
do {
if (!ioread32be(notify + 3))
return 0;
msleep(1);
} while (time_before(jiffies, timeout));
return -ETIMEDOUT;
}
static void ps3vram_init_ring(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
iowrite32be(FIFO_BASE + FIFO_OFFSET, priv->ctrl + CTRL_PUT);
iowrite32be(FIFO_BASE + FIFO_OFFSET, priv->ctrl + CTRL_GET);
}
static int ps3vram_wait_ring(struct ps3_system_bus_device *dev,
unsigned int timeout_ms)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
do {
if (ioread32be(priv->ctrl + CTRL_PUT) == ioread32be(priv->ctrl + CTRL_GET))
return 0;
msleep(1);
} while (time_before(jiffies, timeout));
dev_warn(&dev->core, "FIFO timeout (%08x/%08x/%08x)\n",
ioread32be(priv->ctrl + CTRL_PUT), ioread32be(priv->ctrl + CTRL_GET),
ioread32be(priv->ctrl + CTRL_TOP));
return -ETIMEDOUT;
}
static void ps3vram_out_ring(struct ps3vram_priv *priv, u32 data)
{
*(priv->fifo_ptr)++ = data;
}
static void ps3vram_begin_ring(struct ps3vram_priv *priv, u32 chan, u32 tag,
u32 size)
{
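	/*
	 * This packs an NV FIFO method header: bits 28:18 hold the method
	 * count (size), bits 15:13 the subchannel and bits 12:2 the method
	 * offset (tag).  The field description follows the usual NV4x
	 * command format and is informative rather than authoritative.
	 */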
ps3vram_out_ring(priv, (size << 18) | (chan << 13) | tag);
}
static void ps3vram_rewind_ring(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
int status;
ps3vram_out_ring(priv, 0x20000000 | (FIFO_BASE + FIFO_OFFSET));
iowrite32be(FIFO_BASE + FIFO_OFFSET, priv->ctrl + CTRL_PUT);
/* asking the HV for a blit will kick the FIFO */
status = lv1_gpu_fb_blit(priv->context_handle, 0, 0, 0, 0);
if (status)
dev_err(&dev->core, "%s: lv1_gpu_fb_blit failed %d\n",
__func__, status);
priv->fifo_ptr = priv->fifo_base;
}
static void ps3vram_fire_ring(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
int status;
mutex_lock(&ps3_gpu_mutex);
iowrite32be(FIFO_BASE + FIFO_OFFSET + (priv->fifo_ptr - priv->fifo_base)
* sizeof(u32), priv->ctrl + CTRL_PUT);
/* asking the HV for a blit will kick the FIFO */
status = lv1_gpu_fb_blit(priv->context_handle, 0, 0, 0, 0);
if (status)
dev_err(&dev->core, "%s: lv1_gpu_fb_blit failed %d\n",
__func__, status);
if ((priv->fifo_ptr - priv->fifo_base) * sizeof(u32) >
FIFO_SIZE - 1024) {
dev_dbg(&dev->core, "FIFO full, rewinding\n");
ps3vram_wait_ring(dev, 200);
ps3vram_rewind_ring(dev);
}
mutex_unlock(&ps3_gpu_mutex);
}
static void ps3vram_bind(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0, 1);
ps3vram_out_ring(priv, 0x31337303);
ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x180, 3);
ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER);
ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */
ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */
ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0, 1);
ps3vram_out_ring(priv, 0x3137c0de);
ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x180, 3);
ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER);
ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */
ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */
ps3vram_fire_ring(dev);
}
static int ps3vram_upload(struct ps3_system_bus_device *dev,
unsigned int src_offset, unsigned int dst_offset,
int len, int count)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
ps3vram_begin_ring(priv, UPLOAD_SUBCH,
NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
ps3vram_out_ring(priv, XDR_IOIF + src_offset);
ps3vram_out_ring(priv, dst_offset);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, count);
ps3vram_out_ring(priv, (1 << 8) | 1);
ps3vram_out_ring(priv, 0);
ps3vram_notifier_reset(dev);
ps3vram_begin_ring(priv, UPLOAD_SUBCH,
NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1);
ps3vram_out_ring(priv, 0);
ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x100, 1);
ps3vram_out_ring(priv, 0);
ps3vram_fire_ring(dev);
if (ps3vram_notifier_wait(dev, 200) < 0) {
dev_warn(&dev->core, "%s: Notifier timeout\n", __func__);
return -1;
}
return 0;
}
static int ps3vram_download(struct ps3_system_bus_device *dev,
unsigned int src_offset, unsigned int dst_offset,
int len, int count)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
ps3vram_begin_ring(priv, DOWNLOAD_SUBCH,
NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
ps3vram_out_ring(priv, src_offset);
ps3vram_out_ring(priv, XDR_IOIF + dst_offset);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, count);
ps3vram_out_ring(priv, (1 << 8) | 1);
ps3vram_out_ring(priv, 0);
ps3vram_notifier_reset(dev);
ps3vram_begin_ring(priv, DOWNLOAD_SUBCH,
NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1);
ps3vram_out_ring(priv, 0);
ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x100, 1);
ps3vram_out_ring(priv, 0);
ps3vram_fire_ring(dev);
if (ps3vram_notifier_wait(dev, 200) < 0) {
dev_warn(&dev->core, "%s: Notifier timeout\n", __func__);
return -1;
}
return 0;
}
static void ps3vram_cache_evict(struct ps3_system_bus_device *dev, int entry)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
struct ps3vram_cache *cache = &priv->cache;
if (!(cache->tags[entry].flags & CACHE_PAGE_DIRTY))
return;
dev_dbg(&dev->core, "Flushing %d: 0x%08x\n", entry,
cache->tags[entry].address);
if (ps3vram_upload(dev, CACHE_OFFSET + entry * cache->page_size,
cache->tags[entry].address, DMA_PAGE_SIZE,
cache->page_size / DMA_PAGE_SIZE) < 0) {
dev_err(&dev->core,
"Failed to upload from 0x%x to " "0x%x size 0x%x\n",
entry * cache->page_size, cache->tags[entry].address,
cache->page_size);
}
cache->tags[entry].flags &= ~CACHE_PAGE_DIRTY;
}
static void ps3vram_cache_load(struct ps3_system_bus_device *dev, int entry,
unsigned int address)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
struct ps3vram_cache *cache = &priv->cache;
dev_dbg(&dev->core, "Fetching %d: 0x%08x\n", entry, address);
if (ps3vram_download(dev, address,
CACHE_OFFSET + entry * cache->page_size,
DMA_PAGE_SIZE,
cache->page_size / DMA_PAGE_SIZE) < 0) {
dev_err(&dev->core,
"Failed to download from 0x%x to 0x%x size 0x%x\n",
address, entry * cache->page_size, cache->page_size);
}
cache->tags[entry].address = address;
cache->tags[entry].flags |= CACHE_PAGE_PRESENT;
}
static void ps3vram_cache_flush(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
struct ps3vram_cache *cache = &priv->cache;
int i;
dev_dbg(&dev->core, "FLUSH\n");
for (i = 0; i < cache->page_count; i++) {
ps3vram_cache_evict(dev, i);
cache->tags[i].flags = 0;
}
}
static unsigned int ps3vram_cache_match(struct ps3_system_bus_device *dev,
loff_t address)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
struct ps3vram_cache *cache = &priv->cache;
unsigned int base;
unsigned int offset;
int i;
static int counter;
offset = (unsigned int) (address & (cache->page_size - 1));
base = (unsigned int) (address - offset);
/* fully associative check */
for (i = 0; i < cache->page_count; i++) {
if ((cache->tags[i].flags & CACHE_PAGE_PRESENT) &&
cache->tags[i].address == base) {
cache->hit++;
dev_dbg(&dev->core, "Found entry %d: 0x%08x\n", i,
cache->tags[i].address);
return i;
}
}
/* choose a random entry */
i = (jiffies + (counter++)) % cache->page_count;
dev_dbg(&dev->core, "Using entry %d\n", i);
ps3vram_cache_evict(dev, i);
ps3vram_cache_load(dev, i, base);
cache->miss++;
return i;
}
static int ps3vram_cache_init(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
priv->cache.page_count = CACHE_PAGE_COUNT;
priv->cache.page_size = CACHE_PAGE_SIZE;
priv->cache.tags = kcalloc(CACHE_PAGE_COUNT,
sizeof(struct ps3vram_tag),
GFP_KERNEL);
if (!priv->cache.tags)
return -ENOMEM;
dev_info(&dev->core, "Created ram cache: %d entries, %d KiB each\n",
CACHE_PAGE_COUNT, CACHE_PAGE_SIZE / 1024);
return 0;
}
static void ps3vram_cache_cleanup(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
ps3vram_cache_flush(dev);
kfree(priv->cache.tags);
}
static blk_status_t ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
unsigned int cached, count;
dev_dbg(&dev->core, "%s: from=0x%08x len=0x%zx\n", __func__,
(unsigned int)from, len);
if (from >= priv->size)
return BLK_STS_IOERR;
if (len > priv->size - from)
len = priv->size - from;
/* Copy from vram to buf */
count = len;
while (count) {
unsigned int offset, avail;
unsigned int entry;
offset = (unsigned int) (from & (priv->cache.page_size - 1));
avail = priv->cache.page_size - offset;
entry = ps3vram_cache_match(dev, from);
cached = CACHE_OFFSET + entry * priv->cache.page_size + offset;
dev_dbg(&dev->core, "%s: from=%08x cached=%08x offset=%08x "
"avail=%08x count=%08x\n", __func__,
(unsigned int)from, cached, offset, avail, count);
if (avail > count)
avail = count;
memcpy(buf, priv->xdr_buf + cached, avail);
buf += avail;
count -= avail;
from += avail;
}
*retlen = len;
return 0;
}
static blk_status_t ps3vram_write(struct ps3_system_bus_device *dev, loff_t to,
size_t len, size_t *retlen, const u_char *buf)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
unsigned int cached, count;
if (to >= priv->size)
return BLK_STS_IOERR;
if (len > priv->size - to)
len = priv->size - to;
/* Copy from buf to vram */
count = len;
while (count) {
unsigned int offset, avail;
unsigned int entry;
offset = (unsigned int) (to & (priv->cache.page_size - 1));
avail = priv->cache.page_size - offset;
entry = ps3vram_cache_match(dev, to);
cached = CACHE_OFFSET + entry * priv->cache.page_size + offset;
dev_dbg(&dev->core, "%s: to=%08x cached=%08x offset=%08x "
"avail=%08x count=%08x\n", __func__, (unsigned int)to,
cached, offset, avail, count);
if (avail > count)
avail = count;
memcpy(priv->xdr_buf + cached, buf, avail);
priv->cache.tags[entry].flags |= CACHE_PAGE_DIRTY;
buf += avail;
count -= avail;
to += avail;
}
*retlen = len;
return 0;
}
static int ps3vram_proc_show(struct seq_file *m, void *v)
{
struct ps3vram_priv *priv = m->private;
seq_printf(m, "hit:%u\nmiss:%u\n", priv->cache.hit, priv->cache.miss);
return 0;
}
static void ps3vram_proc_init(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
struct proc_dir_entry *pde;
pde = proc_create_single_data(DEVICE_NAME, 0444, NULL,
ps3vram_proc_show, priv);
if (!pde)
dev_warn(&dev->core, "failed to create /proc entry\n");
}
static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
struct bio *bio)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
int write = bio_data_dir(bio) == WRITE;
const char *op = write ? "write" : "read";
loff_t offset = bio->bi_iter.bi_sector << 9;
blk_status_t error = 0;
struct bio_vec bvec;
struct bvec_iter iter;
struct bio *next;
bio_for_each_segment(bvec, bio, iter) {
/* PS3 is ppc64, so we don't handle highmem */
char *ptr = bvec_virt(&bvec);
size_t len = bvec.bv_len, retlen;
dev_dbg(&dev->core, " %s %zu bytes at offset %llu\n", op,
len, offset);
if (write)
error = ps3vram_write(dev, offset, len, &retlen, ptr);
else
error = ps3vram_read(dev, offset, len, &retlen, ptr);
if (error) {
dev_err(&dev->core, "%s failed\n", op);
goto out;
}
if (retlen != len) {
dev_err(&dev->core, "Short %s\n", op);
error = BLK_STS_IOERR;
goto out;
}
offset += len;
}
dev_dbg(&dev->core, "%s completed\n", op);
out:
spin_lock_irq(&priv->lock);
bio_list_pop(&priv->list);
next = bio_list_peek(&priv->list);
spin_unlock_irq(&priv->lock);
bio->bi_status = error;
bio_endio(bio);
return next;
}
static void ps3vram_submit_bio(struct bio *bio)
{
struct ps3_system_bus_device *dev = bio->bi_bdev->bd_disk->private_data;
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
int busy;
dev_dbg(&dev->core, "%s\n", __func__);
spin_lock_irq(&priv->lock);
busy = !bio_list_empty(&priv->list);
bio_list_add(&priv->list, bio);
spin_unlock_irq(&priv->lock);
if (busy)
return;
do {
bio = ps3vram_do_bio(dev, bio);
} while (bio);
}
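/*
 * Note on the queueing scheme above: the first submitter to find the list
 * empty becomes the worker and drains bios one at a time; concurrent
 * submitters merely append to the list and return.  This serialises all
 * VRAM access without needing a dedicated thread.
 */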
static const struct block_device_operations ps3vram_fops = {
.owner = THIS_MODULE,
.submit_bio = ps3vram_submit_bio,
};
static int ps3vram_probe(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv;
int error, status;
struct gendisk *gendisk;
u64 ddr_size, ddr_lpar, ctrl_lpar, info_lpar, reports_lpar,
reports_size, xdr_lpar;
char *rest;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
error = -ENOMEM;
goto fail;
}
spin_lock_init(&priv->lock);
bio_list_init(&priv->list);
ps3_system_bus_set_drvdata(dev, priv);
/* Allocate XDR buffer (1MiB aligned) */
priv->xdr_buf = (void *)__get_free_pages(GFP_KERNEL,
get_order(XDR_BUF_SIZE));
if (priv->xdr_buf == NULL) {
dev_err(&dev->core, "Could not allocate XDR buffer\n");
error = -ENOMEM;
goto fail_free_priv;
}
	/* Put FIFO at beginning of XDR buffer */
priv->fifo_base = (u32 *) (priv->xdr_buf + FIFO_OFFSET);
priv->fifo_ptr = priv->fifo_base;
/* XXX: Need to open GPU, in case ps3fb or snd_ps3 aren't loaded */
if (ps3_open_hv_device(dev)) {
dev_err(&dev->core, "ps3_open_hv_device failed\n");
error = -EAGAIN;
goto out_free_xdr_buf;
}
/* Request memory */
status = -1;
ddr_size = ALIGN(memparse(size, &rest), 1024*1024);
if (!ddr_size) {
dev_err(&dev->core, "Specified size is too small\n");
error = -EINVAL;
goto out_close_gpu;
}
while (ddr_size > 0) {
status = lv1_gpu_memory_allocate(ddr_size, 0, 0, 0, 0,
&priv->memory_handle,
&ddr_lpar);
if (!status)
break;
ddr_size -= 1024*1024;
}
if (status) {
dev_err(&dev->core, "lv1_gpu_memory_allocate failed %d\n",
status);
error = -ENOMEM;
goto out_close_gpu;
}
/* Request context */
status = lv1_gpu_context_allocate(priv->memory_handle, 0,
&priv->context_handle, &ctrl_lpar,
&info_lpar, &reports_lpar,
&reports_size);
if (status) {
dev_err(&dev->core, "lv1_gpu_context_allocate failed %d\n",
status);
error = -ENOMEM;
goto out_free_memory;
}
/* Map XDR buffer to RSX */
xdr_lpar = ps3_mm_phys_to_lpar(__pa(priv->xdr_buf));
status = lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF,
xdr_lpar, XDR_BUF_SIZE,
CBE_IOPTE_PP_W | CBE_IOPTE_PP_R |
CBE_IOPTE_M);
if (status) {
dev_err(&dev->core, "lv1_gpu_context_iomap failed %d\n",
status);
error = -ENOMEM;
goto out_free_context;
}
priv->ctrl = ioremap(ctrl_lpar, 64 * 1024);
if (!priv->ctrl) {
dev_err(&dev->core, "ioremap CTRL failed\n");
error = -ENOMEM;
goto out_unmap_context;
}
priv->reports = ioremap(reports_lpar, reports_size);
if (!priv->reports) {
dev_err(&dev->core, "ioremap REPORTS failed\n");
error = -ENOMEM;
goto out_unmap_ctrl;
}
mutex_lock(&ps3_gpu_mutex);
ps3vram_init_ring(dev);
mutex_unlock(&ps3_gpu_mutex);
priv->size = ddr_size;
ps3vram_bind(dev);
mutex_lock(&ps3_gpu_mutex);
error = ps3vram_wait_ring(dev, 100);
mutex_unlock(&ps3_gpu_mutex);
if (error < 0) {
dev_err(&dev->core, "Failed to initialize channels\n");
error = -ETIMEDOUT;
goto out_unmap_reports;
}
	error = ps3vram_cache_init(dev);
	if (error < 0)
		goto out_unmap_reports;
ps3vram_proc_init(dev);
gendisk = blk_alloc_disk(NUMA_NO_NODE);
if (!gendisk) {
dev_err(&dev->core, "blk_alloc_disk failed\n");
error = -ENOMEM;
goto out_cache_cleanup;
}
priv->gendisk = gendisk;
gendisk->major = ps3vram_major;
gendisk->minors = 1;
gendisk->flags |= GENHD_FL_NO_PART;
gendisk->fops = &ps3vram_fops;
gendisk->private_data = dev;
strscpy(gendisk->disk_name, DEVICE_NAME, sizeof(gendisk->disk_name));
set_capacity(gendisk, priv->size >> 9);
dev_info(&dev->core, "%s: Using %llu MiB of GPU memory\n",
gendisk->disk_name, get_capacity(gendisk) >> 11);
error = device_add_disk(&dev->core, gendisk, NULL);
if (error)
goto out_cleanup_disk;
return 0;
out_cleanup_disk:
put_disk(gendisk);
out_cache_cleanup:
remove_proc_entry(DEVICE_NAME, NULL);
ps3vram_cache_cleanup(dev);
out_unmap_reports:
iounmap(priv->reports);
out_unmap_ctrl:
iounmap(priv->ctrl);
out_unmap_context:
lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF, xdr_lpar,
XDR_BUF_SIZE, CBE_IOPTE_M);
out_free_context:
lv1_gpu_context_free(priv->context_handle);
out_free_memory:
lv1_gpu_memory_free(priv->memory_handle);
out_close_gpu:
ps3_close_hv_device(dev);
out_free_xdr_buf:
free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE));
fail_free_priv:
kfree(priv);
ps3_system_bus_set_drvdata(dev, NULL);
fail:
return error;
}
static void ps3vram_remove(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
del_gendisk(priv->gendisk);
put_disk(priv->gendisk);
remove_proc_entry(DEVICE_NAME, NULL);
ps3vram_cache_cleanup(dev);
iounmap(priv->reports);
iounmap(priv->ctrl);
lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF,
ps3_mm_phys_to_lpar(__pa(priv->xdr_buf)),
XDR_BUF_SIZE, CBE_IOPTE_M);
lv1_gpu_context_free(priv->context_handle);
lv1_gpu_memory_free(priv->memory_handle);
ps3_close_hv_device(dev);
free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE));
kfree(priv);
ps3_system_bus_set_drvdata(dev, NULL);
}
static struct ps3_system_bus_driver ps3vram = {
.match_id = PS3_MATCH_ID_GPU,
.match_sub_id = PS3_MATCH_SUB_ID_GPU_RAMDISK,
.core.name = DEVICE_NAME,
.core.owner = THIS_MODULE,
.probe = ps3vram_probe,
.remove = ps3vram_remove,
.shutdown = ps3vram_remove,
};
static int __init ps3vram_init(void)
{
int error;
if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
return -ENODEV;
error = register_blkdev(0, DEVICE_NAME);
if (error <= 0) {
pr_err("%s: register_blkdev failed %d\n", DEVICE_NAME, error);
return error;
}
ps3vram_major = error;
pr_info("%s: registered block device major %d\n", DEVICE_NAME,
ps3vram_major);
error = ps3_system_bus_driver_register(&ps3vram);
if (error)
unregister_blkdev(ps3vram_major, DEVICE_NAME);
return error;
}
static void __exit ps3vram_exit(void)
{
ps3_system_bus_driver_unregister(&ps3vram);
unregister_blkdev(ps3vram_major, DEVICE_NAME);
}
module_init(ps3vram_init);
module_exit(ps3vram_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PS3 Video RAM Storage Driver");
MODULE_AUTHOR("Sony Corporation");
MODULE_ALIAS(PS3_MODULE_ALIAS_GPU_RAMDISK);
| linux-master | drivers/block/ps3vram.c |
/*
* blkfront.c
*
* XenLinux virtual block device driver.
*
* Copyright (c) 2003-2004, Keir Fraser & Steve Hand
* Modifications by Mark A. Williamson are (c) Intel Research Cambridge
* Copyright (c) 2004, Christian Limpach
* Copyright (c) 2004, Andrew Warfield
* Copyright (c) 2005, Christopher Clark
* Copyright (c) 2005, XenSource Ltd
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/sched/mm.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>
#include <asm/xen/hypervisor.h>
/*
* The minimal size of segment supported by the block framework is PAGE_SIZE.
* When Linux is using a different page size than Xen, it may not be possible
* to put all the data in a single segment.
* This can happen when the backend doesn't support indirect descriptor and
* therefore the maximum amount of data that a request can carry is
* BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE = 44KB
*
* Note that we only support one extra request. So the Linux page size
* should be <= ( 2 * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) =
* 88KB.
*/
#define HAS_EXTRA_REQ (BLKIF_MAX_SEGMENTS_PER_REQUEST < XEN_PFN_PER_PAGE)
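/*
 * Worked example: with BLKIF_MAX_SEGMENTS_PER_REQUEST = 11 and a 4 KiB
 * XEN_PAGE_SIZE, one request carries at most 11 * 4 KiB = 44 KiB, and the
 * single permitted extra request doubles that to 88 KiB -- hence the bound
 * on the Linux page size stated above.
 */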
enum blkif_state {
BLKIF_STATE_DISCONNECTED,
BLKIF_STATE_CONNECTED,
BLKIF_STATE_SUSPENDED,
BLKIF_STATE_ERROR,
};
struct grant {
grant_ref_t gref;
struct page *page;
struct list_head node;
};
enum blk_req_status {
REQ_PROCESSING,
REQ_WAITING,
REQ_DONE,
REQ_ERROR,
REQ_EOPNOTSUPP,
};
struct blk_shadow {
struct blkif_request req;
struct request *request;
struct grant **grants_used;
struct grant **indirect_grants;
struct scatterlist *sg;
unsigned int num_sg;
enum blk_req_status status;
#define NO_ASSOCIATED_ID ~0UL
/*
* Id of the sibling if we ever need 2 requests when handling a
* block I/O request
*/
unsigned long associated_id;
};
struct blkif_req {
blk_status_t error;
};
static inline struct blkif_req *blkif_req(struct request *rq)
{
return blk_mq_rq_to_pdu(rq);
}
static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;
static struct delayed_work blkfront_work;
static LIST_HEAD(info_list);
/*
* Maximum number of segments in indirect requests, the actual value used by
* the frontend driver is the minimum of this value and the value provided
* by the backend driver.
*/
static unsigned int xen_blkif_max_segments = 32;
module_param_named(max_indirect_segments, xen_blkif_max_segments, uint, 0444);
MODULE_PARM_DESC(max_indirect_segments,
"Maximum amount of segments in indirect requests (default is 32)");
static unsigned int xen_blkif_max_queues = 4;
module_param_named(max_queues, xen_blkif_max_queues, uint, 0444);
MODULE_PARM_DESC(max_queues, "Maximum number of hardware queues/rings used per virtual disk");
/*
* Maximum order of pages to be used for the shared ring between front and
* backend, 4KB page granularity is used.
*/
static unsigned int xen_blkif_max_ring_order;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
static bool __read_mostly xen_blkif_trusted = true;
module_param_named(trusted, xen_blkif_trusted, bool, 0644);
MODULE_PARM_DESC(trusted, "Is the backend trusted");
#define BLK_RING_SIZE(info) \
__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
/*
* ring-ref%u i=(-1UL) would take 11 characters + 'ring-ref' is 8, so 19
* characters are enough. Define to 20 to keep consistent with backend.
*/
#define RINGREF_NAME_LEN (20)
/*
* queue-%u would take 7 + 10(UINT_MAX) = 17 characters.
*/
#define QUEUE_NAME_LEN (17)
/*
* Per-ring info.
* Every blkfront device can associate with one or more blkfront_ring_info,
* depending on how many hardware queues/rings to be used.
*/
struct blkfront_ring_info {
/* Lock to protect data in every ring buffer. */
spinlock_t ring_lock;
struct blkif_front_ring ring;
unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
unsigned int evtchn, irq;
struct work_struct work;
struct gnttab_free_callback callback;
struct list_head indirect_pages;
struct list_head grants;
unsigned int persistent_gnts_c;
unsigned long shadow_free;
struct blkfront_info *dev_info;
struct blk_shadow shadow[];
};
/*
* We have one of these per vbd, whether ide, scsi or 'other'. They
* hang in private_data off the gendisk structure. We may end up
* putting all kinds of interesting stuff here :-)
*/
struct blkfront_info
{
struct mutex mutex;
struct xenbus_device *xbdev;
struct gendisk *gd;
u16 sector_size;
unsigned int physical_sector_size;
unsigned long vdisk_info;
int vdevice;
blkif_vdev_t handle;
enum blkif_state connected;
/* Number of pages per ring buffer. */
unsigned int nr_ring_pages;
struct request_queue *rq;
unsigned int feature_flush:1;
unsigned int feature_fua:1;
unsigned int feature_discard:1;
unsigned int feature_secdiscard:1;
/* Connect-time cached feature_persistent parameter */
unsigned int feature_persistent_parm:1;
/* Persistent grants feature negotiation result */
unsigned int feature_persistent:1;
unsigned int bounce:1;
unsigned int discard_granularity;
unsigned int discard_alignment;
/* Number of 4KB segments handled */
unsigned int max_indirect_segments;
int is_ready;
struct blk_mq_tag_set tag_set;
struct blkfront_ring_info *rinfo;
unsigned int nr_rings;
unsigned int rinfo_size;
	/* Save incomplete reqs and bios for migration. */
struct list_head requests;
struct bio_list bio_list;
struct list_head info_list;
};
static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);
#define PARTS_PER_DISK 16
#define PARTS_PER_EXT_DISK 256
#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)
#define EXT_SHIFT 28
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
#define EMULATED_HD_DISK_MINOR_OFFSET (0)
#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
#define EMULATED_SD_DISK_MINOR_OFFSET (0)
#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)
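/*
 * Example of the extended encoding (values are illustrative): for
 * vdevice = EXTENDED | (3 << 8) | 2, BLKIF_MINOR_EXT() yields 770, which
 * the allocation code later splits as disk index 770 / 256 = 3 and
 * partition 770 % 256 = 2 (PARTS_PER_EXT_DISK partitions per disk).
 */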
#define DEV_NAME "xvd" /* name in /dev */
/*
* Grants are always the same size as a Xen page (i.e 4KB).
* A physical segment is always the same size as a Linux page.
* Number of grants per physical segment
*/
#define GRANTS_PER_PSEG (PAGE_SIZE / XEN_PAGE_SIZE)
#define GRANTS_PER_INDIRECT_FRAME \
(XEN_PAGE_SIZE / sizeof(struct blkif_request_segment))
#define INDIRECT_GREFS(_grants) \
DIV_ROUND_UP(_grants, GRANTS_PER_INDIRECT_FRAME)
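/*
* Worked example (illustrative): on x86 both PAGE_SIZE and XEN_PAGE_SIZE
* are 4 KiB, so GRANTS_PER_PSEG is 1, and an indirect frame holds
* 4096 / sizeof(struct blkif_request_segment) = 512 segment entries;
* a request carrying 512 grants therefore needs a single indirect gref.
*/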
static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
static void blkfront_gather_backend_features(struct blkfront_info *info);
static int negotiate_mq(struct blkfront_info *info);
#define for_each_rinfo(info, ptr, idx) \
for ((ptr) = (info)->rinfo, (idx) = 0; \
(idx) < (info)->nr_rings; \
(idx)++, (ptr) = (void *)(ptr) + (info)->rinfo_size)
static inline struct blkfront_ring_info *
get_rinfo(const struct blkfront_info *info, unsigned int i)
{
BUG_ON(i >= info->nr_rings);
return (void *)info->rinfo + i * info->rinfo_size;
}
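/*
* Illustrative note: blkfront_ring_info ends in a flexible shadow[]
* array sized at runtime (see negotiate_mq()), so consecutive rings are
* rinfo_size bytes apart rather than sizeof(*rinfo) - hence the
* byte-stride arithmetic in for_each_rinfo() and get_rinfo() above.
* A hypothetical walk over all rings looks like:
*
*	struct blkfront_ring_info *rinfo;
*	unsigned int i;
*
*	for_each_rinfo(info, rinfo, i)
*		pr_debug("ring %u at %p\n", i, rinfo);
*/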
static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
{
unsigned long free = rinfo->shadow_free;
BUG_ON(free >= BLK_RING_SIZE(rinfo->dev_info));
rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id;
rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
return free;
}
static int add_id_to_freelist(struct blkfront_ring_info *rinfo,
unsigned long id)
{
if (rinfo->shadow[id].req.u.rw.id != id)
return -EINVAL;
if (rinfo->shadow[id].request == NULL)
return -EINVAL;
rinfo->shadow[id].req.u.rw.id = rinfo->shadow_free;
rinfo->shadow[id].request = NULL;
rinfo->shadow_free = id;
return 0;
}
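/*
* Illustrative sketch (not part of the driver): the free list above
* chains free shadow-slot indices through the otherwise-unused
* req.u.rw.id field, so no separate next-pointer array is needed.
* The same technique on a plain array, ignoring locking:
*/
#if 0
struct slot {
unsigned long next_free; /* meaningful only while the slot is free */
};

static unsigned long slot_head; /* index of the first free slot */

static unsigned long slot_get(struct slot *slots)
{
unsigned long free = slot_head;

slot_head = slots[free].next_free; /* pop */
return free;
}

static void slot_put(struct slot *slots, unsigned long id)
{
slots[id].next_free = slot_head; /* push */
slot_head = id;
}
#endif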
static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
{
struct blkfront_info *info = rinfo->dev_info;
struct page *granted_page;
struct grant *gnt_list_entry, *n;
int i = 0;
while (i < num) {
gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
if (!gnt_list_entry)
goto out_of_memory;
if (info->bounce) {
granted_page = alloc_page(GFP_NOIO | __GFP_ZERO);
if (!granted_page) {
kfree(gnt_list_entry);
goto out_of_memory;
}
gnt_list_entry->page = granted_page;
}
gnt_list_entry->gref = INVALID_GRANT_REF;
list_add(&gnt_list_entry->node, &rinfo->grants);
i++;
}
return 0;
out_of_memory:
list_for_each_entry_safe(gnt_list_entry, n,
&rinfo->grants, node) {
list_del(&gnt_list_entry->node);
if (info->bounce)
__free_page(gnt_list_entry->page);
kfree(gnt_list_entry);
i--;
}
BUG_ON(i != 0);
return -ENOMEM;
}
static struct grant *get_free_grant(struct blkfront_ring_info *rinfo)
{
struct grant *gnt_list_entry;
BUG_ON(list_empty(&rinfo->grants));
gnt_list_entry = list_first_entry(&rinfo->grants, struct grant,
node);
list_del(&gnt_list_entry->node);
if (gnt_list_entry->gref != INVALID_GRANT_REF)
rinfo->persistent_gnts_c--;
return gnt_list_entry;
}
static inline void grant_foreign_access(const struct grant *gnt_list_entry,
const struct blkfront_info *info)
{
gnttab_page_grant_foreign_access_ref_one(gnt_list_entry->gref,
info->xbdev->otherend_id,
gnt_list_entry->page,
0);
}
static struct grant *get_grant(grant_ref_t *gref_head,
unsigned long gfn,
struct blkfront_ring_info *rinfo)
{
struct grant *gnt_list_entry = get_free_grant(rinfo);
struct blkfront_info *info = rinfo->dev_info;
if (gnt_list_entry->gref != INVALID_GRANT_REF)
return gnt_list_entry;
/* Assign a gref to this page */
gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
BUG_ON(gnt_list_entry->gref == -ENOSPC);
if (info->bounce)
grant_foreign_access(gnt_list_entry, info);
else {
/* Grant access to the GFN passed by the caller */
gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
info->xbdev->otherend_id,
gfn, 0);
}
return gnt_list_entry;
}
static struct grant *get_indirect_grant(grant_ref_t *gref_head,
struct blkfront_ring_info *rinfo)
{
struct grant *gnt_list_entry = get_free_grant(rinfo);
struct blkfront_info *info = rinfo->dev_info;
if (gnt_list_entry->gref != INVALID_GRANT_REF)
return gnt_list_entry;
/* Assign a gref to this page */
gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
BUG_ON(gnt_list_entry->gref == -ENOSPC);
if (!info->bounce) {
struct page *indirect_page;
/* Fetch a pre-allocated page to use for indirect grefs */
BUG_ON(list_empty(&rinfo->indirect_pages));
indirect_page = list_first_entry(&rinfo->indirect_pages,
struct page, lru);
list_del(&indirect_page->lru);
gnt_list_entry->page = indirect_page;
}
grant_foreign_access(gnt_list_entry, info);
return gnt_list_entry;
}
static const char *op_name(int op)
{
static const char *const names[] = {
[BLKIF_OP_READ] = "read",
[BLKIF_OP_WRITE] = "write",
[BLKIF_OP_WRITE_BARRIER] = "barrier",
[BLKIF_OP_FLUSH_DISKCACHE] = "flush",
[BLKIF_OP_DISCARD] = "discard" };
if (op < 0 || op >= ARRAY_SIZE(names))
return "unknown";
if (!names[op])
return "reserved";
return names[op];
}
static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
unsigned int end = minor + nr;
int rc;
if (end > nr_minors) {
unsigned long *bitmap, *old;
bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
GFP_KERNEL);
if (bitmap == NULL)
return -ENOMEM;
spin_lock(&minor_lock);
if (end > nr_minors) {
old = minors;
memcpy(bitmap, minors,
BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
minors = bitmap;
nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
} else
old = bitmap;
spin_unlock(&minor_lock);
kfree(old);
}
spin_lock(&minor_lock);
if (find_next_bit(minors, end, minor) >= end) {
bitmap_set(minors, minor, nr);
rc = 0;
} else
rc = -EBUSY;
spin_unlock(&minor_lock);
return rc;
}
static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
unsigned int end = minor + nr;
BUG_ON(end > nr_minors);
spin_lock(&minor_lock);
bitmap_clear(minors, minor, nr);
spin_unlock(&minor_lock);
}
static void blkif_restart_queue_callback(void *arg)
{
struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)arg;
schedule_work(&rinfo->work);
}
static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
/* We don't have real geometry info, but let's at least return
values consistent with the size of the device */
sector_t nsect = get_capacity(bd->bd_disk);
sector_t cylinders = nsect;
hg->heads = 0xff;
hg->sectors = 0x3f;
sector_div(cylinders, hg->heads * hg->sectors);
hg->cylinders = cylinders;
if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
hg->cylinders = 0xffff;
return 0;
}
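/*
* Worked example (illustrative): a 10 GiB disk has 20971520 sectors;
* with 255 heads and 63 sectors per track the code above reports
* 20971520 / (255 * 63) = 1305 cylinders, and since
* 1306 * 255 * 63 >= 20971520 the 0xffff clamp is not taken.
*/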
static int blkif_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned command, unsigned long argument)
{
struct blkfront_info *info = bdev->bd_disk->private_data;
int i;
switch (command) {
case CDROMMULTISESSION:
for (i = 0; i < sizeof(struct cdrom_multisession); i++)
if (put_user(0, (char __user *)(argument + i)))
return -EFAULT;
return 0;
case CDROM_GET_CAPABILITY:
if (!(info->vdisk_info & VDISK_CDROM))
return -EINVAL;
return 0;
default:
return -EINVAL;
}
}
static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
struct request *req,
struct blkif_request **ring_req)
{
unsigned long id;
*ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
rinfo->ring.req_prod_pvt++;
id = get_id_from_freelist(rinfo);
rinfo->shadow[id].request = req;
rinfo->shadow[id].status = REQ_PROCESSING;
rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
rinfo->shadow[id].req.u.rw.id = id;
return id;
}
static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
{
struct blkfront_info *info = rinfo->dev_info;
struct blkif_request *ring_req, *final_ring_req;
unsigned long id;
/* Fill out a communications ring structure. */
id = blkif_ring_get_request(rinfo, req, &final_ring_req);
ring_req = &rinfo->shadow[id].req;
ring_req->operation = BLKIF_OP_DISCARD;
ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
ring_req->u.discard.id = id;
ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)
ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
else
ring_req->u.discard.flag = 0;
/* Copy the request to the ring page. */
*final_ring_req = *ring_req;
rinfo->shadow[id].status = REQ_WAITING;
return 0;
}
struct setup_rw_req {
unsigned int grant_idx;
struct blkif_request_segment *segments;
struct blkfront_ring_info *rinfo;
struct blkif_request *ring_req;
grant_ref_t gref_head;
unsigned int id;
/* Only used when persistent grants are in use and it is a write request */
bool need_copy;
unsigned int bvec_off;
char *bvec_data;
bool require_extra_req;
struct blkif_request *extra_ring_req;
};
static void blkif_setup_rw_req_grant(unsigned long gfn, unsigned int offset,
unsigned int len, void *data)
{
struct setup_rw_req *setup = data;
int n, ref;
struct grant *gnt_list_entry;
unsigned int fsect, lsect;
/* Convenient aliases */
unsigned int grant_idx = setup->grant_idx;
struct blkif_request *ring_req = setup->ring_req;
struct blkfront_ring_info *rinfo = setup->rinfo;
/*
* We always use the shadow of the first request to store the list
* of grants associated with the block I/O request. This makes the
* completion easier to handle even if the block I/O request is
* split.
*/
struct blk_shadow *shadow = &rinfo->shadow[setup->id];
if (unlikely(setup->require_extra_req &&
grant_idx >= BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
/*
* We are using the second request; set up grant_idx
* to be the index into its segment array.
*/
grant_idx -= BLKIF_MAX_SEGMENTS_PER_REQUEST;
ring_req = setup->extra_ring_req;
}
if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
(grant_idx % GRANTS_PER_INDIRECT_FRAME == 0)) {
if (setup->segments)
kunmap_atomic(setup->segments);
n = grant_idx / GRANTS_PER_INDIRECT_FRAME;
gnt_list_entry = get_indirect_grant(&setup->gref_head, rinfo);
shadow->indirect_grants[n] = gnt_list_entry;
setup->segments = kmap_atomic(gnt_list_entry->page);
ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
}
gnt_list_entry = get_grant(&setup->gref_head, gfn, rinfo);
ref = gnt_list_entry->gref;
/*
* All the grants are stored in the shadow of the first
* request. Therefore we have to use the global index.
*/
shadow->grants_used[setup->grant_idx] = gnt_list_entry;
if (setup->need_copy) {
void *shared_data;
shared_data = kmap_atomic(gnt_list_entry->page);
/*
* this does not wipe data stored outside the
* range sg->offset..sg->offset+sg->length.
* Therefore, blkback *could* see data from
* previous requests. This is OK as long as
* persistent grants are shared with just one
* domain. It may need refactoring if this
* changes
*/
memcpy(shared_data + offset,
setup->bvec_data + setup->bvec_off,
len);
kunmap_atomic(shared_data);
setup->bvec_off += len;
}
fsect = offset >> 9;
lsect = fsect + (len >> 9) - 1;
if (ring_req->operation != BLKIF_OP_INDIRECT) {
ring_req->u.rw.seg[grant_idx] =
(struct blkif_request_segment) {
.gref = ref,
.first_sect = fsect,
.last_sect = lsect };
} else {
setup->segments[grant_idx % GRANTS_PER_INDIRECT_FRAME] =
(struct blkif_request_segment) {
.gref = ref,
.first_sect = fsect,
.last_sect = lsect };
}
(setup->grant_idx)++;
}
static void blkif_setup_extra_req(struct blkif_request *first,
struct blkif_request *second)
{
uint16_t nr_segments = first->u.rw.nr_segments;
/*
* The second request is only present when the first request uses
* all its segments. It is always a continuation of the first one.
*/
first->u.rw.nr_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
second->u.rw.nr_segments = nr_segments - BLKIF_MAX_SEGMENTS_PER_REQUEST;
second->u.rw.sector_number = first->u.rw.sector_number +
(BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) / 512;
second->u.rw.handle = first->u.rw.handle;
second->operation = first->operation;
}
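/*
* Worked example (illustrative): with BLKIF_MAX_SEGMENTS_PER_REQUEST = 11
* and 4 KiB Xen pages, a 16-segment request is split 11 + 5, and the
* second half starts 11 * 4096 / 512 = 88 sectors past the first.
*/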
static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
{
struct blkfront_info *info = rinfo->dev_info;
struct blkif_request *ring_req, *extra_ring_req = NULL;
struct blkif_request *final_ring_req, *final_extra_ring_req = NULL;
unsigned long id, extra_id = NO_ASSOCIATED_ID;
bool require_extra_req = false;
int i;
struct setup_rw_req setup = {
.grant_idx = 0,
.segments = NULL,
.rinfo = rinfo,
.need_copy = rq_data_dir(req) && info->bounce,
};
/*
* Used to record whether we can queue the request using only
* existing persistent grants, or whether we have to get new
* grants, as there are not enough free ones.
*/
bool new_persistent_gnts = false;
struct scatterlist *sg;
int num_sg, max_grefs, num_grant;
max_grefs = req->nr_phys_segments * GRANTS_PER_PSEG;
if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
/*
* If we are using indirect segments we need to account
* for the indirect grefs used in the request.
*/
max_grefs += INDIRECT_GREFS(max_grefs);
/* Check if we have enough persistent grants to allocate a request */
if (rinfo->persistent_gnts_c < max_grefs) {
new_persistent_gnts = true;
if (gnttab_alloc_grant_references(
max_grefs - rinfo->persistent_gnts_c,
&setup.gref_head) < 0) {
gnttab_request_free_callback(
&rinfo->callback,
blkif_restart_queue_callback,
rinfo,
max_grefs - rinfo->persistent_gnts_c);
return 1;
}
}
/* Fill out a communications ring structure. */
id = blkif_ring_get_request(rinfo, req, &final_ring_req);
ring_req = &rinfo->shadow[id].req;
num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
num_grant = 0;
/* Calculate the number of grants used */
for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
num_grant += gnttab_count_grant(sg->offset, sg->length);
require_extra_req = info->max_indirect_segments == 0 &&
num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST;
BUG_ON(!HAS_EXTRA_REQ && require_extra_req);
rinfo->shadow[id].num_sg = num_sg;
if (num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST &&
likely(!require_extra_req)) {
/*
* The indirect operation can only be a BLKIF_OP_READ or
* BLKIF_OP_WRITE
*/
BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
ring_req->operation = BLKIF_OP_INDIRECT;
ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
ring_req->u.indirect.handle = info->handle;
ring_req->u.indirect.nr_segments = num_grant;
} else {
ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
ring_req->u.rw.handle = info->handle;
ring_req->operation = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
if (req_op(req) == REQ_OP_FLUSH ||
(req_op(req) == REQ_OP_WRITE && (req->cmd_flags & REQ_FUA))) {
/*
* Ideally we can do an unordered flush-to-disk.
* In case the backend only supports barriers, use that.
* A barrier request is a superset of FUA, so we can
* implement it the same way. (It's also a FLUSH+FUA,
* since it is guaranteed ordered WRT previous writes.)
*/
if (info->feature_flush && info->feature_fua)
ring_req->operation =
BLKIF_OP_WRITE_BARRIER;
else if (info->feature_flush)
ring_req->operation =
BLKIF_OP_FLUSH_DISKCACHE;
else
ring_req->operation = 0;
}
ring_req->u.rw.nr_segments = num_grant;
if (unlikely(require_extra_req)) {
extra_id = blkif_ring_get_request(rinfo, req,
&final_extra_ring_req);
extra_ring_req = &rinfo->shadow[extra_id].req;
/*
* Only the first request contains the scatter-gather
* list.
*/
rinfo->shadow[extra_id].num_sg = 0;
blkif_setup_extra_req(ring_req, extra_ring_req);
/* Link the 2 requests together */
rinfo->shadow[extra_id].associated_id = id;
rinfo->shadow[id].associated_id = extra_id;
}
}
setup.ring_req = ring_req;
setup.id = id;
setup.require_extra_req = require_extra_req;
if (unlikely(require_extra_req))
setup.extra_ring_req = extra_ring_req;
for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) {
BUG_ON(sg->offset + sg->length > PAGE_SIZE);
if (setup.need_copy) {
setup.bvec_off = sg->offset;
setup.bvec_data = kmap_atomic(sg_page(sg));
}
gnttab_foreach_grant_in_range(sg_page(sg),
sg->offset,
sg->length,
blkif_setup_rw_req_grant,
&setup);
if (setup.need_copy)
kunmap_atomic(setup.bvec_data);
}
if (setup.segments)
kunmap_atomic(setup.segments);
/* Copy request(s) to the ring page. */
*final_ring_req = *ring_req;
rinfo->shadow[id].status = REQ_WAITING;
if (unlikely(require_extra_req)) {
*final_extra_ring_req = *extra_ring_req;
rinfo->shadow[extra_id].status = REQ_WAITING;
}
if (new_persistent_gnts)
gnttab_free_grant_references(setup.gref_head);
return 0;
}
/*
* Generate a Xen blkfront IO request from a blk layer request. Reads
* and writes are handled as expected.
*
* @req: a request struct
*/
static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo)
{
if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
return 1;
if (unlikely(req_op(req) == REQ_OP_DISCARD ||
req_op(req) == REQ_OP_SECURE_ERASE))
return blkif_queue_discard_req(req, rinfo);
else
return blkif_queue_rw_req(req, rinfo);
}
static inline void flush_requests(struct blkfront_ring_info *rinfo)
{
int notify;
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);
if (notify)
notify_remote_via_irq(rinfo->irq);
}
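/*
* Illustrative note: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() publishes
* req_prod_pvt to the shared ring and sets 'notify' only when the
* backend has asked for an event in the newly published window, so most
* submissions avoid the event-channel notification entirely.
*/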
static inline bool blkif_request_flush_invalid(struct request *req,
struct blkfront_info *info)
{
return (blk_rq_is_passthrough(req) ||
((req_op(req) == REQ_OP_FLUSH) &&
!info->feature_flush) ||
((req->cmd_flags & REQ_FUA) &&
!info->feature_fua));
}
static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *qd)
{
unsigned long flags;
int qid = hctx->queue_num;
struct blkfront_info *info = hctx->queue->queuedata;
struct blkfront_ring_info *rinfo = NULL;
rinfo = get_rinfo(info, qid);
blk_mq_start_request(qd->rq);
spin_lock_irqsave(&rinfo->ring_lock, flags);
if (RING_FULL(&rinfo->ring))
goto out_busy;
if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
goto out_err;
if (blkif_queue_request(qd->rq, rinfo))
goto out_busy;
flush_requests(rinfo);
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
return BLK_STS_OK;
out_err:
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
return BLK_STS_IOERR;
out_busy:
blk_mq_stop_hw_queue(hctx);
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
return BLK_STS_DEV_RESOURCE;
}
static void blkif_complete_rq(struct request *rq)
{
blk_mq_end_request(rq, blkif_req(rq)->error);
}
static const struct blk_mq_ops blkfront_mq_ops = {
.queue_rq = blkif_queue_rq,
.complete = blkif_complete_rq,
};
static void blkif_set_queue_limits(struct blkfront_info *info)
{
struct request_queue *rq = info->rq;
struct gendisk *gd = info->gd;
unsigned int segments = info->max_indirect_segments ? :
BLKIF_MAX_SEGMENTS_PER_REQUEST;
blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);
if (info->feature_discard) {
blk_queue_max_discard_sectors(rq, get_capacity(gd));
rq->limits.discard_granularity = info->discard_granularity ?:
info->physical_sector_size;
rq->limits.discard_alignment = info->discard_alignment;
if (info->feature_secdiscard)
blk_queue_max_secure_erase_sectors(rq,
get_capacity(gd));
}
/* Hard sector size and max sectors impersonate the equiv. hardware. */
blk_queue_logical_block_size(rq, info->sector_size);
blk_queue_physical_block_size(rq, info->physical_sector_size);
blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
/* Each segment in a request is up to an aligned page in size. */
blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
blk_queue_max_segment_size(rq, PAGE_SIZE);
/* Ensure a merged request will fit in a single I/O ring slot. */
blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
/* Make sure buffer addresses are sector-aligned. */
blk_queue_dma_alignment(rq, 511);
}
static const char *flush_info(struct blkfront_info *info)
{
if (info->feature_flush && info->feature_fua)
return "barrier: enabled;";
else if (info->feature_flush)
return "flush diskcache: enabled;";
else
return "barrier or flush: disabled;";
}
static void xlvbd_flush(struct blkfront_info *info)
{
blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
info->feature_fua ? true : false);
pr_info("blkfront: %s: %s %s %s %s %s %s %s\n",
info->gd->disk_name, flush_info(info),
"persistent grants:", info->feature_persistent ?
"enabled;" : "disabled;", "indirect descriptors:",
info->max_indirect_segments ? "enabled;" : "disabled;",
"bounce buffer:", info->bounce ? "enabled" : "disabled;");
}
static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
{
int major;
major = BLKIF_MAJOR(vdevice);
*minor = BLKIF_MINOR(vdevice);
switch (major) {
case XEN_IDE0_MAJOR:
*offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
*minor = ((*minor / 64) * PARTS_PER_DISK) +
EMULATED_HD_DISK_MINOR_OFFSET;
break;
case XEN_IDE1_MAJOR:
*offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
*minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
EMULATED_HD_DISK_MINOR_OFFSET;
break;
case XEN_SCSI_DISK0_MAJOR:
*offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
*minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
break;
case XEN_SCSI_DISK1_MAJOR:
case XEN_SCSI_DISK2_MAJOR:
case XEN_SCSI_DISK3_MAJOR:
case XEN_SCSI_DISK4_MAJOR:
case XEN_SCSI_DISK5_MAJOR:
case XEN_SCSI_DISK6_MAJOR:
case XEN_SCSI_DISK7_MAJOR:
*offset = (*minor / PARTS_PER_DISK) +
((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
EMULATED_SD_DISK_NAME_OFFSET;
*minor = *minor +
((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
EMULATED_SD_DISK_MINOR_OFFSET;
break;
case XEN_SCSI_DISK8_MAJOR:
case XEN_SCSI_DISK9_MAJOR:
case XEN_SCSI_DISK10_MAJOR:
case XEN_SCSI_DISK11_MAJOR:
case XEN_SCSI_DISK12_MAJOR:
case XEN_SCSI_DISK13_MAJOR:
case XEN_SCSI_DISK14_MAJOR:
case XEN_SCSI_DISK15_MAJOR:
*offset = (*minor / PARTS_PER_DISK) +
((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
EMULATED_SD_DISK_NAME_OFFSET;
*minor = *minor +
((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
EMULATED_SD_DISK_MINOR_OFFSET;
break;
case XENVBD_MAJOR:
*offset = *minor / PARTS_PER_DISK;
break;
default:
printk(KERN_WARNING "blkfront: your disk configuration is "
"incorrect, please use an xvd device instead\n");
return -ENODEV;
}
return 0;
}
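/*
* Worked example (illustrative, assuming the conventional major numbers):
* vdevice = (XEN_SCSI_DISK0_MAJOR << 8) | 2 takes the
* XEN_SCSI_DISK0_MAJOR branch above, yielding *offset = 2 / 16 = 0
* (disk name "xvda") and *minor = 2, i.e. the second partition of the
* first emulated SCSI disk.
*/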
static char *encode_disk_name(char *ptr, unsigned int n)
{
if (n >= 26)
ptr = encode_disk_name(ptr, n / 26 - 1);
*ptr = 'a' + n % 26;
return ptr + 1;
}
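/*
* Illustrative examples of the bijective base-26 encoding above:
* n = 0 -> "a" (xvda), n = 25 -> "z", n = 26 -> "aa", n = 701 -> "zz",
* n = 702 -> "aaa"; the recursion emits the most significant letter
* first.
*/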
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
struct blkfront_info *info, u16 sector_size,
unsigned int physical_sector_size)
{
struct gendisk *gd;
int nr_minors = 1;
int err;
unsigned int offset;
int minor;
int nr_parts;
char *ptr;
BUG_ON(info->gd != NULL);
BUG_ON(info->rq != NULL);
if ((info->vdevice>>EXT_SHIFT) > 1) {
/* this is above the extended range; something is wrong */
printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
return -ENODEV;
}
if (!VDEV_IS_EXTENDED(info->vdevice)) {
err = xen_translate_vdev(info->vdevice, &minor, &offset);
if (err)
return err;
nr_parts = PARTS_PER_DISK;
} else {
minor = BLKIF_MINOR_EXT(info->vdevice);
nr_parts = PARTS_PER_EXT_DISK;
offset = minor / nr_parts;
if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
"emulated IDE disks,\n\t choose an xvd device name"
"from xvde on\n", info->vdevice);
}
if (minor >> MINORBITS) {
pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
info->vdevice, minor);
return -ENODEV;
}
if ((minor % nr_parts) == 0)
nr_minors = nr_parts;
err = xlbd_reserve_minors(minor, nr_minors);
if (err)
return err;
memset(&info->tag_set, 0, sizeof(info->tag_set));
info->tag_set.ops = &blkfront_mq_ops;
info->tag_set.nr_hw_queues = info->nr_rings;
if (HAS_EXTRA_REQ && info->max_indirect_segments == 0) {
/*
* When indirect descriptors are not supported, the I/O request
* will be split between multiple requests in the ring.
* To avoid problems when sending the requests, halve
* the depth of the queue.
*/
info->tag_set.queue_depth = BLK_RING_SIZE(info) / 2;
} else
info->tag_set.queue_depth = BLK_RING_SIZE(info);
info->tag_set.numa_node = NUMA_NO_NODE;
info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
info->tag_set.cmd_size = sizeof(struct blkif_req);
info->tag_set.driver_data = info;
err = blk_mq_alloc_tag_set(&info->tag_set);
if (err)
goto out_release_minors;
gd = blk_mq_alloc_disk(&info->tag_set, info);
if (IS_ERR(gd)) {
err = PTR_ERR(gd);
goto out_free_tag_set;
}
strcpy(gd->disk_name, DEV_NAME);
ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
if (nr_minors > 1)
*ptr = 0;
else
snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
"%d", minor & (nr_parts - 1));
gd->major = XENVBD_MAJOR;
gd->first_minor = minor;
gd->minors = nr_minors;
gd->fops = &xlvbd_block_fops;
gd->private_data = info;
set_capacity(gd, capacity);
info->rq = gd->queue;
info->gd = gd;
info->sector_size = sector_size;
info->physical_sector_size = physical_sector_size;
blkif_set_queue_limits(info);
xlvbd_flush(info);
if (info->vdisk_info & VDISK_READONLY)
set_disk_ro(gd, 1);
if (info->vdisk_info & VDISK_REMOVABLE)
gd->flags |= GENHD_FL_REMOVABLE;
return 0;
out_free_tag_set:
blk_mq_free_tag_set(&info->tag_set);
out_release_minors:
xlbd_release_minors(minor, nr_minors);
return err;
}
/* Already hold rinfo->ring_lock. */
static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo)
{
if (!RING_FULL(&rinfo->ring))
blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true);
}
static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
{
unsigned long flags;
spin_lock_irqsave(&rinfo->ring_lock, flags);
kick_pending_request_queues_locked(rinfo);
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
}
static void blkif_restart_queue(struct work_struct *work)
{
struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work);
if (rinfo->dev_info->connected == BLKIF_STATE_CONNECTED)
kick_pending_request_queues(rinfo);
}
static void blkif_free_ring(struct blkfront_ring_info *rinfo)
{
struct grant *persistent_gnt, *n;
struct blkfront_info *info = rinfo->dev_info;
int i, j, segs;
/*
* Remove indirect pages; this only happens when using indirect
* descriptors but not persistent grants.
*/
if (!list_empty(&rinfo->indirect_pages)) {
struct page *indirect_page, *n;
BUG_ON(info->bounce);
list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
list_del(&indirect_page->lru);
__free_page(indirect_page);
}
}
/* Remove all persistent grants. */
if (!list_empty(&rinfo->grants)) {
list_for_each_entry_safe(persistent_gnt, n,
&rinfo->grants, node) {
list_del(&persistent_gnt->node);
if (persistent_gnt->gref != INVALID_GRANT_REF) {
gnttab_end_foreign_access(persistent_gnt->gref,
NULL);
rinfo->persistent_gnts_c--;
}
if (info->bounce)
__free_page(persistent_gnt->page);
kfree(persistent_gnt);
}
}
BUG_ON(rinfo->persistent_gnts_c != 0);
for (i = 0; i < BLK_RING_SIZE(info); i++) {
/*
* Clear persistent grants present in requests already
* on the shared ring
*/
if (!rinfo->shadow[i].request)
goto free_shadow;
segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
rinfo->shadow[i].req.u.indirect.nr_segments :
rinfo->shadow[i].req.u.rw.nr_segments;
for (j = 0; j < segs; j++) {
persistent_gnt = rinfo->shadow[i].grants_used[j];
gnttab_end_foreign_access(persistent_gnt->gref, NULL);
if (info->bounce)
__free_page(persistent_gnt->page);
kfree(persistent_gnt);
}
if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT)
/*
* If this is not an indirect operation don't try to
* free indirect segments
*/
goto free_shadow;
for (j = 0; j < INDIRECT_GREFS(segs); j++) {
persistent_gnt = rinfo->shadow[i].indirect_grants[j];
gnttab_end_foreign_access(persistent_gnt->gref, NULL);
__free_page(persistent_gnt->page);
kfree(persistent_gnt);
}
free_shadow:
kvfree(rinfo->shadow[i].grants_used);
rinfo->shadow[i].grants_used = NULL;
kvfree(rinfo->shadow[i].indirect_grants);
rinfo->shadow[i].indirect_grants = NULL;
kvfree(rinfo->shadow[i].sg);
rinfo->shadow[i].sg = NULL;
}
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&rinfo->callback);
/* Flush gnttab callback work. Must be done with no locks held. */
flush_work(&rinfo->work);
/* Free resources associated with old device channel. */
xenbus_teardown_ring((void **)&rinfo->ring.sring, info->nr_ring_pages,
rinfo->ring_ref);
if (rinfo->irq)
unbind_from_irqhandler(rinfo->irq, rinfo);
rinfo->evtchn = rinfo->irq = 0;
}
static void blkif_free(struct blkfront_info *info, int suspend)
{
unsigned int i;
struct blkfront_ring_info *rinfo;
/* Prevent new requests being issued until we fix things up. */
info->connected = suspend ?
BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
/* No more blkif_request(). */
if (info->rq)
blk_mq_stop_hw_queues(info->rq);
for_each_rinfo(info, rinfo, i)
blkif_free_ring(rinfo);
kvfree(info->rinfo);
info->rinfo = NULL;
info->nr_rings = 0;
}
struct copy_from_grant {
const struct blk_shadow *s;
unsigned int grant_idx;
unsigned int bvec_offset;
char *bvec_data;
};
static void blkif_copy_from_grant(unsigned long gfn, unsigned int offset,
unsigned int len, void *data)
{
struct copy_from_grant *info = data;
char *shared_data;
/* Convenient aliases */
const struct blk_shadow *s = info->s;
shared_data = kmap_atomic(s->grants_used[info->grant_idx]->page);
memcpy(info->bvec_data + info->bvec_offset,
shared_data + offset, len);
info->bvec_offset += len;
info->grant_idx++;
kunmap_atomic(shared_data);
}
static enum blk_req_status blkif_rsp_to_req_status(int rsp)
{
switch (rsp)
{
case BLKIF_RSP_OKAY:
return REQ_DONE;
case BLKIF_RSP_EOPNOTSUPP:
return REQ_EOPNOTSUPP;
case BLKIF_RSP_ERROR:
default:
return REQ_ERROR;
}
}
/*
* Get the final status of the block request based on two ring responses.
*/
static int blkif_get_final_status(enum blk_req_status s1,
enum blk_req_status s2)
{
BUG_ON(s1 < REQ_DONE);
BUG_ON(s2 < REQ_DONE);
if (s1 == REQ_ERROR || s2 == REQ_ERROR)
return BLKIF_RSP_ERROR;
else if (s1 == REQ_EOPNOTSUPP || s2 == REQ_EOPNOTSUPP)
return BLKIF_RSP_EOPNOTSUPP;
return BLKIF_RSP_OKAY;
}
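/*
* Illustrative: for a request split in two, REQ_DONE + REQ_EOPNOTSUPP
* collapses to BLKIF_RSP_EOPNOTSUPP, and an error in either half makes
* the whole request fail with BLKIF_RSP_ERROR.
*/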
/*
* Return values:
* 1 response processed.
* 0 more responses still expected.
* -1 error while processing.
*/
static int blkif_completion(unsigned long *id,
struct blkfront_ring_info *rinfo,
struct blkif_response *bret)
{
int i = 0;
struct scatterlist *sg;
int num_sg, num_grant;
struct blkfront_info *info = rinfo->dev_info;
struct blk_shadow *s = &rinfo->shadow[*id];
struct copy_from_grant data = {
.grant_idx = 0,
};
num_grant = s->req.operation == BLKIF_OP_INDIRECT ?
s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
/* The I/O request may be split in two. */
if (unlikely(s->associated_id != NO_ASSOCIATED_ID)) {
struct blk_shadow *s2 = &rinfo->shadow[s->associated_id];
/* Keep the status of the current response in shadow. */
s->status = blkif_rsp_to_req_status(bret->status);
/* Wait the second response if not yet here. */
if (s2->status < REQ_DONE)
return 0;
bret->status = blkif_get_final_status(s->status,
s2->status);
/*
* All the grants are stored in the first shadow in order
* to make the completion code simpler.
*/
num_grant += s2->req.u.rw.nr_segments;
/*
* The two responses may not come in order. Only the
* first request will store the scatter-gather list.
*/
if (s2->num_sg != 0) {
/* Update "id" with the ID of the first response. */
*id = s->associated_id;
s = s2;
}
/*
* We don't need the second request anymore, so recycle
* it now.
*/
if (add_id_to_freelist(rinfo, s->associated_id))
WARN(1, "%s: can't recycle the second part (id = %ld) of the request\n",
info->gd->disk_name, s->associated_id);
}
data.s = s;
num_sg = s->num_sg;
if (bret->operation == BLKIF_OP_READ && info->bounce) {
for_each_sg(s->sg, sg, num_sg, i) {
BUG_ON(sg->offset + sg->length > PAGE_SIZE);
data.bvec_offset = sg->offset;
data.bvec_data = kmap_atomic(sg_page(sg));
gnttab_foreach_grant_in_range(sg_page(sg),
sg->offset,
sg->length,
blkif_copy_from_grant,
&data);
kunmap_atomic(data.bvec_data);
}
}
/* Add the persistent grant into the list of free grants */
for (i = 0; i < num_grant; i++) {
if (!gnttab_try_end_foreign_access(s->grants_used[i]->gref)) {
/*
* If the grant is still mapped by the backend (the
* backend has chosen to make this grant persistent)
* we add it at the head of the list, so it will be
* reused first.
*/
if (!info->feature_persistent) {
pr_alert("backed has not unmapped grant: %u\n",
s->grants_used[i]->gref);
return -1;
}
list_add(&s->grants_used[i]->node, &rinfo->grants);
rinfo->persistent_gnts_c++;
} else {
/*
* If the grant is not mapped by the backend we add it
* to the tail of the list, so it will not be picked
* again unless we run out of persistent grants.
*/
s->grants_used[i]->gref = INVALID_GRANT_REF;
list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
}
}
if (s->req.operation == BLKIF_OP_INDIRECT) {
for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
if (!gnttab_try_end_foreign_access(s->indirect_grants[i]->gref)) {
if (!info->feature_persistent) {
pr_alert("backed has not unmapped grant: %u\n",
s->indirect_grants[i]->gref);
return -1;
}
list_add(&s->indirect_grants[i]->node, &rinfo->grants);
rinfo->persistent_gnts_c++;
} else {
struct page *indirect_page;
/*
* Add the used indirect page back to the list of
* available pages for indirect grefs.
*/
if (!info->bounce) {
indirect_page = s->indirect_grants[i]->page;
list_add(&indirect_page->lru, &rinfo->indirect_pages);
}
s->indirect_grants[i]->gref = INVALID_GRANT_REF;
list_add_tail(&s->indirect_grants[i]->node, &rinfo->grants);
}
}
}
return 1;
}
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
struct request *req;
struct blkif_response bret;
RING_IDX i, rp;
unsigned long flags;
struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
struct blkfront_info *info = rinfo->dev_info;
unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
return IRQ_HANDLED;
}
spin_lock_irqsave(&rinfo->ring_lock, flags);
again:
rp = READ_ONCE(rinfo->ring.sring->rsp_prod);
virt_rmb(); /* Ensure we see queued responses up to 'rp'. */
if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) {
pr_alert("%s: illegal number of responses %u\n",
info->gd->disk_name, rp - rinfo->ring.rsp_cons);
goto err;
}
for (i = rinfo->ring.rsp_cons; i != rp; i++) {
unsigned long id;
unsigned int op;
eoiflag = 0;
RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
id = bret.id;
/*
* The backend has messed up and given us an id that we would
* never have given to it (we stamp it up to BLK_RING_SIZE -
* look in get_id_from_freelist).
*/
if (id >= BLK_RING_SIZE(info)) {
pr_alert("%s: response has incorrect id (%ld)\n",
info->gd->disk_name, id);
goto err;
}
if (rinfo->shadow[id].status != REQ_WAITING) {
pr_alert("%s: response references no pending request\n",
info->gd->disk_name);
goto err;
}
rinfo->shadow[id].status = REQ_PROCESSING;
req = rinfo->shadow[id].request;
op = rinfo->shadow[id].req.operation;
if (op == BLKIF_OP_INDIRECT)
op = rinfo->shadow[id].req.u.indirect.indirect_op;
if (bret.operation != op) {
pr_alert("%s: response has wrong operation (%u instead of %u)\n",
info->gd->disk_name, bret.operation, op);
goto err;
}
if (bret.operation != BLKIF_OP_DISCARD) {
int ret;
/*
* We may need to wait for an extra response if the
* I/O request is split in 2
*/
ret = blkif_completion(&id, rinfo, &bret);
if (!ret)
continue;
if (unlikely(ret < 0))
goto err;
}
if (add_id_to_freelist(rinfo, id)) {
WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
info->gd->disk_name, op_name(bret.operation), id);
continue;
}
if (bret.status == BLKIF_RSP_OKAY)
blkif_req(req)->error = BLK_STS_OK;
else
blkif_req(req)->error = BLK_STS_IOERR;
switch (bret.operation) {
case BLKIF_OP_DISCARD:
if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
struct request_queue *rq = info->rq;
pr_warn_ratelimited("blkfront: %s: %s op failed\n",
info->gd->disk_name, op_name(bret.operation));
blkif_req(req)->error = BLK_STS_NOTSUPP;
info->feature_discard = 0;
info->feature_secdiscard = 0;
blk_queue_max_discard_sectors(rq, 0);
blk_queue_max_secure_erase_sectors(rq, 0);
}
break;
case BLKIF_OP_FLUSH_DISKCACHE:
case BLKIF_OP_WRITE_BARRIER:
if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
pr_warn_ratelimited("blkfront: %s: %s op failed\n",
info->gd->disk_name, op_name(bret.operation));
blkif_req(req)->error = BLK_STS_NOTSUPP;
}
if (unlikely(bret.status == BLKIF_RSP_ERROR &&
rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
pr_warn_ratelimited("blkfront: %s: empty %s op failed\n",
info->gd->disk_name, op_name(bret.operation));
blkif_req(req)->error = BLK_STS_NOTSUPP;
}
if (unlikely(blkif_req(req)->error)) {
if (blkif_req(req)->error == BLK_STS_NOTSUPP)
blkif_req(req)->error = BLK_STS_OK;
info->feature_fua = 0;
info->feature_flush = 0;
xlvbd_flush(info);
}
fallthrough;
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
if (unlikely(bret.status != BLKIF_RSP_OKAY))
dev_dbg_ratelimited(&info->xbdev->dev,
"Bad return from blkdev data request: %#x\n",
bret.status);
break;
default:
BUG();
}
if (likely(!blk_should_fake_timeout(req->q)))
blk_mq_complete_request(req);
}
rinfo->ring.rsp_cons = i;
if (i != rinfo->ring.req_prod_pvt) {
int more_to_do;
RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do);
if (more_to_do)
goto again;
} else
rinfo->ring.sring->rsp_event = i + 1;
kick_pending_request_queues_locked(rinfo);
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
xen_irq_lateeoi(irq, eoiflag);
return IRQ_HANDLED;
err:
info->connected = BLKIF_STATE_ERROR;
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
/* No EOI in order to avoid further interrupts. */
pr_alert("%s disabled for further use\n", info->gd->disk_name);
return IRQ_HANDLED;
}
static int setup_blkring(struct xenbus_device *dev,
struct blkfront_ring_info *rinfo)
{
struct blkif_sring *sring;
int err;
struct blkfront_info *info = rinfo->dev_info;
unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE;
err = xenbus_setup_ring(dev, GFP_NOIO, (void **)&sring,
info->nr_ring_pages, rinfo->ring_ref);
if (err)
goto fail;
XEN_FRONT_RING_INIT(&rinfo->ring, sring, ring_size);
err = xenbus_alloc_evtchn(dev, &rinfo->evtchn);
if (err)
goto fail;
err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt,
0, "blkif", rinfo);
if (err <= 0) {
xenbus_dev_fatal(dev, err,
"bind_evtchn_to_irqhandler failed");
goto fail;
}
rinfo->irq = err;
return 0;
fail:
blkif_free(info, 0);
return err;
}
/*
* Write out per-ring/queue nodes, including ring-ref and event-channel;
* each ring buffer may span multiple pages depending on ->nr_ring_pages.
*/
static int write_per_ring_nodes(struct xenbus_transaction xbt,
struct blkfront_ring_info *rinfo, const char *dir)
{
int err;
unsigned int i;
const char *message = NULL;
struct blkfront_info *info = rinfo->dev_info;
if (info->nr_ring_pages == 1) {
err = xenbus_printf(xbt, dir, "ring-ref", "%u", rinfo->ring_ref[0]);
if (err) {
message = "writing ring-ref";
goto abort_transaction;
}
} else {
for (i = 0; i < info->nr_ring_pages; i++) {
char ring_ref_name[RINGREF_NAME_LEN];
snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
err = xenbus_printf(xbt, dir, ring_ref_name,
"%u", rinfo->ring_ref[i]);
if (err) {
message = "writing ring-ref";
goto abort_transaction;
}
}
}
err = xenbus_printf(xbt, dir, "event-channel", "%u", rinfo->evtchn);
if (err) {
message = "writing event-channel";
goto abort_transaction;
}
return 0;
abort_transaction:
xenbus_transaction_end(xbt, 1);
if (message)
xenbus_dev_fatal(info->xbdev, err, "%s", message);
return err;
}
/* Enable the persistent grants feature. */
static bool feature_persistent = true;
module_param(feature_persistent, bool, 0644);
MODULE_PARM_DESC(feature_persistent,
"Enables the persistent grants feature");
/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
struct blkfront_info *info)
{
const char *message = NULL;
struct xenbus_transaction xbt;
int err;
unsigned int i, max_page_order;
unsigned int ring_page_order;
struct blkfront_ring_info *rinfo;
if (!info)
return -ENODEV;
/* Check if backend is trusted. */
info->bounce = !xen_blkif_trusted ||
!xenbus_read_unsigned(dev->nodename, "trusted", 1);
max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
"max-ring-page-order", 0);
ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
info->nr_ring_pages = 1 << ring_page_order;
err = negotiate_mq(info);
if (err)
goto destroy_blkring;
for_each_rinfo(info, rinfo, i) {
/* Create shared ring, alloc event channel. */
err = setup_blkring(dev, rinfo);
if (err)
goto destroy_blkring;
}
again:
err = xenbus_transaction_start(&xbt);
if (err) {
xenbus_dev_fatal(dev, err, "starting transaction");
goto destroy_blkring;
}
if (info->nr_ring_pages > 1) {
err = xenbus_printf(xbt, dev->nodename, "ring-page-order", "%u",
ring_page_order);
if (err) {
message = "writing ring-page-order";
goto abort_transaction;
}
}
/* We already got the number of queues/rings in _probe */
if (info->nr_rings == 1) {
err = write_per_ring_nodes(xbt, info->rinfo, dev->nodename);
if (err)
goto destroy_blkring;
} else {
char *path;
size_t pathsize;
err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues", "%u",
info->nr_rings);
if (err) {
message = "writing multi-queue-num-queues";
goto abort_transaction;
}
pathsize = strlen(dev->nodename) + QUEUE_NAME_LEN;
path = kmalloc(pathsize, GFP_KERNEL);
if (!path) {
err = -ENOMEM;
message = "ENOMEM while writing ring references";
goto abort_transaction;
}
for_each_rinfo(info, rinfo, i) {
memset(path, 0, pathsize);
snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i);
err = write_per_ring_nodes(xbt, rinfo, path);
if (err) {
kfree(path);
goto destroy_blkring;
}
}
kfree(path);
}
err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
XEN_IO_PROTO_ABI_NATIVE);
if (err) {
message = "writing protocol";
goto abort_transaction;
}
info->feature_persistent_parm = feature_persistent;
err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
info->feature_persistent_parm);
if (err)
dev_warn(&dev->dev,
"writing persistent grants feature to xenbus");
err = xenbus_transaction_end(xbt, 0);
if (err) {
if (err == -EAGAIN)
goto again;
xenbus_dev_fatal(dev, err, "completing transaction");
goto destroy_blkring;
}
for_each_rinfo(info, rinfo, i) {
unsigned int j;
for (j = 0; j < BLK_RING_SIZE(info); j++)
rinfo->shadow[j].req.u.rw.id = j + 1;
rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
}
xenbus_switch_state(dev, XenbusStateInitialised);
return 0;
abort_transaction:
xenbus_transaction_end(xbt, 1);
if (message)
xenbus_dev_fatal(dev, err, "%s", message);
destroy_blkring:
blkif_free(info, 0);
return err;
}
static int negotiate_mq(struct blkfront_info *info)
{
unsigned int backend_max_queues;
unsigned int i;
struct blkfront_ring_info *rinfo;
BUG_ON(info->nr_rings);
/* Check if backend supports multiple queues. */
backend_max_queues = xenbus_read_unsigned(info->xbdev->otherend,
"multi-queue-max-queues", 1);
info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
/* We need at least one ring. */
if (!info->nr_rings)
info->nr_rings = 1;
info->rinfo_size = struct_size(info->rinfo, shadow,
BLK_RING_SIZE(info));
info->rinfo = kvcalloc(info->nr_rings, info->rinfo_size, GFP_KERNEL);
if (!info->rinfo) {
xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
info->nr_rings = 0;
return -ENOMEM;
}
for_each_rinfo(info, rinfo, i) {
INIT_LIST_HEAD(&rinfo->indirect_pages);
INIT_LIST_HEAD(&rinfo->grants);
rinfo->dev_info = info;
INIT_WORK(&rinfo->work, blkif_restart_queue);
spin_lock_init(&rinfo->ring_lock);
}
return 0;
}
/*
* Entry point to this code when a new device is created. Allocate the basic
* structures and the ring buffer for communication with the backend, and
* inform the backend of the appropriate details for those. Switch to
* Initialised state.
*/
static int blkfront_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
int err, vdevice;
struct blkfront_info *info;
/* FIXME: Use dynamic device id if this is not set. */
err = xenbus_scanf(XBT_NIL, dev->nodename,
"virtual-device", "%i", &vdevice);
if (err != 1) {
/* go looking in the extended area instead */
err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
"%i", &vdevice);
if (err != 1) {
xenbus_dev_fatal(dev, err, "reading virtual-device");
return err;
}
}
if (xen_hvm_domain()) {
char *type;
int len;
/* no unplug has been done: do not hook devices != xen vbds */
if (xen_has_pv_and_legacy_disk_devices()) {
int major;
if (!VDEV_IS_EXTENDED(vdevice))
major = BLKIF_MAJOR(vdevice);
else
major = XENVBD_MAJOR;
if (major != XENVBD_MAJOR) {
printk(KERN_INFO
"%s: HVM does not support vbd %d as xen block device\n",
__func__, vdevice);
return -ENODEV;
}
}
/* do not create a PV cdrom device if we are an HVM guest */
type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
if (IS_ERR(type))
return -ENODEV;
if (strncmp(type, "cdrom", 5) == 0) {
kfree(type);
return -ENODEV;
}
kfree(type);
}
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
return -ENOMEM;
}
info->xbdev = dev;
mutex_init(&info->mutex);
info->vdevice = vdevice;
info->connected = BLKIF_STATE_DISCONNECTED;
/* Front end dir is a number, which is used as the id. */
info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
dev_set_drvdata(&dev->dev, info);
mutex_lock(&blkfront_mutex);
list_add(&info->info_list, &info_list);
mutex_unlock(&blkfront_mutex);
return 0;
}
static int blkif_recover(struct blkfront_info *info)
{
unsigned int r_index;
struct request *req, *n;
int rc;
struct bio *bio;
unsigned int segs;
struct blkfront_ring_info *rinfo;
blkfront_gather_backend_features(info);
/* Reset limits changed by blk_mq_update_nr_hw_queues(). */
blkif_set_queue_limits(info);
segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
for_each_rinfo(info, rinfo, r_index) {
rc = blkfront_setup_indirect(rinfo);
if (rc)
return rc;
}
xenbus_switch_state(info->xbdev, XenbusStateConnected);
/* Now safe for us to use the shared ring */
info->connected = BLKIF_STATE_CONNECTED;
for_each_rinfo(info, rinfo, r_index) {
/* Kick any other new requests queued since we resumed */
kick_pending_request_queues(rinfo);
}
list_for_each_entry_safe(req, n, &info->requests, queuelist) {
/* Requeue pending requests (flush or discard) */
list_del_init(&req->queuelist);
BUG_ON(req->nr_phys_segments > segs);
blk_mq_requeue_request(req, false);
}
blk_mq_start_stopped_hw_queues(info->rq, true);
blk_mq_kick_requeue_list(info->rq);
while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
/* Traverse the list of pending bios and re-queue them */
submit_bio(bio);
}
return 0;
}
/*
* We are reconnecting to the backend, due to a suspend/resume, or a backend
* driver restart. We tear down our blkif structure and recreate it, but
* leave the device-layer structures intact so that this is transparent to the
* rest of the kernel.
*/
static int blkfront_resume(struct xenbus_device *dev)
{
struct blkfront_info *info = dev_get_drvdata(&dev->dev);
int err = 0;
unsigned int i, j;
struct blkfront_ring_info *rinfo;
dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
bio_list_init(&info->bio_list);
INIT_LIST_HEAD(&info->requests);
for_each_rinfo(info, rinfo, i) {
struct bio_list merge_bio;
struct blk_shadow *shadow = rinfo->shadow;
for (j = 0; j < BLK_RING_SIZE(info); j++) {
/* Not in use? */
if (!shadow[j].request)
continue;
/*
* Get the bios in the request so we can re-queue them.
*/
if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
req_op(shadow[j].request) == REQ_OP_DISCARD ||
req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
shadow[j].request->cmd_flags & REQ_FUA) {
/*
* Flush operations don't contain bios, so
* we need to requeue the whole request.
*
* XXX: but this doesn't make any sense for a
* write with the FUA flag set.
*/
list_add(&shadow[j].request->queuelist, &info->requests);
continue;
}
merge_bio.head = shadow[j].request->bio;
merge_bio.tail = shadow[j].request->biotail;
bio_list_merge(&info->bio_list, &merge_bio);
shadow[j].request->bio = NULL;
blk_mq_end_request(shadow[j].request, BLK_STS_OK);
}
}
blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
err = talk_to_blkback(dev, info);
if (!err)
blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
/*
* We have to wait for the backend to switch to
* connected state, since we want to read which
* features it supports.
*/
return err;
}
static void blkfront_closing(struct blkfront_info *info)
{
struct xenbus_device *xbdev = info->xbdev;
struct blkfront_ring_info *rinfo;
unsigned int i;
if (xbdev->state == XenbusStateClosing)
return;
/* No more blkif_request(). */
if (info->rq && info->gd) {
blk_mq_stop_hw_queues(info->rq);
blk_mark_disk_dead(info->gd);
}
for_each_rinfo(info, rinfo, i) {
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&rinfo->callback);
/* Flush gnttab callback work. Must be done with no locks held. */
flush_work(&rinfo->work);
}
xenbus_frontend_closed(xbdev);
}
static void blkfront_setup_discard(struct blkfront_info *info)
{
info->feature_discard = 1;
info->discard_granularity = xenbus_read_unsigned(info->xbdev->otherend,
"discard-granularity",
0);
info->discard_alignment = xenbus_read_unsigned(info->xbdev->otherend,
"discard-alignment", 0);
info->feature_secdiscard =
!!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
0);
}
static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
{
unsigned int psegs, grants, memflags;
int err, i;
struct blkfront_info *info = rinfo->dev_info;
memflags = memalloc_noio_save();
if (info->max_indirect_segments == 0) {
if (!HAS_EXTRA_REQ)
grants = BLKIF_MAX_SEGMENTS_PER_REQUEST;
else {
/*
* When an extra req is required, the maximum
* number of grants supported is bounded by the
* size of a Linux block segment.
*/
grants = GRANTS_PER_PSEG;
}
} else
grants = info->max_indirect_segments;
psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);
err = fill_grant_buffer(rinfo,
(grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
if (err)
goto out_of_memory;
if (!info->bounce && info->max_indirect_segments) {
/*
* We are using indirect descriptors but don't have a bounce
* buffer, so we need to allocate a set of pages that can be
* used for mapping indirect grefs.
*/
int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);
BUG_ON(!list_empty(&rinfo->indirect_pages));
for (i = 0; i < num; i++) {
struct page *indirect_page = alloc_page(GFP_KERNEL |
__GFP_ZERO);
if (!indirect_page)
goto out_of_memory;
list_add(&indirect_page->lru, &rinfo->indirect_pages);
}
}
for (i = 0; i < BLK_RING_SIZE(info); i++) {
rinfo->shadow[i].grants_used =
kvcalloc(grants,
sizeof(rinfo->shadow[i].grants_used[0]),
GFP_KERNEL);
rinfo->shadow[i].sg = kvcalloc(psegs,
sizeof(rinfo->shadow[i].sg[0]),
GFP_KERNEL);
if (info->max_indirect_segments)
rinfo->shadow[i].indirect_grants =
kvcalloc(INDIRECT_GREFS(grants),
sizeof(rinfo->shadow[i].indirect_grants[0]),
GFP_KERNEL);
if ((rinfo->shadow[i].grants_used == NULL) ||
(rinfo->shadow[i].sg == NULL) ||
(info->max_indirect_segments &&
(rinfo->shadow[i].indirect_grants == NULL)))
goto out_of_memory;
sg_init_table(rinfo->shadow[i].sg, psegs);
}
memalloc_noio_restore(memflags);
return 0;
out_of_memory:
for (i = 0; i < BLK_RING_SIZE(info); i++) {
kvfree(rinfo->shadow[i].grants_used);
rinfo->shadow[i].grants_used = NULL;
kvfree(rinfo->shadow[i].sg);
rinfo->shadow[i].sg = NULL;
kvfree(rinfo->shadow[i].indirect_grants);
rinfo->shadow[i].indirect_grants = NULL;
}
if (!list_empty(&rinfo->indirect_pages)) {
struct page *indirect_page, *n;
list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
list_del(&indirect_page->lru);
__free_page(indirect_page);
}
}
memalloc_noio_restore(memflags);
return -ENOMEM;
}
/*
* Gather all backend feature-*
*/
static void blkfront_gather_backend_features(struct blkfront_info *info)
{
unsigned int indirect_segments;
info->feature_flush = 0;
info->feature_fua = 0;
/*
* If there's no "feature-barrier" defined, then it means
* we're dealing with a very old backend which writes
* synchronously; nothing to do.
*
* If there are barriers, then we use flush.
*/
if (xenbus_read_unsigned(info->xbdev->otherend, "feature-barrier", 0)) {
info->feature_flush = 1;
info->feature_fua = 1;
}
/*
* And if there is "feature-flush-cache", use that in
* preference to barriers.
*/
if (xenbus_read_unsigned(info->xbdev->otherend, "feature-flush-cache",
0)) {
info->feature_flush = 1;
info->feature_fua = 0;
}
if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
blkfront_setup_discard(info);
if (info->feature_persistent_parm)
info->feature_persistent =
!!xenbus_read_unsigned(info->xbdev->otherend,
"feature-persistent", 0);
if (info->feature_persistent)
info->bounce = true;
indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
"feature-max-indirect-segments", 0);
if (indirect_segments > xen_blkif_max_segments)
indirect_segments = xen_blkif_max_segments;
if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
indirect_segments = 0;
info->max_indirect_segments = indirect_segments;
if (info->feature_persistent) {
mutex_lock(&blkfront_mutex);
schedule_delayed_work(&blkfront_work, HZ * 10);
mutex_unlock(&blkfront_mutex);
}
}
/*
* Invoked when the backend is finally 'ready' (and has produced
* the details about the physical device - #sectors, size, etc).
*/
static void blkfront_connect(struct blkfront_info *info)
{
unsigned long long sectors;
unsigned long sector_size;
unsigned int physical_sector_size;
int err, i;
struct blkfront_ring_info *rinfo;
switch (info->connected) {
case BLKIF_STATE_CONNECTED:
/*
* Potentially, the back-end may be signalling
* a capacity change; update the capacity.
*/
err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
"sectors", "%Lu", §ors);
if (XENBUS_EXIST_ERR(err))
return;
printk(KERN_INFO "Setting capacity to %Lu\n",
sectors);
set_capacity_and_notify(info->gd, sectors);
return;
case BLKIF_STATE_SUSPENDED:
/*
* If we are recovering from suspension, we need to wait
* for the backend to announce its features before
* reconnecting; at least we need to know if the backend
* supports indirect descriptors, and how many.
*/
blkif_recover(info);
return;
default:
break;
}
dev_dbg(&info->xbdev->dev, "%s:%s.\n",
__func__, info->xbdev->otherend);
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"sectors", "%llu", §ors,
"info", "%u", &info->vdisk_info,
"sector-size", "%lu", §or_size,
NULL);
if (err) {
xenbus_dev_fatal(info->xbdev, err,
"reading backend fields at %s",
info->xbdev->otherend);
return;
}
/*
* physical-sector-size is a newer field, so old backends may not
* provide this. Assume physical sector size to be the same as
* sector_size in that case.
*/
physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend,
"physical-sector-size",
sector_size);
blkfront_gather_backend_features(info);
for_each_rinfo(info, rinfo, i) {
err = blkfront_setup_indirect(rinfo);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
info->xbdev->otherend);
blkif_free(info, 0);
break;
}
}
err = xlvbd_alloc_gendisk(sectors, info, sector_size,
physical_sector_size);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
info->xbdev->otherend);
goto fail;
}
xenbus_switch_state(info->xbdev, XenbusStateConnected);
/* Kick pending requests. */
info->connected = BLKIF_STATE_CONNECTED;
for_each_rinfo(info, rinfo, i)
kick_pending_request_queues(rinfo);
err = device_add_disk(&info->xbdev->dev, info->gd, NULL);
if (err) {
put_disk(info->gd);
blk_mq_free_tag_set(&info->tag_set);
info->rq = NULL;
goto fail;
}
info->is_ready = 1;
return;
fail:
blkif_free(info, 0);
return;
}
/*
* Callback received when the backend's state changes.
*/
static void blkback_changed(struct xenbus_device *dev,
enum xenbus_state backend_state)
{
struct blkfront_info *info = dev_get_drvdata(&dev->dev);
dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);
switch (backend_state) {
case XenbusStateInitWait:
if (dev->state != XenbusStateInitialising)
break;
if (talk_to_blkback(dev, info))
break;
break;
case XenbusStateInitialising:
case XenbusStateInitialised:
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateUnknown:
break;
case XenbusStateConnected:
/*
* talk_to_blkback sets state to XenbusStateInitialised
* and blkfront_connect sets it to XenbusStateConnected
* (if connection went OK).
*
* If the backend (or toolstack) decides to poke at backend
* state (and re-trigger the watch by setting the state repeatedly
* to XenbusStateConnected (4)) we need to deal with this.
* This is allowed as this is used to communicate to the guest
* that the size of disk has changed!
*/
if ((dev->state != XenbusStateInitialised) &&
(dev->state != XenbusStateConnected)) {
if (talk_to_blkback(dev, info))
break;
}
blkfront_connect(info);
break;
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
fallthrough;
case XenbusStateClosing:
blkfront_closing(info);
break;
}
}
static void blkfront_remove(struct xenbus_device *xbdev)
{
struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
if (info->gd)
del_gendisk(info->gd);
mutex_lock(&blkfront_mutex);
list_del(&info->info_list);
mutex_unlock(&blkfront_mutex);
blkif_free(info, 0);
if (info->gd) {
xlbd_release_minors(info->gd->first_minor, info->gd->minors);
put_disk(info->gd);
blk_mq_free_tag_set(&info->tag_set);
}
kfree(info);
}
static int blkfront_is_ready(struct xenbus_device *dev)
{
struct blkfront_info *info = dev_get_drvdata(&dev->dev);
return info->is_ready && info->xbdev;
}
static const struct block_device_operations xlvbd_block_fops =
{
.owner = THIS_MODULE,
.getgeo = blkif_getgeo,
.ioctl = blkif_ioctl,
.compat_ioctl = blkdev_compat_ptr_ioctl,
};
static const struct xenbus_device_id blkfront_ids[] = {
{ "vbd" },
{ "" }
};
static struct xenbus_driver blkfront_driver = {
.ids = blkfront_ids,
.probe = blkfront_probe,
.remove = blkfront_remove,
.resume = blkfront_resume,
.otherend_changed = blkback_changed,
.is_ready = blkfront_is_ready,
};
static void purge_persistent_grants(struct blkfront_info *info)
{
unsigned int i;
unsigned long flags;
struct blkfront_ring_info *rinfo;
for_each_rinfo(info, rinfo, i) {
struct grant *gnt_list_entry, *tmp;
LIST_HEAD(grants);
spin_lock_irqsave(&rinfo->ring_lock, flags);
if (rinfo->persistent_gnts_c == 0) {
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
continue;
}
list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
node) {
if (gnt_list_entry->gref == INVALID_GRANT_REF ||
!gnttab_try_end_foreign_access(gnt_list_entry->gref))
continue;
list_del(&gnt_list_entry->node);
rinfo->persistent_gnts_c--;
gnt_list_entry->gref = INVALID_GRANT_REF;
list_add_tail(&gnt_list_entry->node, &grants);
}
list_splice_tail(&grants, &rinfo->grants);
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
}
}
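/*
 * Note (an illustrative reading of the loop above, not authoritative):
 * entries whose foreign access could be ended stay on the list but are
 * tagged INVALID_GRANT_REF, so they get re-granted on next use; splicing
 * them to the tail keeps still-persistent grants preferred at the head.
 */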
static void blkfront_delay_work(struct work_struct *work)
{
struct blkfront_info *info;
bool need_schedule_work = false;
/*
* Note that when using bounce buffers but not persistent grants
* there's no need to run blkfront_delay_work because grants are
* revoked in blkif_completion or else an error is reported and the
* connection is closed.
*/
mutex_lock(&blkfront_mutex);
list_for_each_entry(info, &info_list, info_list) {
if (info->feature_persistent) {
need_schedule_work = true;
mutex_lock(&info->mutex);
purge_persistent_grants(info);
mutex_unlock(&info->mutex);
}
}
if (need_schedule_work)
schedule_delayed_work(&blkfront_work, HZ * 10);
mutex_unlock(&blkfront_mutex);
}
static int __init xlblk_init(void)
{
int ret;
int nr_cpus = num_online_cpus();
if (!xen_domain())
return -ENODEV;
if (!xen_has_pv_disk_devices())
return -ENODEV;
if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
pr_warn("xen_blk: can't get major %d with name %s\n",
XENVBD_MAJOR, DEV_NAME);
return -ENODEV;
}
if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
}
if (xen_blkif_max_queues > nr_cpus) {
pr_info("Invalid max_queues (%d), will use default max: %d.\n",
xen_blkif_max_queues, nr_cpus);
xen_blkif_max_queues = nr_cpus;
}
INIT_DELAYED_WORK(&blkfront_work, blkfront_delay_work);
ret = xenbus_register_frontend(&blkfront_driver);
if (ret) {
unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
return ret;
}
return 0;
}
module_init(xlblk_init);
static void __exit xlblk_exit(void)
{
cancel_delayed_work_sync(&blkfront_work);
xenbus_unregister_driver(&blkfront_driver);
unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
kfree(minors);
}
module_exit(xlblk_exit);
MODULE_DESCRIPTION("Xen virtual block device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
MODULE_ALIAS("xen:vbd");
MODULE_ALIAS("xenblk");
| linux-master | drivers/block/xen-blkfront.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for the N64 cart.
*
* Copyright (c) 2021 Lauri Kasanen
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
enum {
PI_DRAM_REG = 0,
PI_CART_REG,
PI_READ_REG,
PI_WRITE_REG,
PI_STATUS_REG,
};
#define PI_STATUS_DMA_BUSY (1 << 0)
#define PI_STATUS_IO_BUSY (1 << 1)
#define CART_DOMAIN 0x10000000
#define CART_MAX 0x1FFFFFFF
#define MIN_ALIGNMENT 8
static u32 __iomem *reg_base;
static unsigned int start;
module_param(start, uint, 0);
MODULE_PARM_DESC(start, "Start address of the cart block data");
static unsigned int size;
module_param(size, uint, 0);
MODULE_PARM_DESC(size, "Size of the cart block data, in bytes");
static void n64cart_write_reg(const u8 reg, const u32 value)
{
writel(value, reg_base + reg);
}
static u32 n64cart_read_reg(const u8 reg)
{
return readl(reg_base + reg);
}
static void n64cart_wait_dma(void)
{
while (n64cart_read_reg(PI_STATUS_REG) &
(PI_STATUS_DMA_BUSY | PI_STATUS_IO_BUSY))
cpu_relax();
}
/*
* Process a single bvec of a bio.
*/
static bool n64cart_do_bvec(struct device *dev, struct bio_vec *bv, u32 pos)
{
dma_addr_t dma_addr;
const u32 bstart = pos + start;
/* Alignment check */
WARN_ON_ONCE((bv->bv_offset & (MIN_ALIGNMENT - 1)) ||
(bv->bv_len & (MIN_ALIGNMENT - 1)));
dma_addr = dma_map_bvec(dev, bv, DMA_FROM_DEVICE, 0);
if (dma_mapping_error(dev, dma_addr))
return false;
n64cart_wait_dma();
n64cart_write_reg(PI_DRAM_REG, dma_addr);
n64cart_write_reg(PI_CART_REG, (bstart | CART_DOMAIN) & CART_MAX);
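	/* the PI engine transfers one byte more than the programmed
	 * value, hence the len - 1 below
	 */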
n64cart_write_reg(PI_WRITE_REG, bv->bv_len - 1);
n64cart_wait_dma();
dma_unmap_page(dev, dma_addr, bv->bv_len, DMA_FROM_DEVICE);
return true;
}
static void n64cart_submit_bio(struct bio *bio)
{
struct bio_vec bvec;
struct bvec_iter iter;
struct device *dev = bio->bi_bdev->bd_disk->private_data;
u32 pos = bio->bi_iter.bi_sector << SECTOR_SHIFT;
bio_for_each_segment(bvec, bio, iter) {
if (!n64cart_do_bvec(dev, &bvec, pos)) {
bio_io_error(bio);
return;
}
pos += bvec.bv_len;
}
bio_endio(bio);
}
static const struct block_device_operations n64cart_fops = {
.owner = THIS_MODULE,
.submit_bio = n64cart_submit_bio,
};
/*
* The target device is embedded and RAM-constrained. We save RAM
* by initializing in __init code that gets dropped late in boot.
* For the same reason there is no module or unloading support.
*/
static int __init n64cart_probe(struct platform_device *pdev)
{
struct gendisk *disk;
int err = -ENOMEM;
if (!start || !size) {
pr_err("start or size not specified\n");
return -ENODEV;
}
if (size & 4095) {
pr_err("size must be a multiple of 4K\n");
return -ENODEV;
}
reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
goto out;
disk->first_minor = 0;
disk->flags = GENHD_FL_NO_PART;
disk->fops = &n64cart_fops;
disk->private_data = &pdev->dev;
strcpy(disk->disk_name, "n64cart");
set_capacity(disk, size >> SECTOR_SHIFT);
set_disk_ro(disk, 1);
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
blk_queue_physical_block_size(disk->queue, 4096);
blk_queue_logical_block_size(disk->queue, 4096);
err = add_disk(disk);
if (err)
goto out_cleanup_disk;
pr_info("n64cart: %u kb disk\n", size / 1024);
return 0;
out_cleanup_disk:
put_disk(disk);
out:
return err;
}
static struct platform_driver n64cart_driver = {
.driver = {
.name = "n64cart",
},
};
static int __init n64cart_init(void)
{
return platform_driver_probe(&n64cart_driver, n64cart_probe);
}
module_init(n64cart_init);
MODULE_AUTHOR("Lauri Kasanen <[email protected]>");
MODULE_DESCRIPTION("Driver for the N64 cart");
MODULE_LICENSE("GPL");
| linux-master | drivers/block/n64cart.c |
// SPDX-License-Identifier: GPL-2.0-only
/* sunvdc.c: Sun LDOM Virtual Disk Client.
*
* Copyright (C) 2007, 2008 David S. Miller <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/scatterlist.h>
#include <asm/vio.h>
#include <asm/ldc.h>
#define DRV_MODULE_NAME "sunvdc"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.2"
#define DRV_MODULE_RELDATE "November 24, 2014"
static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller ([email protected])");
MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
#define VDC_TX_RING_SIZE 512
#define VDC_DEFAULT_BLK_SIZE 512
#define MAX_XFER_BLKS (128 * 1024)
#define MAX_XFER_SIZE (MAX_XFER_BLKS / VDC_DEFAULT_BLK_SIZE)
#define MAX_RING_COOKIES ((MAX_XFER_BLKS / PAGE_SIZE) + 2)
#define WAITING_FOR_LINK_UP 0x01
#define WAITING_FOR_TX_SPACE 0x02
#define WAITING_FOR_GEN_CMD 0x04
#define WAITING_FOR_ANY -1
#define VDC_MAX_RETRIES 10
static struct workqueue_struct *sunvdc_wq;
struct vdc_req_entry {
struct request *req;
};
struct vdc_port {
struct vio_driver_state vio;
struct gendisk *disk;
struct vdc_completion *cmp;
u64 req_id;
u64 seq;
struct vdc_req_entry rq_arr[VDC_TX_RING_SIZE];
unsigned long ring_cookies;
u64 max_xfer_size;
u32 vdisk_block_size;
u32 drain;
u64 ldc_timeout;
struct delayed_work ldc_reset_timer_work;
struct work_struct ldc_reset_work;
/* The server fills these in for us in the disk attribute
* ACK packet.
*/
u64 operations;
u32 vdisk_size;
u8 vdisk_type;
u8 vdisk_mtype;
u32 vdisk_phys_blksz;
struct blk_mq_tag_set tag_set;
char disk_name[32];
};
static void vdc_ldc_reset(struct vdc_port *port);
static void vdc_ldc_reset_work(struct work_struct *work);
static void vdc_ldc_reset_timer_work(struct work_struct *work);
static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
return container_of(vio, struct vdc_port, vio);
}
/* Ordered from largest major to lowest */
static struct vio_version vdc_versions[] = {
{ .major = 1, .minor = 2 },
{ .major = 1, .minor = 1 },
{ .major = 1, .minor = 0 },
};
static inline int vdc_version_supported(struct vdc_port *port,
u16 major, u16 minor)
{
return port->vio.ver.major == major && port->vio.ver.minor >= minor;
}
#define VDCBLK_NAME "vdisk"
static int vdc_major;
#define PARTITION_SHIFT 3
static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
{
return vio_dring_avail(dr, VDC_TX_RING_SIZE);
}
static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct gendisk *disk = bdev->bd_disk;
sector_t nsect = get_capacity(disk);
sector_t cylinders = nsect;
geo->heads = 0xff;
geo->sectors = 0x3f;
sector_div(cylinders, geo->heads * geo->sectors);
geo->cylinders = cylinders;
if ((sector_t)(geo->cylinders + 1) * geo->heads * geo->sectors < nsect)
geo->cylinders = 0xffff;
return 0;
}
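/*
 * Worked example (illustrative, not part of the driver): with the fake
 * geometry of 255 heads and 63 sectors/track fixed above, a 16 GiB
 * virtual disk (33554432 512-byte sectors) reports
 *	cylinders = 33554432 / (255 * 63) = 2088
 * comfortably below the 0xffff clamp.
 */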
/* Add ioctl/CDROM_GET_CAPABILITY to support cdrom_id in udev
* when vdisk_mtype is VD_MEDIA_TYPE_CD or VD_MEDIA_TYPE_DVD.
* Needed to be able to install inside an ldom from an iso image.
*/
static int vdc_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned command, unsigned long argument)
{
struct vdc_port *port = bdev->bd_disk->private_data;
int i;
switch (command) {
case CDROMMULTISESSION:
pr_debug(PFX "Multisession CDs not supported\n");
for (i = 0; i < sizeof(struct cdrom_multisession); i++)
if (put_user(0, (char __user *)(argument + i)))
return -EFAULT;
return 0;
case CDROM_GET_CAPABILITY:
if (!vdc_version_supported(port, 1, 1))
return -EINVAL;
switch (port->vdisk_mtype) {
case VD_MEDIA_TYPE_CD:
case VD_MEDIA_TYPE_DVD:
return 0;
default:
return -EINVAL;
}
default:
pr_debug(PFX "ioctl %08x not supported\n", command);
return -EINVAL;
}
}
static const struct block_device_operations vdc_fops = {
.owner = THIS_MODULE,
.getgeo = vdc_getgeo,
.ioctl = vdc_ioctl,
.compat_ioctl = blkdev_compat_ptr_ioctl,
};
static void vdc_blk_queue_start(struct vdc_port *port)
{
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
/* restart blk queue when ring is half emptied. also called after
* handshake completes, so check for initial handshake before we've
* allocated a disk.
*/
if (port->disk && vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
blk_mq_start_stopped_hw_queues(port->disk->queue, true);
}
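/*
 * Illustrative arithmetic (not part of the driver): with
 * VDC_TX_RING_SIZE == 512 the condition
 *	avail * 100 / 512 >= 50
 * reduces to avail >= 256, i.e. the queue restarts once at least half
 * of the TX descriptor ring is free again.
 */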
static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
{
if (vio->cmp &&
(waiting_for == -1 ||
vio->cmp->waiting_for == waiting_for)) {
vio->cmp->err = err;
complete(&vio->cmp->com);
vio->cmp = NULL;
}
}
static void vdc_handshake_complete(struct vio_driver_state *vio)
{
struct vdc_port *port = to_vdc_port(vio);
cancel_delayed_work(&port->ldc_reset_timer_work);
vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
vdc_blk_queue_start(port);
}
static int vdc_handle_unknown(struct vdc_port *port, void *arg)
{
struct vio_msg_tag *pkt = arg;
printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
printk(KERN_ERR PFX "Resetting connection.\n");
ldc_disconnect(port->vio.lp);
return -ECONNRESET;
}
static int vdc_send_attr(struct vio_driver_state *vio)
{
struct vdc_port *port = to_vdc_port(vio);
struct vio_disk_attr_info pkt;
memset(&pkt, 0, sizeof(pkt));
pkt.tag.type = VIO_TYPE_CTRL;
pkt.tag.stype = VIO_SUBTYPE_INFO;
pkt.tag.stype_env = VIO_ATTR_INFO;
pkt.tag.sid = vio_send_sid(vio);
pkt.xfer_mode = VIO_DRING_MODE;
pkt.vdisk_block_size = port->vdisk_block_size;
pkt.max_xfer_size = port->max_xfer_size;
viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size);
return vio_ldc_send(&port->vio, &pkt, sizeof(pkt));
}
static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
{
struct vdc_port *port = to_vdc_port(vio);
struct vio_disk_attr_info *pkt = arg;
viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] "
"mtype[0x%x] xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
pkt->tag.stype, pkt->operations,
pkt->vdisk_size, pkt->vdisk_type, pkt->vdisk_mtype,
pkt->xfer_mode, pkt->vdisk_block_size,
pkt->max_xfer_size);
if (pkt->tag.stype == VIO_SUBTYPE_ACK) {
switch (pkt->vdisk_type) {
case VD_DISK_TYPE_DISK:
case VD_DISK_TYPE_SLICE:
break;
default:
printk(KERN_ERR PFX "%s: Bogus vdisk_type 0x%x\n",
vio->name, pkt->vdisk_type);
return -ECONNRESET;
}
if (pkt->vdisk_block_size > port->vdisk_block_size) {
printk(KERN_ERR PFX "%s: BLOCK size increased "
"%u --> %u\n",
vio->name,
port->vdisk_block_size, pkt->vdisk_block_size);
return -ECONNRESET;
}
port->operations = pkt->operations;
port->vdisk_type = pkt->vdisk_type;
if (vdc_version_supported(port, 1, 1)) {
port->vdisk_size = pkt->vdisk_size;
port->vdisk_mtype = pkt->vdisk_mtype;
}
if (pkt->max_xfer_size < port->max_xfer_size)
port->max_xfer_size = pkt->max_xfer_size;
port->vdisk_block_size = pkt->vdisk_block_size;
port->vdisk_phys_blksz = VDC_DEFAULT_BLK_SIZE;
if (vdc_version_supported(port, 1, 2))
port->vdisk_phys_blksz = pkt->phys_block_size;
return 0;
} else {
printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name);
return -ECONNRESET;
}
}
static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
{
int err = desc->status;
vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
}
static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
unsigned int index)
{
struct vio_disk_desc *desc = vio_dring_entry(dr, index);
struct vdc_req_entry *rqe = &port->rq_arr[index];
struct request *req;
if (unlikely(desc->hdr.state != VIO_DESC_DONE))
return;
ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
desc->hdr.state = VIO_DESC_FREE;
dr->cons = vio_dring_next(dr, index);
req = rqe->req;
if (req == NULL) {
vdc_end_special(port, desc);
return;
}
rqe->req = NULL;
blk_mq_end_request(req, desc->status ? BLK_STS_IOERR : 0);
vdc_blk_queue_start(port);
}
static int vdc_ack(struct vdc_port *port, void *msgbuf)
{
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
struct vio_dring_data *pkt = msgbuf;
if (unlikely(pkt->dring_ident != dr->ident ||
pkt->start_idx != pkt->end_idx ||
pkt->start_idx >= VDC_TX_RING_SIZE))
return 0;
vdc_end_one(port, dr, pkt->start_idx);
return 0;
}
static int vdc_nack(struct vdc_port *port, void *msgbuf)
{
/* XXX Implement me XXX */
return 0;
}
static void vdc_event(void *arg, int event)
{
struct vdc_port *port = arg;
struct vio_driver_state *vio = &port->vio;
unsigned long flags;
int err;
spin_lock_irqsave(&vio->lock, flags);
if (unlikely(event == LDC_EVENT_RESET)) {
vio_link_state_change(vio, event);
queue_work(sunvdc_wq, &port->ldc_reset_work);
goto out;
}
if (unlikely(event == LDC_EVENT_UP)) {
vio_link_state_change(vio, event);
goto out;
}
if (unlikely(event != LDC_EVENT_DATA_READY)) {
pr_warn(PFX "Unexpected LDC event %d\n", event);
goto out;
}
err = 0;
while (1) {
union {
struct vio_msg_tag tag;
u64 raw[8];
} msgbuf;
err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
if (unlikely(err < 0)) {
if (err == -ECONNRESET)
vio_conn_reset(vio);
break;
}
if (err == 0)
break;
viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
msgbuf.tag.type,
msgbuf.tag.stype,
msgbuf.tag.stype_env,
msgbuf.tag.sid);
err = vio_validate_sid(vio, &msgbuf.tag);
if (err < 0)
break;
if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
if (msgbuf.tag.stype == VIO_SUBTYPE_ACK)
err = vdc_ack(port, &msgbuf);
else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK)
err = vdc_nack(port, &msgbuf);
else
err = vdc_handle_unknown(port, &msgbuf);
} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
err = vio_control_pkt_engine(vio, &msgbuf);
} else {
err = vdc_handle_unknown(port, &msgbuf);
}
if (err < 0)
break;
}
if (err < 0)
vdc_finish(&port->vio, err, WAITING_FOR_ANY);
out:
spin_unlock_irqrestore(&vio->lock, flags);
}
static int __vdc_tx_trigger(struct vdc_port *port)
{
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
struct vio_dring_data hdr = {
.tag = {
.type = VIO_TYPE_DATA,
.stype = VIO_SUBTYPE_INFO,
.stype_env = VIO_DRING_DATA,
.sid = vio_send_sid(&port->vio),
},
.dring_ident = dr->ident,
.start_idx = dr->prod,
.end_idx = dr->prod,
};
int err, delay;
int retries = 0;
hdr.seq = dr->snd_nxt;
delay = 1;
do {
err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
if (err > 0) {
dr->snd_nxt++;
break;
}
udelay(delay);
if ((delay <<= 1) > 128)
delay = 128;
if (retries++ > VDC_MAX_RETRIES)
break;
} while (err == -EAGAIN);
if (err == -ENOTCONN)
vdc_ldc_reset(port);
return err;
}
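/*
 * Illustrative sketch (not part of the driver): the retry loop above is
 * a capped exponential backoff. In isolation it behaves like the
 * user-space model below, sleeping 1, 2, 4, ... microseconds, clamping
 * at 128 us, for up to VDC_MAX_RETRIES attempts while the send path
 * keeps returning -EAGAIN.
 */
#if 0
#include <errno.h>
#include <unistd.h>

static int send_with_backoff(int (*try_send)(void))
{
	int delay = 1, retries = 0, err;

	do {
		err = try_send();	/* > 0 means sent, -EAGAIN means ring full */
		if (err > 0)
			break;
		usleep(delay);		/* stand-in for udelay() */
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > 10)	/* VDC_MAX_RETRIES */
			break;
	} while (err == -EAGAIN);

	return err;
}
#endif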
static int __send_request(struct request *req)
{
struct vdc_port *port = req->q->disk->private_data;
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
struct scatterlist sg[MAX_RING_COOKIES];
struct vdc_req_entry *rqe;
struct vio_disk_desc *desc;
unsigned int map_perm;
int nsg, err, i;
u64 len;
u8 op;
if (WARN_ON(port->ring_cookies > MAX_RING_COOKIES))
return -EINVAL;
map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;
if (rq_data_dir(req) == READ) {
map_perm |= LDC_MAP_W;
op = VD_OP_BREAD;
} else {
map_perm |= LDC_MAP_R;
op = VD_OP_BWRITE;
}
sg_init_table(sg, port->ring_cookies);
nsg = blk_rq_map_sg(req->q, req, sg);
len = 0;
for (i = 0; i < nsg; i++)
len += sg[i].length;
desc = vio_dring_cur(dr);
err = ldc_map_sg(port->vio.lp, sg, nsg,
desc->cookies, port->ring_cookies,
map_perm);
if (err < 0) {
printk(KERN_ERR PFX "ldc_map_sg() failure, err=%d.\n", err);
return err;
}
rqe = &port->rq_arr[dr->prod];
rqe->req = req;
desc->hdr.ack = VIO_ACK_ENABLE;
desc->req_id = port->req_id;
desc->operation = op;
if (port->vdisk_type == VD_DISK_TYPE_DISK) {
desc->slice = 0xff;
} else {
desc->slice = 0;
}
desc->status = ~0;
desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
desc->size = len;
desc->ncookies = err;
/* This has to be a non-SMP write barrier because we are writing
* to memory which is shared with the peer LDOM.
*/
wmb();
desc->hdr.state = VIO_DESC_READY;
err = __vdc_tx_trigger(port);
if (err < 0) {
printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
} else {
port->req_id++;
dr->prod = vio_dring_next(dr, dr->prod);
}
return err;
}
static blk_status_t vdc_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct vdc_port *port = hctx->queue->queuedata;
struct vio_dring_state *dr;
unsigned long flags;
dr = &port->vio.drings[VIO_DRIVER_TX_RING];
blk_mq_start_request(bd->rq);
spin_lock_irqsave(&port->vio.lock, flags);
/*
* Doing drain, just end the request in error
*/
if (unlikely(port->drain)) {
spin_unlock_irqrestore(&port->vio.lock, flags);
return BLK_STS_IOERR;
}
if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
spin_unlock_irqrestore(&port->vio.lock, flags);
blk_mq_stop_hw_queue(hctx);
return BLK_STS_DEV_RESOURCE;
}
if (__send_request(bd->rq) < 0) {
spin_unlock_irqrestore(&port->vio.lock, flags);
return BLK_STS_IOERR;
}
spin_unlock_irqrestore(&port->vio.lock, flags);
return BLK_STS_OK;
}
static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
{
struct vio_dring_state *dr;
struct vio_completion comp;
struct vio_disk_desc *desc;
unsigned int map_perm;
unsigned long flags;
int op_len, err;
void *req_buf;
if (!(((u64)1 << (u64)op) & port->operations))
return -EOPNOTSUPP;
switch (op) {
case VD_OP_BREAD:
case VD_OP_BWRITE:
default:
return -EINVAL;
case VD_OP_FLUSH:
op_len = 0;
map_perm = 0;
break;
case VD_OP_GET_WCE:
op_len = sizeof(u32);
map_perm = LDC_MAP_W;
break;
case VD_OP_SET_WCE:
op_len = sizeof(u32);
map_perm = LDC_MAP_R;
break;
case VD_OP_GET_VTOC:
op_len = sizeof(struct vio_disk_vtoc);
map_perm = LDC_MAP_W;
break;
case VD_OP_SET_VTOC:
op_len = sizeof(struct vio_disk_vtoc);
map_perm = LDC_MAP_R;
break;
case VD_OP_GET_DISKGEOM:
op_len = sizeof(struct vio_disk_geom);
map_perm = LDC_MAP_W;
break;
case VD_OP_SET_DISKGEOM:
op_len = sizeof(struct vio_disk_geom);
map_perm = LDC_MAP_R;
break;
case VD_OP_SCSICMD:
op_len = 16;
map_perm = LDC_MAP_RW;
break;
case VD_OP_GET_DEVID:
op_len = sizeof(struct vio_disk_devid);
map_perm = LDC_MAP_W;
break;
case VD_OP_GET_EFI:
case VD_OP_SET_EFI:
return -EOPNOTSUPP;
}
map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;
op_len = (op_len + 7) & ~7;
req_buf = kzalloc(op_len, GFP_KERNEL);
if (!req_buf)
return -ENOMEM;
if (len > op_len)
len = op_len;
if (map_perm & LDC_MAP_R)
memcpy(req_buf, buf, len);
spin_lock_irqsave(&port->vio.lock, flags);
dr = &port->vio.drings[VIO_DRIVER_TX_RING];
/* XXX If we want to use this code generically we have to
* XXX handle TX ring exhaustion etc.
*/
desc = vio_dring_cur(dr);
err = ldc_map_single(port->vio.lp, req_buf, op_len,
desc->cookies, port->ring_cookies,
map_perm);
if (err < 0) {
spin_unlock_irqrestore(&port->vio.lock, flags);
kfree(req_buf);
return err;
}
init_completion(&comp.com);
comp.waiting_for = WAITING_FOR_GEN_CMD;
	port->vio.cmp = &comp;
desc->hdr.ack = VIO_ACK_ENABLE;
desc->req_id = port->req_id;
desc->operation = op;
desc->slice = 0;
desc->status = ~0;
desc->offset = 0;
desc->size = op_len;
desc->ncookies = err;
/* This has to be a non-SMP write barrier because we are writing
* to memory which is shared with the peer LDOM.
*/
wmb();
desc->hdr.state = VIO_DESC_READY;
err = __vdc_tx_trigger(port);
if (err >= 0) {
port->req_id++;
dr->prod = vio_dring_next(dr, dr->prod);
spin_unlock_irqrestore(&port->vio.lock, flags);
wait_for_completion(&comp.com);
err = comp.err;
} else {
port->vio.cmp = NULL;
spin_unlock_irqrestore(&port->vio.lock, flags);
}
if (map_perm & LDC_MAP_W)
memcpy(buf, req_buf, len);
kfree(req_buf);
return err;
}
static int vdc_alloc_tx_ring(struct vdc_port *port)
{
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
unsigned long len, entry_size;
int ncookies;
void *dring;
entry_size = sizeof(struct vio_disk_desc) +
(sizeof(struct ldc_trans_cookie) * port->ring_cookies);
len = (VDC_TX_RING_SIZE * entry_size);
ncookies = VIO_MAX_RING_COOKIES;
dring = ldc_alloc_exp_dring(port->vio.lp, len,
dr->cookies, &ncookies,
(LDC_MAP_SHADOW |
LDC_MAP_DIRECT |
LDC_MAP_RW));
if (IS_ERR(dring))
return PTR_ERR(dring);
dr->base = dring;
dr->entry_size = entry_size;
dr->num_entries = VDC_TX_RING_SIZE;
dr->prod = dr->cons = 0;
dr->pending = VDC_TX_RING_SIZE;
dr->ncookies = ncookies;
return 0;
}
static void vdc_free_tx_ring(struct vdc_port *port)
{
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
if (dr->base) {
ldc_free_exp_dring(port->vio.lp, dr->base,
(dr->entry_size * dr->num_entries),
dr->cookies, dr->ncookies);
dr->base = NULL;
dr->entry_size = 0;
dr->num_entries = 0;
dr->pending = 0;
dr->ncookies = 0;
}
}
static int vdc_port_up(struct vdc_port *port)
{
struct vio_completion comp;
init_completion(&comp.com);
comp.err = 0;
comp.waiting_for = WAITING_FOR_LINK_UP;
	port->vio.cmp = &comp;
vio_port_up(&port->vio);
wait_for_completion(&comp.com);
return comp.err;
}
static void vdc_port_down(struct vdc_port *port)
{
ldc_disconnect(port->vio.lp);
ldc_unbind(port->vio.lp);
vdc_free_tx_ring(port);
vio_ldc_free(&port->vio);
}
static const struct blk_mq_ops vdc_mq_ops = {
.queue_rq = vdc_queue_rq,
};
static int probe_disk(struct vdc_port *port)
{
struct request_queue *q;
struct gendisk *g;
int err;
err = vdc_port_up(port);
if (err)
return err;
/* Using version 1.2 means vdisk_phys_blksz should be set unless the
* disk is reserved by another system.
*/
if (vdc_version_supported(port, 1, 2) && !port->vdisk_phys_blksz)
return -ENODEV;
if (vdc_version_supported(port, 1, 1)) {
/* vdisk_size should be set during the handshake, if it wasn't
* then the underlying disk is reserved by another system
*/
if (port->vdisk_size == -1)
return -ENODEV;
} else {
struct vio_disk_geom geom;
err = generic_request(port, VD_OP_GET_DISKGEOM,
&geom, sizeof(geom));
if (err < 0) {
printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
"error %d\n", err);
return err;
}
port->vdisk_size = ((u64)geom.num_cyl *
(u64)geom.num_hd *
(u64)geom.num_sec);
}
err = blk_mq_alloc_sq_tag_set(&port->tag_set, &vdc_mq_ops,
VDC_TX_RING_SIZE, BLK_MQ_F_SHOULD_MERGE);
if (err)
return err;
g = blk_mq_alloc_disk(&port->tag_set, port);
if (IS_ERR(g)) {
printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
port->vio.name);
err = PTR_ERR(g);
goto out_free_tag;
}
port->disk = g;
q = g->queue;
/* Each segment in a request is up to an aligned page in size. */
blk_queue_segment_boundary(q, PAGE_SIZE - 1);
blk_queue_max_segment_size(q, PAGE_SIZE);
blk_queue_max_segments(q, port->ring_cookies);
blk_queue_max_hw_sectors(q, port->max_xfer_size);
g->major = vdc_major;
g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
g->minors = 1 << PARTITION_SHIFT;
strcpy(g->disk_name, port->disk_name);
g->fops = &vdc_fops;
g->queue = q;
g->private_data = port;
set_capacity(g, port->vdisk_size);
if (vdc_version_supported(port, 1, 1)) {
switch (port->vdisk_mtype) {
case VD_MEDIA_TYPE_CD:
pr_info(PFX "Virtual CDROM %s\n", port->disk_name);
g->flags |= GENHD_FL_REMOVABLE;
set_disk_ro(g, 1);
break;
case VD_MEDIA_TYPE_DVD:
pr_info(PFX "Virtual DVD %s\n", port->disk_name);
g->flags |= GENHD_FL_REMOVABLE;
set_disk_ro(g, 1);
break;
case VD_MEDIA_TYPE_FIXED:
pr_info(PFX "Virtual Hard disk %s\n", port->disk_name);
break;
}
}
blk_queue_physical_block_size(q, port->vdisk_phys_blksz);
pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n",
g->disk_name,
port->vdisk_size, (port->vdisk_size >> (20 - 9)),
port->vio.ver.major, port->vio.ver.minor);
err = device_add_disk(&port->vio.vdev->dev, g, NULL);
if (err)
goto out_cleanup_disk;
return 0;
out_cleanup_disk:
put_disk(g);
out_free_tag:
blk_mq_free_tag_set(&port->tag_set);
return err;
}
static struct ldc_channel_config vdc_ldc_cfg = {
.event = vdc_event,
.mtu = 64,
.mode = LDC_MODE_UNRELIABLE,
};
static struct vio_driver_ops vdc_vio_ops = {
.send_attr = vdc_send_attr,
.handle_attr = vdc_handle_attr,
.handshake_complete = vdc_handshake_complete,
};
static void print_version(void)
{
static int version_printed;
if (version_printed++ == 0)
printk(KERN_INFO "%s", version);
}
struct vdc_check_port_data {
int dev_no;
char *type;
};
static int vdc_device_probed(struct device *dev, void *arg)
{
struct vio_dev *vdev = to_vio_dev(dev);
struct vdc_check_port_data *port_data;
port_data = (struct vdc_check_port_data *)arg;
if ((vdev->dev_no == port_data->dev_no) &&
(!(strcmp((char *)&vdev->type, port_data->type))) &&
dev_get_drvdata(dev)) {
/* This device has already been configured
* by vdc_port_probe()
*/
return 1;
} else {
return 0;
}
}
/* Determine whether the VIO device is part of an mpgroup
* by locating all the virtual-device-port nodes associated
* with the parent virtual-device node for the VIO device
* and checking whether any of these nodes are vdc-ports
* which have already been configured.
*
* Returns true if this device is part of an mpgroup and has
* already been probed.
*/
static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
{
struct vdc_check_port_data port_data;
struct device *dev;
port_data.dev_no = vdev->dev_no;
port_data.type = (char *)&vdev->type;
dev = device_find_child(vdev->dev.parent, &port_data,
vdc_device_probed);
if (dev)
return true;
return false;
}
static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
struct mdesc_handle *hp;
struct vdc_port *port;
int err;
const u64 *ldc_timeout;
print_version();
hp = mdesc_grab();
if (!hp)
return -ENODEV;
err = -ENODEV;
if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
printk(KERN_ERR PFX "Port id [%llu] too large.\n",
vdev->dev_no);
goto err_out_release_mdesc;
}
/* Check if this device is part of an mpgroup */
if (vdc_port_mpgroup_check(vdev)) {
printk(KERN_WARNING
"VIO: Ignoring extra vdisk port %s",
dev_name(&vdev->dev));
goto err_out_release_mdesc;
}
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port) {
err = -ENOMEM;
goto err_out_release_mdesc;
}
if (vdev->dev_no >= 26)
snprintf(port->disk_name, sizeof(port->disk_name),
VDCBLK_NAME "%c%c",
'a' + ((int)vdev->dev_no / 26) - 1,
'a' + ((int)vdev->dev_no % 26));
else
snprintf(port->disk_name, sizeof(port->disk_name),
VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
port->vdisk_size = -1;
/* Actual wall time may be double due to do_generic_file_read() doing
* a readahead I/O first, and once that fails it will try to read a
* single page.
*/
ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
INIT_DELAYED_WORK(&port->ldc_reset_timer_work, vdc_ldc_reset_timer_work);
INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);
err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
vdc_versions, ARRAY_SIZE(vdc_versions),
&vdc_vio_ops, port->disk_name);
if (err)
goto err_out_free_port;
port->vdisk_block_size = VDC_DEFAULT_BLK_SIZE;
port->max_xfer_size = MAX_XFER_SIZE;
port->ring_cookies = MAX_RING_COOKIES;
err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
if (err)
goto err_out_free_port;
err = vdc_alloc_tx_ring(port);
if (err)
goto err_out_free_ldc;
err = probe_disk(port);
if (err)
goto err_out_free_tx_ring;
/* Note that the device driver_data is used to determine
* whether the port has been probed.
*/
dev_set_drvdata(&vdev->dev, port);
mdesc_release(hp);
return 0;
err_out_free_tx_ring:
vdc_free_tx_ring(port);
err_out_free_ldc:
vio_ldc_free(&port->vio);
err_out_free_port:
kfree(port);
err_out_release_mdesc:
mdesc_release(hp);
return err;
}
static void vdc_port_remove(struct vio_dev *vdev)
{
struct vdc_port *port = dev_get_drvdata(&vdev->dev);
if (port) {
blk_mq_stop_hw_queues(port->disk->queue);
flush_work(&port->ldc_reset_work);
cancel_delayed_work_sync(&port->ldc_reset_timer_work);
del_timer_sync(&port->vio.timer);
del_gendisk(port->disk);
put_disk(port->disk);
blk_mq_free_tag_set(&port->tag_set);
vdc_free_tx_ring(port);
vio_ldc_free(&port->vio);
dev_set_drvdata(&vdev->dev, NULL);
kfree(port);
}
}
static void vdc_requeue_inflight(struct vdc_port *port)
{
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
u32 idx;
for (idx = dr->cons; idx != dr->prod; idx = vio_dring_next(dr, idx)) {
struct vio_disk_desc *desc = vio_dring_entry(dr, idx);
struct vdc_req_entry *rqe = &port->rq_arr[idx];
struct request *req;
ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
desc->hdr.state = VIO_DESC_FREE;
dr->cons = vio_dring_next(dr, idx);
req = rqe->req;
if (req == NULL) {
vdc_end_special(port, desc);
continue;
}
rqe->req = NULL;
blk_mq_requeue_request(req, false);
}
}
static void vdc_queue_drain(struct vdc_port *port)
{
struct request_queue *q = port->disk->queue;
/*
* Mark the queue as draining, then freeze/quiesce to ensure
* that all existing requests are seen in ->queue_rq() and killed
*/
port->drain = 1;
spin_unlock_irq(&port->vio.lock);
blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
spin_lock_irq(&port->vio.lock);
port->drain = 0;
blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);
}
static void vdc_ldc_reset_timer_work(struct work_struct *work)
{
struct vdc_port *port;
struct vio_driver_state *vio;
port = container_of(work, struct vdc_port, ldc_reset_timer_work.work);
vio = &port->vio;
spin_lock_irq(&vio->lock);
if (!(port->vio.hs_state & VIO_HS_COMPLETE)) {
pr_warn(PFX "%s ldc down %llu seconds, draining queue\n",
port->disk_name, port->ldc_timeout);
vdc_queue_drain(port);
vdc_blk_queue_start(port);
}
spin_unlock_irq(&vio->lock);
}
static void vdc_ldc_reset_work(struct work_struct *work)
{
struct vdc_port *port;
struct vio_driver_state *vio;
unsigned long flags;
port = container_of(work, struct vdc_port, ldc_reset_work);
vio = &port->vio;
spin_lock_irqsave(&vio->lock, flags);
vdc_ldc_reset(port);
spin_unlock_irqrestore(&vio->lock, flags);
}
static void vdc_ldc_reset(struct vdc_port *port)
{
int err;
assert_spin_locked(&port->vio.lock);
pr_warn(PFX "%s ldc link reset\n", port->disk_name);
blk_mq_stop_hw_queues(port->disk->queue);
vdc_requeue_inflight(port);
vdc_port_down(port);
err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
if (err) {
pr_err(PFX "%s vio_ldc_alloc:%d\n", port->disk_name, err);
return;
}
err = vdc_alloc_tx_ring(port);
if (err) {
pr_err(PFX "%s vio_alloc_tx_ring:%d\n", port->disk_name, err);
goto err_free_ldc;
}
if (port->ldc_timeout)
mod_delayed_work(system_wq, &port->ldc_reset_timer_work,
round_jiffies(jiffies + HZ * port->ldc_timeout));
mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
return;
err_free_ldc:
vio_ldc_free(&port->vio);
}
static const struct vio_device_id vdc_port_match[] = {
{
.type = "vdc-port",
},
{},
};
MODULE_DEVICE_TABLE(vio, vdc_port_match);
static struct vio_driver vdc_port_driver = {
.id_table = vdc_port_match,
.probe = vdc_port_probe,
.remove = vdc_port_remove,
.name = "vdc_port",
};
static int __init vdc_init(void)
{
int err;
sunvdc_wq = alloc_workqueue("sunvdc", 0, 0);
if (!sunvdc_wq)
return -ENOMEM;
err = register_blkdev(0, VDCBLK_NAME);
if (err < 0)
goto out_free_wq;
vdc_major = err;
err = vio_register_driver(&vdc_port_driver);
if (err)
goto out_unregister_blkdev;
return 0;
out_unregister_blkdev:
unregister_blkdev(vdc_major, VDCBLK_NAME);
vdc_major = 0;
out_free_wq:
destroy_workqueue(sunvdc_wq);
return err;
}
static void __exit vdc_exit(void)
{
vio_unregister_driver(&vdc_port_driver);
unregister_blkdev(vdc_major, VDCBLK_NAME);
destroy_workqueue(sunvdc_wq);
}
module_init(vdc_init);
module_exit(vdc_exit);
| linux-master | drivers/block/sunvdc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/amiga/amiflop.c
*
* Copyright (C) 1993 Greg Harp
* Portions of this driver are based on code contributed by Brad Pepers
*
* revised 28.5.95 by Joerg Dorchain
* - now no bugs(?) any more for both HD & DD
* - added support for 40 Track 5.25" drives, 80-track hopefully behaves
* like 3.5" dd (no way to test - are there any 5.25" drives out there
* that work on an A4000?)
* - wrote formatting routine (maybe dirty, but works)
*
* june/july 1995 added ms-dos support by Joerg Dorchain
* (portions based on messydos.device and various contributors)
* - currently only 9 and 18 sector disks
*
* - fixed a bug with the internal trackbuffer when using multiple
* disks the same time
* - made formatting a bit safer
* - added command line and machine based default for "silent" df0
*
* december 1995 adapted for 1.2.13pl4 by Joerg Dorchain
* - works but I think it's inefficient. (look in redo_fd_request)
* But the changes were very efficient. (only three and a half lines)
*
* january 1996 added special ioctl for tracking down read/write problems
* - usage ioctl(d, RAW_TRACK, ptr); the raw track buffer (MFM-encoded data
* is copied to area. (area should be large enough since no checking is
* done - 30K is currently sufficient). return the actual size of the
* trackbuffer
* - replaced udelays() by a timer (CIAA timer B) for the waits
* needed for the disk mechanic.
*
* february 1996 fixed error recovery and multiple disk access
* - both got broken the first time I tampered with the driver :-(
* - still not safe, but better than before
*
 * revised March 3rd, 1996 by Jes Sorensen for use in the 1.3.28 kernel.
* - Minor changes to accept the kdev_t.
* - Replaced some more udelays with ms_delays. Udelay is just a loop,
* and so the delay will be different depending on the given
* processor :-(
* - The driver could use a major cleanup because of the new
* major/minor handling that came with kdev_t. It seems to work for
* the time being, but I can't guarantee that it will stay like
* that when we start using 16 (24?) bit minors.
*
* restructured jan 1997 by Joerg Dorchain
* - Fixed Bug accessing multiple disks
* - some code cleanup
* - added trackbuffer for each drive to speed things up
* - fixed some race conditions (who finds the next may send it to me ;-)
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fd.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/blk-mq.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>
#include <asm/irq.h>
#undef DEBUG /* print _LOTS_ of infos */
#define RAW_IOCTL
#ifdef RAW_IOCTL
#define IOCTL_RAW_TRACK 0x5254524B /* 'RTRK' */
#endif
/*
* Defines
*/
/*
* CIAAPRA bits (read only)
*/
#define DSKRDY (0x1<<5) /* disk ready when low */
#define DSKTRACK0 (0x1<<4) /* head at track zero when low */
#define DSKPROT (0x1<<3) /* disk protected when low */
#define DSKCHANGE (0x1<<2) /* low when disk removed */
/*
* CIAAPRB bits (read/write)
*/
#define DSKMOTOR (0x1<<7) /* motor on when low */
#define DSKSEL3 (0x1<<6) /* select drive 3 when low */
#define DSKSEL2 (0x1<<5) /* select drive 2 when low */
#define DSKSEL1 (0x1<<4) /* select drive 1 when low */
#define DSKSEL0 (0x1<<3) /* select drive 0 when low */
#define DSKSIDE (0x1<<2) /* side selection: 0 = upper, 1 = lower */
#define DSKDIREC (0x1<<1) /* step direction: 0=in, 1=out (to trk 0) */
#define DSKSTEP (0x1) /* pulse low to step head 1 track */
/*
* DSKBYTR bits (read only)
*/
#define DSKBYT (1<<15) /* register contains valid byte when set */
#define DMAON (1<<14) /* disk DMA enabled */
#define DISKWRITE (1<<13) /* disk write bit in DSKLEN enabled */
#define WORDEQUAL (1<<12) /* DSKSYNC register match when true */
/* bits 7-0 are data */
/*
* ADKCON/ADKCONR bits
*/
#ifndef SETCLR
#define ADK_SETCLR (1<<15) /* control bit */
#endif
#define ADK_PRECOMP1 (1<<14) /* precompensation selection */
#define ADK_PRECOMP0 (1<<13) /* 00=none, 01=140ns, 10=280ns, 11=500ns */
#define ADK_MFMPREC (1<<12) /* 0=GCR precomp., 1=MFM precomp. */
#define ADK_WORDSYNC (1<<10) /* enable DSKSYNC auto DMA */
#define ADK_MSBSYNC (1<<9) /* when 1, enable sync on MSbit (for GCR) */
#define ADK_FAST (1<<8) /* bit cell: 0=2us (GCR), 1=1us (MFM) */
/*
* DSKLEN bits
*/
#define DSKLEN_DMAEN (1<<15)
#define DSKLEN_WRITE (1<<14)
/*
* INTENA/INTREQ bits
*/
#define DSKINDEX (0x1<<4) /* DSKINDEX bit */
/*
* Misc
*/
#define MFM_SYNC 0x4489 /* standard MFM sync value */
/* Values for FD_COMMAND */
#define FD_RECALIBRATE 0x07 /* move to track 0 */
#define FD_SEEK 0x0F /* seek track */
#define FD_READ 0xE6 /* read with MT, MFM, SKip deleted */
#define FD_WRITE 0xC5 /* write with MT, MFM */
#define FD_SENSEI 0x08 /* Sense Interrupt Status */
#define FD_SPECIFY 0x03 /* specify HUT etc */
#define FD_FORMAT 0x4D /* format one track */
#define FD_VERSION 0x10 /* get version code */
#define FD_CONFIGURE 0x13 /* configure FIFO operation */
#define FD_PERPENDICULAR 0x12 /* perpendicular r/w mode */
#define FD_MAX_UNITS 4 /* Max. Number of drives */
#define FLOPPY_MAX_SECTORS 22 /* Max. Number of sectors per track */
struct fd_data_type {
char *name; /* description of data type */
int sects; /* sectors per track */
int (*read_fkt)(int); /* read whole track */
void (*write_fkt)(int); /* write whole track */
};
struct fd_drive_type {
unsigned long code; /* code returned from drive */
char *name; /* description of drive */
unsigned int tracks; /* number of tracks */
unsigned int heads; /* number of heads */
unsigned int read_size; /* raw read size for one track */
unsigned int write_size; /* raw write size for one track */
unsigned int sect_mult; /* sectors and gap multiplier (HD = 2) */
unsigned int precomp1; /* start track for precomp 1 */
unsigned int precomp2; /* start track for precomp 2 */
unsigned int step_delay; /* time (in ms) for delay after step */
unsigned int settle_time; /* time to settle after dir change */
unsigned int side_time; /* time needed to change sides */
};
struct amiga_floppy_struct {
struct fd_drive_type *type; /* type of floppy for this unit */
	struct fd_data_type *dtype; /* data format (Amiga/MS-DOS) for this unit */
int track; /* current track (-1 == unknown) */
	unsigned char *trackbuf; /* current track (kmalloc()'d) */
int blocks; /* total # blocks on disk */
int changed; /* true when not known */
int disk; /* disk in drive (-1 == unknown) */
int motor; /* true when motor is at speed */
int busy; /* true when drive is active */
int dirty; /* true when trackbuf is not on disk */
int status; /* current error code for unit */
struct gendisk *gendisk[2];
struct blk_mq_tag_set tag_set;
};
/*
* Error codes
*/
#define FD_OK 0 /* operation succeeded */
#define FD_ERROR -1 /* general error (seek, read, write, etc) */
#define FD_NOUNIT 1 /* unit does not exist */
#define FD_UNITBUSY 2 /* unit already active */
#define FD_NOTACTIVE 3 /* unit is not active */
#define FD_NOTREADY 4 /* unit is not ready (motor not on/no disk) */
#define MFM_NOSYNC 1
#define MFM_HEADER 2
#define MFM_DATA 3
#define MFM_TRACK 4
/*
* Floppy ID values
*/
#define FD_NODRIVE 0x00000000 /* response when no unit is present */
#define FD_DD_3 0xffffffff /* double-density 3.5" (880K) drive */
#define FD_HD_3 0x55555555 /* high-density 3.5" (1760K) drive */
#define FD_DD_5 0xaaaaaaaa /* double-density 5.25" (440K) drive */
static DEFINE_MUTEX(amiflop_mutex);
static unsigned long int fd_def_df0 = FD_DD_3; /* default for df0 if it doesn't identify */
module_param(fd_def_df0, ulong, 0);
MODULE_LICENSE("GPL");
/*
* Macros
*/
#define MOTOR_ON (ciab.prb &= ~DSKMOTOR)
#define MOTOR_OFF (ciab.prb |= DSKMOTOR)
#define SELECT(mask) (ciab.prb &= ~mask)
#define DESELECT(mask) (ciab.prb |= mask)
#define SELMASK(drive) (1 << (3 + (drive & 3)))
static struct fd_drive_type drive_types[] = {
/* code name tr he rdsz wrsz sm pc1 pc2 sd st st*/
/* warning: times are now in milliseconds (ms) */
{ FD_DD_3, "DD 3.5", 80, 2, 14716, 13630, 1, 80,161, 3, 18, 1},
{ FD_HD_3, "HD 3.5", 80, 2, 28344, 27258, 2, 80,161, 3, 18, 1},
{ FD_DD_5, "DD 5.25", 40, 2, 14716, 13630, 1, 40, 81, 6, 30, 2},
{ FD_NODRIVE, "No Drive", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
};
static int num_dr_types = ARRAY_SIZE(drive_types);
static int amiga_read(int), dos_read(int);
static void amiga_write(int), dos_write(int);
static struct fd_data_type data_types[] = {
{ "Amiga", 11 , amiga_read, amiga_write},
{ "MS-Dos", 9, dos_read, dos_write}
};
/* current info on each unit */
static struct amiga_floppy_struct unit[FD_MAX_UNITS];
static struct timer_list flush_track_timer[FD_MAX_UNITS];
static struct timer_list post_write_timer;
static unsigned long post_write_timer_drive;
static struct timer_list motor_on_timer;
static struct timer_list motor_off_timer[FD_MAX_UNITS];
static int on_attempts;
/* Synchronization of FDC access */
/* request loop (trackbuffer) */
static volatile int fdc_busy = -1;
static volatile int fdc_nested;
static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
static DECLARE_COMPLETION(motor_on_completion);
static volatile int selected = -1; /* currently selected drive */
static int writepending;
static int writefromint;
static char *raw_buf;
static DEFINE_SPINLOCK(amiflop_lock);
#define RAW_BUF_SIZE 30000 /* size of raw disk data */
/*
* These are global variables, as that's the easiest way to give
* information to interrupts. They are the data used for the current
* request.
*/
static volatile char block_flag;
static DECLARE_WAIT_QUEUE_HEAD(wait_fd_block);
/* MS-Dos MFM Coding tables (should go quick and easy) */
static unsigned char mfmencode[16]={
0x2a, 0x29, 0x24, 0x25, 0x12, 0x11, 0x14, 0x15,
0x4a, 0x49, 0x44, 0x45, 0x52, 0x51, 0x54, 0x55
};
static unsigned char mfmdecode[128];
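/*
 * Illustrative sketch (not part of this file's visible logic): mfmdecode
 * is the inverse of mfmencode. Since every encoded byte fits in 7 bits,
 * a 128-entry table suffices; one way to derive it at init time, with a
 * sentinel for invalid MFM patterns, is:
 */
#if 0
static void build_mfmdecode(void)
{
	int i;

	for (i = 0; i < 128; i++)
		mfmdecode[i] = 255;		/* 255 = invalid pattern */
	for (i = 0; i < 16; i++)
		mfmdecode[mfmencode[i]] = i;	/* invert the nibble code */
}
#endif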
/* floppy internal millisecond timer stuff */
static DECLARE_COMPLETION(ms_wait_completion);
#define MS_TICKS ((amiga_eclock+50)/1000)
/*
* Note that MAX_ERRORS=X doesn't imply that we retry every bad read
* max X times - some types of errors increase the errorcount by 2 or
* even 3, so we might actually retry only X/2 times before giving up.
*/
#define MAX_ERRORS 12
#define custom amiga_custom
/* Prevent "aliased" accesses. */
static int fd_ref[4] = { 0,0,0,0 };
static int fd_device[4] = { 0, 0, 0, 0 };
/*
* Here come the actual hardware access and helper functions.
* They are not reentrant and single threaded because all drives
* share the same hardware and the same trackbuffer.
*/
/* Milliseconds timer */
static irqreturn_t ms_isr(int irq, void *dummy)
{
complete(&ms_wait_completion);
return IRQ_HANDLED;
}
/* all waits are queued up
A more generic routine would do a schedule a la timer.device */
static void ms_delay(int ms)
{
int ticks;
static DEFINE_MUTEX(mutex);
if (ms > 0) {
mutex_lock(&mutex);
ticks = MS_TICKS*ms-1;
ciaa.tblo=ticks%256;
ciaa.tbhi=ticks/256;
		ciaa.crb=0x19; /* count eclock, force load, one-shot, start */
wait_for_completion(&ms_wait_completion);
mutex_unlock(&mutex);
}
}
/* Hardware semaphore */
/* returns true when we would get the semaphore */
static inline int try_fdc(int drive)
{
drive &= 3;
return ((fdc_busy < 0) || (fdc_busy == drive));
}
static void get_fdc(int drive)
{
unsigned long flags;
drive &= 3;
#ifdef DEBUG
printk("get_fdc: drive %d fdc_busy %d fdc_nested %d\n",drive,fdc_busy,fdc_nested);
#endif
local_irq_save(flags);
wait_event(fdc_wait, try_fdc(drive));
fdc_busy = drive;
fdc_nested++;
local_irq_restore(flags);
}
static inline void rel_fdc(void)
{
#ifdef DEBUG
if (fdc_nested == 0)
printk("fd: unmatched rel_fdc\n");
printk("rel_fdc: fdc_busy %d fdc_nested %d\n",fdc_busy,fdc_nested);
#endif
fdc_nested--;
if (fdc_nested == 0) {
fdc_busy = -1;
wake_up(&fdc_wait);
}
}
static void fd_select (int drive)
{
unsigned char prb = ~0;
drive&=3;
#ifdef DEBUG
printk("selecting %d\n",drive);
#endif
if (drive == selected)
return;
get_fdc(drive);
selected = drive;
if (unit[drive].track % 2 != 0)
prb &= ~DSKSIDE;
if (unit[drive].motor == 1)
prb &= ~DSKMOTOR;
ciab.prb |= (SELMASK(0)|SELMASK(1)|SELMASK(2)|SELMASK(3));
ciab.prb = prb;
prb &= ~SELMASK(drive);
ciab.prb = prb;
rel_fdc();
}
static void fd_deselect (int drive)
{
unsigned char prb;
unsigned long flags;
drive&=3;
#ifdef DEBUG
printk("deselecting %d\n",drive);
#endif
if (drive != selected) {
printk(KERN_WARNING "Deselecting drive %d while %d was selected!\n",drive,selected);
return;
}
get_fdc(drive);
local_irq_save(flags);
selected = -1;
prb = ciab.prb;
prb |= (SELMASK(0)|SELMASK(1)|SELMASK(2)|SELMASK(3));
ciab.prb = prb;
local_irq_restore (flags);
rel_fdc();
}
static void motor_on_callback(struct timer_list *unused)
{
if (!(ciaa.pra & DSKRDY) || --on_attempts == 0) {
complete_all(&motor_on_completion);
} else {
motor_on_timer.expires = jiffies + HZ/10;
add_timer(&motor_on_timer);
}
}
static int fd_motor_on(int nr)
{
nr &= 3;
del_timer(motor_off_timer + nr);
if (!unit[nr].motor) {
unit[nr].motor = 1;
fd_select(nr);
reinit_completion(&motor_on_completion);
mod_timer(&motor_on_timer, jiffies + HZ/2);
on_attempts = 10;
wait_for_completion(&motor_on_completion);
fd_deselect(nr);
}
if (on_attempts == 0) {
on_attempts = -1;
#if 0
printk (KERN_ERR "motor_on failed, turning motor off\n");
fd_motor_off (motor_off_timer + nr);
return 0;
#else
printk (KERN_WARNING "DSKRDY not set after 1.5 seconds - assuming drive is spinning notwithstanding\n");
#endif
}
return 1;
}
static void fd_motor_off(struct timer_list *timer)
{
unsigned long drive = ((unsigned long)timer -
(unsigned long)&motor_off_timer[0]) /
sizeof(motor_off_timer[0]);
drive&=3;
if (!try_fdc(drive)) {
/* We would be blocked in an interrupt, so try again later */
timer->expires = jiffies + 1;
add_timer(timer);
return;
}
unit[drive].motor = 0;
fd_select(drive);
udelay (1);
fd_deselect(drive);
}
static void floppy_off (unsigned int nr)
{
int drive;
drive = nr & 3;
mod_timer(motor_off_timer + drive, jiffies + 3*HZ);
}
static int fd_calibrate(int drive)
{
unsigned char prb;
int n;
drive &= 3;
get_fdc(drive);
if (!fd_motor_on (drive))
return 0;
fd_select (drive);
prb = ciab.prb;
prb |= DSKSIDE;
prb &= ~DSKDIREC;
ciab.prb = prb;
for (n = unit[drive].type->tracks/2; n != 0; --n) {
if (ciaa.pra & DSKTRACK0)
break;
prb &= ~DSKSTEP;
ciab.prb = prb;
prb |= DSKSTEP;
udelay (2);
ciab.prb = prb;
ms_delay(unit[drive].type->step_delay);
}
ms_delay (unit[drive].type->settle_time);
prb |= DSKDIREC;
n = unit[drive].type->tracks + 20;
for (;;) {
prb &= ~DSKSTEP;
ciab.prb = prb;
prb |= DSKSTEP;
udelay (2);
ciab.prb = prb;
ms_delay(unit[drive].type->step_delay + 1);
if ((ciaa.pra & DSKTRACK0) == 0)
break;
if (--n == 0) {
printk (KERN_ERR "fd%d: calibrate failed, turning motor off\n", drive);
fd_motor_off (motor_off_timer + drive);
unit[drive].track = -1;
rel_fdc();
return 0;
}
}
unit[drive].track = 0;
ms_delay(unit[drive].type->settle_time);
rel_fdc();
fd_deselect(drive);
return 1;
}
static int fd_seek(int drive, int track)
{
unsigned char prb;
int cnt;
#ifdef DEBUG
printk("seeking drive %d to track %d\n",drive,track);
#endif
drive &= 3;
get_fdc(drive);
if (unit[drive].track == track) {
rel_fdc();
return 1;
}
if (!fd_motor_on(drive)) {
rel_fdc();
return 0;
}
if (unit[drive].track < 0 && !fd_calibrate(drive)) {
rel_fdc();
return 0;
}
fd_select (drive);
cnt = unit[drive].track/2 - track/2;
prb = ciab.prb;
prb |= DSKSIDE | DSKDIREC;
if (track % 2 != 0)
prb &= ~DSKSIDE;
if (cnt < 0) {
cnt = - cnt;
prb &= ~DSKDIREC;
}
ciab.prb = prb;
if (track % 2 != unit[drive].track % 2)
ms_delay (unit[drive].type->side_time);
unit[drive].track = track;
if (cnt == 0) {
rel_fdc();
fd_deselect(drive);
return 1;
}
do {
prb &= ~DSKSTEP;
ciab.prb = prb;
prb |= DSKSTEP;
udelay (1);
ciab.prb = prb;
ms_delay (unit[drive].type->step_delay);
} while (--cnt != 0);
ms_delay (unit[drive].type->settle_time);
rel_fdc();
fd_deselect(drive);
return 1;
}
static unsigned long fd_get_drive_id(int drive)
{
int i;
ulong id = 0;
drive&=3;
get_fdc(drive);
/* set up for ID */
MOTOR_ON;
udelay(2);
SELECT(SELMASK(drive));
udelay(2);
DESELECT(SELMASK(drive));
udelay(2);
MOTOR_OFF;
udelay(2);
SELECT(SELMASK(drive));
udelay(2);
DESELECT(SELMASK(drive));
udelay(2);
/* loop and read disk ID */
for (i=0; i<32; i++) {
SELECT(SELMASK(drive));
udelay(2);
/* read and store value of DSKRDY */
id <<= 1;
id |= (ciaa.pra & DSKRDY) ? 0 : 1; /* cia regs are low-active! */
DESELECT(SELMASK(drive));
}
rel_fdc();
/*
* RB: At least A500/A2000's df0: don't identify themselves.
* As every (real) Amiga has at least a 3.5" DD drive as df0:
* we default to that if df0: doesn't identify as a certain
* type.
*/
if(drive == 0 && id == FD_NODRIVE)
{
id = fd_def_df0;
printk(KERN_NOTICE "fd: drive 0 didn't identify, setting default %08lx\n", (ulong)fd_def_df0);
}
/* return the ID value */
return (id);
}
static irqreturn_t fd_block_done(int irq, void *dummy)
{
if (block_flag)
custom.dsklen = 0x4000;
if (block_flag == 2) { /* writing */
writepending = 2;
post_write_timer.expires = jiffies + 1; /* at least 2 ms */
post_write_timer_drive = selected;
add_timer(&post_write_timer);
}
else { /* reading */
block_flag = 0;
wake_up (&wait_fd_block);
}
return IRQ_HANDLED;
}
static void raw_read(int drive)
{
drive&=3;
get_fdc(drive);
wait_event(wait_fd_block, !block_flag);
fd_select(drive);
/* setup adkcon bits correctly */
custom.adkcon = ADK_MSBSYNC;
custom.adkcon = ADK_SETCLR|ADK_WORDSYNC|ADK_FAST;
custom.dsksync = MFM_SYNC;
custom.dsklen = 0;
custom.dskptr = (u_char *)ZTWO_PADDR((u_char *)raw_buf);
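	/* DSKLEN must be written twice with DMAEN set before disk DMA
	 * actually starts (a hardware safeguard against stray writes),
	 * hence the deliberate duplicate write below
	 */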
custom.dsklen = unit[drive].type->read_size/sizeof(short) | DSKLEN_DMAEN;
custom.dsklen = unit[drive].type->read_size/sizeof(short) | DSKLEN_DMAEN;
block_flag = 1;
wait_event(wait_fd_block, !block_flag);
custom.dsklen = 0;
fd_deselect(drive);
rel_fdc();
}
static int raw_write(int drive)
{
ushort adk;
drive&=3;
get_fdc(drive); /* corresponds to rel_fdc() in post_write() */
if ((ciaa.pra & DSKPROT) == 0) {
rel_fdc();
return 0;
}
wait_event(wait_fd_block, !block_flag);
fd_select(drive);
/* clear adkcon bits */
custom.adkcon = ADK_PRECOMP1|ADK_PRECOMP0|ADK_WORDSYNC|ADK_MSBSYNC;
/* set appropriate adkcon bits */
adk = ADK_SETCLR|ADK_FAST;
if ((ulong)unit[drive].track >= unit[drive].type->precomp2)
adk |= ADK_PRECOMP1;
else if ((ulong)unit[drive].track >= unit[drive].type->precomp1)
adk |= ADK_PRECOMP0;
custom.adkcon = adk;
custom.dsklen = DSKLEN_WRITE;
custom.dskptr = (u_char *)ZTWO_PADDR((u_char *)raw_buf);
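	/* as in raw_read(): the duplicate DSKLEN write below is required
	 * to actually arm the DMA
	 */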
custom.dsklen = unit[drive].type->write_size/sizeof(short) | DSKLEN_DMAEN|DSKLEN_WRITE;
custom.dsklen = unit[drive].type->write_size/sizeof(short) | DSKLEN_DMAEN|DSKLEN_WRITE;
block_flag = 2;
return 1;
}
/*
* to be called at least 2ms after the write has finished but before any
* other access to the hardware.
*/
static void post_write (unsigned long drive)
{
#ifdef DEBUG
printk("post_write for drive %ld\n",drive);
#endif
drive &= 3;
custom.dsklen = 0;
block_flag = 0;
writepending = 0;
writefromint = 0;
unit[drive].dirty = 0;
wake_up(&wait_fd_block);
fd_deselect(drive);
rel_fdc(); /* corresponds to get_fdc() in raw_write */
}
static void post_write_callback(struct timer_list *timer)
{
post_write(post_write_timer_drive);
}
/*
* The following functions are to convert the block contents into raw data
* written to disk and vice versa.
* (Add other formats here ;-))
*/
static unsigned long scan_sync(unsigned long raw, unsigned long end)
{
ushort *ptr = (ushort *)raw, *endp = (ushort *)end;
while (ptr < endp && *ptr++ != 0x4489)
;
if (ptr < endp) {
while (*ptr == 0x4489 && ptr < endp)
ptr++;
return (ulong)ptr;
}
return 0;
}
static inline unsigned long checksum(unsigned long *addr, int len)
{
unsigned long csum = 0;
len /= sizeof(*addr);
while (len-- > 0)
csum ^= *addr++;
csum = ((csum>>1) & 0x55555555) ^ (csum & 0x55555555);
return csum;
}
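/*
 * Note on the final fold (illustrative, based on the Amiga sector
 * format): the on-disk checksum is defined over the raw MFM longwords
 * masked with 0x55555555. After decode() has merged odd bits (shifted
 * left by one) and even bits into plain data, xoring the data longwords
 * and folding with
 *	((csum >> 1) & 0x55555555) ^ (csum & 0x55555555)
 * recovers that MFM-domain checksum without re-encoding.
 */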
static unsigned long decode (unsigned long *data, unsigned long *raw,
int len)
{
ulong *odd, *even;
/* convert length from bytes to longwords */
len >>= 2;
odd = raw;
even = odd + len;
/* prepare return pointer */
raw += len * 2;
do {
*data++ = ((*odd++ & 0x55555555) << 1) | (*even++ & 0x55555555);
} while (--len != 0);
return (ulong)raw;
}
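/*
 * Worked example (illustrative): for one longword whose odd-bits word
 * is 0x55555555 and whose even-bits word is 0x00000000,
 *	data = ((0x55555555 & 0x55555555) << 1) | (0 & 0x55555555)
 *	     = 0xAAAAAAAA
 * i.e. the odd MFM word supplies data bits 31, 29, ..., 1 and the even
 * word bits 30, 28, ..., 0.
 */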
struct header {
unsigned char magic;
unsigned char track;
unsigned char sect;
unsigned char ord;
unsigned char labels[16];
unsigned long hdrchk;
unsigned long datachk;
};
static int amiga_read(int drive)
{
unsigned long raw;
unsigned long end;
int scnt;
unsigned long csum;
struct header hdr;
drive&=3;
raw = (long) raw_buf;
end = raw + unit[drive].type->read_size;
for (scnt = 0;scnt < unit[drive].dtype->sects * unit[drive].type->sect_mult; scnt++) {
if (!(raw = scan_sync(raw, end))) {
printk (KERN_INFO "can't find sync for sector %d\n", scnt);
return MFM_NOSYNC;
}
raw = decode ((ulong *)&hdr.magic, (ulong *)raw, 4);
raw = decode ((ulong *)&hdr.labels, (ulong *)raw, 16);
raw = decode ((ulong *)&hdr.hdrchk, (ulong *)raw, 4);
raw = decode ((ulong *)&hdr.datachk, (ulong *)raw, 4);
csum = checksum((ulong *)&hdr,
(char *)&hdr.hdrchk-(char *)&hdr);
#ifdef DEBUG
printk ("(%x,%d,%d,%d) (%lx,%lx,%lx,%lx) %lx %lx\n",
hdr.magic, hdr.track, hdr.sect, hdr.ord,
*(ulong *)&hdr.labels[0], *(ulong *)&hdr.labels[4],
*(ulong *)&hdr.labels[8], *(ulong *)&hdr.labels[12],
hdr.hdrchk, hdr.datachk);
#endif
if (hdr.hdrchk != csum) {
printk(KERN_INFO "MFM_HEADER: %08lx,%08lx\n", hdr.hdrchk, csum);
return MFM_HEADER;
}
/* verify track */
if (hdr.track != unit[drive].track) {
printk(KERN_INFO "MFM_TRACK: %d, %d\n", hdr.track, unit[drive].track);
return MFM_TRACK;
}
raw = decode ((ulong *)(unit[drive].trackbuf + hdr.sect*512),
(ulong *)raw, 512);
csum = checksum((ulong *)(unit[drive].trackbuf + hdr.sect*512), 512);
if (hdr.datachk != csum) {
printk(KERN_INFO "MFM_DATA: (%x:%d:%d:%d) sc=%d %lx, %lx\n",
hdr.magic, hdr.track, hdr.sect, hdr.ord, scnt,
hdr.datachk, csum);
printk (KERN_INFO "data=(%lx,%lx,%lx,%lx)\n",
((ulong *)(unit[drive].trackbuf+hdr.sect*512))[0],
((ulong *)(unit[drive].trackbuf+hdr.sect*512))[1],
((ulong *)(unit[drive].trackbuf+hdr.sect*512))[2],
((ulong *)(unit[drive].trackbuf+hdr.sect*512))[3]);
return MFM_DATA;
}
}
return 0;
}
static void encode(unsigned long data, unsigned long *dest)
{
unsigned long data2;
data &= 0x55555555;
data2 = data ^ 0x55555555;
data |= ((data2 >> 1) | 0x80000000) & (data2 << 1);
if (*(dest - 1) & 0x00000001)
data &= 0x7FFFFFFF;
*dest = data;
}
static void encode_block(unsigned long *dest, unsigned long *src, int len)
{
int cnt, to_cnt = 0;
unsigned long data;
/* odd bits */
for (cnt = 0; cnt < len / 4; cnt++) {
data = src[cnt] >> 1;
encode(data, dest + to_cnt++);
}
/* even bits */
for (cnt = 0; cnt < len / 4; cnt++) {
data = src[cnt];
encode(data, dest + to_cnt++);
}
}
static unsigned long *putsec(int disk, unsigned long *raw, int cnt)
{
struct header hdr;
int i;
disk&=3;
*raw = (raw[-1]&1) ? 0x2AAAAAAA : 0xAAAAAAAA;
raw++;
*raw++ = 0x44894489;
hdr.magic = 0xFF;
hdr.track = unit[disk].track;
hdr.sect = cnt;
hdr.ord = unit[disk].dtype->sects * unit[disk].type->sect_mult - cnt;
for (i = 0; i < 16; i++)
hdr.labels[i] = 0;
hdr.hdrchk = checksum((ulong *)&hdr,
(char *)&hdr.hdrchk-(char *)&hdr);
hdr.datachk = checksum((ulong *)(unit[disk].trackbuf+cnt*512), 512);
encode_block(raw, (ulong *)&hdr.magic, 4);
raw += 2;
encode_block(raw, (ulong *)&hdr.labels, 16);
raw += 8;
encode_block(raw, (ulong *)&hdr.hdrchk, 4);
raw += 2;
encode_block(raw, (ulong *)&hdr.datachk, 4);
raw += 2;
encode_block(raw, (ulong *)(unit[disk].trackbuf+cnt*512), 512);
raw += 256;
return raw;
}
static void amiga_write(int disk)
{
unsigned int cnt;
unsigned long *ptr = (unsigned long *)raw_buf;
disk&=3;
/* gap space */
for (cnt = 0; cnt < 415 * unit[disk].type->sect_mult; cnt++)
*ptr++ = 0xaaaaaaaa;
/* sectors */
for (cnt = 0; cnt < unit[disk].dtype->sects * unit[disk].type->sect_mult; cnt++)
ptr = putsec (disk, ptr, cnt);
*(ushort *)ptr = (ptr[-1]&1) ? 0x2AA8 : 0xAAA8;
}
struct dos_header {
unsigned char track, /* 0-80 */
side, /* 0-1 */
sec, /* 0-...*/
len_desc;/* 2 */
unsigned short crc; /* on 68000 we got an alignment problem,
but this compiler solves it by silently
adding a pad byte, so the data won't fit,
and this took about 3h to discover.... */
unsigned char gap1[22]; /* for longword alignment (0x4e) */
};
/* crc routines are borrowed from the messydos-handler */
/* excerpt from the messydos-device
; The CRC is computed not only over the actual data, but including
; the SYNC mark (3 * $a1) and the 'ID/DATA - Address Mark' ($fe/$fb).
; As we don't read or encode these fields into our buffers, we have to
; preload the registers containing the CRC with the values they would have
; after stepping over these fields.
;
; How CRCs "really" work:
;
; First, you should regard a bitstring as a series of coefficients of
; polynomials. We calculate with these polynomials in modulo-2
; arithmetic, in which both add and subtract are done the same as
; exclusive-or. Now, we modify our data (a very long polynomial) in
; such a way that it becomes divisible by the CCITT-standard 16-bit
; polynomial: x^16 + x^12 + x^5 + 1, represented by $11021. The easiest
; way to do this would be to multiply (using proper arithmetic) our
; datablock with $11021. So we have:
; data * $11021 =
; data * ($10000 + $1021) =
; data * $10000 + data * $1021
; The left part of this is simple: Just add two 0 bytes. But then
; the right part (data * $1021) remains difficult and could even have
; a carry into the left part. The solution is to use a modified
; multiplication, which has a result that is not correct, but with
; a difference of any multiple of $11021. We then only need to keep
; the 16 least significant bits of the result.
;
; The following algorithm does this for us:
;
; unsigned char *data, c, crclo, crchi;
; while (not done) {
; c = *data++ + crchi;
; crchi = (@ c) >> 8 + crclo;
; crclo = @ c;
; }
;
; Remember, + is done with EOR, the @ operator is in two tables (high
; and low byte separately), which is calculated as
;
; $1021 * (c & $F0)
; xor $1021 * (c & $0F)
; xor $1021 * (c >> 4) (* is regular multiplication)
;
;
; Anyway, the end result is the same as the remainder of the division of
; the data by $11021. I am afraid I need to study theory a bit more...
my only work was to port this from Manx assembler to C....
*/
static ushort dos_crc(void * data_a3, int data_d0, int data_d1, int data_d3)
{
static unsigned char CRCTable1[] = {
0x00,0x10,0x20,0x30,0x40,0x50,0x60,0x70,0x81,0x91,0xa1,0xb1,0xc1,0xd1,0xe1,0xf1,
0x12,0x02,0x32,0x22,0x52,0x42,0x72,0x62,0x93,0x83,0xb3,0xa3,0xd3,0xc3,0xf3,0xe3,
0x24,0x34,0x04,0x14,0x64,0x74,0x44,0x54,0xa5,0xb5,0x85,0x95,0xe5,0xf5,0xc5,0xd5,
0x36,0x26,0x16,0x06,0x76,0x66,0x56,0x46,0xb7,0xa7,0x97,0x87,0xf7,0xe7,0xd7,0xc7,
0x48,0x58,0x68,0x78,0x08,0x18,0x28,0x38,0xc9,0xd9,0xe9,0xf9,0x89,0x99,0xa9,0xb9,
0x5a,0x4a,0x7a,0x6a,0x1a,0x0a,0x3a,0x2a,0xdb,0xcb,0xfb,0xeb,0x9b,0x8b,0xbb,0xab,
0x6c,0x7c,0x4c,0x5c,0x2c,0x3c,0x0c,0x1c,0xed,0xfd,0xcd,0xdd,0xad,0xbd,0x8d,0x9d,
0x7e,0x6e,0x5e,0x4e,0x3e,0x2e,0x1e,0x0e,0xff,0xef,0xdf,0xcf,0xbf,0xaf,0x9f,0x8f,
0x91,0x81,0xb1,0xa1,0xd1,0xc1,0xf1,0xe1,0x10,0x00,0x30,0x20,0x50,0x40,0x70,0x60,
0x83,0x93,0xa3,0xb3,0xc3,0xd3,0xe3,0xf3,0x02,0x12,0x22,0x32,0x42,0x52,0x62,0x72,
0xb5,0xa5,0x95,0x85,0xf5,0xe5,0xd5,0xc5,0x34,0x24,0x14,0x04,0x74,0x64,0x54,0x44,
0xa7,0xb7,0x87,0x97,0xe7,0xf7,0xc7,0xd7,0x26,0x36,0x06,0x16,0x66,0x76,0x46,0x56,
0xd9,0xc9,0xf9,0xe9,0x99,0x89,0xb9,0xa9,0x58,0x48,0x78,0x68,0x18,0x08,0x38,0x28,
0xcb,0xdb,0xeb,0xfb,0x8b,0x9b,0xab,0xbb,0x4a,0x5a,0x6a,0x7a,0x0a,0x1a,0x2a,0x3a,
0xfd,0xed,0xdd,0xcd,0xbd,0xad,0x9d,0x8d,0x7c,0x6c,0x5c,0x4c,0x3c,0x2c,0x1c,0x0c,
0xef,0xff,0xcf,0xdf,0xaf,0xbf,0x8f,0x9f,0x6e,0x7e,0x4e,0x5e,0x2e,0x3e,0x0e,0x1e
};
static unsigned char CRCTable2[] = {
0x00,0x21,0x42,0x63,0x84,0xa5,0xc6,0xe7,0x08,0x29,0x4a,0x6b,0x8c,0xad,0xce,0xef,
0x31,0x10,0x73,0x52,0xb5,0x94,0xf7,0xd6,0x39,0x18,0x7b,0x5a,0xbd,0x9c,0xff,0xde,
0x62,0x43,0x20,0x01,0xe6,0xc7,0xa4,0x85,0x6a,0x4b,0x28,0x09,0xee,0xcf,0xac,0x8d,
0x53,0x72,0x11,0x30,0xd7,0xf6,0x95,0xb4,0x5b,0x7a,0x19,0x38,0xdf,0xfe,0x9d,0xbc,
0xc4,0xe5,0x86,0xa7,0x40,0x61,0x02,0x23,0xcc,0xed,0x8e,0xaf,0x48,0x69,0x0a,0x2b,
0xf5,0xd4,0xb7,0x96,0x71,0x50,0x33,0x12,0xfd,0xdc,0xbf,0x9e,0x79,0x58,0x3b,0x1a,
0xa6,0x87,0xe4,0xc5,0x22,0x03,0x60,0x41,0xae,0x8f,0xec,0xcd,0x2a,0x0b,0x68,0x49,
0x97,0xb6,0xd5,0xf4,0x13,0x32,0x51,0x70,0x9f,0xbe,0xdd,0xfc,0x1b,0x3a,0x59,0x78,
0x88,0xa9,0xca,0xeb,0x0c,0x2d,0x4e,0x6f,0x80,0xa1,0xc2,0xe3,0x04,0x25,0x46,0x67,
0xb9,0x98,0xfb,0xda,0x3d,0x1c,0x7f,0x5e,0xb1,0x90,0xf3,0xd2,0x35,0x14,0x77,0x56,
0xea,0xcb,0xa8,0x89,0x6e,0x4f,0x2c,0x0d,0xe2,0xc3,0xa0,0x81,0x66,0x47,0x24,0x05,
0xdb,0xfa,0x99,0xb8,0x5f,0x7e,0x1d,0x3c,0xd3,0xf2,0x91,0xb0,0x57,0x76,0x15,0x34,
0x4c,0x6d,0x0e,0x2f,0xc8,0xe9,0x8a,0xab,0x44,0x65,0x06,0x27,0xc0,0xe1,0x82,0xa3,
0x7d,0x5c,0x3f,0x1e,0xf9,0xd8,0xbb,0x9a,0x75,0x54,0x37,0x16,0xf1,0xd0,0xb3,0x92,
0x2e,0x0f,0x6c,0x4d,0xaa,0x8b,0xe8,0xc9,0x26,0x07,0x64,0x45,0xa2,0x83,0xe0,0xc1,
0x1f,0x3e,0x5d,0x7c,0x9b,0xba,0xd9,0xf8,0x17,0x36,0x55,0x74,0x93,0xb2,0xd1,0xf0
};
/* look at the asm code - what looks a bit strange in C compiles almost as well as handmade assembler */
register int i;
register unsigned char *CRCT1, *CRCT2, *data, c, crch, crcl;
CRCT1=CRCTable1;
CRCT2=CRCTable2;
data=data_a3;
crcl=data_d1;
crch=data_d0;
for (i=data_d3; i>=0; i--) {
c = (*data++) ^ crch;
crch = CRCT1[c] ^ crcl;
crcl = CRCT2[c];
}
return (crch<<8)|crcl;
}
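/*
 * Editor's sketch (hypothetical helper, not part of the driver): the
 * two-table lookup above is equivalent to the classic bit-at-a-time
 * CRC-CCITT update over polynomial $1021, with the CRC register
 * preloaded as described in the excerpt. Shown only to make the math
 * concrete; it assumes the same MSB-first convention as dos_crc().
 */
static inline ushort dos_crc_bitwise(const unsigned char *buf, int len,
				     ushort crc)
{
	int i;

	while (len-- > 0) {
		/* fold the next byte into the top of the CRC register */
		crc ^= (ushort)(*buf++) << 8;
		for (i = 0; i < 8; i++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x1021
					     : (crc << 1);
	}
	return crc;
}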
static inline ushort dos_hdr_crc (struct dos_header *hdr)
{
return dos_crc(&(hdr->track), 0xb2, 0x30, 3); /* precomputed magic */
}
static inline ushort dos_data_crc(unsigned char *data)
{
return dos_crc(data, 0xe2, 0x95, 511); /* precomputed magic */
}
static inline unsigned char dos_decode_byte(ushort word)
{
register ushort w2;
register unsigned char byte;
register unsigned char *dec = mfmdecode;
w2=word;
w2>>=8;
w2&=127;
byte = dec[w2];
byte <<= 4;
w2 = word & 127;
byte |= dec[w2];
return byte;
}
static unsigned long dos_decode(unsigned char *data, unsigned short *raw, int len)
{
int i;
for (i = 0; i < len; i++)
*data++=dos_decode_byte(*raw++);
return ((ulong)raw);
}
#ifdef DEBUG
static void dbg(unsigned long ptr)
{
printk("raw data @%08lx: %08lx, %08lx ,%08lx, %08lx\n", ptr,
((ulong *)ptr)[0], ((ulong *)ptr)[1],
((ulong *)ptr)[2], ((ulong *)ptr)[3]);
}
#endif
static int dos_read(int drive)
{
unsigned long end;
unsigned long raw;
int scnt;
unsigned short crc,data_crc[2];
struct dos_header hdr;
drive&=3;
raw = (long) raw_buf;
end = raw + unit[drive].type->read_size;
for (scnt=0; scnt < unit[drive].dtype->sects * unit[drive].type->sect_mult; scnt++) {
do { /* search for the right sync of each sec-hdr */
if (!(raw = scan_sync (raw, end))) {
printk(KERN_INFO "dos_read: no hdr sync on "
"track %d, unit %d for sector %d\n",
unit[drive].track,drive,scnt);
return MFM_NOSYNC;
}
#ifdef DEBUG
dbg(raw);
#endif
} while (*((ushort *)raw) != 0x5554); /* usually loops only once */
raw+=2; /* skip over headermark */
raw = dos_decode((unsigned char *)&hdr,(ushort *) raw,8);
crc = dos_hdr_crc(&hdr);
#ifdef DEBUG
printk("(%3d,%d,%2d,%d) %x\n", hdr.track, hdr.side,
hdr.sec, hdr.len_desc, hdr.crc);
#endif
if (crc != hdr.crc) {
printk(KERN_INFO "dos_read: MFM_HEADER %04x,%04x\n",
hdr.crc, crc);
return MFM_HEADER;
}
if (hdr.track != unit[drive].track/unit[drive].type->heads) {
printk(KERN_INFO "dos_read: MFM_TRACK %d, %d\n",
hdr.track,
unit[drive].track/unit[drive].type->heads);
return MFM_TRACK;
}
if (hdr.side != unit[drive].track%unit[drive].type->heads) {
printk(KERN_INFO "dos_read: MFM_SIDE %d, %d\n",
hdr.side,
unit[drive].track%unit[drive].type->heads);
return MFM_TRACK;
}
if (hdr.len_desc != 2) {
printk(KERN_INFO "dos_read: unknown sector len "
"descriptor %d\n", hdr.len_desc);
return MFM_DATA;
}
#ifdef DEBUG
printk("hdr accepted\n");
#endif
if (!(raw = scan_sync (raw, end))) {
printk(KERN_INFO "dos_read: no data sync on track "
"%d, unit %d for sector%d, disk sector %d\n",
unit[drive].track, drive, scnt, hdr.sec);
return MFM_NOSYNC;
}
#ifdef DEBUG
dbg(raw);
#endif
if (*((ushort *)raw)!=0x5545) {
printk(KERN_INFO "dos_read: no data mark after "
"sync (%d,%d,%d,%d) sc=%d\n",
hdr.track,hdr.side,hdr.sec,hdr.len_desc,scnt);
return MFM_NOSYNC;
}
raw+=2; /* skip data mark (included in checksum) */
raw = dos_decode((unsigned char *)(unit[drive].trackbuf + (hdr.sec - 1) * 512), (ushort *) raw, 512);
raw = dos_decode((unsigned char *)data_crc,(ushort *) raw,4);
crc = dos_data_crc(unit[drive].trackbuf + (hdr.sec - 1) * 512);
if (crc != data_crc[0]) {
printk(KERN_INFO "dos_read: MFM_DATA (%d,%d,%d,%d) "
"sc=%d, %x %x\n", hdr.track, hdr.side,
hdr.sec, hdr.len_desc, scnt,data_crc[0], crc);
printk(KERN_INFO "data=(%lx,%lx,%lx,%lx,...)\n",
((ulong *)(unit[drive].trackbuf+(hdr.sec-1)*512))[0],
((ulong *)(unit[drive].trackbuf+(hdr.sec-1)*512))[1],
((ulong *)(unit[drive].trackbuf+(hdr.sec-1)*512))[2],
((ulong *)(unit[drive].trackbuf+(hdr.sec-1)*512))[3]);
return MFM_DATA;
}
}
return 0;
}
static inline ushort dos_encode_byte(unsigned char byte)
{
register unsigned char *enc, b2, b1;
register ushort word;
enc=mfmencode;
b1=byte;
b2=b1>>4;
b1&=15;
word=enc[b2] <<8 | enc [b1];
return (word|((word&(256|64)) ? 0: 128));
}
static void dos_encode_block(ushort *dest, unsigned char *src, int len)
{
int i;
for (i = 0; i < len; i++) {
*dest=dos_encode_byte(*src++);
*dest|=((dest[-1]&1)||(*dest&0x4000))? 0: 0x8000;
dest++;
}
}
static unsigned long *ms_putsec(int drive, unsigned long *raw, int cnt)
{
static struct dos_header hdr={0,0,0,2,0,
{78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78}};
int i;
static ushort crc[2]={0,0x4e4e};
drive&=3;
/* id gap 1 */
/* the MFM word before is always 9254 */
for(i=0;i<6;i++)
*raw++=0xaaaaaaaa;
/* 3 sync + 1 headermark */
*raw++=0x44894489;
*raw++=0x44895554;
/* fill in the variable parts of the header */
hdr.track=unit[drive].track/unit[drive].type->heads;
hdr.side=unit[drive].track%unit[drive].type->heads;
hdr.sec=cnt+1;
hdr.crc=dos_hdr_crc(&hdr);
/* header (without "magic") and id gap 2*/
dos_encode_block((ushort *)raw,(unsigned char *) &hdr.track,28);
raw+=14;
/*id gap 3 */
for(i=0;i<6;i++)
*raw++=0xaaaaaaaa;
/* 3 syncs and 1 datamark */
*raw++=0x44894489;
*raw++=0x44895545;
/* data */
dos_encode_block((ushort *)raw,
(unsigned char *)unit[drive].trackbuf+cnt*512,512);
raw+=256;
/*data crc + jd's special gap (long words :-/) */
crc[0]=dos_data_crc(unit[drive].trackbuf+cnt*512);
dos_encode_block((ushort *) raw,(unsigned char *)crc,4);
raw+=2;
/* data gap */
for(i=0;i<38;i++)
*raw++=0x92549254;
return raw; /* wrote 652 MFM words */
}
static void dos_write(int disk)
{
int cnt;
unsigned long raw = (unsigned long) raw_buf;
unsigned long *ptr=(unsigned long *)raw;
disk&=3;
/* really gap4 + indexgap, but we write it first and round it up */
for (cnt=0;cnt<425;cnt++)
*ptr++=0x92549254;
/* the following is just guessed */
if (unit[disk].type->sect_mult==2) /* check for HD-Disks */
for(cnt=0;cnt<473;cnt++)
*ptr++=0x92549254;
/* now the index marks...*/
for (cnt=0;cnt<20;cnt++)
*ptr++=0x92549254;
for (cnt=0;cnt<6;cnt++)
*ptr++=0xaaaaaaaa;
*ptr++=0x52245224;
*ptr++=0x52245552;
for (cnt=0;cnt<20;cnt++)
*ptr++=0x92549254;
/* sectors */
for(cnt = 0; cnt < unit[disk].dtype->sects * unit[disk].type->sect_mult; cnt++)
ptr=ms_putsec(disk,ptr,cnt);
*(ushort *)ptr = 0xaaa8; /* MFM word before is always 0x9254 */
}
/*
* Here comes the high level stuff (i.e. the filesystem interface)
* and helper functions.
* Normally this should be the only part that has to be adapted to
* different kernel versions.
*/
/* FIXME: this assumes the drive is still spinning -
* which is only true if we complete writing a track within three seconds
*/
static void flush_track_callback(struct timer_list *timer)
{
unsigned long nr = ((unsigned long)timer -
(unsigned long)&flush_track_timer[0]) /
sizeof(flush_track_timer[0]);
nr&=3;
writefromint = 1;
if (!try_fdc(nr)) {
/* we might block in an interrupt, so try again later */
flush_track_timer[nr].expires = jiffies + 1;
add_timer(flush_track_timer + nr);
return;
}
get_fdc(nr);
(*unit[nr].dtype->write_fkt)(nr);
if (!raw_write(nr)) {
printk (KERN_NOTICE "floppy disk write protected\n");
writefromint = 0;
writepending = 0;
}
rel_fdc();
}
static int non_int_flush_track (unsigned long nr)
{
unsigned long flags;
nr&=3;
writefromint = 0;
del_timer(&post_write_timer);
get_fdc(nr);
if (!fd_motor_on(nr)) {
writepending = 0;
rel_fdc();
return 0;
}
local_irq_save(flags);
if (writepending != 2) {
local_irq_restore(flags);
(*unit[nr].dtype->write_fkt)(nr);
if (!raw_write(nr)) {
printk (KERN_NOTICE "floppy disk write protected "
"in write!\n");
writepending = 0;
return 0;
}
wait_event(wait_fd_block, block_flag != 2);
}
else {
local_irq_restore(flags);
ms_delay(2); /* 2 ms post_write delay */
post_write(nr);
}
rel_fdc();
return 1;
}
static int get_track(int drive, int track)
{
int error, errcnt;
drive&=3;
if (unit[drive].track == track)
return 0;
get_fdc(drive);
if (!fd_motor_on(drive)) {
rel_fdc();
return -1;
}
if (unit[drive].dirty == 1) {
del_timer (flush_track_timer + drive);
non_int_flush_track (drive);
}
errcnt = 0;
while (errcnt < MAX_ERRORS) {
if (!fd_seek(drive, track))
return -1;
raw_read(drive);
error = (*unit[drive].dtype->read_fkt)(drive);
if (error == 0) {
rel_fdc();
return 0;
}
/* Read Error Handling: recalibrate and try again */
unit[drive].track = -1;
errcnt++;
}
rel_fdc();
return -1;
}
static blk_status_t amiflop_rw_cur_segment(struct amiga_floppy_struct *floppy,
struct request *rq)
{
int drive = floppy - unit;
unsigned int cnt, block, track, sector;
char *data;
for (cnt = 0; cnt < blk_rq_cur_sectors(rq); cnt++) {
#ifdef DEBUG
printk("fd: sector %ld + %d requested for %s\n",
blk_rq_pos(rq), cnt,
(rq_data_dir(rq) == READ) ? "read" : "write");
#endif
block = blk_rq_pos(rq) + cnt;
track = block / (floppy->dtype->sects * floppy->type->sect_mult);
sector = block % (floppy->dtype->sects * floppy->type->sect_mult);
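/*
 * Editor's illustration: with the standard Amiga DD layout
 * (11 sectors per track, sect_mult == 1), block 123 maps to
 * track 123 / 11 == 11 and sector 123 % 11 == 2.
 */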
data = bio_data(rq->bio) + 512 * cnt;
#ifdef DEBUG
printk("access to track %d, sector %d, with buffer at "
"0x%08lx\n", track, sector, data);
#endif
if (get_track(drive, track) == -1)
return BLK_STS_IOERR;
if (rq_data_dir(rq) == READ) {
memcpy(data, floppy->trackbuf + sector * 512, 512);
} else {
memcpy(floppy->trackbuf + sector * 512, data, 512);
/* keep the drive spinning while writes are scheduled */
if (!fd_motor_on(drive))
return BLK_STS_IOERR;
/*
* setup a callback to write the track buffer
* after a short (1 tick) delay.
*/
floppy->dirty = 1;
/* reset the timer */
mod_timer (flush_track_timer + drive, jiffies + 1);
}
}
return BLK_STS_OK;
}
static blk_status_t amiflop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request *rq = bd->rq;
struct amiga_floppy_struct *floppy = rq->q->disk->private_data;
blk_status_t err;
if (!spin_trylock_irq(&amiflop_lock))
return BLK_STS_DEV_RESOURCE;
blk_mq_start_request(rq);
do {
err = amiflop_rw_cur_segment(floppy, rq);
} while (blk_update_request(rq, err, blk_rq_cur_bytes(rq)));
blk_mq_end_request(rq, err);
spin_unlock_irq(&amiflop_lock);
return BLK_STS_OK;
}
static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
int drive = MINOR(bdev->bd_dev) & 3;
geo->heads = unit[drive].type->heads;
geo->sectors = unit[drive].dtype->sects * unit[drive].type->sect_mult;
geo->cylinders = unit[drive].type->tracks;
return 0;
}
static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long param)
{
struct amiga_floppy_struct *p = bdev->bd_disk->private_data;
int drive = p - unit;
static struct floppy_struct getprm;
void __user *argp = (void __user *)param;
switch(cmd){
case FDFMTBEG:
get_fdc(drive);
if (fd_ref[drive] > 1) {
rel_fdc();
return -EBUSY;
}
if (fd_motor_on(drive) == 0) {
rel_fdc();
return -ENODEV;
}
if (fd_calibrate(drive) == 0) {
rel_fdc();
return -ENXIO;
}
floppy_off(drive);
rel_fdc();
break;
case FDFMTTRK:
if (param < p->type->tracks * p->type->heads)
{
get_fdc(drive);
if (fd_seek(drive,param) != 0){
memset(p->trackbuf, FD_FILL_BYTE,
p->dtype->sects * p->type->sect_mult * 512);
non_int_flush_track(drive);
}
floppy_off(drive);
rel_fdc();
}
else
return -EINVAL;
break;
case FDFMTEND:
floppy_off(drive);
invalidate_bdev(bdev);
break;
case FDGETPRM:
memset((void *)&getprm, 0, sizeof (getprm));
getprm.track=p->type->tracks;
getprm.head=p->type->heads;
getprm.sect=p->dtype->sects * p->type->sect_mult;
getprm.size=p->blocks;
if (copy_to_user(argp, &getprm, sizeof(struct floppy_struct)))
return -EFAULT;
break;
case FDSETPRM:
case FDDEFPRM:
return -EINVAL;
case FDFLUSH: /* unconditionally, even if not needed */
del_timer (flush_track_timer + drive);
non_int_flush_track(drive);
break;
#ifdef RAW_IOCTL
case IOCTL_RAW_TRACK:
if (copy_to_user(argp, raw_buf, p->type->read_size))
return -EFAULT;
else
return p->type->read_size;
#endif
default:
return -ENOSYS;
}
return 0;
}
static int fd_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long param)
{
int ret;
mutex_lock(&amiflop_mutex);
ret = fd_locked_ioctl(bdev, mode, cmd, param);
mutex_unlock(&amiflop_mutex);
return ret;
}
static void fd_probe(int dev)
{
unsigned long code;
int type;
int drive;
drive = dev & 3;
code = fd_get_drive_id(drive);
/* get drive type */
for (type = 0; type < num_dr_types; type++)
if (drive_types[type].code == code)
break;
if (type >= num_dr_types) {
printk(KERN_WARNING "fd_probe: unsupported drive type "
"%08lx found\n", code);
unit[drive].type = &drive_types[num_dr_types-1]; /* FD_NODRIVE */
return;
}
unit[drive].type = drive_types + type;
unit[drive].track = -1;
unit[drive].disk = -1;
unit[drive].motor = 0;
unit[drive].busy = 0;
unit[drive].status = -1;
}
/*
* floppy_open checks for aliasing (/dev/fd0 can be the same as
* /dev/PS0 etc.) and disallows simultaneous access to the same
* drive with different device numbers.
*/
static int floppy_open(struct gendisk *disk, blk_mode_t mode)
{
int drive = disk->first_minor & 3;
int system = (disk->first_minor & 4) >> 2;
int old_dev;
unsigned long flags;
mutex_lock(&amiflop_mutex);
old_dev = fd_device[drive];
if (fd_ref[drive] && old_dev != system) {
mutex_unlock(&amiflop_mutex);
return -EBUSY;
}
if (unit[drive].type->code == FD_NODRIVE) {
mutex_unlock(&amiflop_mutex);
return -ENXIO;
}
if (mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) {
disk_check_media_change(disk);
if (mode & BLK_OPEN_WRITE) {
int wrprot;
get_fdc(drive);
fd_select (drive);
wrprot = !(ciaa.pra & DSKPROT);
fd_deselect (drive);
rel_fdc();
if (wrprot) {
mutex_unlock(&amiflop_mutex);
return -EROFS;
}
}
}
local_irq_save(flags);
fd_ref[drive]++;
fd_device[drive] = system;
local_irq_restore(flags);
unit[drive].dtype=&data_types[system];
unit[drive].blocks=unit[drive].type->heads*unit[drive].type->tracks*
data_types[system].sects*unit[drive].type->sect_mult;
set_capacity(unit[drive].gendisk[system], unit[drive].blocks);
printk(KERN_INFO "fd%d: accessing %s-disk with %s-layout\n",drive,
unit[drive].type->name, data_types[system].name);
mutex_unlock(&amiflop_mutex);
return 0;
}
static void floppy_release(struct gendisk *disk)
{
struct amiga_floppy_struct *p = disk->private_data;
int drive = p - unit;
mutex_lock(&amiflop_mutex);
if (unit[drive].dirty == 1) {
del_timer (flush_track_timer + drive);
non_int_flush_track (drive);
}
if (!fd_ref[drive]--) {
printk(KERN_CRIT "floppy_release with fd_ref == 0");
fd_ref[drive] = 0;
}
#ifdef MODULE
floppy_off (drive);
#endif
mutex_unlock(&amiflop_mutex);
}
/*
* check_events is never called from an interrupt, so we can relax a bit
* here, sleep etc. Note that floppy-on tries to set current_DOR to point
* to the desired drive, but it will probably not survive the sleep if
* several floppies are used at the same time: thus the loop.
*/
static unsigned amiga_check_events(struct gendisk *disk, unsigned int clearing)
{
struct amiga_floppy_struct *p = disk->private_data;
int drive = p - unit;
int changed;
static int first_time = 1;
if (first_time)
changed = first_time--;
else {
get_fdc(drive);
fd_select (drive);
changed = !(ciaa.pra & DSKCHANGE);
fd_deselect (drive);
rel_fdc();
}
if (changed) {
fd_probe(drive);
p->track = -1;
p->dirty = 0;
writepending = 0; /* if this was true before, too bad! */
writefromint = 0;
return DISK_EVENT_MEDIA_CHANGE;
}
return 0;
}
static const struct block_device_operations floppy_fops = {
.owner = THIS_MODULE,
.open = floppy_open,
.release = floppy_release,
.ioctl = fd_ioctl,
.getgeo = fd_getgeo,
.check_events = amiga_check_events,
};
static const struct blk_mq_ops amiflop_mq_ops = {
.queue_rq = amiflop_queue_rq,
};
static int fd_alloc_disk(int drive, int system)
{
struct gendisk *disk;
int err;
disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL);
if (IS_ERR(disk))
return PTR_ERR(disk);
disk->major = FLOPPY_MAJOR;
disk->first_minor = drive + system;
disk->minors = 1;
disk->fops = &floppy_fops;
disk->flags |= GENHD_FL_NO_PART;
disk->events = DISK_EVENT_MEDIA_CHANGE;
if (system)
sprintf(disk->disk_name, "fd%d_msdos", drive);
else
sprintf(disk->disk_name, "fd%d", drive);
disk->private_data = &unit[drive];
set_capacity(disk, 880 * 2);
unit[drive].gendisk[system] = disk;
err = add_disk(disk);
if (err)
put_disk(disk);
return err;
}
static int fd_alloc_drive(int drive)
{
unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL);
if (!unit[drive].trackbuf)
goto out;
memset(&unit[drive].tag_set, 0, sizeof(unit[drive].tag_set));
unit[drive].tag_set.ops = &amiflop_mq_ops;
unit[drive].tag_set.nr_hw_queues = 1;
unit[drive].tag_set.nr_maps = 1;
unit[drive].tag_set.queue_depth = 2;
unit[drive].tag_set.numa_node = NUMA_NO_NODE;
unit[drive].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
if (blk_mq_alloc_tag_set(&unit[drive].tag_set))
goto out_cleanup_trackbuf;
pr_cont(" fd%d", drive);
if (fd_alloc_disk(drive, 0) || fd_alloc_disk(drive, 1))
goto out_cleanup_tagset;
return 0;
out_cleanup_tagset:
blk_mq_free_tag_set(&unit[drive].tag_set);
out_cleanup_trackbuf:
kfree(unit[drive].trackbuf);
out:
unit[drive].type->code = FD_NODRIVE;
return -ENOMEM;
}
static int __init fd_probe_drives(void)
{
int drive,drives,nomem;
pr_info("FD: probing units\nfound");
drives=0;
nomem=0;
for(drive=0;drive<FD_MAX_UNITS;drive++) {
fd_probe(drive);
if (unit[drive].type->code == FD_NODRIVE)
continue;
if (fd_alloc_drive(drive) < 0) {
pr_cont(" no mem for fd%d", drive);
nomem = 1;
continue;
}
drives++;
}
if ((drives > 0) || (nomem == 0)) {
if (drives == 0)
pr_cont(" no drives");
pr_cont("\n");
return drives;
}
pr_cont("\n");
return -ENOMEM;
}
static int __init amiga_floppy_probe(struct platform_device *pdev)
{
int i, ret;
if (register_blkdev(FLOPPY_MAJOR,"fd"))
return -EBUSY;
ret = -ENOMEM;
raw_buf = amiga_chip_alloc(RAW_BUF_SIZE, "Floppy");
if (!raw_buf) {
printk("fd: cannot get chip mem buffer\n");
goto out_blkdev;
}
ret = -EBUSY;
if (request_irq(IRQ_AMIGA_DSKBLK, fd_block_done, 0, "floppy_dma", NULL)) {
printk("fd: cannot get irq for dma\n");
goto out_irq;
}
if (request_irq(IRQ_AMIGA_CIAA_TB, ms_isr, 0, "floppy_timer", NULL)) {
printk("fd: cannot get irq for timer\n");
goto out_irq2;
}
ret = -ENODEV;
if (fd_probe_drives() < 1) /* No usable drives */
goto out_probe;
/* initialize variables */
timer_setup(&motor_on_timer, motor_on_callback, 0);
motor_on_timer.expires = 0;
for (i = 0; i < FD_MAX_UNITS; i++) {
timer_setup(&motor_off_timer[i], fd_motor_off, 0);
motor_off_timer[i].expires = 0;
timer_setup(&flush_track_timer[i], flush_track_callback, 0);
flush_track_timer[i].expires = 0;
unit[i].track = -1;
}
timer_setup(&post_write_timer, post_write_callback, 0);
post_write_timer.expires = 0;
for (i = 0; i < 128; i++)
mfmdecode[i]=255;
for (i = 0; i < 16; i++)
mfmdecode[mfmencode[i]]=i;
/* make sure that disk DMA is enabled */
custom.dmacon = DMAF_SETCLR | DMAF_DISK;
/* init ms timer */
ciaa.crb = 8; /* one-shot, stop */
return 0;
out_probe:
free_irq(IRQ_AMIGA_CIAA_TB, NULL);
out_irq2:
free_irq(IRQ_AMIGA_DSKBLK, NULL);
out_irq:
amiga_chip_free(raw_buf);
out_blkdev:
unregister_blkdev(FLOPPY_MAJOR,"fd");
return ret;
}
static struct platform_driver amiga_floppy_driver = {
.driver = {
.name = "amiga-floppy",
},
};
static int __init amiga_floppy_init(void)
{
return platform_driver_probe(&amiga_floppy_driver, amiga_floppy_probe);
}
module_init(amiga_floppy_init);
#ifndef MODULE
static int __init amiga_floppy_setup (char *str)
{
int n;
if (!MACH_IS_AMIGA)
return 0;
if (!get_option(&str, &n))
return 0;
printk (KERN_INFO "amiflop: Setting default df0 to %x\n", n);
fd_def_df0 = n;
return 1;
}
__setup("floppy=", amiga_floppy_setup);
#endif
MODULE_ALIAS("platform:amiga-floppy");
| linux-master | drivers/block/amiflop.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 1993 by Theodore Ts'o.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/writeback.h>
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/splice.h>
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/ioprio.h>
#include <linux/blk-cgroup.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>
#include <linux/uaccess.h>
#include <linux/blk-mq.h>
#include <linux/spinlock.h>
#include <uapi/linux/loop.h>
/* Possible states of device */
enum {
Lo_unbound,
Lo_bound,
Lo_rundown,
Lo_deleting,
};
struct loop_func_table;
struct loop_device {
int lo_number;
loff_t lo_offset;
loff_t lo_sizelimit;
int lo_flags;
char lo_file_name[LO_NAME_SIZE];
struct file * lo_backing_file;
struct block_device *lo_device;
gfp_t old_gfp_mask;
spinlock_t lo_lock;
int lo_state;
spinlock_t lo_work_lock;
struct workqueue_struct *workqueue;
struct work_struct rootcg_work;
struct list_head rootcg_cmd_list;
struct list_head idle_worker_list;
struct rb_root worker_tree;
struct timer_list timer;
bool use_dio;
bool sysfs_inited;
struct request_queue *lo_queue;
struct blk_mq_tag_set tag_set;
struct gendisk *lo_disk;
struct mutex lo_mutex;
bool idr_visible;
};
struct loop_cmd {
struct list_head list_entry;
bool use_aio; /* use AIO interface to handle I/O */
atomic_t ref; /* only for aio */
long ret;
struct kiocb iocb;
struct bio_vec *bvec;
struct cgroup_subsys_state *blkcg_css;
struct cgroup_subsys_state *memcg_css;
};
#define LOOP_IDLE_WORKER_TIMEOUT (60 * HZ)
#define LOOP_DEFAULT_HW_Q_DEPTH 128
static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_ctl_mutex);
static DEFINE_MUTEX(loop_validate_mutex);
/**
* loop_global_lock_killable() - take locks for safe loop_validate_file() test
*
* @lo: struct loop_device
* @global: true if @lo is about to bind another "struct loop_device", false otherwise
*
* Returns 0 on success, -EINTR otherwise.
*
* Since loop_validate_file() traverses other "struct loop_device" instances
* when is_loop_device() is true, we need a global lock to serialize concurrent
* loop_configure()/loop_change_fd()/__loop_clr_fd() calls.
*/
static int loop_global_lock_killable(struct loop_device *lo, bool global)
{
int err;
if (global) {
err = mutex_lock_killable(&loop_validate_mutex);
if (err)
return err;
}
err = mutex_lock_killable(&lo->lo_mutex);
if (err && global)
mutex_unlock(&loop_validate_mutex);
return err;
}
/**
* loop_global_unlock() - release locks taken by loop_global_lock_killable()
*
* @lo: struct loop_device
* @global: true if @lo was about to bind another "struct loop_device", false otherwise
*/
static void loop_global_unlock(struct loop_device *lo, bool global)
{
mutex_unlock(&lo->lo_mutex);
if (global)
mutex_unlock(&loop_validate_mutex);
}
static int max_part;
static int part_shift;
static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
{
loff_t loopsize;
/* Compute loopsize in bytes */
loopsize = i_size_read(file->f_mapping->host);
if (offset > 0)
loopsize -= offset;
/* offset is beyond i_size, weird but possible */
if (loopsize < 0)
return 0;
if (sizelimit > 0 && sizelimit < loopsize)
loopsize = sizelimit;
/*
* Unfortunately, if we want to do I/O on the device,
* the number of 512-byte sectors has to fit into a sector_t.
*/
return loopsize >> 9;
}
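/*
 * Example (editor's illustration): a 1 GiB backing file with a 4096
 * byte lo_offset and no sizelimit yields
 * (1073741824 - 4096) >> 9 == 2097144 512-byte sectors.
 */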
static loff_t get_loop_size(struct loop_device *lo, struct file *file)
{
return get_size(lo->lo_offset, lo->lo_sizelimit, file);
}
static void __loop_update_dio(struct loop_device *lo, bool dio)
{
struct file *file = lo->lo_backing_file;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
unsigned short sb_bsize = 0;
unsigned dio_align = 0;
bool use_dio;
if (inode->i_sb->s_bdev) {
sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
dio_align = sb_bsize - 1;
}
/*
* We support direct I/O only if lo_offset is aligned with the
* logical I/O size of backing device, and the logical block
* size of loop is bigger than the backing device's.
*
* TODO: the above condition may be relaxed in the future, and direct
* I/O may then be switched at runtime, because most requests in sane
* applications should be PAGE_SIZE aligned
*/
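/*
 * Editor's illustration: with a backing filesystem on a device using
 * 4096-byte logical blocks, direct I/O stays enabled only while the
 * loop device's logical block size is at least 4096 and lo_offset is
 * a multiple of 4096.
 */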
if (dio) {
if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
!(lo->lo_offset & dio_align) &&
(file->f_mode & FMODE_CAN_ODIRECT))
use_dio = true;
else
use_dio = false;
} else {
use_dio = false;
}
if (lo->use_dio == use_dio)
return;
/* flush dirty pages before changing direct IO */
vfs_fsync(file, 0);
/*
* LO_FLAGS_DIRECT_IO is handled like LO_FLAGS_READ_ONLY: both are
* set by the kernel, and losetup picks them up via
* ioctl(LOOP_GET_STATUS)
*/
if (lo->lo_state == Lo_bound)
blk_mq_freeze_queue(lo->lo_queue);
lo->use_dio = use_dio;
if (use_dio) {
blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);
lo->lo_flags |= LO_FLAGS_DIRECT_IO;
} else {
blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
}
if (lo->lo_state == Lo_bound)
blk_mq_unfreeze_queue(lo->lo_queue);
}
/**
* loop_set_size() - sets device size and notifies userspace
* @lo: struct loop_device to set the size for
* @size: new size of the loop device
*
* Callers must validate that the size passed into this function fits into
* a sector_t, e.g. using loop_validate_size()
*/
static void loop_set_size(struct loop_device *lo, loff_t size)
{
if (!set_capacity_and_notify(lo->lo_disk, size))
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
}
static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
struct iov_iter i;
ssize_t bw;
iov_iter_bvec(&i, ITER_SOURCE, bvec, 1, bvec->bv_len);
file_start_write(file);
bw = vfs_iter_write(file, &i, ppos, 0);
file_end_write(file);
if (likely(bw == bvec->bv_len))
return 0;
printk_ratelimited(KERN_ERR
"loop: Write error at byte offset %llu, length %i.\n",
(unsigned long long)*ppos, bvec->bv_len);
if (bw >= 0)
bw = -EIO;
return bw;
}
static int lo_write_simple(struct loop_device *lo, struct request *rq,
loff_t pos)
{
struct bio_vec bvec;
struct req_iterator iter;
int ret = 0;
rq_for_each_segment(bvec, rq, iter) {
ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
if (ret < 0)
break;
cond_resched();
}
return ret;
}
static int lo_read_simple(struct loop_device *lo, struct request *rq,
loff_t pos)
{
struct bio_vec bvec;
struct req_iterator iter;
struct iov_iter i;
ssize_t len;
rq_for_each_segment(bvec, rq, iter) {
iov_iter_bvec(&i, ITER_DEST, &bvec, 1, bvec.bv_len);
len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
if (len < 0)
return len;
flush_dcache_page(bvec.bv_page);
if (len != bvec.bv_len) {
struct bio *bio;
__rq_for_each_bio(bio, rq)
zero_fill_bio(bio);
break;
}
cond_resched();
}
return 0;
}
static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
int mode)
{
/*
* We use fallocate to manipulate the space mappings used by the image
* a.k.a. discard/zerorange.
*/
struct file *file = lo->lo_backing_file;
int ret;
mode |= FALLOC_FL_KEEP_SIZE;
if (!bdev_max_discard_sectors(lo->lo_device))
return -EOPNOTSUPP;
ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
return -EIO;
return ret;
}
static int lo_req_flush(struct loop_device *lo, struct request *rq)
{
int ret = vfs_fsync(lo->lo_backing_file, 0);
if (unlikely(ret && ret != -EINVAL))
ret = -EIO;
return ret;
}
static void lo_complete_rq(struct request *rq)
{
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
blk_status_t ret = BLK_STS_OK;
if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
req_op(rq) != REQ_OP_READ) {
if (cmd->ret < 0)
ret = errno_to_blk_status(cmd->ret);
goto end_io;
}
/*
* Short READ - if we got some data, advance our request and
* retry it. If we got no data, end the rest with EIO.
*/
if (cmd->ret) {
blk_update_request(rq, BLK_STS_OK, cmd->ret);
cmd->ret = 0;
blk_mq_requeue_request(rq, true);
} else {
if (cmd->use_aio) {
struct bio *bio = rq->bio;
while (bio) {
zero_fill_bio(bio);
bio = bio->bi_next;
}
}
ret = BLK_STS_IOERR;
end_io:
blk_mq_end_request(rq, ret);
}
}
static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
{
struct request *rq = blk_mq_rq_from_pdu(cmd);
if (!atomic_dec_and_test(&cmd->ref))
return;
kfree(cmd->bvec);
cmd->bvec = NULL;
if (likely(!blk_should_fake_timeout(rq->q)))
blk_mq_complete_request(rq);
}
static void lo_rw_aio_complete(struct kiocb *iocb, long ret)
{
struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);
cmd->ret = ret;
lo_rw_aio_do_completion(cmd);
}
static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
loff_t pos, int rw)
{
struct iov_iter iter;
struct req_iterator rq_iter;
struct bio_vec *bvec;
struct request *rq = blk_mq_rq_from_pdu(cmd);
struct bio *bio = rq->bio;
struct file *file = lo->lo_backing_file;
struct bio_vec tmp;
unsigned int offset;
int nr_bvec = 0;
int ret;
rq_for_each_bvec(tmp, rq, rq_iter)
nr_bvec++;
if (rq->bio != rq->biotail) {
bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
GFP_NOIO);
if (!bvec)
return -EIO;
cmd->bvec = bvec;
/*
* The bios of the request may be started from the middle of
* the 'bvec' because of bio splitting, so we can't directly
* copy bio->bi_iov_vec to new bvec. The rq_for_each_bvec
* API will take care of all details for us.
*/
rq_for_each_bvec(tmp, rq, rq_iter) {
*bvec = tmp;
bvec++;
}
bvec = cmd->bvec;
offset = 0;
} else {
/*
* Same here, this bio may be started from the middle of the
* 'bvec' because of bio splitting, so offset from the bvec
* must be passed to iov iterator
*/
offset = bio->bi_iter.bi_bvec_done;
bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
}
atomic_set(&cmd->ref, 2);
iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
iter.iov_offset = offset;
cmd->iocb.ki_pos = pos;
cmd->iocb.ki_filp = file;
cmd->iocb.ki_complete = lo_rw_aio_complete;
cmd->iocb.ki_flags = IOCB_DIRECT;
cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
if (rw == ITER_SOURCE)
ret = call_write_iter(file, &cmd->iocb, &iter);
else
ret = call_read_iter(file, &cmd->iocb, &iter);
lo_rw_aio_do_completion(cmd);
if (ret != -EIOCBQUEUED)
lo_rw_aio_complete(&cmd->iocb, ret);
return 0;
}
static int do_req_filebacked(struct loop_device *lo, struct request *rq)
{
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
/*
* lo_write_simple and lo_read_simple should ideally be handled by
* an io-submit style function like lo_rw_aio(). One blocker is
* that lo_read_simple() needs to call flush_dcache_page() after
* each page is written by the kernel, and that isn't easy to do
* in an io-submit style function that submits all segments of
* the request at one time. Direct read I/O doesn't need to run
* flush_dcache_page().
*/
switch (req_op(rq)) {
case REQ_OP_FLUSH:
return lo_req_flush(lo, rq);
case REQ_OP_WRITE_ZEROES:
/*
* If the caller doesn't want deallocation, call zeroout to
* write zeroes the range. Otherwise, punch them out.
*/
return lo_fallocate(lo, rq, pos,
(rq->cmd_flags & REQ_NOUNMAP) ?
FALLOC_FL_ZERO_RANGE :
FALLOC_FL_PUNCH_HOLE);
case REQ_OP_DISCARD:
return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
case REQ_OP_WRITE:
if (cmd->use_aio)
return lo_rw_aio(lo, cmd, pos, ITER_SOURCE);
else
return lo_write_simple(lo, rq, pos);
case REQ_OP_READ:
if (cmd->use_aio)
return lo_rw_aio(lo, cmd, pos, ITER_DEST);
else
return lo_read_simple(lo, rq, pos);
default:
WARN_ON_ONCE(1);
return -EIO;
}
}
static inline void loop_update_dio(struct loop_device *lo)
{
__loop_update_dio(lo, (lo->lo_backing_file->f_flags & O_DIRECT) |
lo->use_dio);
}
static void loop_reread_partitions(struct loop_device *lo)
{
int rc;
mutex_lock(&lo->lo_disk->open_mutex);
rc = bdev_disk_changed(lo->lo_disk, false);
mutex_unlock(&lo->lo_disk->open_mutex);
if (rc)
pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
__func__, lo->lo_number, lo->lo_file_name, rc);
}
static inline int is_loop_device(struct file *file)
{
struct inode *i = file->f_mapping->host;
return i && S_ISBLK(i->i_mode) && imajor(i) == LOOP_MAJOR;
}
static int loop_validate_file(struct file *file, struct block_device *bdev)
{
struct inode *inode = file->f_mapping->host;
struct file *f = file;
/* Avoid recursion */
while (is_loop_device(f)) {
struct loop_device *l;
lockdep_assert_held(&loop_validate_mutex);
if (f->f_mapping->host->i_rdev == bdev->bd_dev)
return -EBADF;
l = I_BDEV(f->f_mapping->host)->bd_disk->private_data;
if (l->lo_state != Lo_bound)
return -EINVAL;
/* Order wrt setting lo->lo_backing_file in loop_configure(). */
rmb();
f = l->lo_backing_file;
}
if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
return -EINVAL;
return 0;
}
/*
* loop_change_fd switches the backing store of a loopback device to
* a new file. This is useful for operating system installers to free up
* the original file and in High Availability environments to switch to
* an alternative location for the content in case of server meltdown.
* This can only work if the loop device is used read-only, and if the
* new backing store is the same size and type as the old backing store.
*/
static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
unsigned int arg)
{
struct file *file = fget(arg);
struct file *old_file;
int error;
bool partscan;
bool is_loop;
if (!file)
return -EBADF;
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
is_loop = is_loop_device(file);
error = loop_global_lock_killable(lo, is_loop);
if (error)
goto out_putf;
error = -ENXIO;
if (lo->lo_state != Lo_bound)
goto out_err;
/* the loop device has to be read-only */
error = -EINVAL;
if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
goto out_err;
error = loop_validate_file(file, bdev);
if (error)
goto out_err;
old_file = lo->lo_backing_file;
error = -EINVAL;
/* size of the new backing store needs to be the same */
if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
goto out_err;
/* and ... switch */
disk_force_media_change(lo->lo_disk);
blk_mq_freeze_queue(lo->lo_queue);
mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
lo->lo_backing_file = file;
lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
mapping_set_gfp_mask(file->f_mapping,
lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
loop_update_dio(lo);
blk_mq_unfreeze_queue(lo->lo_queue);
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
loop_global_unlock(lo, is_loop);
/*
* Flush loop_validate_file() before fput(), for l->lo_backing_file
* might be pointing at old_file which might be the last reference.
*/
if (!is_loop) {
mutex_lock(&loop_validate_mutex);
mutex_unlock(&loop_validate_mutex);
}
/*
* We must drop file reference outside of lo_mutex as dropping
* the file ref can take open_mutex which creates circular locking
* dependency.
*/
fput(old_file);
if (partscan)
loop_reread_partitions(lo);
error = 0;
done:
/* enable and uncork uevent now that we are done */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
return error;
out_err:
loop_global_unlock(lo, is_loop);
out_putf:
fput(file);
goto done;
}
/* loop sysfs attributes */
static ssize_t loop_attr_show(struct device *dev, char *page,
ssize_t (*callback)(struct loop_device *, char *))
{
struct gendisk *disk = dev_to_disk(dev);
struct loop_device *lo = disk->private_data;
return callback(lo, page);
}
#define LOOP_ATTR_RO(_name) \
static ssize_t loop_attr_##_name##_show(struct loop_device *, char *); \
static ssize_t loop_attr_do_show_##_name(struct device *d, \
struct device_attribute *attr, char *b) \
{ \
return loop_attr_show(d, b, loop_attr_##_name##_show); \
} \
static struct device_attribute loop_attr_##_name = \
__ATTR(_name, 0444, loop_attr_do_show_##_name, NULL);
static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
{
ssize_t ret;
char *p = NULL;
spin_lock_irq(&lo->lo_lock);
if (lo->lo_backing_file)
p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1);
spin_unlock_irq(&lo->lo_lock);
if (IS_ERR_OR_NULL(p))
ret = PTR_ERR(p);
else {
ret = strlen(p);
memmove(buf, p, ret);
buf[ret++] = '\n';
buf[ret] = 0;
}
return ret;
}
static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
{
return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_offset);
}
static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
{
return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
}
static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
{
int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);
return sysfs_emit(buf, "%s\n", autoclear ? "1" : "0");
}
static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
{
int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);
return sysfs_emit(buf, "%s\n", partscan ? "1" : "0");
}
static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
{
int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);
return sysfs_emit(buf, "%s\n", dio ? "1" : "0");
}
LOOP_ATTR_RO(backing_file);
LOOP_ATTR_RO(offset);
LOOP_ATTR_RO(sizelimit);
LOOP_ATTR_RO(autoclear);
LOOP_ATTR_RO(partscan);
LOOP_ATTR_RO(dio);
static struct attribute *loop_attrs[] = {
&loop_attr_backing_file.attr,
&loop_attr_offset.attr,
&loop_attr_sizelimit.attr,
&loop_attr_autoclear.attr,
&loop_attr_partscan.attr,
&loop_attr_dio.attr,
NULL,
};
static struct attribute_group loop_attribute_group = {
.name = "loop",
.attrs = loop_attrs,
};
static void loop_sysfs_init(struct loop_device *lo)
{
lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
&loop_attribute_group);
}
static void loop_sysfs_exit(struct loop_device *lo)
{
if (lo->sysfs_inited)
sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
&loop_attribute_group);
}
static void loop_config_discard(struct loop_device *lo)
{
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
struct request_queue *q = lo->lo_queue;
u32 granularity, max_discard_sectors;
/*
* If the backing device is a block device, mirror its zeroing
* capability. Set the discard sectors to the block device's zeroing
* capabilities because loop discards result in blkdev_issue_zeroout(),
* not blkdev_issue_discard(). This maintains consistent behavior with
* file-backed loop devices: discarded regions read back as zero.
*/
if (S_ISBLK(inode->i_mode)) {
struct request_queue *backingq = bdev_get_queue(I_BDEV(inode));
max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
granularity = bdev_discard_granularity(I_BDEV(inode)) ?:
queue_physical_block_size(backingq);
/*
* We use punch hole to reclaim the free space used by the
* image a.k.a. discard.
*/
} else if (!file->f_op->fallocate) {
max_discard_sectors = 0;
granularity = 0;
} else {
struct kstatfs sbuf;
max_discard_sectors = UINT_MAX >> 9;
if (!vfs_statfs(&file->f_path, &sbuf))
granularity = sbuf.f_bsize;
else
max_discard_sectors = 0;
}
if (max_discard_sectors) {
q->limits.discard_granularity = granularity;
blk_queue_max_discard_sectors(q, max_discard_sectors);
blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
} else {
q->limits.discard_granularity = 0;
blk_queue_max_discard_sectors(q, 0);
blk_queue_max_write_zeroes_sectors(q, 0);
}
}
struct loop_worker {
struct rb_node rb_node;
struct work_struct work;
struct list_head cmd_list;
struct list_head idle_list;
struct loop_device *lo;
struct cgroup_subsys_state *blkcg_css;
unsigned long last_ran_at;
};
static void loop_workfn(struct work_struct *work);
#ifdef CONFIG_BLK_CGROUP
static inline int queue_on_root_worker(struct cgroup_subsys_state *css)
{
return !css || css == blkcg_root_css;
}
#else
static inline int queue_on_root_worker(struct cgroup_subsys_state *css)
{
return !css;
}
#endif
static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
{
struct rb_node **node, *parent = NULL;
struct loop_worker *cur_worker, *worker = NULL;
struct work_struct *work;
struct list_head *cmd_list;
spin_lock_irq(&lo->lo_work_lock);
if (queue_on_root_worker(cmd->blkcg_css))
goto queue_work;
node = &lo->worker_tree.rb_node;
while (*node) {
parent = *node;
cur_worker = container_of(*node, struct loop_worker, rb_node);
if (cur_worker->blkcg_css == cmd->blkcg_css) {
worker = cur_worker;
break;
} else if ((long)cur_worker->blkcg_css < (long)cmd->blkcg_css) {
node = &(*node)->rb_left;
} else {
node = &(*node)->rb_right;
}
}
if (worker)
goto queue_work;
worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT | __GFP_NOWARN);
/*
* In the event we cannot allocate a worker, just queue on the
* rootcg worker and issue the I/O as the rootcg
*/
if (!worker) {
cmd->blkcg_css = NULL;
if (cmd->memcg_css)
css_put(cmd->memcg_css);
cmd->memcg_css = NULL;
goto queue_work;
}
worker->blkcg_css = cmd->blkcg_css;
css_get(worker->blkcg_css);
INIT_WORK(&worker->work, loop_workfn);
INIT_LIST_HEAD(&worker->cmd_list);
INIT_LIST_HEAD(&worker->idle_list);
worker->lo = lo;
rb_link_node(&worker->rb_node, parent, node);
rb_insert_color(&worker->rb_node, &lo->worker_tree);
queue_work:
if (worker) {
/*
* We need to remove from the idle list here while
* holding the lock so that the idle timer doesn't
* free the worker
*/
if (!list_empty(&worker->idle_list))
list_del_init(&worker->idle_list);
work = &worker->work;
cmd_list = &worker->cmd_list;
} else {
work = &lo->rootcg_work;
cmd_list = &lo->rootcg_cmd_list;
}
list_add_tail(&cmd->list_entry, cmd_list);
queue_work(lo->workqueue, work);
spin_unlock_irq(&lo->lo_work_lock);
}
static void loop_set_timer(struct loop_device *lo)
{
timer_reduce(&lo->timer, jiffies + LOOP_IDLE_WORKER_TIMEOUT);
}
static void loop_free_idle_workers(struct loop_device *lo, bool delete_all)
{
struct loop_worker *pos, *worker;
spin_lock_irq(&lo->lo_work_lock);
list_for_each_entry_safe(worker, pos, &lo->idle_worker_list,
idle_list) {
if (!delete_all &&
time_is_after_jiffies(worker->last_ran_at +
LOOP_IDLE_WORKER_TIMEOUT))
break;
list_del(&worker->idle_list);
rb_erase(&worker->rb_node, &lo->worker_tree);
css_put(worker->blkcg_css);
kfree(worker);
}
if (!list_empty(&lo->idle_worker_list))
loop_set_timer(lo);
spin_unlock_irq(&lo->lo_work_lock);
}
static void loop_free_idle_workers_timer(struct timer_list *timer)
{
struct loop_device *lo = container_of(timer, struct loop_device, timer);
return loop_free_idle_workers(lo, false);
}
static void loop_update_rotational(struct loop_device *lo)
{
struct file *file = lo->lo_backing_file;
struct inode *file_inode = file->f_mapping->host;
struct block_device *file_bdev = file_inode->i_sb->s_bdev;
struct request_queue *q = lo->lo_queue;
bool nonrot = true;
/* not all filesystems (e.g. tmpfs) have a sb->s_bdev */
if (file_bdev)
nonrot = bdev_nonrot(file_bdev);
if (nonrot)
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
else
blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
}
/**
* loop_set_status_from_info - configure device from loop_info
* @lo: struct loop_device to configure
* @info: struct loop_info64 to configure the device with
*
* Configures the loop device parameters according to the passed
* in loop_info64 configuration.
*/
static int
loop_set_status_from_info(struct loop_device *lo,
const struct loop_info64 *info)
{
if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
return -EINVAL;
switch (info->lo_encrypt_type) {
case LO_CRYPT_NONE:
break;
case LO_CRYPT_XOR:
pr_warn("support for the xor transformation has been removed.\n");
return -EINVAL;
case LO_CRYPT_CRYPTOAPI:
pr_warn("support for cryptoloop has been removed. Use dm-crypt instead.\n");
return -EINVAL;
default:
return -EINVAL;
}
/* Avoid assigning overflow values */
if (info->lo_offset > LLONG_MAX || info->lo_sizelimit > LLONG_MAX)
return -EOVERFLOW;
lo->lo_offset = info->lo_offset;
lo->lo_sizelimit = info->lo_sizelimit;
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
lo->lo_file_name[LO_NAME_SIZE-1] = 0;
lo->lo_flags = info->lo_flags;
return 0;
}
static int loop_configure(struct loop_device *lo, blk_mode_t mode,
struct block_device *bdev,
const struct loop_config *config)
{
struct file *file = fget(config->fd);
struct inode *inode;
struct address_space *mapping;
int error;
loff_t size;
bool partscan;
unsigned short bsize;
bool is_loop;
if (!file)
return -EBADF;
is_loop = is_loop_device(file);
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
/*
* If we don't hold exclusive handle for the device, upgrade to it
* here to avoid changing device under exclusive owner.
*/
if (!(mode & BLK_OPEN_EXCL)) {
error = bd_prepare_to_claim(bdev, loop_configure, NULL);
if (error)
goto out_putf;
}
error = loop_global_lock_killable(lo, is_loop);
if (error)
goto out_bdev;
error = -EBUSY;
if (lo->lo_state != Lo_unbound)
goto out_unlock;
error = loop_validate_file(file, bdev);
if (error)
goto out_unlock;
mapping = file->f_mapping;
inode = mapping->host;
if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
error = -EINVAL;
goto out_unlock;
}
if (config->block_size) {
error = blk_validate_block_size(config->block_size);
if (error)
goto out_unlock;
}
error = loop_set_status_from_info(lo, &config->info);
if (error)
goto out_unlock;
if (!(file->f_mode & FMODE_WRITE) || !(mode & BLK_OPEN_WRITE) ||
!file->f_op->write_iter)
lo->lo_flags |= LO_FLAGS_READ_ONLY;
if (!lo->workqueue) {
lo->workqueue = alloc_workqueue("loop%d",
WQ_UNBOUND | WQ_FREEZABLE,
0, lo->lo_number);
if (!lo->workqueue) {
error = -ENOMEM;
goto out_unlock;
}
}
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
disk_force_media_change(lo->lo_disk);
set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
lo->lo_device = bdev;
lo->lo_backing_file = file;
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
if (!(lo->lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
blk_queue_write_cache(lo->lo_queue, true, false);
if (config->block_size)
bsize = config->block_size;
else if ((lo->lo_backing_file->f_flags & O_DIRECT) && inode->i_sb->s_bdev)
/* In case of direct I/O, match underlying block size */
bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
else
bsize = 512;
blk_queue_logical_block_size(lo->lo_queue, bsize);
blk_queue_physical_block_size(lo->lo_queue, bsize);
blk_queue_io_min(lo->lo_queue, bsize);
loop_config_discard(lo);
loop_update_rotational(lo);
loop_update_dio(lo);
loop_sysfs_init(lo);
size = get_loop_size(lo, file);
loop_set_size(lo, size);
/* Order wrt reading lo_state in loop_validate_file(). */
wmb();
lo->lo_state = Lo_bound;
if (part_shift)
lo->lo_flags |= LO_FLAGS_PARTSCAN;
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
/* enable and uncork uevent now that we are done */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
loop_global_unlock(lo, is_loop);
if (partscan)
loop_reread_partitions(lo);
if (!(mode & BLK_OPEN_EXCL))
bd_abort_claiming(bdev, loop_configure);
return 0;
out_unlock:
loop_global_unlock(lo, is_loop);
out_bdev:
if (!(mode & BLK_OPEN_EXCL))
bd_abort_claiming(bdev, loop_configure);
out_putf:
fput(file);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
return error;
}
static void __loop_clr_fd(struct loop_device *lo, bool release)
{
struct file *filp;
gfp_t gfp = lo->old_gfp_mask;
if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags))
blk_queue_write_cache(lo->lo_queue, false, false);
/*
* Freeze the request queue when unbinding on a live file descriptor and
* thus an open device. When called from ->release we are guaranteed
* that there is no I/O in progress already.
*/
if (!release)
blk_mq_freeze_queue(lo->lo_queue);
spin_lock_irq(&lo->lo_lock);
filp = lo->lo_backing_file;
lo->lo_backing_file = NULL;
spin_unlock_irq(&lo->lo_lock);
lo->lo_device = NULL;
lo->lo_offset = 0;
lo->lo_sizelimit = 0;
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
blk_queue_logical_block_size(lo->lo_queue, 512);
blk_queue_physical_block_size(lo->lo_queue, 512);
blk_queue_io_min(lo->lo_queue, 512);
invalidate_disk(lo->lo_disk);
loop_sysfs_exit(lo);
/* let user-space know about this change */
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
mapping_set_gfp_mask(filp->f_mapping, gfp);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
if (!release)
blk_mq_unfreeze_queue(lo->lo_queue);
disk_force_media_change(lo->lo_disk);
if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
int err;
/*
* open_mutex has been held already in release path, so don't
* acquire it if this function is called in such case.
*
* If the reread partition isn't from release path, lo_refcnt
* must be at least one and it can only become zero when the
* current holder is released.
*/
if (!release)
mutex_lock(&lo->lo_disk->open_mutex);
err = bdev_disk_changed(lo->lo_disk, false);
if (!release)
mutex_unlock(&lo->lo_disk->open_mutex);
if (err)
pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
__func__, lo->lo_number, err);
/* Device is gone, no point in returning error */
}
/*
* lo->lo_state is set to Lo_unbound here after above partscan has
* finished. There cannot be anybody else entering __loop_clr_fd() as
* Lo_rundown state protects us from all the other places trying to
* change the 'lo' device.
*/
lo->lo_flags = 0;
if (!part_shift)
set_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
mutex_lock(&lo->lo_mutex);
lo->lo_state = Lo_unbound;
mutex_unlock(&lo->lo_mutex);
/*
* Need not hold lo_mutex to fput backing file. Calling fput holding
* lo_mutex triggers a circular lock dependency possibility warning as
* fput can take open_mutex which is usually taken before lo_mutex.
*/
fput(filp);
}
static int loop_clr_fd(struct loop_device *lo)
{
int err;
/*
* Since lo_ioctl() is called without locks held, it is possible that
* loop_configure()/loop_change_fd() and loop_clr_fd() run in parallel.
*
* Therefore, use global lock when setting Lo_rundown state in order to
* make sure that loop_validate_file() will fail if the "struct file"
* which loop_configure()/loop_change_fd() found via fget() was this
* loop device.
*/
err = loop_global_lock_killable(lo, true);
if (err)
return err;
if (lo->lo_state != Lo_bound) {
loop_global_unlock(lo, true);
return -ENXIO;
}
/*
 * If we've explicitly asked to tear down the loop device, and it still
 * has an elevated reference count, set it for auto-teardown when the
 * last reference goes away. This stops $!~#$@ udev from preventing
 * teardown because it decided that it needs to run blkid on the
 * loopback device whenever one appears. xfstests is notorious for
 * failing tests because blkid via udev races with a "losetup <dev>;
 * mkfs; losetup -d <dev>" sequence, causing the losetup -d command to
 * fail with EBUSY.
 */
if (disk_openers(lo->lo_disk) > 1) {
lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
loop_global_unlock(lo, true);
return 0;
}
lo->lo_state = Lo_rundown;
loop_global_unlock(lo, true);
__loop_clr_fd(lo, false);
return 0;
}
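/*
 * Illustrative userspace view of the autoclear path above (a hedged
 * sketch, not part of this driver): if another opener still holds the
 * device, LOOP_CLR_FD succeeds immediately but merely arms
 * LO_FLAGS_AUTOCLEAR, and the real teardown happens on the last close:
 *
 *	int fd = open("/dev/loop0", O_RDWR);
 *	ioctl(fd, LOOP_CLR_FD, 0);	// returns 0 even if udev holds the dev
 *	close(fd);			// teardown once all openers are gone
 */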
static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
int err;
int prev_lo_flags;
bool partscan = false;
bool size_changed = false;
err = mutex_lock_killable(&lo->lo_mutex);
if (err)
return err;
if (lo->lo_state != Lo_bound) {
err = -ENXIO;
goto out_unlock;
}
if (lo->lo_offset != info->lo_offset ||
lo->lo_sizelimit != info->lo_sizelimit) {
size_changed = true;
sync_blockdev(lo->lo_device);
invalidate_bdev(lo->lo_device);
}
/* I/O needs to be drained during the transfer transition */
blk_mq_freeze_queue(lo->lo_queue);
prev_lo_flags = lo->lo_flags;
err = loop_set_status_from_info(lo, info);
if (err)
goto out_unfreeze;
/* Mask out flags that can't be set using LOOP_SET_STATUS. */
lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;
/* For those flags, use the previous values instead */
lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS;
/* For flags that can't be cleared, use previous values too */
lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
if (size_changed) {
loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
lo->lo_backing_file);
loop_set_size(lo, new_size);
}
loop_config_discard(lo);
/* update dio if lo_offset or transfer is changed */
__loop_update_dio(lo, lo->use_dio);
out_unfreeze:
blk_mq_unfreeze_queue(lo->lo_queue);
if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
!(prev_lo_flags & LO_FLAGS_PARTSCAN)) {
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
partscan = true;
}
out_unlock:
mutex_unlock(&lo->lo_mutex);
if (partscan)
loop_reread_partitions(lo);
return err;
}
static int
loop_get_status(struct loop_device *lo, struct loop_info64 *info)
{
struct path path;
struct kstat stat;
int ret;
ret = mutex_lock_killable(&lo->lo_mutex);
if (ret)
return ret;
if (lo->lo_state != Lo_bound) {
mutex_unlock(&lo->lo_mutex);
return -ENXIO;
}
memset(info, 0, sizeof(*info));
info->lo_number = lo->lo_number;
info->lo_offset = lo->lo_offset;
info->lo_sizelimit = lo->lo_sizelimit;
info->lo_flags = lo->lo_flags;
memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
/* Drop lo_mutex while we call into the filesystem. */
path = lo->lo_backing_file->f_path;
path_get(&path);
mutex_unlock(&lo->lo_mutex);
ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
if (!ret) {
info->lo_device = huge_encode_dev(stat.dev);
info->lo_inode = stat.ino;
info->lo_rdevice = huge_encode_dev(stat.rdev);
}
path_put(&path);
return ret;
}
static void
loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
{
memset(info64, 0, sizeof(*info64));
info64->lo_number = info->lo_number;
info64->lo_device = info->lo_device;
info64->lo_inode = info->lo_inode;
info64->lo_rdevice = info->lo_rdevice;
info64->lo_offset = info->lo_offset;
info64->lo_sizelimit = 0;
info64->lo_flags = info->lo_flags;
memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
}
static int
loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
{
memset(info, 0, sizeof(*info));
info->lo_number = info64->lo_number;
info->lo_device = info64->lo_device;
info->lo_inode = info64->lo_inode;
info->lo_rdevice = info64->lo_rdevice;
info->lo_offset = info64->lo_offset;
info->lo_flags = info64->lo_flags;
memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
/* error in case values were truncated */
if (info->lo_device != info64->lo_device ||
info->lo_rdevice != info64->lo_rdevice ||
info->lo_inode != info64->lo_inode ||
info->lo_offset != info64->lo_offset)
return -EOVERFLOW;
return 0;
}
static int
loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
{
struct loop_info info;
struct loop_info64 info64;
if (copy_from_user(&info, arg, sizeof(struct loop_info)))
return -EFAULT;
loop_info64_from_old(&info, &info64);
return loop_set_status(lo, &info64);
}
static int
loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
{
struct loop_info64 info64;
if (copy_from_user(&info64, arg, sizeof(struct loop_info64)))
return -EFAULT;
return loop_set_status(lo, &info64);
}
static int
loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg)
{
struct loop_info info;
struct loop_info64 info64;
int err;
if (!arg)
return -EINVAL;
err = loop_get_status(lo, &info64);
if (!err)
err = loop_info64_to_old(&info64, &info);
if (!err && copy_to_user(arg, &info, sizeof(info)))
err = -EFAULT;
return err;
}
static int
loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg)
{
struct loop_info64 info64;
int err;
if (!arg)
return -EINVAL;
err = loop_get_status(lo, &info64);
if (!err && copy_to_user(arg, &info64, sizeof(info64)))
err = -EFAULT;
return err;
}
static int loop_set_capacity(struct loop_device *lo)
{
loff_t size;
if (unlikely(lo->lo_state != Lo_bound))
return -ENXIO;
size = get_loop_size(lo, lo->lo_backing_file);
loop_set_size(lo, size);
return 0;
}
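/*
 * Userspace sketch (illustrative, not part of this file): after growing
 * the backing file, LOOP_SET_CAPACITY makes the block device pick up the
 * new size. The ioctl argument is ignored:
 *
 *	ftruncate(backing_fd, new_size);
 *	ioctl(loop_fd, LOOP_SET_CAPACITY, 0);
 */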
static int loop_set_dio(struct loop_device *lo, unsigned long arg)
{
int error = -ENXIO;
if (lo->lo_state != Lo_bound)
goto out;
__loop_update_dio(lo, !!arg);
if (lo->use_dio == !!arg)
return 0;
error = -EINVAL;
out:
return error;
}
static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
{
int err = 0;
if (lo->lo_state != Lo_bound)
return -ENXIO;
err = blk_validate_block_size(arg);
if (err)
return err;
if (lo->lo_queue->limits.logical_block_size == arg)
return 0;
sync_blockdev(lo->lo_device);
invalidate_bdev(lo->lo_device);
blk_mq_freeze_queue(lo->lo_queue);
blk_queue_logical_block_size(lo->lo_queue, arg);
blk_queue_physical_block_size(lo->lo_queue, arg);
blk_queue_io_min(lo->lo_queue, arg);
loop_update_dio(lo);
blk_mq_unfreeze_queue(lo->lo_queue);
return err;
}
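/*
 * Example (hedged sketch): switching a bound device to 4096-byte logical
 * blocks. The size must pass blk_validate_block_size(), i.e. be a power
 * of two between 512 bytes and the page size:
 *
 *	ioctl(loop_fd, LOOP_SET_BLOCK_SIZE, 4096);
 */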
static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
unsigned long arg)
{
int err;
err = mutex_lock_killable(&lo->lo_mutex);
if (err)
return err;
switch (cmd) {
case LOOP_SET_CAPACITY:
err = loop_set_capacity(lo);
break;
case LOOP_SET_DIRECT_IO:
err = loop_set_dio(lo, arg);
break;
case LOOP_SET_BLOCK_SIZE:
err = loop_set_block_size(lo, arg);
break;
default:
err = -EINVAL;
}
mutex_unlock(&lo->lo_mutex);
return err;
}
static int lo_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
struct loop_device *lo = bdev->bd_disk->private_data;
void __user *argp = (void __user *) arg;
int err;
switch (cmd) {
case LOOP_SET_FD: {
/*
 * Legacy case - pass in a zeroed out struct loop_config with
 * only the file descriptor set, which corresponds to the
 * default parameters we'd have used otherwise.
 */
struct loop_config config;
memset(&config, 0, sizeof(config));
config.fd = arg;
return loop_configure(lo, mode, bdev, &config);
}
case LOOP_CONFIGURE: {
struct loop_config config;
if (copy_from_user(&config, argp, sizeof(config)))
return -EFAULT;
return loop_configure(lo, mode, bdev, &config);
}
case LOOP_CHANGE_FD:
return loop_change_fd(lo, bdev, arg);
case LOOP_CLR_FD:
return loop_clr_fd(lo);
case LOOP_SET_STATUS:
err = -EPERM;
if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
err = loop_set_status_old(lo, argp);
break;
case LOOP_GET_STATUS:
return loop_get_status_old(lo, argp);
case LOOP_SET_STATUS64:
err = -EPERM;
if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
err = loop_set_status64(lo, argp);
break;
case LOOP_GET_STATUS64:
return loop_get_status64(lo, argp);
case LOOP_SET_CAPACITY:
case LOOP_SET_DIRECT_IO:
case LOOP_SET_BLOCK_SIZE:
if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN))
return -EPERM;
fallthrough;
default:
err = lo_simple_ioctl(lo, cmd, arg);
break;
}
return err;
}
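/*
 * For reference, a minimal losetup-style configuration sequence from
 * userspace (illustrative sketch; "backing.img" and the error handling
 * are made up, not taken from this driver):
 *
 *	int loop_fd = open("/dev/loop0", O_RDWR);
 *	struct loop_config cfg = { 0 };
 *
 *	cfg.fd = open("backing.img", O_RDWR);
 *	cfg.info.lo_flags = LO_FLAGS_PARTSCAN;
 *	if (ioctl(loop_fd, LOOP_CONFIGURE, &cfg) < 0)
 *		perror("LOOP_CONFIGURE");
 *
 * LOOP_SET_FD remains the legacy single-step equivalent with default
 * parameters, as handled above.
 */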
#ifdef CONFIG_COMPAT
struct compat_loop_info {
compat_int_t lo_number; /* ioctl r/o */
compat_dev_t lo_device; /* ioctl r/o */
compat_ulong_t lo_inode; /* ioctl r/o */
compat_dev_t lo_rdevice; /* ioctl r/o */
compat_int_t lo_offset;
compat_int_t lo_encrypt_type; /* obsolete, ignored */
compat_int_t lo_encrypt_key_size; /* ioctl w/o */
compat_int_t lo_flags; /* ioctl r/o */
char lo_name[LO_NAME_SIZE];
unsigned char lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
compat_ulong_t lo_init[2];
char reserved[4];
};
/*
 * Transfer a 32-bit compatibility structure in userspace to 64-bit loop info
 * - noinlined to reduce stack space usage in the main part of the driver
*/
static noinline int
loop_info64_from_compat(const struct compat_loop_info __user *arg,
struct loop_info64 *info64)
{
struct compat_loop_info info;
if (copy_from_user(&info, arg, sizeof(info)))
return -EFAULT;
memset(info64, 0, sizeof(*info64));
info64->lo_number = info.lo_number;
info64->lo_device = info.lo_device;
info64->lo_inode = info.lo_inode;
info64->lo_rdevice = info.lo_rdevice;
info64->lo_offset = info.lo_offset;
info64->lo_sizelimit = 0;
info64->lo_flags = info.lo_flags;
memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
return 0;
}
/*
 * Transfer 64-bit loop info to a 32-bit compatibility structure in userspace
 * - noinlined to reduce stack space usage in the main part of the driver
*/
static noinline int
loop_info64_to_compat(const struct loop_info64 *info64,
struct compat_loop_info __user *arg)
{
struct compat_loop_info info;
memset(&info, 0, sizeof(info));
info.lo_number = info64->lo_number;
info.lo_device = info64->lo_device;
info.lo_inode = info64->lo_inode;
info.lo_rdevice = info64->lo_rdevice;
info.lo_offset = info64->lo_offset;
info.lo_flags = info64->lo_flags;
memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
/* error in case values were truncated */
if (info.lo_device != info64->lo_device ||
info.lo_rdevice != info64->lo_rdevice ||
info.lo_inode != info64->lo_inode ||
info.lo_offset != info64->lo_offset)
return -EOVERFLOW;
if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT;
return 0;
}
static int
loop_set_status_compat(struct loop_device *lo,
const struct compat_loop_info __user *arg)
{
struct loop_info64 info64;
int ret;
ret = loop_info64_from_compat(arg, &info64);
if (ret < 0)
return ret;
return loop_set_status(lo, &info64);
}
static int
loop_get_status_compat(struct loop_device *lo,
struct compat_loop_info __user *arg)
{
struct loop_info64 info64;
int err;
if (!arg)
return -EINVAL;
err = loop_get_status(lo, &info64);
if (!err)
err = loop_info64_to_compat(&info64, arg);
return err;
}
static int lo_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
struct loop_device *lo = bdev->bd_disk->private_data;
int err;
switch(cmd) {
case LOOP_SET_STATUS:
err = loop_set_status_compat(lo,
(const struct compat_loop_info __user *)arg);
break;
case LOOP_GET_STATUS:
err = loop_get_status_compat(lo,
(struct compat_loop_info __user *)arg);
break;
case LOOP_SET_CAPACITY:
case LOOP_CLR_FD:
case LOOP_GET_STATUS64:
case LOOP_SET_STATUS64:
case LOOP_CONFIGURE:
arg = (unsigned long) compat_ptr(arg);
fallthrough;
case LOOP_SET_FD:
case LOOP_CHANGE_FD:
case LOOP_SET_BLOCK_SIZE:
case LOOP_SET_DIRECT_IO:
err = lo_ioctl(bdev, mode, cmd, arg);
break;
default:
err = -ENOIOCTLCMD;
break;
}
return err;
}
#endif
static void lo_release(struct gendisk *disk)
{
struct loop_device *lo = disk->private_data;
if (disk_openers(disk) > 0)
return;
mutex_lock(&lo->lo_mutex);
if (lo->lo_state == Lo_bound && (lo->lo_flags & LO_FLAGS_AUTOCLEAR)) {
lo->lo_state = Lo_rundown;
mutex_unlock(&lo->lo_mutex);
/*
* In autoclear mode, stop the loop thread
* and remove configuration after last close.
*/
__loop_clr_fd(lo, true);
return;
}
mutex_unlock(&lo->lo_mutex);
}
static void lo_free_disk(struct gendisk *disk)
{
struct loop_device *lo = disk->private_data;
if (lo->workqueue)
destroy_workqueue(lo->workqueue);
loop_free_idle_workers(lo, true);
timer_shutdown_sync(&lo->timer);
mutex_destroy(&lo->lo_mutex);
kfree(lo);
}
static const struct block_device_operations lo_fops = {
.owner = THIS_MODULE,
.release = lo_release,
.ioctl = lo_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = lo_compat_ioctl,
#endif
.free_disk = lo_free_disk,
};
/*
 * And now the module code and kernel interface.
*/
/*
 * If max_loop is specified, create that many devices upfront.
 * This also becomes a hard limit. If max_loop is not specified,
 * the default isn't a hard limit (preserving the behaviour from
 * before commit 85c50197716c changed the default value from 0):
 * just create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
 * init time. Loop devices can be requested on-demand with the
 * /dev/loop-control interface, or be instantiated by accessing
 * a 'dead' device node.
 */
static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
static bool max_loop_specified;
static int max_loop_param_set_int(const char *val,
const struct kernel_param *kp)
{
int ret;
ret = param_set_int(val, kp);
if (ret < 0)
return ret;
max_loop_specified = true;
return 0;
}
static const struct kernel_param_ops max_loop_param_ops = {
.set = max_loop_param_set_int,
.get = param_get_int,
};
module_param_cb(max_loop, &max_loop_param_ops, &max_loop, 0444);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
#else
module_param(max_loop, int, 0444);
MODULE_PARM_DESC(max_loop, "Initial number of loop devices");
#endif
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
static int hw_queue_depth = LOOP_DEFAULT_HW_Q_DEPTH;
static int loop_set_hw_queue_depth(const char *s, const struct kernel_param *p)
{
int qd, ret;
ret = kstrtoint(s, 0, &qd);
if (ret < 0)
return ret;
if (qd < 1)
return -EINVAL;
hw_queue_depth = qd;
return 0;
}
static const struct kernel_param_ops loop_hw_qdepth_param_ops = {
.set = loop_set_hw_queue_depth,
.get = param_get_int,
};
device_param_cb(hw_queue_depth, &loop_hw_qdepth_param_ops, &hw_queue_depth, 0444);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: " __stringify(LOOP_DEFAULT_HW_Q_DEPTH));
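/*
 * Example (illustrative): the module parameters above can be combined at
 * load time, e.g.
 *
 *	modprobe loop max_loop=8 max_part=15 hw_queue_depth=256
 *
 * which pre-creates loop0..loop7, reserves 15 partition minors per device
 * (part_shift = 4) and sizes each hardware queue to 256 requests.
 */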
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request *rq = bd->rq;
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
struct loop_device *lo = rq->q->queuedata;
blk_mq_start_request(rq);
if (lo->lo_state != Lo_bound)
return BLK_STS_IOERR;
switch (req_op(rq)) {
case REQ_OP_FLUSH:
case REQ_OP_DISCARD:
case REQ_OP_WRITE_ZEROES:
cmd->use_aio = false;
break;
default:
cmd->use_aio = lo->use_dio;
break;
}
/* always use the first bio's css */
cmd->blkcg_css = NULL;
cmd->memcg_css = NULL;
#ifdef CONFIG_BLK_CGROUP
if (rq->bio) {
cmd->blkcg_css = bio_blkcg_css(rq->bio);
#ifdef CONFIG_MEMCG
if (cmd->blkcg_css) {
cmd->memcg_css =
cgroup_get_e_css(cmd->blkcg_css->cgroup,
&memory_cgrp_subsys);
}
#endif
}
#endif
loop_queue_work(lo, cmd);
return BLK_STS_OK;
}
static void loop_handle_cmd(struct loop_cmd *cmd)
{
struct cgroup_subsys_state *cmd_blkcg_css = cmd->blkcg_css;
struct cgroup_subsys_state *cmd_memcg_css = cmd->memcg_css;
struct request *rq = blk_mq_rq_from_pdu(cmd);
const bool write = op_is_write(req_op(rq));
struct loop_device *lo = rq->q->queuedata;
int ret = 0;
struct mem_cgroup *old_memcg = NULL;
const bool use_aio = cmd->use_aio;
if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
ret = -EIO;
goto failed;
}
if (cmd_blkcg_css)
kthread_associate_blkcg(cmd_blkcg_css);
if (cmd_memcg_css)
old_memcg = set_active_memcg(
mem_cgroup_from_css(cmd_memcg_css));
/*
* do_req_filebacked() may call blk_mq_complete_request() synchronously
* or asynchronously if using aio. Hence, do not touch 'cmd' after
* do_req_filebacked() has returned unless we are sure that 'cmd' has
* not yet been completed.
*/
ret = do_req_filebacked(lo, rq);
if (cmd_blkcg_css)
kthread_associate_blkcg(NULL);
if (cmd_memcg_css) {
set_active_memcg(old_memcg);
css_put(cmd_memcg_css);
}
failed:
/* complete non-aio request */
if (!use_aio || ret) {
if (ret == -EOPNOTSUPP)
cmd->ret = ret;
else
cmd->ret = ret ? -EIO : 0;
if (likely(!blk_should_fake_timeout(rq->q)))
blk_mq_complete_request(rq);
}
}
static void loop_process_work(struct loop_worker *worker,
struct list_head *cmd_list, struct loop_device *lo)
{
int orig_flags = current->flags;
struct loop_cmd *cmd;
current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
spin_lock_irq(&lo->lo_work_lock);
while (!list_empty(cmd_list)) {
cmd = container_of(
cmd_list->next, struct loop_cmd, list_entry);
list_del(cmd_list->next);
spin_unlock_irq(&lo->lo_work_lock);
loop_handle_cmd(cmd);
cond_resched();
spin_lock_irq(&lo->lo_work_lock);
}
/*
 * We only add to the idle list if there are no pending cmds
 * *and* the worker will not run again, which ensures that it
 * is safe to free any worker on the idle list.
 */
if (worker && !work_pending(&worker->work)) {
worker->last_ran_at = jiffies;
list_add_tail(&worker->idle_list, &lo->idle_worker_list);
loop_set_timer(lo);
}
spin_unlock_irq(&lo->lo_work_lock);
current->flags = orig_flags;
}
static void loop_workfn(struct work_struct *work)
{
struct loop_worker *worker =
container_of(work, struct loop_worker, work);
loop_process_work(worker, &worker->cmd_list, worker->lo);
}
static void loop_rootcg_workfn(struct work_struct *work)
{
struct loop_device *lo =
container_of(work, struct loop_device, rootcg_work);
loop_process_work(NULL, &lo->rootcg_cmd_list, lo);
}
static const struct blk_mq_ops loop_mq_ops = {
.queue_rq = loop_queue_rq,
.complete = lo_complete_rq,
};
static int loop_add(int i)
{
struct loop_device *lo;
struct gendisk *disk;
int err;
err = -ENOMEM;
lo = kzalloc(sizeof(*lo), GFP_KERNEL);
if (!lo)
goto out;
lo->worker_tree = RB_ROOT;
INIT_LIST_HEAD(&lo->idle_worker_list);
timer_setup(&lo->timer, loop_free_idle_workers_timer, TIMER_DEFERRABLE);
lo->lo_state = Lo_unbound;
err = mutex_lock_killable(&loop_ctl_mutex);
if (err)
goto out_free_dev;
/* allocate id, if @i >= 0, we're requesting that specific id */
if (i >= 0) {
err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
if (err == -ENOSPC)
err = -EEXIST;
} else {
err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
}
mutex_unlock(&loop_ctl_mutex);
if (err < 0)
goto out_free_dev;
i = err;
lo->tag_set.ops = &loop_mq_ops;
lo->tag_set.nr_hw_queues = 1;
lo->tag_set.queue_depth = hw_queue_depth;
lo->tag_set.numa_node = NUMA_NO_NODE;
lo->tag_set.cmd_size = sizeof(struct loop_cmd);
lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING |
BLK_MQ_F_NO_SCHED_BY_DEFAULT;
lo->tag_set.driver_data = lo;
err = blk_mq_alloc_tag_set(&lo->tag_set);
if (err)
goto out_free_idr;
disk = lo->lo_disk = blk_mq_alloc_disk(&lo->tag_set, lo);
if (IS_ERR(disk)) {
err = PTR_ERR(disk);
goto out_cleanup_tags;
}
lo->lo_queue = lo->lo_disk->queue;
blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS);
/*
 * By default we do buffered I/O, so it doesn't make sense to enable
 * merging because the I/O submitted to the backing file is handled
 * page by page. For direct I/O mode, merging does help to dispatch
 * bigger requests to the underlying disk, so we will enable merging
 * once direct I/O is enabled.
 */
blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
/*
* Disable partition scanning by default. The in-kernel partition
* scanning can be requested individually per-device during its
* setup. Userspace can always add and remove partitions from all
* devices. The needed partition minors are allocated from the
* extended minor space, the main loop device numbers will continue
* to match the loop minors, regardless of the number of partitions
* used.
*
* If max_part is given, partition scanning is globally enabled for
* all loop devices. The minors for the main loop devices will be
* multiples of max_part.
*
 * Note: Global-for-all-devices, set-only-at-init, read-only module
 * parameters like 'max_loop' and 'max_part' make things needlessly
 * complicated, are too static and inflexible, and may surprise
 * userspace tools. Parameters like these should in general be avoided.
*/
if (!part_shift)
set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
mutex_init(&lo->lo_mutex);
lo->lo_number = i;
spin_lock_init(&lo->lo_lock);
spin_lock_init(&lo->lo_work_lock);
INIT_WORK(&lo->rootcg_work, loop_rootcg_workfn);
INIT_LIST_HEAD(&lo->rootcg_cmd_list);
disk->major = LOOP_MAJOR;
disk->first_minor = i << part_shift;
disk->minors = 1 << part_shift;
disk->fops = &lo_fops;
disk->private_data = lo;
disk->queue = lo->lo_queue;
disk->events = DISK_EVENT_MEDIA_CHANGE;
disk->event_flags = DISK_EVENT_FLAG_UEVENT;
sprintf(disk->disk_name, "loop%d", i);
/* Make this loop device reachable from pathname. */
err = add_disk(disk);
if (err)
goto out_cleanup_disk;
/* Show this loop device. */
mutex_lock(&loop_ctl_mutex);
lo->idr_visible = true;
mutex_unlock(&loop_ctl_mutex);
return i;
out_cleanup_disk:
put_disk(disk);
out_cleanup_tags:
blk_mq_free_tag_set(&lo->tag_set);
out_free_idr:
mutex_lock(&loop_ctl_mutex);
idr_remove(&loop_index_idr, i);
mutex_unlock(&loop_ctl_mutex);
out_free_dev:
kfree(lo);
out:
return err;
}
static void loop_remove(struct loop_device *lo)
{
/* Make this loop device unreachable from pathname. */
del_gendisk(lo->lo_disk);
blk_mq_free_tag_set(&lo->tag_set);
mutex_lock(&loop_ctl_mutex);
idr_remove(&loop_index_idr, lo->lo_number);
mutex_unlock(&loop_ctl_mutex);
put_disk(lo->lo_disk);
}
#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
static void loop_probe(dev_t dev)
{
int idx = MINOR(dev) >> part_shift;
if (max_loop_specified && max_loop && idx >= max_loop)
return;
loop_add(idx);
}
#else
#define loop_probe NULL
#endif /* !CONFIG_BLOCK_LEGACY_AUTOLOAD */
static int loop_control_remove(int idx)
{
struct loop_device *lo;
int ret;
if (idx < 0) {
pr_warn_once("deleting an unspecified loop device is not supported.\n");
return -EINVAL;
}
/* Hide this loop device for serialization. */
ret = mutex_lock_killable(&loop_ctl_mutex);
if (ret)
return ret;
lo = idr_find(&loop_index_idr, idx);
if (!lo || !lo->idr_visible)
ret = -ENODEV;
else
lo->idr_visible = false;
mutex_unlock(&loop_ctl_mutex);
if (ret)
return ret;
/* Check whether this loop device can be removed. */
ret = mutex_lock_killable(&lo->lo_mutex);
if (ret)
goto mark_visible;
if (lo->lo_state != Lo_unbound || disk_openers(lo->lo_disk) > 0) {
mutex_unlock(&lo->lo_mutex);
ret = -EBUSY;
goto mark_visible;
}
/* Mark this loop device as no more bound, but not quite unbound yet */
lo->lo_state = Lo_deleting;
mutex_unlock(&lo->lo_mutex);
loop_remove(lo);
return 0;
mark_visible:
/* Show this loop device again. */
mutex_lock(&loop_ctl_mutex);
lo->idr_visible = true;
mutex_unlock(&loop_ctl_mutex);
return ret;
}
static int loop_control_get_free(int idx)
{
struct loop_device *lo;
int id, ret;
ret = mutex_lock_killable(&loop_ctl_mutex);
if (ret)
return ret;
idr_for_each_entry(&loop_index_idr, lo, id) {
/* Hitting a race results in creating a new loop device, which is harmless. */
if (lo->idr_visible && data_race(lo->lo_state) == Lo_unbound)
goto found;
}
mutex_unlock(&loop_ctl_mutex);
return loop_add(-1);
found:
mutex_unlock(&loop_ctl_mutex);
return id;
}
static long loop_control_ioctl(struct file *file, unsigned int cmd,
unsigned long parm)
{
switch (cmd) {
case LOOP_CTL_ADD:
return loop_add(parm);
case LOOP_CTL_REMOVE:
return loop_control_remove(parm);
case LOOP_CTL_GET_FREE:
return loop_control_get_free(parm);
default:
return -ENOSYS;
}
}
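/*
 * Typical userspace use of /dev/loop-control (illustrative sketch, not
 * part of this driver):
 *
 *	int ctl = open("/dev/loop-control", O_RDWR);
 *	int idx = ioctl(ctl, LOOP_CTL_GET_FREE);
 *
 * idx is the index of an existing unbound device, or of a freshly
 * created one; userspace then opens and configures /dev/loop<idx>.
 */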
static const struct file_operations loop_ctl_fops = {
.open = nonseekable_open,
.unlocked_ioctl = loop_control_ioctl,
.compat_ioctl = loop_control_ioctl,
.owner = THIS_MODULE,
.llseek = noop_llseek,
};
static struct miscdevice loop_misc = {
.minor = LOOP_CTRL_MINOR,
.name = "loop-control",
.fops = &loop_ctl_fops,
};
MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
MODULE_ALIAS("devname:loop-control");
static int __init loop_init(void)
{
int i;
int err;
part_shift = 0;
if (max_part > 0) {
part_shift = fls(max_part);
/*
 * Adjust max_part according to part_shift as it is exported
 * to user space so that users can work out the correct minor
 * number if they want to create more devices.
 *
 * Note that -1 is required because partition 0 is reserved
 * for the whole disk.
 */
max_part = (1UL << part_shift) - 1;
}
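/*
 * Worked example (for illustration): max_part=10 gives
 * part_shift = fls(10) = 4, so minor numbers come in blocks of 16
 * and max_part is rounded up to (1 << 4) - 1 = 15.
 */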
if ((1UL << part_shift) > DISK_MAX_PARTS) {
err = -EINVAL;
goto err_out;
}
if (max_loop > 1UL << (MINORBITS - part_shift)) {
err = -EINVAL;
goto err_out;
}
err = misc_register(&loop_misc);
if (err < 0)
goto err_out;
if (__register_blkdev(LOOP_MAJOR, "loop", loop_probe)) {
err = -EIO;
goto misc_out;
}
/* pre-create number of devices given by config or max_loop */
for (i = 0; i < max_loop; i++)
loop_add(i);
printk(KERN_INFO "loop: module loaded\n");
return 0;
misc_out:
misc_deregister(&loop_misc);
err_out:
return err;
}
static void __exit loop_exit(void)
{
struct loop_device *lo;
int id;
unregister_blkdev(LOOP_MAJOR, "loop");
misc_deregister(&loop_misc);
/*
* There is no need to use loop_ctl_mutex here, for nobody else can
* access loop_index_idr when this module is unloading (unless forced
* module unloading is requested). If this is not a clean unloading,
* we have no means to avoid kernel crash.
*/
idr_for_each_entry(&loop_index_idr, lo, id)
loop_remove(lo);
idr_destroy(&loop_index_idr);
}
module_init(loop_init);
module_exit(loop_exit);
#ifndef MODULE
static int __init max_loop_setup(char *str)
{
max_loop = simple_strtol(str, NULL, 0);
#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
max_loop_specified = true;
#endif
return 1;
}
__setup("max_loop=", max_loop_setup);
#endif
| linux-master | drivers/block/loop.c |
/*
rbd.c -- Export ceph rados objects as a Linux block device
based on drivers/block/osdblk.c:
Copyright 2009 Red Hat, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
For usage instructions, please refer to:
Documentation/ABI/testing/sysfs-bus-rbd
*/
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/decode.h>
#include <linux/fs_parser.h>
#include <linux/bsearch.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include "rbd_types.h"
#define RBD_DEBUG /* Activate rbd_assert() calls */
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, -EINVAL is
 * returned without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
unsigned int counter;
counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
if (counter <= (unsigned int)INT_MAX)
return (int)counter;
atomic_dec(v);
return -EINVAL;
}
/* Decrement the counter. Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
int counter;
counter = atomic_dec_return(v);
if (counter >= 0)
return counter;
atomic_inc(v);
return -EINVAL;
}
#define RBD_DRV_NAME "rbd"
#define RBD_MINORS_PER_MAJOR 256
#define RBD_SINGLE_MAJOR_PART_SHIFT 4
#define RBD_MAX_PARENT_CHAIN_LEN 16
#define RBD_SNAP_DEV_NAME_PREFIX "snap_"
#define RBD_MAX_SNAP_NAME_LEN \
(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
#define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
#define RBD_SNAP_HEAD_NAME "-"
#define BAD_SNAP_INDEX U32_MAX /* invalid index into snap array */
/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX 64
#define RBD_OBJ_PREFIX_LEN_MAX 64
#define RBD_NOTIFY_TIMEOUT 5 /* seconds */
#define RBD_RETRY_DELAY msecs_to_jiffies(1000)
/* Feature bits */
#define RBD_FEATURE_LAYERING (1ULL<<0)
#define RBD_FEATURE_STRIPINGV2 (1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2)
#define RBD_FEATURE_OBJECT_MAP (1ULL<<3)
#define RBD_FEATURE_FAST_DIFF (1ULL<<4)
#define RBD_FEATURE_DEEP_FLATTEN (1ULL<<5)
#define RBD_FEATURE_DATA_POOL (1ULL<<7)
#define RBD_FEATURE_OPERATIONS (1ULL<<8)
#define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \
RBD_FEATURE_STRIPINGV2 | \
RBD_FEATURE_EXCLUSIVE_LOCK | \
RBD_FEATURE_OBJECT_MAP | \
RBD_FEATURE_FAST_DIFF | \
RBD_FEATURE_DEEP_FLATTEN | \
RBD_FEATURE_DATA_POOL | \
RBD_FEATURE_OPERATIONS)
/* Features supported by this (client software) implementation. */
#define RBD_FEATURES_SUPPORTED (RBD_FEATURES_ALL)
/*
* An RBD device name will be "rbd#", where the "rbd" comes from
* RBD_DRV_NAME above, and # is a unique integer identifier.
*/
#define DEV_NAME_LEN 32
/*
* block device image metadata (in-memory version)
*/
struct rbd_image_header {
/* These six fields never change for a given rbd image */
char *object_prefix;
__u8 obj_order;
u64 stripe_unit;
u64 stripe_count;
s64 data_pool_id;
u64 features; /* Might be changeable someday? */
/* The remaining fields need to be updated occasionally */
u64 image_size;
struct ceph_snap_context *snapc;
char *snap_names; /* format 1 only */
u64 *snap_sizes; /* format 1 only */
};
/*
* An rbd image specification.
*
* The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
* identify an image. Each rbd_dev structure includes a pointer to
* an rbd_spec structure that encapsulates this identity.
*
 * Each of the ids in an rbd_spec has an associated name. For a
 * user-mapped image, the names are supplied and the ids associated
 * with them are looked up. For a layered image, a parent image is
* defined by the tuple, and the names are looked up.
*
* An rbd_dev structure contains a parent_spec pointer which is
* non-null if the image it represents is a child in a layered
* image. This pointer will refer to the rbd_spec structure used
* by the parent rbd_dev for its own identity (i.e., the structure
* is shared between the parent and child).
*
* Since these structures are populated once, during the discovery
* phase of image construction, they are effectively immutable so
* we make no effort to synchronize access to them.
*
* Note that code herein does not assume the image name is known (it
* could be a null pointer).
*/
struct rbd_spec {
u64 pool_id;
const char *pool_name;
const char *pool_ns; /* NULL if default, never "" */
const char *image_id;
const char *image_name;
u64 snap_id;
const char *snap_name;
struct kref kref;
};
/*
 * An instance of the client. Multiple devices may share an rbd client.
*/
struct rbd_client {
struct ceph_client *client;
struct kref kref;
struct list_head node;
};
struct pending_result {
int result; /* first nonzero result */
int num_pending;
};
struct rbd_img_request;
enum obj_request_type {
OBJ_REQUEST_NODATA = 1,
OBJ_REQUEST_BIO, /* pointer into provided bio (list) */
OBJ_REQUEST_BVECS, /* pointer into provided bio_vec array */
OBJ_REQUEST_OWN_BVECS, /* private bio_vec array, doesn't own pages */
};
enum obj_operation_type {
OBJ_OP_READ = 1,
OBJ_OP_WRITE,
OBJ_OP_DISCARD,
OBJ_OP_ZEROOUT,
};
#define RBD_OBJ_FLAG_DELETION (1U << 0)
#define RBD_OBJ_FLAG_COPYUP_ENABLED (1U << 1)
#define RBD_OBJ_FLAG_COPYUP_ZEROS (1U << 2)
#define RBD_OBJ_FLAG_MAY_EXIST (1U << 3)
#define RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT (1U << 4)
enum rbd_obj_read_state {
RBD_OBJ_READ_START = 1,
RBD_OBJ_READ_OBJECT,
RBD_OBJ_READ_PARENT,
};
/*
* Writes go through the following state machine to deal with
* layering:
*
* . . . . . RBD_OBJ_WRITE_GUARD. . . . . . . . . . . . . .
* . | .
* . v .
* . RBD_OBJ_WRITE_READ_FROM_PARENT. . . .
* . | . .
* . v v (deep-copyup .
* (image . RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC . not needed) .
* flattened) v | . .
* . v . .
* . . . .RBD_OBJ_WRITE_COPYUP_OPS. . . . . (copyup .
* | not needed) v
* v .
* done . . . . . . . . . . . . . . . . . .
* ^
* |
* RBD_OBJ_WRITE_FLAT
*
* Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
* assert_exists guard is needed or not (in some cases it's not needed
* even if there is a parent).
*/
enum rbd_obj_write_state {
RBD_OBJ_WRITE_START = 1,
RBD_OBJ_WRITE_PRE_OBJECT_MAP,
RBD_OBJ_WRITE_OBJECT,
__RBD_OBJ_WRITE_COPYUP,
RBD_OBJ_WRITE_COPYUP,
RBD_OBJ_WRITE_POST_OBJECT_MAP,
};
enum rbd_obj_copyup_state {
RBD_OBJ_COPYUP_START = 1,
RBD_OBJ_COPYUP_READ_PARENT,
__RBD_OBJ_COPYUP_OBJECT_MAPS,
RBD_OBJ_COPYUP_OBJECT_MAPS,
__RBD_OBJ_COPYUP_WRITE_OBJECT,
RBD_OBJ_COPYUP_WRITE_OBJECT,
};
struct rbd_obj_request {
struct ceph_object_extent ex;
unsigned int flags; /* RBD_OBJ_FLAG_* */
union {
enum rbd_obj_read_state read_state; /* for reads */
enum rbd_obj_write_state write_state; /* for writes */
};
struct rbd_img_request *img_request;
struct ceph_file_extent *img_extents;
u32 num_img_extents;
union {
struct ceph_bio_iter bio_pos;
struct {
struct ceph_bvec_iter bvec_pos;
u32 bvec_count;
u32 bvec_idx;
};
};
enum rbd_obj_copyup_state copyup_state;
struct bio_vec *copyup_bvecs;
u32 copyup_bvec_count;
struct list_head osd_reqs; /* w/ r_private_item */
struct mutex state_mutex;
struct pending_result pending;
struct kref kref;
};
enum img_req_flags {
IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */
IMG_REQ_LAYERED, /* ENOENT handling: normal = 0, layered = 1 */
};
enum rbd_img_state {
RBD_IMG_START = 1,
RBD_IMG_EXCLUSIVE_LOCK,
__RBD_IMG_OBJECT_REQUESTS,
RBD_IMG_OBJECT_REQUESTS,
};
struct rbd_img_request {
struct rbd_device *rbd_dev;
enum obj_operation_type op_type;
enum obj_request_type data_type;
unsigned long flags;
enum rbd_img_state state;
union {
u64 snap_id; /* for reads */
struct ceph_snap_context *snapc; /* for writes */
};
struct rbd_obj_request *obj_request; /* obj req initiator */
struct list_head lock_item;
struct list_head object_extents; /* obj_req.ex structs */
struct mutex state_mutex;
struct pending_result pending;
struct work_struct work;
int work_result;
};
#define for_each_obj_request(ireq, oreq) \
list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
#define for_each_obj_request_safe(ireq, oreq, n) \
list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)
enum rbd_watch_state {
RBD_WATCH_STATE_UNREGISTERED,
RBD_WATCH_STATE_REGISTERED,
RBD_WATCH_STATE_ERROR,
};
enum rbd_lock_state {
RBD_LOCK_STATE_UNLOCKED,
RBD_LOCK_STATE_LOCKED,
RBD_LOCK_STATE_RELEASING,
};
/* WatchNotify::ClientId */
struct rbd_client_id {
u64 gid;
u64 handle;
};
struct rbd_mapping {
u64 size;
};
/*
* a single device
*/
struct rbd_device {
int dev_id; /* blkdev unique id */
int major; /* blkdev assigned major */
int minor;
struct gendisk *disk; /* blkdev's gendisk and rq */
u32 image_format; /* Either 1 or 2 */
struct rbd_client *rbd_client;
char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
spinlock_t lock; /* queue, flags, open_count */
struct rbd_image_header header;
unsigned long flags; /* possibly lock protected */
struct rbd_spec *spec;
struct rbd_options *opts;
char *config_info; /* add{,_single_major} string */
struct ceph_object_id header_oid;
struct ceph_object_locator header_oloc;
struct ceph_file_layout layout; /* used for all rbd requests */
struct mutex watch_mutex;
enum rbd_watch_state watch_state;
struct ceph_osd_linger_request *watch_handle;
u64 watch_cookie;
struct delayed_work watch_dwork;
struct rw_semaphore lock_rwsem;
enum rbd_lock_state lock_state;
char lock_cookie[32];
struct rbd_client_id owner_cid;
struct work_struct acquired_lock_work;
struct work_struct released_lock_work;
struct delayed_work lock_dwork;
struct work_struct unlock_work;
spinlock_t lock_lists_lock;
struct list_head acquiring_list;
struct list_head running_list;
struct completion acquire_wait;
int acquire_err;
struct completion releasing_wait;
spinlock_t object_map_lock;
u8 *object_map;
u64 object_map_size; /* in objects */
u64 object_map_flags;
struct workqueue_struct *task_wq;
struct rbd_spec *parent_spec;
u64 parent_overlap;
atomic_t parent_ref;
struct rbd_device *parent;
/* Block layer tags. */
struct blk_mq_tag_set tag_set;
/* protects updating the header */
struct rw_semaphore header_rwsem;
struct rbd_mapping mapping;
struct list_head node;
/* sysfs related */
struct device dev;
unsigned long open_count; /* protected by lock */
};
/*
* Flag bits for rbd_dev->flags:
* - REMOVING (which is coupled with rbd_dev->open_count) is protected
* by rbd_dev->lock
*/
enum rbd_dev_flags {
RBD_DEV_FLAG_EXISTS, /* rbd_dev_device_setup() ran */
RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
RBD_DEV_FLAG_READONLY, /* -o ro or snapshot */
};
static DEFINE_MUTEX(client_mutex); /* Serialize client creation */
static LIST_HEAD(rbd_dev_list); /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);
static LIST_HEAD(rbd_client_list); /* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);
/* Slab caches for frequently-allocated structures */
static struct kmem_cache *rbd_img_request_cache;
static struct kmem_cache *rbd_obj_request_cache;
static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);
static struct workqueue_struct *rbd_wq;
static struct ceph_snap_context rbd_empty_snapc = {
.nref = REFCOUNT_INIT(1),
};
/*
 * single-major requires userspace rbd utility version >= 0.75.
*/
static bool single_major = true;
module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
static ssize_t add_store(const struct bus_type *bus, const char *buf, size_t count);
static ssize_t remove_store(const struct bus_type *bus, const char *buf,
size_t count);
static ssize_t add_single_major_store(const struct bus_type *bus, const char *buf,
size_t count);
static ssize_t remove_single_major_store(const struct bus_type *bus, const char *buf,
size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static int rbd_dev_id_to_minor(int dev_id)
{
return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}
static int minor_to_rbd_dev_id(int minor)
{
return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
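/*
 * Worked example (illustrative): with RBD_SINGLE_MAJOR_PART_SHIFT == 4,
 * each device owns 16 minors, so dev_id 3 maps to minor 48 and minors
 * 48-63 all map back to rbd3 and its partitions.
 */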
static bool rbd_is_ro(struct rbd_device *rbd_dev)
{
return test_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
}
static bool rbd_is_snap(struct rbd_device *rbd_dev)
{
return rbd_dev->spec->snap_id != CEPH_NOSNAP;
}
static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
lockdep_assert_held(&rbd_dev->lock_rwsem);
return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}
static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
bool is_lock_owner;
down_read(&rbd_dev->lock_rwsem);
is_lock_owner = __rbd_is_lock_owner(rbd_dev);
up_read(&rbd_dev->lock_rwsem);
return is_lock_owner;
}
static ssize_t supported_features_show(const struct bus_type *bus, char *buf)
{
return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}
static BUS_ATTR_WO(add);
static BUS_ATTR_WO(remove);
static BUS_ATTR_WO(add_single_major);
static BUS_ATTR_WO(remove_single_major);
static BUS_ATTR_RO(supported_features);
static struct attribute *rbd_bus_attrs[] = {
&bus_attr_add.attr,
&bus_attr_remove.attr,
&bus_attr_add_single_major.attr,
&bus_attr_remove_single_major.attr,
&bus_attr_supported_features.attr,
NULL,
};
static umode_t rbd_bus_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
if (!single_major &&
(attr == &bus_attr_add_single_major.attr ||
attr == &bus_attr_remove_single_major.attr))
return 0;
return attr->mode;
}
static const struct attribute_group rbd_bus_group = {
.attrs = rbd_bus_attrs,
.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);
static struct bus_type rbd_bus_type = {
.name = "rbd",
.bus_groups = rbd_bus_groups,
};
static void rbd_root_dev_release(struct device *dev)
{
}
static struct device rbd_root_dev = {
.init_name = "rbd",
.release = rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
if (!rbd_dev)
printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
else if (rbd_dev->disk)
printk(KERN_WARNING "%s: %s: %pV\n",
RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
else if (rbd_dev->spec && rbd_dev->spec->image_name)
printk(KERN_WARNING "%s: image %s: %pV\n",
RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
else if (rbd_dev->spec && rbd_dev->spec->image_id)
printk(KERN_WARNING "%s: id %s: %pV\n",
RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
else /* punt */
printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
RBD_DRV_NAME, rbd_dev, &vaf);
va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr) \
if (unlikely(!(expr))) { \
printk(KERN_ERR "\nAssertion failure in %s() " \
"at line %d:\n\n" \
"\trbd_assert(%s);\n\n", \
__func__, __LINE__, #expr); \
BUG(); \
}
#else /* !RBD_DEBUG */
# define rbd_assert(expr) ((void) 0)
#endif /* !RBD_DEBUG */
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
u8 *order, u64 *snap_size);
static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev);
static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result);
static void rbd_img_handle_request(struct rbd_img_request *img_req, int result);
/*
* Return true if nothing else is pending.
*/
static bool pending_result_dec(struct pending_result *pending, int *result)
{
rbd_assert(pending->num_pending > 0);
if (*result && !pending->result)
pending->result = *result;
if (--pending->num_pending)
return false;
*result = pending->result;
return true;
}
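/*
 * Worked example (illustrative): with num_pending == 2, a first
 * completion with result -EIO records the error and returns false; the
 * second completion, even with result 0, returns true and reports
 * *result == -EIO.
 */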
static int rbd_open(struct gendisk *disk, blk_mode_t mode)
{
struct rbd_device *rbd_dev = disk->private_data;
bool removing = false;
spin_lock_irq(&rbd_dev->lock);
if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
removing = true;
else
rbd_dev->open_count++;
spin_unlock_irq(&rbd_dev->lock);
if (removing)
return -ENOENT;
(void) get_device(&rbd_dev->dev);
return 0;
}
static void rbd_release(struct gendisk *disk)
{
struct rbd_device *rbd_dev = disk->private_data;
unsigned long open_count_before;
spin_lock_irq(&rbd_dev->lock);
open_count_before = rbd_dev->open_count--;
spin_unlock_irq(&rbd_dev->lock);
rbd_assert(open_count_before > 0);
put_device(&rbd_dev->dev);
}
static const struct block_device_operations rbd_bd_ops = {
.owner = THIS_MODULE,
.open = rbd_open,
.release = rbd_release,
};
/*
* Initialize an rbd client instance. Success or not, this function
* consumes ceph_opts. Caller holds client_mutex.
*/
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
struct rbd_client *rbdc;
int ret = -ENOMEM;
dout("%s:\n", __func__);
rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
if (!rbdc)
goto out_opt;
kref_init(&rbdc->kref);
INIT_LIST_HEAD(&rbdc->node);
rbdc->client = ceph_create_client(ceph_opts, rbdc);
if (IS_ERR(rbdc->client))
goto out_rbdc;
ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
ret = ceph_open_session(rbdc->client);
if (ret < 0)
goto out_client;
spin_lock(&rbd_client_list_lock);
list_add_tail(&rbdc->node, &rbd_client_list);
spin_unlock(&rbd_client_list_lock);
dout("%s: rbdc %p\n", __func__, rbdc);
return rbdc;
out_client:
ceph_destroy_client(rbdc->client);
out_rbdc:
kfree(rbdc);
out_opt:
if (ceph_opts)
ceph_destroy_options(ceph_opts);
dout("%s: error %d\n", __func__, ret);
return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
kref_get(&rbdc->kref);
return rbdc;
}
/*
* Find a ceph client with specific addr and configuration. If
* found, bump its reference count.
*/
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
struct rbd_client *rbdc = NULL, *iter;
if (ceph_opts->flags & CEPH_OPT_NOSHARE)
return NULL;
spin_lock(&rbd_client_list_lock);
list_for_each_entry(iter, &rbd_client_list, node) {
if (!ceph_compare_options(ceph_opts, iter->client)) {
__rbd_get_client(iter);
rbdc = iter;
break;
}
}
spin_unlock(&rbd_client_list_lock);
return rbdc;
}
/*
* (Per device) rbd map options
*/
enum {
Opt_queue_depth,
Opt_alloc_size,
Opt_lock_timeout,
/* int args above */
Opt_pool_ns,
Opt_compression_hint,
/* string args above */
Opt_read_only,
Opt_read_write,
Opt_lock_on_read,
Opt_exclusive,
Opt_notrim,
};
enum {
Opt_compression_hint_none,
Opt_compression_hint_compressible,
Opt_compression_hint_incompressible,
};
static const struct constant_table rbd_param_compression_hint[] = {
{"none", Opt_compression_hint_none},
{"compressible", Opt_compression_hint_compressible},
{"incompressible", Opt_compression_hint_incompressible},
{}
};
static const struct fs_parameter_spec rbd_parameters[] = {
fsparam_u32 ("alloc_size", Opt_alloc_size),
fsparam_enum ("compression_hint", Opt_compression_hint,
rbd_param_compression_hint),
fsparam_flag ("exclusive", Opt_exclusive),
fsparam_flag ("lock_on_read", Opt_lock_on_read),
fsparam_u32 ("lock_timeout", Opt_lock_timeout),
fsparam_flag ("notrim", Opt_notrim),
fsparam_string ("_pool_ns", Opt_pool_ns),
fsparam_u32 ("queue_depth", Opt_queue_depth),
fsparam_flag ("read_only", Opt_read_only),
fsparam_flag ("read_write", Opt_read_write),
fsparam_flag ("ro", Opt_read_only),
fsparam_flag ("rw", Opt_read_write),
{}
};
struct rbd_options {
int queue_depth;
int alloc_size;
unsigned long lock_timeout;
bool read_only;
bool lock_on_read;
bool exclusive;
bool trim;
u32 alloc_hint_flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
};
#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_DEFAULT_RQ
#define RBD_ALLOC_SIZE_DEFAULT (64 * 1024)
#define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */
#define RBD_READ_ONLY_DEFAULT false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT false
#define RBD_TRIM_DEFAULT true
struct rbd_parse_opts_ctx {
struct rbd_spec *spec;
struct ceph_options *copts;
struct rbd_options *opts;
};
static char *obj_op_name(enum obj_operation_type op_type)
{
switch (op_type) {
case OBJ_OP_READ:
return "read";
case OBJ_OP_WRITE:
return "write";
case OBJ_OP_DISCARD:
return "discard";
case OBJ_OP_ZEROOUT:
return "zeroout";
default:
return "???";
}
}
/*
 * Destroy ceph client
 *
 * Takes rbd_client_list_lock itself to unlink the client from the list.
 */
static void rbd_client_release(struct kref *kref)
{
struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
dout("%s: rbdc %p\n", __func__, rbdc);
spin_lock(&rbd_client_list_lock);
list_del(&rbdc->node);
spin_unlock(&rbd_client_list_lock);
ceph_destroy_client(rbdc->client);
kfree(rbdc);
}
/*
* Drop reference to ceph client node. If it's not referenced anymore, release
* it.
*/
static void rbd_put_client(struct rbd_client *rbdc)
{
if (rbdc)
kref_put(&rbdc->kref, rbd_client_release);
}
/*
 * Get a ceph client with specific addr and configuration; if one does
 * not exist, create it. Either way, ceph_opts is consumed by this
* function.
*/
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
struct rbd_client *rbdc;
int ret;
mutex_lock(&client_mutex);
rbdc = rbd_client_find(ceph_opts);
if (rbdc) {
ceph_destroy_options(ceph_opts);
/*
* Using an existing client. Make sure ->pg_pools is up to
* date before we look up the pool id in do_rbd_add().
*/
ret = ceph_wait_for_latest_osdmap(rbdc->client,
rbdc->client->options->mount_timeout);
if (ret) {
rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
rbd_put_client(rbdc);
rbdc = ERR_PTR(ret);
}
} else {
rbdc = rbd_client_create(ceph_opts);
}
mutex_unlock(&client_mutex);
return rbdc;
}
static bool rbd_image_format_valid(u32 image_format)
{
return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
size_t size;
u32 snap_count;
/* The header has to start with the magic rbd header text */
if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
return false;
/* The bio layer requires at least sector-sized I/O */
if (ondisk->options.order < SECTOR_SHIFT)
return false;
/* If we use u64 in a few spots we may be able to loosen this */
if (ondisk->options.order > 8 * sizeof (int) - 1)
return false;
/*
* The size of a snapshot header has to fit in a size_t, and
* that limits the number of snapshots.
*/
snap_count = le32_to_cpu(ondisk->snap_count);
size = SIZE_MAX - sizeof (struct ceph_snap_context);
if (snap_count > size / sizeof (__le64))
return false;
/*
 * Not only that, but the size of the entire snapshot
* header must also be representable in a size_t.
*/
size -= snap_count * sizeof (__le64);
if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
return false;
return true;
}
/*
* returns the size of an object in the image
*/
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
return 1U << header->obj_order;
}
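/*
 * For illustration: rbd images commonly default to obj_order 22, i.e.
 * 1U << 22 = 4 MiB objects (an observation about typical usage, not a
 * constraint enforced here).
 */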
static void rbd_init_layout(struct rbd_device *rbd_dev)
{
if (rbd_dev->header.stripe_unit == 0 ||
rbd_dev->header.stripe_count == 0) {
rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
rbd_dev->header.stripe_count = 1;
}
rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}
/*
* Fill an rbd image header with information from the given format 1
* on-disk header.
*/
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
struct rbd_image_header_ondisk *ondisk)
{
struct rbd_image_header *header = &rbd_dev->header;
bool first_time = header->object_prefix == NULL;
struct ceph_snap_context *snapc;
char *object_prefix = NULL;
char *snap_names = NULL;
u64 *snap_sizes = NULL;
u32 snap_count;
int ret = -ENOMEM;
u32 i;
/* Allocate this now to avoid having to handle failure below */
if (first_time) {
object_prefix = kstrndup(ondisk->object_prefix,
sizeof(ondisk->object_prefix),
GFP_KERNEL);
if (!object_prefix)
return -ENOMEM;
}
/* Allocate the snapshot context and fill it in */
snap_count = le32_to_cpu(ondisk->snap_count);
snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
if (!snapc)
goto out_err;
snapc->seq = le64_to_cpu(ondisk->snap_seq);
if (snap_count) {
struct rbd_image_snap_ondisk *snaps;
u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
/* We'll keep a copy of the snapshot names... */
if (snap_names_len > (u64)SIZE_MAX)
goto out_2big;
snap_names = kmalloc(snap_names_len, GFP_KERNEL);
if (!snap_names)
goto out_err;
/* ...as well as the array of their sizes. */
snap_sizes = kmalloc_array(snap_count,
sizeof(*header->snap_sizes),
GFP_KERNEL);
if (!snap_sizes)
goto out_err;
/*
* Copy the names, and fill in each snapshot's id
* and size.
*
* Note that rbd_dev_v1_header_info() guarantees the
* ondisk buffer we're working with has
* snap_names_len bytes beyond the end of the
 * snapshot id array, so this memcpy() is safe.
*/
memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
snaps = ondisk->snaps;
for (i = 0; i < snap_count; i++) {
snapc->snaps[i] = le64_to_cpu(snaps[i].id);
snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
}
}
/* We won't fail from here on; fill in the header */
if (first_time) {
header->object_prefix = object_prefix;
header->obj_order = ondisk->options.order;
rbd_init_layout(rbd_dev);
} else {
ceph_put_snap_context(header->snapc);
kfree(header->snap_names);
kfree(header->snap_sizes);
}
/* The remaining fields always get updated (when we refresh) */
header->image_size = le64_to_cpu(ondisk->image_size);
header->snapc = snapc;
header->snap_names = snap_names;
header->snap_sizes = snap_sizes;
return 0;
out_2big:
ret = -EIO;
out_err:
kfree(snap_sizes);
kfree(snap_names);
ceph_put_snap_context(snapc);
kfree(object_prefix);
return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
const char *snap_name;
rbd_assert(which < rbd_dev->header.snapc->num_snaps);
/* Skip over names until we find the one we are looking for */
snap_name = rbd_dev->header.snap_names;
while (which--)
snap_name += strlen(snap_name) + 1;
return kstrdup(snap_name, GFP_KERNEL);
}
/*
* Snapshot id comparison function for use with qsort()/bsearch().
* Note that result is for snapshots in *descending* order.
*/
static int snapid_compare_reverse(const void *s1, const void *s2)
{
u64 snap_id1 = *(u64 *)s1;
u64 snap_id2 = *(u64 *)s2;
if (snap_id1 < snap_id2)
return 1;
return snap_id1 == snap_id2 ? 0 : -1;
}
/*
* Search a snapshot context to see if the given snapshot id is
* present.
*
* Returns the position of the snapshot id in the array if it's found,
* or BAD_SNAP_INDEX otherwise.
*
 * Note: The snapshot array is kept sorted (by the osd) in
* reverse order, highest snapshot id first.
*/
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
struct ceph_snap_context *snapc = rbd_dev->header.snapc;
u64 *found;
found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
sizeof (snap_id), snapid_compare_reverse);
return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
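/*
 * Worked example (illustrative): with snaps[] = { 9, 7, 3 } (descending,
 * as the osd keeps it), looking up snap_id 7 yields index 1, while
 * snap_id 5 is absent and yields BAD_SNAP_INDEX.
 */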
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
u64 snap_id)
{
u32 which;
const char *snap_name;
which = rbd_dev_snap_index(rbd_dev, snap_id);
if (which == BAD_SNAP_INDEX)
return ERR_PTR(-ENOENT);
snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
if (snap_id == CEPH_NOSNAP)
return RBD_SNAP_HEAD_NAME;
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
if (rbd_dev->image_format == 1)
return rbd_dev_v1_snap_name(rbd_dev, snap_id);
return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
u64 *snap_size)
{
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
if (snap_id == CEPH_NOSNAP) {
*snap_size = rbd_dev->header.image_size;
} else if (rbd_dev->image_format == 1) {
u32 which;
which = rbd_dev_snap_index(rbd_dev, snap_id);
if (which == BAD_SNAP_INDEX)
return -ENOENT;
*snap_size = rbd_dev->header.snap_sizes[which];
} else {
u64 size = 0;
int ret;
ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
if (ret)
return ret;
*snap_size = size;
}
return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
u64 snap_id = rbd_dev->spec->snap_id;
u64 size = 0;
int ret;
ret = rbd_snap_size(rbd_dev, snap_id, &size);
if (ret)
return ret;
rbd_dev->mapping.size = size;
return 0;
}
static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
rbd_dev->mapping.size = 0;
}
static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
{
struct ceph_bio_iter it = *bio_pos;
ceph_bio_iter_advance(&it, off);
ceph_bio_iter_advance_step(&it, bytes, ({
memzero_bvec(&bv);
}));
}
static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
{
struct ceph_bvec_iter it = *bvec_pos;
ceph_bvec_iter_advance(&it, off);
ceph_bvec_iter_advance_step(&it, bytes, ({
memzero_bvec(&bv);
}));
}
/*
* Zero a range in @obj_req data buffer defined by a bio (list) or
* (private) bio_vec array.
*
* @off is relative to the start of the data buffer.
*/
static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
u32 bytes)
{
dout("%s %p data buf %u~%u\n", __func__, obj_req, off, bytes);
switch (obj_req->img_request->data_type) {
case OBJ_REQUEST_BIO:
zero_bios(&obj_req->bio_pos, off, bytes);
break;
case OBJ_REQUEST_BVECS:
case OBJ_REQUEST_OWN_BVECS:
zero_bvecs(&obj_req->bvec_pos, off, bytes);
break;
default:
BUG();
}
}
static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
rbd_assert(obj_request != NULL);
dout("%s: obj %p (was %d)\n", __func__, obj_request,
kref_read(&obj_request->kref));
kref_put(&obj_request->kref, rbd_obj_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
struct rbd_obj_request *obj_request)
{
rbd_assert(obj_request->img_request == NULL);
/* Image request now owns object's original reference */
obj_request->img_request = img_request;
dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
}
static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
struct rbd_obj_request *obj_request)
{
dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
list_del(&obj_request->ex.oe_item);
rbd_assert(obj_request->img_request == img_request);
rbd_obj_request_put(obj_request);
}
static void rbd_osd_submit(struct ceph_osd_request *osd_req)
{
struct rbd_obj_request *obj_req = osd_req->r_priv;
dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
__func__, osd_req, obj_req, obj_req->ex.oe_objno,
obj_req->ex.oe_off, obj_req->ex.oe_len);
ceph_osdc_start_request(osd_req->r_osdc, osd_req);
}
/*
 * The default/initial value for all image request flags is 0. Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_layered_set(struct rbd_img_request *img_request)
{
set_bit(IMG_REQ_LAYERED, &img_request->flags);
}
static bool img_request_layered_test(struct rbd_img_request *img_request)
{
return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
{
struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
return !obj_req->ex.oe_off &&
obj_req->ex.oe_len == rbd_dev->layout.object_size;
}
static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
{
struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
return obj_req->ex.oe_off + obj_req->ex.oe_len ==
rbd_dev->layout.object_size;
}
/*
* Must be called after rbd_obj_calc_img_extents().
*/
static void rbd_obj_set_copyup_enabled(struct rbd_obj_request *obj_req)
{
rbd_assert(obj_req->img_request->snapc);
if (obj_req->img_request->op_type == OBJ_OP_DISCARD) {
dout("%s %p objno %llu discard\n", __func__, obj_req,
obj_req->ex.oe_objno);
return;
}
if (!obj_req->num_img_extents) {
dout("%s %p objno %llu not overlapping\n", __func__, obj_req,
obj_req->ex.oe_objno);
return;
}
if (rbd_obj_is_entire(obj_req) &&
!obj_req->img_request->snapc->num_snaps) {
dout("%s %p objno %llu entire\n", __func__, obj_req,
obj_req->ex.oe_objno);
return;
}
obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
}
static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
{
return ceph_file_extents_bytes(obj_req->img_extents,
obj_req->num_img_extents);
}
static bool rbd_img_is_write(struct rbd_img_request *img_req)
{
switch (img_req->op_type) {
case OBJ_OP_READ:
return false;
case OBJ_OP_WRITE:
case OBJ_OP_DISCARD:
case OBJ_OP_ZEROOUT:
return true;
default:
BUG();
}
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
struct rbd_obj_request *obj_req = osd_req->r_priv;
int result;
dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
osd_req->r_result, obj_req);
/*
* Writes aren't allowed to return a data payload. In some
* guarded write cases (e.g. stat + zero on an empty object)
* a stat response makes it through, but we don't care.
*/
if (osd_req->r_result > 0 && rbd_img_is_write(obj_req->img_request))
result = 0;
else
result = osd_req->r_result;
rbd_obj_handle_request(obj_req, result);
}
static void rbd_osd_format_read(struct ceph_osd_request *osd_req)
{
struct rbd_obj_request *obj_request = osd_req->r_priv;
struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
struct ceph_options *opt = rbd_dev->rbd_client->client->options;
osd_req->r_flags = CEPH_OSD_FLAG_READ | opt->read_from_replica;
osd_req->r_snapid = obj_request->img_request->snap_id;
}
static void rbd_osd_format_write(struct ceph_osd_request *osd_req)
{
struct rbd_obj_request *obj_request = osd_req->r_priv;
osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
ktime_get_real_ts64(&osd_req->r_mtime);
osd_req->r_data_offset = obj_request->ex.oe_off;
}
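/*
 * Allocate an OSD request for @obj_req and link it into the object
 * request's osd_reqs list so that rbd_obj_request_destroy() can put
 * it later. The object name is built from the image's object prefix
 * and the object number, using the format 1 or format 2 template.
 */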
static struct ceph_osd_request *
__rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
struct ceph_snap_context *snapc, int num_ops)
{
struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct ceph_osd_request *req;
const char *name_format = rbd_dev->image_format == 1 ?
RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;
int ret;
req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
if (!req)
return ERR_PTR(-ENOMEM);
list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
req->r_callback = rbd_osd_req_callback;
req->r_priv = obj_req;
/*
* Data objects may be stored in a separate pool, but always in
* the same namespace in that pool as the header in its pool.
*/
ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
req->r_base_oloc.pool = rbd_dev->layout.pool_id;
ret = ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
rbd_dev->header.object_prefix,
obj_req->ex.oe_objno);
if (ret)
return ERR_PTR(ret);
return req;
}
static struct ceph_osd_request *
rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
{
rbd_assert(obj_req->img_request->snapc);
return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
num_ops);
}
static struct rbd_obj_request *rbd_obj_request_create(void)
{
struct rbd_obj_request *obj_request;
obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
if (!obj_request)
return NULL;
ceph_object_extent_init(&obj_request->ex);
INIT_LIST_HEAD(&obj_request->osd_reqs);
mutex_init(&obj_request->state_mutex);
kref_init(&obj_request->kref);
dout("%s %p\n", __func__, obj_request);
return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
struct rbd_obj_request *obj_request;
struct ceph_osd_request *osd_req;
u32 i;
obj_request = container_of(kref, struct rbd_obj_request, kref);
dout("%s: obj %p\n", __func__, obj_request);
while (!list_empty(&obj_request->osd_reqs)) {
osd_req = list_first_entry(&obj_request->osd_reqs,
struct ceph_osd_request, r_private_item);
list_del_init(&osd_req->r_private_item);
ceph_osdc_put_request(osd_req);
}
switch (obj_request->img_request->data_type) {
case OBJ_REQUEST_NODATA:
case OBJ_REQUEST_BIO:
case OBJ_REQUEST_BVECS:
break; /* Nothing to do */
case OBJ_REQUEST_OWN_BVECS:
kfree(obj_request->bvec_pos.bvecs);
break;
default:
BUG();
}
kfree(obj_request->img_extents);
if (obj_request->copyup_bvecs) {
for (i = 0; i < obj_request->copyup_bvec_count; i++) {
if (obj_request->copyup_bvecs[i].bv_page)
__free_page(obj_request->copyup_bvecs[i].bv_page);
}
kfree(obj_request->copyup_bvecs);
}
kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */
static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
rbd_dev_remove_parent(rbd_dev);
rbd_spec_put(rbd_dev->parent_spec);
rbd_dev->parent_spec = NULL;
rbd_dev->parent_overlap = 0;
}
/*
* Parent image reference counting is used to determine when an
* image's parent fields can be safely torn down--after there are no
* more in-flight requests to the parent image. When the last
* reference is dropped, cleaning them up is safe.
*/
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
int counter;
if (!rbd_dev->parent_spec)
return;
counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
if (counter > 0)
return;
/* Last reference; clean up parent data structures */
if (!counter)
rbd_dev_unparent(rbd_dev);
else
rbd_warn(rbd_dev, "parent reference underflow");
}
/*
* If an image has a non-zero parent overlap, get a reference to its
* parent.
*
* Returns true if the rbd device has a parent with a non-zero
* overlap and a reference for it was successfully taken, or
* false otherwise.
*/
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
int counter = 0;
if (!rbd_dev->parent_spec)
return false;
if (rbd_dev->parent_overlap)
counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
if (counter < 0)
rbd_warn(rbd_dev, "parent reference overflow");
return counter > 0;
}
static void rbd_img_request_init(struct rbd_img_request *img_request,
struct rbd_device *rbd_dev,
enum obj_operation_type op_type)
{
memset(img_request, 0, sizeof(*img_request));
img_request->rbd_dev = rbd_dev;
img_request->op_type = op_type;
INIT_LIST_HEAD(&img_request->lock_item);
INIT_LIST_HEAD(&img_request->object_extents);
mutex_init(&img_request->state_mutex);
}
/*
* Only snap_id is captured here, for reads. For writes, snapshot
* context is captured in rbd_img_object_requests() after exclusive
* lock is ensured to be held.
*/
static void rbd_img_capture_header(struct rbd_img_request *img_req)
{
struct rbd_device *rbd_dev = img_req->rbd_dev;
lockdep_assert_held(&rbd_dev->header_rwsem);
if (!rbd_img_is_write(img_req))
img_req->snap_id = rbd_dev->spec->snap_id;
if (rbd_dev_parent_get(rbd_dev))
img_request_layered_set(img_req);
}
static void rbd_img_request_destroy(struct rbd_img_request *img_request)
{
struct rbd_obj_request *obj_request;
struct rbd_obj_request *next_obj_request;
dout("%s: img %p\n", __func__, img_request);
WARN_ON(!list_empty(&img_request->lock_item));
for_each_obj_request_safe(img_request, obj_request, next_obj_request)
rbd_img_obj_request_del(img_request, obj_request);
if (img_request_layered_test(img_request))
rbd_dev_parent_put(img_request->rbd_dev);
if (rbd_img_is_write(img_request))
ceph_put_snap_context(img_request->snapc);
if (test_bit(IMG_REQ_CHILD, &img_request->flags))
kmem_cache_free(rbd_img_request_cache, img_request);
}
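/*
 * The object map packs one 2-bit state per object, 4 objects per
 * byte, most significant bits first. For example, objno 5 lands in
 * byte 1 (5 / 4 == 1, remainder 1) at shift (4 - 1 - 1) * 2 == 4,
 * i.e. bits 5:4 of that byte.
 */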
#define BITS_PER_OBJ 2
#define OBJS_PER_BYTE (BITS_PER_BYTE / BITS_PER_OBJ)
#define OBJ_MASK ((1 << BITS_PER_OBJ) - 1)
static void __rbd_object_map_index(struct rbd_device *rbd_dev, u64 objno,
u64 *index, u8 *shift)
{
u32 off;
rbd_assert(objno < rbd_dev->object_map_size);
*index = div_u64_rem(objno, OBJS_PER_BYTE, &off);
*shift = (OBJS_PER_BYTE - off - 1) * BITS_PER_OBJ;
}
static u8 __rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
{
u64 index;
u8 shift;
lockdep_assert_held(&rbd_dev->object_map_lock);
__rbd_object_map_index(rbd_dev, objno, &index, &shift);
return (rbd_dev->object_map[index] >> shift) & OBJ_MASK;
}
static void __rbd_object_map_set(struct rbd_device *rbd_dev, u64 objno, u8 val)
{
u64 index;
u8 shift;
u8 *p;
lockdep_assert_held(&rbd_dev->object_map_lock);
rbd_assert(!(val & ~OBJ_MASK));
__rbd_object_map_index(rbd_dev, objno, &index, &shift);
p = &rbd_dev->object_map[index];
*p = (*p & ~(OBJ_MASK << shift)) | (val << shift);
}
static u8 rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
{
u8 state;
spin_lock(&rbd_dev->object_map_lock);
state = __rbd_object_map_get(rbd_dev, objno);
spin_unlock(&rbd_dev->object_map_lock);
return state;
}
static bool use_object_map(struct rbd_device *rbd_dev)
{
/*
* An image mapped read-only can't use the object map -- it isn't
* loaded because the header lock isn't acquired. Someone else can
* write to the image and update the object map behind our back.
*
* A snapshot can't be written to, so using the object map is always
* safe.
*/
if (!rbd_is_snap(rbd_dev) && rbd_is_ro(rbd_dev))
return false;
return ((rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) &&
!(rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID));
}
static bool rbd_object_map_may_exist(struct rbd_device *rbd_dev, u64 objno)
{
u8 state;
/* fall back to default logic if object map is disabled or invalid */
if (!use_object_map(rbd_dev))
return true;
state = rbd_object_map_get(rbd_dev, objno);
return state != OBJECT_NONEXISTENT;
}
static void rbd_object_map_name(struct rbd_device *rbd_dev, u64 snap_id,
struct ceph_object_id *oid)
{
if (snap_id == CEPH_NOSNAP)
ceph_oid_printf(oid, "%s%s", RBD_OBJECT_MAP_PREFIX,
rbd_dev->spec->image_id);
else
ceph_oid_printf(oid, "%s%s.%016llx", RBD_OBJECT_MAP_PREFIX,
rbd_dev->spec->image_id, snap_id);
}
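/*
 * Take the exclusive lock on the HEAD object map. On -EBUSY the
 * current lockers are looked up and the lock is broken (at most
 * once), after which acquisition is retried. -EEXIST from
 * ceph_cls_lock() means we already own the lock and is treated as
 * success.
 */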
static int rbd_object_map_lock(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
CEPH_DEFINE_OID_ONSTACK(oid);
u8 lock_type;
char *lock_tag;
struct ceph_locker *lockers;
u32 num_lockers;
bool broke_lock = false;
int ret;
rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);
again:
ret = ceph_cls_lock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
CEPH_CLS_LOCK_EXCLUSIVE, "", "", "", 0);
if (ret != -EBUSY || broke_lock) {
if (ret == -EEXIST)
ret = 0; /* already locked by myself */
if (ret)
rbd_warn(rbd_dev, "failed to lock object map: %d", ret);
return ret;
}
ret = ceph_cls_lock_info(osdc, &oid, &rbd_dev->header_oloc,
RBD_LOCK_NAME, &lock_type, &lock_tag,
&lockers, &num_lockers);
if (ret) {
if (ret == -ENOENT)
goto again;
rbd_warn(rbd_dev, "failed to get object map lockers: %d", ret);
return ret;
}
kfree(lock_tag);
if (num_lockers == 0)
goto again;
rbd_warn(rbd_dev, "breaking object map lock owned by %s%llu",
ENTITY_NAME(lockers[0].id.name));
ret = ceph_cls_break_lock(osdc, &oid, &rbd_dev->header_oloc,
RBD_LOCK_NAME, lockers[0].id.cookie,
&lockers[0].id.name);
ceph_free_lockers(lockers, num_lockers);
if (ret) {
if (ret == -ENOENT)
goto again;
rbd_warn(rbd_dev, "failed to break object map lock: %d", ret);
return ret;
}
broke_lock = true;
goto again;
}
static void rbd_object_map_unlock(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
CEPH_DEFINE_OID_ONSTACK(oid);
int ret;
rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);
ret = ceph_cls_unlock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
"");
if (ret && ret != -ENOENT)
rbd_warn(rbd_dev, "failed to unlock object map: %d", ret);
}
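/*
 * Decode the BitVector header that precedes the object map data:
 * a u32 total length, a standard start_decoding envelope and a u64
 * element count. Everything after the count up to header_end is
 * skipped.
 */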
static int decode_object_map_header(void **p, void *end, u64 *object_map_size)
{
u8 struct_v;
u32 struct_len;
u32 header_len;
void *header_end;
int ret;
ceph_decode_32_safe(p, end, header_len, e_inval);
header_end = *p + header_len;
ret = ceph_start_decoding(p, end, 1, "BitVector header", &struct_v,
&struct_len);
if (ret)
return ret;
ceph_decode_64_safe(p, end, *object_map_size, e_inval);
*p = header_end;
return 0;
e_inval:
return -EINVAL;
}
static int __rbd_object_map_load(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
CEPH_DEFINE_OID_ONSTACK(oid);
struct page **pages;
void *p, *end;
size_t reply_len;
u64 num_objects;
u64 object_map_bytes;
u64 object_map_size;
int num_pages;
int ret;
rbd_assert(!rbd_dev->object_map && !rbd_dev->object_map_size);
num_objects = ceph_get_num_objects(&rbd_dev->layout,
rbd_dev->mapping.size);
object_map_bytes = DIV_ROUND_UP_ULL(num_objects * BITS_PER_OBJ,
BITS_PER_BYTE);
num_pages = calc_pages_for(0, object_map_bytes) + 1;
pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
if (IS_ERR(pages))
return PTR_ERR(pages);
reply_len = num_pages * PAGE_SIZE;
rbd_object_map_name(rbd_dev, rbd_dev->spec->snap_id, &oid);
ret = ceph_osdc_call(osdc, &oid, &rbd_dev->header_oloc,
"rbd", "object_map_load", CEPH_OSD_FLAG_READ,
NULL, 0, pages, &reply_len);
if (ret)
goto out;
p = page_address(pages[0]);
end = p + min(reply_len, (size_t)PAGE_SIZE);
ret = decode_object_map_header(&p, end, &object_map_size);
if (ret)
goto out;
if (object_map_size != num_objects) {
rbd_warn(rbd_dev, "object map size mismatch: %llu vs %llu",
object_map_size, num_objects);
ret = -EINVAL;
goto out;
}
if (offset_in_page(p) + object_map_bytes > reply_len) {
ret = -EINVAL;
goto out;
}
rbd_dev->object_map = kvmalloc(object_map_bytes, GFP_KERNEL);
if (!rbd_dev->object_map) {
ret = -ENOMEM;
goto out;
}
rbd_dev->object_map_size = object_map_size;
ceph_copy_from_page_vector(pages, rbd_dev->object_map,
offset_in_page(p), object_map_bytes);
out:
ceph_release_page_vector(pages, num_pages);
return ret;
}
static void rbd_object_map_free(struct rbd_device *rbd_dev)
{
kvfree(rbd_dev->object_map);
rbd_dev->object_map = NULL;
rbd_dev->object_map_size = 0;
}
static int rbd_object_map_load(struct rbd_device *rbd_dev)
{
int ret;
ret = __rbd_object_map_load(rbd_dev);
if (ret)
return ret;
ret = rbd_dev_v2_get_flags(rbd_dev);
if (ret) {
rbd_object_map_free(rbd_dev);
return ret;
}
if (rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID)
rbd_warn(rbd_dev, "object map is invalid");
return 0;
}
static int rbd_object_map_open(struct rbd_device *rbd_dev)
{
int ret;
ret = rbd_object_map_lock(rbd_dev);
if (ret)
return ret;
ret = rbd_object_map_load(rbd_dev);
if (ret) {
rbd_object_map_unlock(rbd_dev);
return ret;
}
return 0;
}
static void rbd_object_map_close(struct rbd_device *rbd_dev)
{
rbd_object_map_free(rbd_dev);
rbd_object_map_unlock(rbd_dev);
}
/*
* This function needs snap_id (or more precisely just something to
* distinguish between HEAD and snapshot object maps), new_state and
* current_state that were passed to rbd_object_map_update().
*
* To avoid allocating and stashing a context we piggyback on the OSD
* request. A HEAD update has two ops (assert_locked + the update
* itself), a snapshot update has just one. For new_state and
* current_state we decode our own object_map_update op, encoded in
* rbd_cls_object_map_update().
*/
static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
struct ceph_osd_request *osd_req)
{
struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
struct ceph_osd_data *osd_data;
u64 objno;
u8 state, new_state, current_state;
bool has_current_state;
void *p;
if (osd_req->r_result)
return osd_req->r_result;
/*
* Nothing to do for a snapshot object map.
*/
if (osd_req->r_num_ops == 1)
return 0;
/*
* Update in-memory HEAD object map.
*/
rbd_assert(osd_req->r_num_ops == 2);
osd_data = osd_req_op_data(osd_req, 1, cls, request_data);
rbd_assert(osd_data->type == CEPH_OSD_DATA_TYPE_PAGES);
p = page_address(osd_data->pages[0]);
objno = ceph_decode_64(&p);
rbd_assert(objno == obj_req->ex.oe_objno);
rbd_assert(ceph_decode_64(&p) == objno + 1);
new_state = ceph_decode_8(&p);
has_current_state = ceph_decode_8(&p);
if (has_current_state)
current_state = ceph_decode_8(&p);
spin_lock(&rbd_dev->object_map_lock);
state = __rbd_object_map_get(rbd_dev, objno);
if (!has_current_state || current_state == state ||
(current_state == OBJECT_EXISTS && state == OBJECT_EXISTS_CLEAN))
__rbd_object_map_set(rbd_dev, objno, new_state);
spin_unlock(&rbd_dev->object_map_lock);
return 0;
}
static void rbd_object_map_callback(struct ceph_osd_request *osd_req)
{
struct rbd_obj_request *obj_req = osd_req->r_priv;
int result;
dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
osd_req->r_result, obj_req);
result = rbd_object_map_update_finish(obj_req, osd_req);
rbd_obj_handle_request(obj_req, result);
}
static bool update_needed(struct rbd_device *rbd_dev, u64 objno, u8 new_state)
{
u8 state = rbd_object_map_get(rbd_dev, objno);
if (state == new_state ||
(new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) ||
(new_state == OBJECT_NONEXISTENT && state != OBJECT_PENDING))
return false;
return true;
}
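/*
 * Encode the object_map_update call payload: a [objno, objno + 1)
 * object range, the new state and an optional current state (a
 * presence byte followed by the state itself).
 */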
static int rbd_cls_object_map_update(struct ceph_osd_request *req,
int which, u64 objno, u8 new_state,
const u8 *current_state)
{
struct page **pages;
void *p, *start;
int ret;
ret = osd_req_op_cls_init(req, which, "rbd", "object_map_update");
if (ret)
return ret;
pages = ceph_alloc_page_vector(1, GFP_NOIO);
if (IS_ERR(pages))
return PTR_ERR(pages);
p = start = page_address(pages[0]);
ceph_encode_64(&p, objno);
ceph_encode_64(&p, objno + 1);
ceph_encode_8(&p, new_state);
if (current_state) {
ceph_encode_8(&p, 1);
ceph_encode_8(&p, *current_state);
} else {
ceph_encode_8(&p, 0);
}
osd_req_op_cls_request_data_pages(req, which, pages, p - start, 0,
false, true);
return 0;
}
/*
* Return:
* 0 - object map update sent
* 1 - object map update isn't needed
* <0 - error
*/
static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id,
u8 new_state, const u8 *current_state)
{
struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct ceph_osd_request *req;
int num_ops = 1;
int which = 0;
int ret;
if (snap_id == CEPH_NOSNAP) {
if (!update_needed(rbd_dev, obj_req->ex.oe_objno, new_state))
return 1;
num_ops++; /* assert_locked */
}
req = ceph_osdc_alloc_request(osdc, NULL, num_ops, false, GFP_NOIO);
if (!req)
return -ENOMEM;
list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
req->r_callback = rbd_object_map_callback;
req->r_priv = obj_req;
rbd_object_map_name(rbd_dev, snap_id, &req->r_base_oid);
ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
req->r_flags = CEPH_OSD_FLAG_WRITE;
ktime_get_real_ts64(&req->r_mtime);
if (snap_id == CEPH_NOSNAP) {
/*
* Protect against possible race conditions during lock
* ownership transitions.
*/
ret = ceph_cls_assert_locked(req, which++, RBD_LOCK_NAME,
CEPH_CLS_LOCK_EXCLUSIVE, "", "");
if (ret)
return ret;
}
ret = rbd_cls_object_map_update(req, which, obj_req->ex.oe_objno,
new_state, current_state);
if (ret)
return ret;
ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
if (ret)
return ret;
ceph_osdc_start_request(osdc, req);
return 0;
}
static void prune_extents(struct ceph_file_extent *img_extents,
u32 *num_img_extents, u64 overlap)
{
u32 cnt = *num_img_extents;
/* drop extents completely beyond the overlap */
while (cnt && img_extents[cnt - 1].fe_off >= overlap)
cnt--;
if (cnt) {
struct ceph_file_extent *ex = &img_extents[cnt - 1];
/* trim final overlapping extent */
if (ex->fe_off + ex->fe_len > overlap)
ex->fe_len = overlap - ex->fe_off;
}
*num_img_extents = cnt;
}
/*
* Determine the byte range(s) covered by either just the object extent
* or the entire object in the parent image.
*/
static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
bool entire)
{
struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
int ret;
if (!rbd_dev->parent_overlap)
return 0;
ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
entire ? 0 : obj_req->ex.oe_off,
entire ? rbd_dev->layout.object_size :
obj_req->ex.oe_len,
&obj_req->img_extents,
&obj_req->num_img_extents);
if (ret)
return ret;
prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
rbd_dev->parent_overlap);
return 0;
}
static void rbd_osd_setup_data(struct ceph_osd_request *osd_req, int which)
{
struct rbd_obj_request *obj_req = osd_req->r_priv;
switch (obj_req->img_request->data_type) {
case OBJ_REQUEST_BIO:
osd_req_op_extent_osd_data_bio(osd_req, which,
&obj_req->bio_pos,
obj_req->ex.oe_len);
break;
case OBJ_REQUEST_BVECS:
case OBJ_REQUEST_OWN_BVECS:
rbd_assert(obj_req->bvec_pos.iter.bi_size ==
obj_req->ex.oe_len);
rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
osd_req_op_extent_osd_data_bvec_pos(osd_req, which,
&obj_req->bvec_pos);
break;
default:
BUG();
}
}
static int rbd_osd_setup_stat(struct ceph_osd_request *osd_req, int which)
{
struct page **pages;
/*
* The response data for a STAT call consists of:
* le64 length;
* struct {
* le32 tv_sec;
* le32 tv_nsec;
* } mtime;
*/
pages = ceph_alloc_page_vector(1, GFP_NOIO);
if (IS_ERR(pages))
return PTR_ERR(pages);
osd_req_op_init(osd_req, which, CEPH_OSD_OP_STAT, 0);
osd_req_op_raw_data_in_pages(osd_req, which, pages,
8 + sizeof(struct ceph_timespec),
0, false, true);
return 0;
}
static int rbd_osd_setup_copyup(struct ceph_osd_request *osd_req, int which,
u32 bytes)
{
struct rbd_obj_request *obj_req = osd_req->r_priv;
int ret;
ret = osd_req_op_cls_init(osd_req, which, "rbd", "copyup");
if (ret)
return ret;
osd_req_op_cls_request_data_bvecs(osd_req, which, obj_req->copyup_bvecs,
obj_req->copyup_bvec_count, bytes);
return 0;
}
static int rbd_obj_init_read(struct rbd_obj_request *obj_req)
{
obj_req->read_state = RBD_OBJ_READ_START;
return 0;
}
static void __rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
int which)
{
struct rbd_obj_request *obj_req = osd_req->r_priv;
struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
u16 opcode;
if (!use_object_map(rbd_dev) ||
!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) {
osd_req_op_alloc_hint_init(osd_req, which++,
rbd_dev->layout.object_size,
rbd_dev->layout.object_size,
rbd_dev->opts->alloc_hint_flags);
}
if (rbd_obj_is_entire(obj_req))
opcode = CEPH_OSD_OP_WRITEFULL;
else
opcode = CEPH_OSD_OP_WRITE;
osd_req_op_extent_init(osd_req, which, opcode,
obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
rbd_osd_setup_data(osd_req, which);
}
static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
{
int ret;
/* reverse map the entire object onto the parent */
ret = rbd_obj_calc_img_extents(obj_req, true);
if (ret)
return ret;
obj_req->write_state = RBD_OBJ_WRITE_START;
return 0;
}
static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req)
{
return rbd_obj_is_tail(obj_req) ? CEPH_OSD_OP_TRUNCATE :
CEPH_OSD_OP_ZERO;
}
static void __rbd_osd_setup_discard_ops(struct ceph_osd_request *osd_req,
int which)
{
struct rbd_obj_request *obj_req = osd_req->r_priv;
if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) {
rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
osd_req_op_init(osd_req, which, CEPH_OSD_OP_DELETE, 0);
} else {
osd_req_op_extent_init(osd_req, which,
truncate_or_zero_opcode(obj_req),
obj_req->ex.oe_off, obj_req->ex.oe_len,
0, 0);
}
}
static int rbd_obj_init_discard(struct rbd_obj_request *obj_req)
{
struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
u64 off, next_off;
int ret;
/*
* Align the range to alloc_size boundary and punt on discards
* that are too small to free up any space.
*
* alloc_size == object_size && is_tail() is a special case for
* filestore with filestore_punch_hole = false, needed to allow
* truncate (in addition to delete).
*/
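/*
 * For example, with a 64K alloc_size, a discard of 100K~200K
 * within an object is trimmed to 128K~128K: off rounds up to
 * 128K and next_off rounds down to 256K.
 */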
if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size ||
!rbd_obj_is_tail(obj_req)) {
off = round_up(obj_req->ex.oe_off, rbd_dev->opts->alloc_size);
next_off = round_down(obj_req->ex.oe_off + obj_req->ex.oe_len,
rbd_dev->opts->alloc_size);
if (off >= next_off)
return 1;
dout("%s %p %llu~%llu -> %llu~%llu\n", __func__,
obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len,
off, next_off - off);
obj_req->ex.oe_off = off;
obj_req->ex.oe_len = next_off - off;
}
/* reverse map the entire object onto the parent */
ret = rbd_obj_calc_img_extents(obj_req, true);
if (ret)
return ret;
obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents)
obj_req->flags |= RBD_OBJ_FLAG_DELETION;
obj_req->write_state = RBD_OBJ_WRITE_START;
return 0;
}
static void __rbd_osd_setup_zeroout_ops(struct ceph_osd_request *osd_req,
int which)
{
struct rbd_obj_request *obj_req = osd_req->r_priv;
u16 opcode;
if (rbd_obj_is_entire(obj_req)) {
if (obj_req->num_img_extents) {
if (!(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
osd_req_op_init(osd_req, which++,
CEPH_OSD_OP_CREATE, 0);
opcode = CEPH_OSD_OP_TRUNCATE;
} else {
rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
osd_req_op_init(osd_req, which++,
CEPH_OSD_OP_DELETE, 0);
opcode = 0;
}
} else {
opcode = truncate_or_zero_opcode(obj_req);
}
if (opcode)
osd_req_op_extent_init(osd_req, which, opcode,
obj_req->ex.oe_off, obj_req->ex.oe_len,
0, 0);
}
static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
{
int ret;
/* reverse map the entire object onto the parent */
ret = rbd_obj_calc_img_extents(obj_req, true);
if (ret)
return ret;
if (!obj_req->num_img_extents) {
obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
if (rbd_obj_is_entire(obj_req))
obj_req->flags |= RBD_OBJ_FLAG_DELETION;
}
obj_req->write_state = RBD_OBJ_WRITE_START;
return 0;
}
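/*
 * The op count returned here must match what rbd_osd_setup_write_ops()
 * actually emits for the same object request, since the OSD request
 * is sized from it.
 */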
static int count_write_ops(struct rbd_obj_request *obj_req)
{
struct rbd_img_request *img_req = obj_req->img_request;
switch (img_req->op_type) {
case OBJ_OP_WRITE:
if (!use_object_map(img_req->rbd_dev) ||
!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST))
return 2; /* setallochint + write/writefull */
return 1; /* write/writefull */
case OBJ_OP_DISCARD:
return 1; /* delete/truncate/zero */
case OBJ_OP_ZEROOUT:
if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents &&
!(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
return 2; /* create + truncate */
return 1; /* delete/truncate/zero */
default:
BUG();
}
}
static void rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
int which)
{
struct rbd_obj_request *obj_req = osd_req->r_priv;
switch (obj_req->img_request->op_type) {
case OBJ_OP_WRITE:
__rbd_osd_setup_write_ops(osd_req, which);
break;
case OBJ_OP_DISCARD:
__rbd_osd_setup_discard_ops(osd_req, which);
break;
case OBJ_OP_ZEROOUT:
__rbd_osd_setup_zeroout_ops(osd_req, which);
break;
default:
BUG();
}
}
/*
* Prune the list of object requests (adjust offset and/or length, drop
* redundant requests). Prepare object request state machines and image
* request state machine for execution.
*/
static int __rbd_img_fill_request(struct rbd_img_request *img_req)
{
struct rbd_obj_request *obj_req, *next_obj_req;
int ret;
for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
switch (img_req->op_type) {
case OBJ_OP_READ:
ret = rbd_obj_init_read(obj_req);
break;
case OBJ_OP_WRITE:
ret = rbd_obj_init_write(obj_req);
break;
case OBJ_OP_DISCARD:
ret = rbd_obj_init_discard(obj_req);
break;
case OBJ_OP_ZEROOUT:
ret = rbd_obj_init_zeroout(obj_req);
break;
default:
BUG();
}
if (ret < 0)
return ret;
if (ret > 0) {
rbd_img_obj_request_del(img_req, obj_req);
continue;
}
}
img_req->state = RBD_IMG_START;
return 0;
}
union rbd_img_fill_iter {
struct ceph_bio_iter bio_iter;
struct ceph_bvec_iter bvec_iter;
};
struct rbd_img_fill_ctx {
enum obj_request_type pos_type;
union rbd_img_fill_iter *pos;
union rbd_img_fill_iter iter;
ceph_object_extent_fn_t set_pos_fn;
ceph_object_extent_fn_t count_fn;
ceph_object_extent_fn_t copy_fn;
};
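/*
 * A fill context bundles the data descriptor type with the callbacks
 * used while mapping image extents: ->set_pos_fn records each object
 * request's starting position on the nocopy path, while ->count_fn
 * and ->copy_fn implement the two-pass copy path for fancy layouts.
 */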
static struct ceph_object_extent *alloc_object_extent(void *arg)
{
struct rbd_img_request *img_req = arg;
struct rbd_obj_request *obj_req;
obj_req = rbd_obj_request_create();
if (!obj_req)
return NULL;
rbd_img_obj_request_add(img_req, obj_req);
return &obj_req->ex;
}
/*
* While su != os && sc == 1 is technically not fancy (it's the same
* layout as su == os && sc == 1), we can't use the nocopy path for it
* because ->set_pos_fn() should be called only once per object.
* ceph_file_to_extents() invokes action_fn once per stripe unit, so
* treat su != os && sc == 1 as fancy.
*/
static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
{
return l->stripe_unit != l->object_size;
}
static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
struct ceph_file_extent *img_extents,
u32 num_img_extents,
struct rbd_img_fill_ctx *fctx)
{
u32 i;
int ret;
img_req->data_type = fctx->pos_type;
/*
* Create object requests and set each object request's starting
* position in the provided bio (list) or bio_vec array.
*/
fctx->iter = *fctx->pos;
for (i = 0; i < num_img_extents; i++) {
ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
img_extents[i].fe_off,
img_extents[i].fe_len,
&img_req->object_extents,
alloc_object_extent, img_req,
fctx->set_pos_fn, &fctx->iter);
if (ret)
return ret;
}
return __rbd_img_fill_request(img_req);
}
/*
* Map a list of image extents to a list of object extents, create the
* corresponding object requests (normally each to a different object,
* but not always) and add them to @img_req. For each object request,
* set up its data descriptor to point to the corresponding chunk(s) of
* @fctx->pos data buffer.
*
* Because ceph_file_to_extents() will merge adjacent object extents
* together, each object request's data descriptor may point to multiple
* different chunks of @fctx->pos data buffer.
*
* @fctx->pos data buffer is assumed to be large enough.
*/
static int rbd_img_fill_request(struct rbd_img_request *img_req,
struct ceph_file_extent *img_extents,
u32 num_img_extents,
struct rbd_img_fill_ctx *fctx)
{
struct rbd_device *rbd_dev = img_req->rbd_dev;
struct rbd_obj_request *obj_req;
u32 i;
int ret;
if (fctx->pos_type == OBJ_REQUEST_NODATA ||
!rbd_layout_is_fancy(&rbd_dev->layout))
return rbd_img_fill_request_nocopy(img_req, img_extents,
num_img_extents, fctx);
img_req->data_type = OBJ_REQUEST_OWN_BVECS;
/*
* Create object requests and determine ->bvec_count for each object
* request. Note that ->bvec_count sum over all object requests may
* be greater than the number of bio_vecs in the provided bio (list)
* or bio_vec array because when mapped, those bio_vecs can straddle
* stripe unit boundaries.
*/
fctx->iter = *fctx->pos;
for (i = 0; i < num_img_extents; i++) {
ret = ceph_file_to_extents(&rbd_dev->layout,
img_extents[i].fe_off,
img_extents[i].fe_len,
&img_req->object_extents,
alloc_object_extent, img_req,
fctx->count_fn, &fctx->iter);
if (ret)
return ret;
}
for_each_obj_request(img_req, obj_req) {
obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
sizeof(*obj_req->bvec_pos.bvecs),
GFP_NOIO);
if (!obj_req->bvec_pos.bvecs)
return -ENOMEM;
}
/*
* Fill in each object request's private bio_vec array, splitting and
* rearranging the provided bio_vecs in stripe unit chunks as needed.
*/
fctx->iter = *fctx->pos;
for (i = 0; i < num_img_extents; i++) {
ret = ceph_iterate_extents(&rbd_dev->layout,
img_extents[i].fe_off,
img_extents[i].fe_len,
&img_req->object_extents,
fctx->copy_fn, &fctx->iter);
if (ret)
return ret;
}
return __rbd_img_fill_request(img_req);
}
static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
u64 off, u64 len)
{
struct ceph_file_extent ex = { off, len };
union rbd_img_fill_iter dummy = {};
struct rbd_img_fill_ctx fctx = {
.pos_type = OBJ_REQUEST_NODATA,
.pos = &dummy,
};
return rbd_img_fill_request(img_req, &ex, 1, &fctx);
}
static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
struct rbd_obj_request *obj_req =
container_of(ex, struct rbd_obj_request, ex);
struct ceph_bio_iter *it = arg;
dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
obj_req->bio_pos = *it;
ceph_bio_iter_advance(it, bytes);
}
static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
struct rbd_obj_request *obj_req =
container_of(ex, struct rbd_obj_request, ex);
struct ceph_bio_iter *it = arg;
dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
ceph_bio_iter_advance_step(it, bytes, ({
obj_req->bvec_count++;
}));
}
static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
struct rbd_obj_request *obj_req =
container_of(ex, struct rbd_obj_request, ex);
struct ceph_bio_iter *it = arg;
dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
ceph_bio_iter_advance_step(it, bytes, ({
obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
obj_req->bvec_pos.iter.bi_size += bv.bv_len;
}));
}
static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
struct ceph_file_extent *img_extents,
u32 num_img_extents,
struct ceph_bio_iter *bio_pos)
{
struct rbd_img_fill_ctx fctx = {
.pos_type = OBJ_REQUEST_BIO,
.pos = (union rbd_img_fill_iter *)bio_pos,
.set_pos_fn = set_bio_pos,
.count_fn = count_bio_bvecs,
.copy_fn = copy_bio_bvecs,
};
return rbd_img_fill_request(img_req, img_extents, num_img_extents,
&fctx);
}
static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
u64 off, u64 len, struct bio *bio)
{
struct ceph_file_extent ex = { off, len };
struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };
return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
}
static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
struct rbd_obj_request *obj_req =
container_of(ex, struct rbd_obj_request, ex);
struct ceph_bvec_iter *it = arg;
obj_req->bvec_pos = *it;
ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
ceph_bvec_iter_advance(it, bytes);
}
static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
struct rbd_obj_request *obj_req =
container_of(ex, struct rbd_obj_request, ex);
struct ceph_bvec_iter *it = arg;
ceph_bvec_iter_advance_step(it, bytes, ({
obj_req->bvec_count++;
}));
}
static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
struct rbd_obj_request *obj_req =
container_of(ex, struct rbd_obj_request, ex);
struct ceph_bvec_iter *it = arg;
ceph_bvec_iter_advance_step(it, bytes, ({
obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
obj_req->bvec_pos.iter.bi_size += bv.bv_len;
}));
}
static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
struct ceph_file_extent *img_extents,
u32 num_img_extents,
struct ceph_bvec_iter *bvec_pos)
{
struct rbd_img_fill_ctx fctx = {
.pos_type = OBJ_REQUEST_BVECS,
.pos = (union rbd_img_fill_iter *)bvec_pos,
.set_pos_fn = set_bvec_pos,
.count_fn = count_bvecs,
.copy_fn = copy_bvecs,
};
return rbd_img_fill_request(img_req, img_extents, num_img_extents,
&fctx);
}
static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
struct ceph_file_extent *img_extents,
u32 num_img_extents,
struct bio_vec *bvecs)
{
struct ceph_bvec_iter it = {
.bvecs = bvecs,
.iter = { .bi_size = ceph_file_extents_bytes(img_extents,
num_img_extents) },
};
return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
&it);
}
static void rbd_img_handle_request_work(struct work_struct *work)
{
struct rbd_img_request *img_req =
container_of(work, struct rbd_img_request, work);
rbd_img_handle_request(img_req, img_req->work_result);
}
static void rbd_img_schedule(struct rbd_img_request *img_req, int result)
{
INIT_WORK(&img_req->work, rbd_img_handle_request_work);
img_req->work_result = result;
queue_work(rbd_wq, &img_req->work);
}
static bool rbd_obj_may_exist(struct rbd_obj_request *obj_req)
{
struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) {
obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
return true;
}
dout("%s %p objno %llu assuming dne\n", __func__, obj_req,
obj_req->ex.oe_objno);
return false;
}
static int rbd_obj_read_object(struct rbd_obj_request *obj_req)
{
struct ceph_osd_request *osd_req;
int ret;
osd_req = __rbd_obj_add_osd_request(obj_req, NULL, 1);
if (IS_ERR(osd_req))
return PTR_ERR(osd_req);
osd_req_op_extent_init(osd_req, 0, CEPH_OSD_OP_READ,
obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
rbd_osd_setup_data(osd_req, 0);
rbd_osd_format_read(osd_req);
ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
if (ret)
return ret;
rbd_osd_submit(osd_req);
return 0;
}
static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
{
struct rbd_img_request *img_req = obj_req->img_request;
struct rbd_device *parent = img_req->rbd_dev->parent;
struct rbd_img_request *child_img_req;
int ret;
child_img_req = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
if (!child_img_req)
return -ENOMEM;
rbd_img_request_init(child_img_req, parent, OBJ_OP_READ);
__set_bit(IMG_REQ_CHILD, &child_img_req->flags);
child_img_req->obj_request = obj_req;
down_read(&parent->header_rwsem);
rbd_img_capture_header(child_img_req);
up_read(&parent->header_rwsem);
dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req,
obj_req);
if (!rbd_img_is_write(img_req)) {
switch (img_req->data_type) {
case OBJ_REQUEST_BIO:
ret = __rbd_img_fill_from_bio(child_img_req,
obj_req->img_extents,
obj_req->num_img_extents,
&obj_req->bio_pos);
break;
case OBJ_REQUEST_BVECS:
case OBJ_REQUEST_OWN_BVECS:
ret = __rbd_img_fill_from_bvecs(child_img_req,
obj_req->img_extents,
obj_req->num_img_extents,
&obj_req->bvec_pos);
break;
default:
BUG();
}
} else {
ret = rbd_img_fill_from_bvecs(child_img_req,
obj_req->img_extents,
obj_req->num_img_extents,
obj_req->copyup_bvecs);
}
if (ret) {
rbd_img_request_destroy(child_img_req);
return ret;
}
/* avoid parent chain recursion */
rbd_img_schedule(child_img_req, 0);
return 0;
}
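/*
 * Read state machine: START issues the object read (or short-circuits
 * with -ENOENT if the object map says the object doesn't exist),
 * OBJECT handles the result (possibly redirecting to the parent) and
 * PARENT zero-fills whatever lies beyond the parent overlap. Returns
 * true when the object request is done.
 */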
static bool rbd_obj_advance_read(struct rbd_obj_request *obj_req, int *result)
{
struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
int ret;
again:
switch (obj_req->read_state) {
case RBD_OBJ_READ_START:
rbd_assert(!*result);
if (!rbd_obj_may_exist(obj_req)) {
*result = -ENOENT;
obj_req->read_state = RBD_OBJ_READ_OBJECT;
goto again;
}
ret = rbd_obj_read_object(obj_req);
if (ret) {
*result = ret;
return true;
}
obj_req->read_state = RBD_OBJ_READ_OBJECT;
return false;
case RBD_OBJ_READ_OBJECT:
if (*result == -ENOENT && rbd_dev->parent_overlap) {
/* reverse map this object extent onto the parent */
ret = rbd_obj_calc_img_extents(obj_req, false);
if (ret) {
*result = ret;
return true;
}
if (obj_req->num_img_extents) {
ret = rbd_obj_read_from_parent(obj_req);
if (ret) {
*result = ret;
return true;
}
obj_req->read_state = RBD_OBJ_READ_PARENT;
return false;
}
}
/*
* -ENOENT means a hole in the image -- zero-fill the entire
* length of the request. A short read also implies zero-fill
* to the end of the request.
*/
if (*result == -ENOENT) {
rbd_obj_zero_range(obj_req, 0, obj_req->ex.oe_len);
*result = 0;
} else if (*result >= 0) {
if (*result < obj_req->ex.oe_len)
rbd_obj_zero_range(obj_req, *result,
obj_req->ex.oe_len - *result);
else
rbd_assert(*result == obj_req->ex.oe_len);
*result = 0;
}
return true;
case RBD_OBJ_READ_PARENT:
/*
* The parent image is read only up to the overlap -- zero-fill
* from the overlap to the end of the request.
*/
if (!*result) {
u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);
if (obj_overlap < obj_req->ex.oe_len)
rbd_obj_zero_range(obj_req, obj_overlap,
obj_req->ex.oe_len - obj_overlap);
}
return true;
default:
BUG();
}
}
static bool rbd_obj_write_is_noop(struct rbd_obj_request *obj_req)
{
struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno))
obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
if (!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST) &&
(obj_req->flags & RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT)) {
dout("%s %p noop for nonexistent\n", __func__, obj_req);
return true;
}
return false;
}
/*
* Return:
* 0 - object map update sent
* 1 - object map update isn't needed
* <0 - error
*/
static int rbd_obj_write_pre_object_map(struct rbd_obj_request *obj_req)
{
struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
u8 new_state;
if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
return 1;
if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
new_state = OBJECT_PENDING;
else
new_state = OBJECT_EXISTS;
return rbd_object_map_update(obj_req, CEPH_NOSNAP, new_state, NULL);
}
static int rbd_obj_write_object(struct rbd_obj_request *obj_req)
{
struct ceph_osd_request *osd_req;
int num_ops = count_write_ops(obj_req);
int which = 0;
int ret;
if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED)
num_ops++; /* stat */
osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
if (IS_ERR(osd_req))
return PTR_ERR(osd_req);
if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
ret = rbd_osd_setup_stat(osd_req, which++);
if (ret)
return ret;
}
rbd_osd_setup_write_ops(osd_req, which);
rbd_osd_format_write(osd_req);
ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
if (ret)
return ret;
rbd_osd_submit(osd_req);
return 0;
}
/*
* copyup_bvecs pages are never highmem pages
*/
static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
{
struct ceph_bvec_iter it = {
.bvecs = bvecs,
.iter = { .bi_size = bytes },
};
ceph_bvec_iter_advance_step(&it, bytes, ({
if (memchr_inv(bvec_virt(&bv), 0, bv.bv_len))
return false;
}));
return true;
}
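/*
 * Sentinel passed as @bytes to request that only the modification
 * ops be (re)sent, without any copyup data.
 */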
#define MODS_ONLY U32_MAX
static int rbd_obj_copyup_empty_snapc(struct rbd_obj_request *obj_req,
u32 bytes)
{
struct ceph_osd_request *osd_req;
int ret;
dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
rbd_assert(bytes > 0 && bytes != MODS_ONLY);
osd_req = __rbd_obj_add_osd_request(obj_req, &rbd_empty_snapc, 1);
if (IS_ERR(osd_req))
return PTR_ERR(osd_req);
ret = rbd_osd_setup_copyup(osd_req, 0, bytes);
if (ret)
return ret;
rbd_osd_format_write(osd_req);
ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
if (ret)
return ret;
rbd_osd_submit(osd_req);
return 0;
}
static int rbd_obj_copyup_current_snapc(struct rbd_obj_request *obj_req,
u32 bytes)
{
struct ceph_osd_request *osd_req;
int num_ops = count_write_ops(obj_req);
int which = 0;
int ret;
dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
if (bytes != MODS_ONLY)
num_ops++; /* copyup */
osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
if (IS_ERR(osd_req))
return PTR_ERR(osd_req);
if (bytes != MODS_ONLY) {
ret = rbd_osd_setup_copyup(osd_req, which++, bytes);
if (ret)
return ret;
}
rbd_osd_setup_write_ops(osd_req, which);
rbd_osd_format_write(osd_req);
ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
if (ret)
return ret;
rbd_osd_submit(osd_req);
return 0;
}
static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
{
u32 i;
rbd_assert(!obj_req->copyup_bvecs);
obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
sizeof(*obj_req->copyup_bvecs),
GFP_NOIO);
if (!obj_req->copyup_bvecs)
return -ENOMEM;
for (i = 0; i < obj_req->copyup_bvec_count; i++) {
unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);
struct page *page = alloc_page(GFP_NOIO);
if (!page)
return -ENOMEM;
bvec_set_page(&obj_req->copyup_bvecs[i], page, len, 0);
obj_overlap -= len;
}
rbd_assert(!obj_overlap);
return 0;
}
/*
* The target object doesn't exist. Read the data for the entire
* target object up to the overlap point (if any) from the parent,
* so we can use it for a copyup.
*/
static int rbd_obj_copyup_read_parent(struct rbd_obj_request *obj_req)
{
struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
int ret;
rbd_assert(obj_req->num_img_extents);
prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
rbd_dev->parent_overlap);
if (!obj_req->num_img_extents) {
/*
* The overlap has become 0 (most likely because the
* image has been flattened). Re-submit the original write
* request -- pass MODS_ONLY since the copyup isn't needed
* anymore.
*/
return rbd_obj_copyup_current_snapc(obj_req, MODS_ONLY);
}
ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
if (ret)
return ret;
return rbd_obj_read_from_parent(obj_req);
}
static void rbd_obj_copyup_object_maps(struct rbd_obj_request *obj_req)
{
struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
struct ceph_snap_context *snapc = obj_req->img_request->snapc;
u8 new_state;
u32 i;
int ret;
rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
return;
if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
return;
for (i = 0; i < snapc->num_snaps; i++) {
if ((rbd_dev->header.features & RBD_FEATURE_FAST_DIFF) &&
i + 1 < snapc->num_snaps)
new_state = OBJECT_EXISTS_CLEAN;
else
new_state = OBJECT_EXISTS;
ret = rbd_object_map_update(obj_req, snapc->snaps[i],
new_state, NULL);
if (ret < 0) {
obj_req->pending.result = ret;
return;
}
rbd_assert(!ret);
obj_req->pending.num_pending++;
}
}
static void rbd_obj_copyup_write_object(struct rbd_obj_request *obj_req)
{
u32 bytes = rbd_obj_img_extents_bytes(obj_req);
int ret;
rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
/*
* Only send non-zero copyup data to save some I/O and network
* bandwidth -- zero copyup data is equivalent to the object not
* existing.
*/
if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
bytes = 0;
if (obj_req->img_request->snapc->num_snaps && bytes > 0) {
/*
* Send a copyup request with an empty snapshot context to
* deep-copyup the object through all existing snapshots.
* A second request with the current snapshot context will be
* sent for the actual modification.
*/
ret = rbd_obj_copyup_empty_snapc(obj_req, bytes);
if (ret) {
obj_req->pending.result = ret;
return;
}
obj_req->pending.num_pending++;
bytes = MODS_ONLY;
}
ret = rbd_obj_copyup_current_snapc(obj_req, bytes);
if (ret) {
obj_req->pending.result = ret;
return;
}
obj_req->pending.num_pending++;
}
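/*
 * Copyup state machine: read the relevant range from the parent,
 * update the object map for existing snapshots and then write the
 * object, issuing a deep-copyup (empty snapshot context) first if
 * snapshots exist and the data isn't all zeros.
 */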
static bool rbd_obj_advance_copyup(struct rbd_obj_request *obj_req, int *result)
{
struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
int ret;
again:
switch (obj_req->copyup_state) {
case RBD_OBJ_COPYUP_START:
rbd_assert(!*result);
ret = rbd_obj_copyup_read_parent(obj_req);
if (ret) {
*result = ret;
return true;
}
if (obj_req->num_img_extents)
obj_req->copyup_state = RBD_OBJ_COPYUP_READ_PARENT;
else
obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
return false;
case RBD_OBJ_COPYUP_READ_PARENT:
if (*result)
return true;
if (is_zero_bvecs(obj_req->copyup_bvecs,
rbd_obj_img_extents_bytes(obj_req))) {
dout("%s %p detected zeros\n", __func__, obj_req);
obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ZEROS;
}
rbd_obj_copyup_object_maps(obj_req);
if (!obj_req->pending.num_pending) {
*result = obj_req->pending.result;
obj_req->copyup_state = RBD_OBJ_COPYUP_OBJECT_MAPS;
goto again;
}
obj_req->copyup_state = __RBD_OBJ_COPYUP_OBJECT_MAPS;
return false;
case __RBD_OBJ_COPYUP_OBJECT_MAPS:
if (!pending_result_dec(&obj_req->pending, result))
return false;
fallthrough;
case RBD_OBJ_COPYUP_OBJECT_MAPS:
if (*result) {
rbd_warn(rbd_dev, "snap object map update failed: %d",
*result);
return true;
}
rbd_obj_copyup_write_object(obj_req);
if (!obj_req->pending.num_pending) {
*result = obj_req->pending.result;
obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
goto again;
}
obj_req->copyup_state = __RBD_OBJ_COPYUP_WRITE_OBJECT;
return false;
case __RBD_OBJ_COPYUP_WRITE_OBJECT:
if (!pending_result_dec(&obj_req->pending, result))
return false;
fallthrough;
case RBD_OBJ_COPYUP_WRITE_OBJECT:
return true;
default:
BUG();
}
}
/*
* Return:
* 0 - object map update sent
* 1 - object map update isn't needed
* <0 - error
*/
static int rbd_obj_write_post_object_map(struct rbd_obj_request *obj_req)
{
struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
u8 current_state = OBJECT_PENDING;
if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
return 1;
if (!(obj_req->flags & RBD_OBJ_FLAG_DELETION))
return 1;
return rbd_object_map_update(obj_req, CEPH_NOSNAP, OBJECT_NONEXISTENT,
&current_state);
}
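/*
 * Write state machine: START -> PRE_OBJECT_MAP -> OBJECT ->
 * COPYUP -> POST_OBJECT_MAP. A guarded write that fails with
 * -ENOENT diverts through the copyup state machine; object map
 * updates are skipped (ret > 0) when not needed.
 */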
static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int *result)
{
struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
int ret;
again:
switch (obj_req->write_state) {
case RBD_OBJ_WRITE_START:
rbd_assert(!*result);
rbd_obj_set_copyup_enabled(obj_req);
if (rbd_obj_write_is_noop(obj_req))
return true;
ret = rbd_obj_write_pre_object_map(obj_req);
if (ret < 0) {
*result = ret;
return true;
}
obj_req->write_state = RBD_OBJ_WRITE_PRE_OBJECT_MAP;
if (ret > 0)
goto again;
return false;
case RBD_OBJ_WRITE_PRE_OBJECT_MAP:
if (*result) {
rbd_warn(rbd_dev, "pre object map update failed: %d",
*result);
return true;
}
ret = rbd_obj_write_object(obj_req);
if (ret) {
*result = ret;
return true;
}
obj_req->write_state = RBD_OBJ_WRITE_OBJECT;
return false;
case RBD_OBJ_WRITE_OBJECT:
if (*result == -ENOENT) {
if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
*result = 0;
obj_req->copyup_state = RBD_OBJ_COPYUP_START;
obj_req->write_state = __RBD_OBJ_WRITE_COPYUP;
goto again;
}
/*
* On a non-existent object:
* delete - -ENOENT, truncate/zero - 0
*/
if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
*result = 0;
}
if (*result)
return true;
obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
goto again;
case __RBD_OBJ_WRITE_COPYUP:
if (!rbd_obj_advance_copyup(obj_req, result))
return false;
fallthrough;
case RBD_OBJ_WRITE_COPYUP:
if (*result) {
rbd_warn(rbd_dev, "copyup failed: %d", *result);
return true;
}
ret = rbd_obj_write_post_object_map(obj_req);
if (ret < 0) {
*result = ret;
return true;
}
obj_req->write_state = RBD_OBJ_WRITE_POST_OBJECT_MAP;
if (ret > 0)
goto again;
return false;
case RBD_OBJ_WRITE_POST_OBJECT_MAP:
if (*result)
rbd_warn(rbd_dev, "post object map update failed: %d",
*result);
return true;
default:
BUG();
}
}
/*
* Return true if @obj_req is completed.
*/
static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req,
int *result)
{
struct rbd_img_request *img_req = obj_req->img_request;
struct rbd_device *rbd_dev = img_req->rbd_dev;
bool done;
mutex_lock(&obj_req->state_mutex);
if (!rbd_img_is_write(img_req))
done = rbd_obj_advance_read(obj_req, result);
else
done = rbd_obj_advance_write(obj_req, result);
mutex_unlock(&obj_req->state_mutex);
if (done && *result) {
rbd_assert(*result < 0);
rbd_warn(rbd_dev, "%s at objno %llu %llu~%llu result %d",
obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
obj_req->ex.oe_off, obj_req->ex.oe_len, *result);
}
return done;
}
/*
* This is open-coded in rbd_img_handle_request() to avoid parent chain
* recursion.
*/
static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result)
{
if (__rbd_obj_handle_request(obj_req, &result))
rbd_img_handle_request(obj_req->img_request, result);
}
static bool need_exclusive_lock(struct rbd_img_request *img_req)
{
struct rbd_device *rbd_dev = img_req->rbd_dev;
if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK))
return false;
if (rbd_is_ro(rbd_dev))
return false;
rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
if (rbd_dev->opts->lock_on_read ||
(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
return true;
return rbd_img_is_write(img_req);
}
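/*
 * Track the image request on one of two lists: acquiring_list while
 * the exclusive lock is still being acquired, running_list once it
 * is held. rbd_lock_del_request() wakes the lock releaser when the
 * last running request goes away.
 */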
static bool rbd_lock_add_request(struct rbd_img_request *img_req)
{
struct rbd_device *rbd_dev = img_req->rbd_dev;
bool locked;
lockdep_assert_held(&rbd_dev->lock_rwsem);
locked = rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED;
spin_lock(&rbd_dev->lock_lists_lock);
rbd_assert(list_empty(&img_req->lock_item));
if (!locked)
list_add_tail(&img_req->lock_item, &rbd_dev->acquiring_list);
else
list_add_tail(&img_req->lock_item, &rbd_dev->running_list);
spin_unlock(&rbd_dev->lock_lists_lock);
return locked;
}
static void rbd_lock_del_request(struct rbd_img_request *img_req)
{
struct rbd_device *rbd_dev = img_req->rbd_dev;
bool need_wakeup;
lockdep_assert_held(&rbd_dev->lock_rwsem);
spin_lock(&rbd_dev->lock_lists_lock);
rbd_assert(!list_empty(&img_req->lock_item));
list_del_init(&img_req->lock_item);
need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
list_empty(&rbd_dev->running_list));
spin_unlock(&rbd_dev->lock_lists_lock);
if (need_wakeup)
complete(&rbd_dev->releasing_wait);
}
static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
{
struct rbd_device *rbd_dev = img_req->rbd_dev;
if (!need_exclusive_lock(img_req))
return 1;
if (rbd_lock_add_request(img_req))
return 1;
if (rbd_dev->opts->exclusive) {
WARN_ON(1); /* lock got released? */
return -EROFS;
}
/*
* Note the use of mod_delayed_work() in rbd_acquire_lock()
* and cancel_delayed_work() in wake_lock_waiters().
*/
dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
return 0;
}
static void rbd_img_object_requests(struct rbd_img_request *img_req)
{
struct rbd_device *rbd_dev = img_req->rbd_dev;
struct rbd_obj_request *obj_req;
rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
rbd_assert(!need_exclusive_lock(img_req) ||
__rbd_is_lock_owner(rbd_dev));
if (rbd_img_is_write(img_req)) {
rbd_assert(!img_req->snapc);
down_read(&rbd_dev->header_rwsem);
img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
up_read(&rbd_dev->header_rwsem);
}
for_each_obj_request(img_req, obj_req) {
int result = 0;
if (__rbd_obj_handle_request(obj_req, &result)) {
if (result) {
img_req->pending.result = result;
return;
}
} else {
img_req->pending.num_pending++;
}
}
}
static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
{
int ret;
again:
switch (img_req->state) {
case RBD_IMG_START:
rbd_assert(!*result);
ret = rbd_img_exclusive_lock(img_req);
if (ret < 0) {
*result = ret;
return true;
}
img_req->state = RBD_IMG_EXCLUSIVE_LOCK;
if (ret > 0)
goto again;
return false;
case RBD_IMG_EXCLUSIVE_LOCK:
if (*result)
return true;
rbd_img_object_requests(img_req);
if (!img_req->pending.num_pending) {
*result = img_req->pending.result;
img_req->state = RBD_IMG_OBJECT_REQUESTS;
goto again;
}
img_req->state = __RBD_IMG_OBJECT_REQUESTS;
return false;
case __RBD_IMG_OBJECT_REQUESTS:
if (!pending_result_dec(&img_req->pending, result))
return false;
fallthrough;
case RBD_IMG_OBJECT_REQUESTS:
return true;
default:
BUG();
}
}
/*
* Return true if @img_req is completed.
*/
static bool __rbd_img_handle_request(struct rbd_img_request *img_req,
int *result)
{
struct rbd_device *rbd_dev = img_req->rbd_dev;
bool done;
if (need_exclusive_lock(img_req)) {
down_read(&rbd_dev->lock_rwsem);
mutex_lock(&img_req->state_mutex);
done = rbd_img_advance(img_req, result);
if (done)
rbd_lock_del_request(img_req);
mutex_unlock(&img_req->state_mutex);
up_read(&rbd_dev->lock_rwsem);
} else {
mutex_lock(&img_req->state_mutex);
done = rbd_img_advance(img_req, result);
mutex_unlock(&img_req->state_mutex);
}
if (done && *result) {
rbd_assert(*result < 0);
rbd_warn(rbd_dev, "%s%s result %d",
test_bit(IMG_REQ_CHILD, &img_req->flags) ? "child " : "",
obj_op_name(img_req->op_type), *result);
}
return done;
}
static void rbd_img_handle_request(struct rbd_img_request *img_req, int result)
{
again:
if (!__rbd_img_handle_request(img_req, &result))
return;
if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
struct rbd_obj_request *obj_req = img_req->obj_request;
rbd_img_request_destroy(img_req);
if (__rbd_obj_handle_request(obj_req, &result)) {
img_req = obj_req->img_request;
goto again;
}
} else {
struct request *rq = blk_mq_rq_from_pdu(img_req);
rbd_img_request_destroy(img_req);
blk_mq_end_request(rq, errno_to_blk_status(result));
}
}
static const struct rbd_client_id rbd_empty_cid;
static bool rbd_cid_equal(const struct rbd_client_id *lhs,
const struct rbd_client_id *rhs)
{
return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
}
static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
{
struct rbd_client_id cid;
mutex_lock(&rbd_dev->watch_mutex);
cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
cid.handle = rbd_dev->watch_cookie;
mutex_unlock(&rbd_dev->watch_mutex);
return cid;
}
/*
* lock_rwsem must be held for write
*/
static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
const struct rbd_client_id *cid)
{
dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
cid->gid, cid->handle);
rbd_dev->owner_cid = *cid; /* struct */
}
static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
{
mutex_lock(&rbd_dev->watch_mutex);
sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
mutex_unlock(&rbd_dev->watch_mutex);
}
static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
{
struct rbd_client_id cid = rbd_get_cid(rbd_dev);
rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
strcpy(rbd_dev->lock_cookie, cookie);
rbd_set_owner_cid(rbd_dev, &cid);
queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
}
/*
* lock_rwsem must be held for write
*/
static int rbd_lock(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
char cookie[32];
int ret;
WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
rbd_dev->lock_cookie[0] != '\0');
format_lock_cookie(rbd_dev, cookie);
ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
RBD_LOCK_TAG, "", 0);
if (ret && ret != -EEXIST)
return ret;
__rbd_lock(rbd_dev, cookie);
return 0;
}
/*
* lock_rwsem must be held for write
*/
static void rbd_unlock(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
int ret;
WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
rbd_dev->lock_cookie[0] == '\0');
ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
RBD_LOCK_NAME, rbd_dev->lock_cookie);
if (ret && ret != -ENOENT)
rbd_warn(rbd_dev, "failed to unlock header: %d", ret);
	/* treat errors as if the image were unlocked */
rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
rbd_dev->lock_cookie[0] = '\0';
rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
}
static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
enum rbd_notify_op notify_op,
struct page ***preply_pages,
size_t *preply_len)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_client_id cid = rbd_get_cid(rbd_dev);
char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
int buf_size = sizeof(buf);
void *p = buf;
dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
/* encode *LockPayload NotifyMessage (op + ClientId) */
ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
ceph_encode_32(&p, notify_op);
ceph_encode_64(&p, cid.gid);
ceph_encode_64(&p, cid.handle);
return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
&rbd_dev->header_oloc, buf, buf_size,
RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
}
static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
enum rbd_notify_op notify_op)
{
__rbd_notify_op_lock(rbd_dev, notify_op, NULL, NULL);
}
static void rbd_notify_acquired_lock(struct work_struct *work)
{
struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
acquired_lock_work);
rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
}
static void rbd_notify_released_lock(struct work_struct *work)
{
struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
released_lock_work);
rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
}
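/*
 * Ask the current lock owner to release the lock.  The notify reply
 * is scanned for a ResponseMessage from the owner; its result is
 * returned, or -ETIMEDOUT if no owner responded.
 */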
static int rbd_request_lock(struct rbd_device *rbd_dev)
{
struct page **reply_pages;
size_t reply_len;
bool lock_owner_responded = false;
int ret;
dout("%s rbd_dev %p\n", __func__, rbd_dev);
ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
&reply_pages, &reply_len);
if (ret && ret != -ETIMEDOUT) {
rbd_warn(rbd_dev, "failed to request lock: %d", ret);
goto out;
}
if (reply_len > 0 && reply_len <= PAGE_SIZE) {
void *p = page_address(reply_pages[0]);
void *const end = p + reply_len;
u32 n;
ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
while (n--) {
u8 struct_v;
u32 len;
ceph_decode_need(&p, end, 8 + 8, e_inval);
p += 8 + 8; /* skip gid and cookie */
ceph_decode_32_safe(&p, end, len, e_inval);
if (!len)
continue;
if (lock_owner_responded) {
rbd_warn(rbd_dev,
"duplicate lock owners detected");
ret = -EIO;
goto out;
}
lock_owner_responded = true;
ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
&struct_v, &len);
if (ret) {
rbd_warn(rbd_dev,
"failed to decode ResponseMessage: %d",
ret);
goto e_inval;
}
ret = ceph_decode_32(&p);
}
}
if (!lock_owner_responded) {
rbd_warn(rbd_dev, "no lock owners detected");
ret = -ETIMEDOUT;
}
out:
ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
return ret;
e_inval:
ret = -EINVAL;
goto out;
}
/*
 * Wake up either the image request state machine(s) or the
 * rbd_add_acquire_lock() (i.e. "rbd map") waiter.
 */
static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
{
struct rbd_img_request *img_req;
dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
lockdep_assert_held_write(&rbd_dev->lock_rwsem);
cancel_delayed_work(&rbd_dev->lock_dwork);
if (!completion_done(&rbd_dev->acquire_wait)) {
rbd_assert(list_empty(&rbd_dev->acquiring_list) &&
list_empty(&rbd_dev->running_list));
rbd_dev->acquire_err = result;
complete_all(&rbd_dev->acquire_wait);
return;
}
list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
mutex_lock(&img_req->state_mutex);
rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
rbd_img_schedule(img_req, result);
mutex_unlock(&img_req->state_mutex);
}
list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
}
static bool locker_equal(const struct ceph_locker *lhs,
const struct ceph_locker *rhs)
{
return lhs->id.name.type == rhs->id.name.type &&
lhs->id.name.num == rhs->id.name.num &&
!strcmp(lhs->id.cookie, rhs->id.cookie) &&
ceph_addr_equal_no_type(&lhs->info.addr, &rhs->info.addr);
}
static void free_locker(struct ceph_locker *locker)
{
if (locker)
ceph_free_lockers(locker, 1);
}
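/*
 * Returns the current exclusive locker (NULL if there is none),
 * ERR_PTR(-EBUSY) if the image is locked by an external mechanism,
 * or another ERR_PTR() on failure.
 */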
static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct ceph_locker *lockers;
u32 num_lockers;
u8 lock_type;
char *lock_tag;
u64 handle;
int ret;
ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
&rbd_dev->header_oloc, RBD_LOCK_NAME,
&lock_type, &lock_tag, &lockers, &num_lockers);
if (ret) {
rbd_warn(rbd_dev, "failed to get header lockers: %d", ret);
return ERR_PTR(ret);
}
if (num_lockers == 0) {
dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
lockers = NULL;
goto out;
}
if (strcmp(lock_tag, RBD_LOCK_TAG)) {
rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
lock_tag);
goto err_busy;
}
if (lock_type != CEPH_CLS_LOCK_EXCLUSIVE) {
rbd_warn(rbd_dev, "incompatible lock type detected");
goto err_busy;
}
WARN_ON(num_lockers != 1);
ret = sscanf(lockers[0].id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu",
&handle);
if (ret != 1) {
rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
lockers[0].id.cookie);
goto err_busy;
}
if (ceph_addr_is_blank(&lockers[0].info.addr)) {
rbd_warn(rbd_dev, "locker has a blank address");
goto err_busy;
}
dout("%s rbd_dev %p got locker %s%llu@%pISpc/%u handle %llu\n",
__func__, rbd_dev, ENTITY_NAME(lockers[0].id.name),
&lockers[0].info.addr.in_addr,
le32_to_cpu(lockers[0].info.addr.nonce), handle);
out:
kfree(lock_tag);
return lockers;
err_busy:
kfree(lock_tag);
ceph_free_lockers(lockers, num_lockers);
return ERR_PTR(-EBUSY);
}
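/*
 * Returns 1 if @locker has a watch established on the header object
 * (its client id is recorded as the owner), 0 if it doesn't, or a
 * negative error.
 */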
static int find_watcher(struct rbd_device *rbd_dev,
const struct ceph_locker *locker)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct ceph_watch_item *watchers;
u32 num_watchers;
u64 cookie;
int i;
int ret;
ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
&rbd_dev->header_oloc, &watchers,
&num_watchers);
if (ret) {
rbd_warn(rbd_dev, "failed to get watchers: %d", ret);
return ret;
}
sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
for (i = 0; i < num_watchers; i++) {
/*
* Ignore addr->type while comparing. This mimics
* entity_addr_t::get_legacy_str() + strcmp().
*/
if (ceph_addr_equal_no_type(&watchers[i].addr,
&locker->info.addr) &&
watchers[i].cookie == cookie) {
struct rbd_client_id cid = {
.gid = le64_to_cpu(watchers[i].name.num),
.handle = cookie,
};
dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
rbd_dev, cid.gid, cid.handle);
rbd_set_owner_cid(rbd_dev, &cid);
ret = 1;
goto out;
}
}
dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
ret = 0;
out:
kfree(watchers);
return ret;
}
/*
* lock_rwsem must be held for write
*/
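/*
 * Returns 0 if the lock was acquired, a positive value if the caller
 * should retry with rbd_request_lock() (the owner appears to be
 * alive), or a negative error.
 */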
static int rbd_try_lock(struct rbd_device *rbd_dev)
{
struct ceph_client *client = rbd_dev->rbd_client->client;
struct ceph_locker *locker, *refreshed_locker;
int ret;
for (;;) {
locker = refreshed_locker = NULL;
ret = rbd_lock(rbd_dev);
if (!ret)
goto out;
if (ret != -EBUSY) {
rbd_warn(rbd_dev, "failed to lock header: %d", ret);
goto out;
}
/* determine if the current lock holder is still alive */
locker = get_lock_owner_info(rbd_dev);
if (IS_ERR(locker)) {
ret = PTR_ERR(locker);
locker = NULL;
goto out;
}
if (!locker)
goto again;
ret = find_watcher(rbd_dev, locker);
if (ret)
goto out; /* request lock or error */
refreshed_locker = get_lock_owner_info(rbd_dev);
if (IS_ERR(refreshed_locker)) {
ret = PTR_ERR(refreshed_locker);
refreshed_locker = NULL;
goto out;
}
if (!refreshed_locker ||
!locker_equal(locker, refreshed_locker))
goto again;
rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
ENTITY_NAME(locker->id.name));
ret = ceph_monc_blocklist_add(&client->monc,
&locker->info.addr);
if (ret) {
rbd_warn(rbd_dev, "failed to blocklist %s%llu: %d",
ENTITY_NAME(locker->id.name), ret);
goto out;
}
ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
&rbd_dev->header_oloc, RBD_LOCK_NAME,
locker->id.cookie, &locker->id.name);
if (ret && ret != -ENOENT) {
rbd_warn(rbd_dev, "failed to break header lock: %d",
ret);
goto out;
}
again:
free_locker(refreshed_locker);
free_locker(locker);
}
out:
free_locker(refreshed_locker);
free_locker(locker);
return ret;
}
static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
{
int ret;
ret = rbd_dev_refresh(rbd_dev);
if (ret)
return ret;
if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
ret = rbd_object_map_open(rbd_dev);
if (ret)
return ret;
}
return 0;
}
/*
* Return:
* 0 - lock acquired
* 1 - caller should call rbd_request_lock()
* <0 - error
*/
static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
{
int ret;
down_read(&rbd_dev->lock_rwsem);
dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
rbd_dev->lock_state);
if (__rbd_is_lock_owner(rbd_dev)) {
up_read(&rbd_dev->lock_rwsem);
return 0;
}
up_read(&rbd_dev->lock_rwsem);
down_write(&rbd_dev->lock_rwsem);
dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
rbd_dev->lock_state);
if (__rbd_is_lock_owner(rbd_dev)) {
up_write(&rbd_dev->lock_rwsem);
return 0;
}
ret = rbd_try_lock(rbd_dev);
if (ret < 0) {
rbd_warn(rbd_dev, "failed to acquire lock: %d", ret);
goto out;
}
if (ret > 0) {
up_write(&rbd_dev->lock_rwsem);
return ret;
}
rbd_assert(rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED);
rbd_assert(list_empty(&rbd_dev->running_list));
ret = rbd_post_acquire_action(rbd_dev);
if (ret) {
rbd_warn(rbd_dev, "post-acquire action failed: %d", ret);
/*
* Can't stay in RBD_LOCK_STATE_LOCKED because
* rbd_lock_add_request() would let the request through,
* assuming that e.g. object map is locked and loaded.
*/
rbd_unlock(rbd_dev);
}
out:
wake_lock_waiters(rbd_dev, ret);
up_write(&rbd_dev->lock_rwsem);
return ret;
}
static void rbd_acquire_lock(struct work_struct *work)
{
struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
struct rbd_device, lock_dwork);
int ret;
dout("%s rbd_dev %p\n", __func__, rbd_dev);
again:
ret = rbd_try_acquire_lock(rbd_dev);
if (ret <= 0) {
dout("%s rbd_dev %p ret %d - done\n", __func__, rbd_dev, ret);
return;
}
ret = rbd_request_lock(rbd_dev);
if (ret == -ETIMEDOUT) {
goto again; /* treat this as a dead client */
} else if (ret == -EROFS) {
rbd_warn(rbd_dev, "peer will not release lock");
down_write(&rbd_dev->lock_rwsem);
wake_lock_waiters(rbd_dev, ret);
up_write(&rbd_dev->lock_rwsem);
} else if (ret < 0) {
rbd_warn(rbd_dev, "error requesting lock: %d", ret);
mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
RBD_RETRY_DELAY);
} else {
/*
* lock owner acked, but resend if we don't see them
* release the lock
*/
dout("%s rbd_dev %p requeuing lock_dwork\n", __func__,
rbd_dev);
mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
}
}
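/*
 * Transition to RBD_LOCK_STATE_RELEASING and wait for in-flight IO
 * on the running list to drain.  Returns true if the lock is quiesced
 * and can be released, false otherwise.
 */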
static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
{
dout("%s rbd_dev %p\n", __func__, rbd_dev);
lockdep_assert_held_write(&rbd_dev->lock_rwsem);
if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
return false;
/*
* Ensure that all in-flight IO is flushed.
*/
rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
rbd_assert(!completion_done(&rbd_dev->releasing_wait));
if (list_empty(&rbd_dev->running_list))
return true;
up_write(&rbd_dev->lock_rwsem);
wait_for_completion(&rbd_dev->releasing_wait);
down_write(&rbd_dev->lock_rwsem);
if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
return false;
rbd_assert(list_empty(&rbd_dev->running_list));
return true;
}
static void rbd_pre_release_action(struct rbd_device *rbd_dev)
{
if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)
rbd_object_map_close(rbd_dev);
}
static void __rbd_release_lock(struct rbd_device *rbd_dev)
{
rbd_assert(list_empty(&rbd_dev->running_list));
rbd_pre_release_action(rbd_dev);
rbd_unlock(rbd_dev);
}
/*
* lock_rwsem must be held for write
*/
static void rbd_release_lock(struct rbd_device *rbd_dev)
{
if (!rbd_quiesce_lock(rbd_dev))
return;
__rbd_release_lock(rbd_dev);
	/*
	 * Give others a chance to grab the lock - otherwise we would
	 * re-acquire it almost immediately if new IO arrived while the
	 * running list was draining.  We need to ack our own
	 * notifications, so this lock_dwork will be requeued from
	 * rbd_handle_released_lock() by way of maybe_kick_acquire().
	 */
cancel_delayed_work(&rbd_dev->lock_dwork);
}
static void rbd_release_lock_work(struct work_struct *work)
{
struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
unlock_work);
down_write(&rbd_dev->lock_rwsem);
rbd_release_lock(rbd_dev);
up_write(&rbd_dev->lock_rwsem);
}
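/*
 * (Re)kick lock_dwork if image requests are waiting for the lock or
 * an acquire attempt is already scheduled.
 */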
static void maybe_kick_acquire(struct rbd_device *rbd_dev)
{
bool have_requests;
dout("%s rbd_dev %p\n", __func__, rbd_dev);
if (__rbd_is_lock_owner(rbd_dev))
return;
spin_lock(&rbd_dev->lock_lists_lock);
have_requests = !list_empty(&rbd_dev->acquiring_list);
spin_unlock(&rbd_dev->lock_lists_lock);
if (have_requests || delayed_work_pending(&rbd_dev->lock_dwork)) {
dout("%s rbd_dev %p kicking lock_dwork\n", __func__, rbd_dev);
mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
}
}
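/*
 * Handle an ACQUIRED_LOCK notification: record the new owner's client
 * id (if supplied) and kick a pending acquire attempt if needed.
 */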
static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
void **p)
{
struct rbd_client_id cid = { 0 };
if (struct_v >= 2) {
cid.gid = ceph_decode_64(p);
cid.handle = ceph_decode_64(p);
}
dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
cid.handle);
if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
down_write(&rbd_dev->lock_rwsem);
if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n",
__func__, rbd_dev, cid.gid, cid.handle);
} else {
rbd_set_owner_cid(rbd_dev, &cid);
}
downgrade_write(&rbd_dev->lock_rwsem);
} else {
down_read(&rbd_dev->lock_rwsem);
}
maybe_kick_acquire(rbd_dev);
up_read(&rbd_dev->lock_rwsem);
}
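/*
 * Handle a RELEASED_LOCK notification: clear the owner's client id if
 * it matches and kick a pending acquire attempt if needed.
 */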
static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
void **p)
{
struct rbd_client_id cid = { 0 };
if (struct_v >= 2) {
cid.gid = ceph_decode_64(p);
cid.handle = ceph_decode_64(p);
}
dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
cid.handle);
if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
down_write(&rbd_dev->lock_rwsem);
if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n",
__func__, rbd_dev, cid.gid, cid.handle,
rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
} else {
rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
}
downgrade_write(&rbd_dev->lock_rwsem);
} else {
down_read(&rbd_dev->lock_rwsem);
}
maybe_kick_acquire(rbd_dev);
up_read(&rbd_dev->lock_rwsem);
}
/*
* Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
* ResponseMessage is needed.
*/
static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
void **p)
{
struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
struct rbd_client_id cid = { 0 };
int result = 1;
if (struct_v >= 2) {
cid.gid = ceph_decode_64(p);
cid.handle = ceph_decode_64(p);
}
dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
cid.handle);
if (rbd_cid_equal(&cid, &my_cid))
return result;
down_read(&rbd_dev->lock_rwsem);
if (__rbd_is_lock_owner(rbd_dev)) {
if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
goto out_unlock;
/*
* encode ResponseMessage(0) so the peer can detect
* a missing owner
*/
result = 0;
if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
if (!rbd_dev->opts->exclusive) {
dout("%s rbd_dev %p queueing unlock_work\n",
__func__, rbd_dev);
queue_work(rbd_dev->task_wq,
&rbd_dev->unlock_work);
} else {
/* refuse to release the lock */
result = -EROFS;
}
}
}
out_unlock:
up_read(&rbd_dev->lock_rwsem);
return result;
}
static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
u64 notify_id, u64 cookie, s32 *result)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
char buf[4 + CEPH_ENCODING_START_BLK_LEN];
int buf_size = sizeof(buf);
int ret;
if (result) {
void *p = buf;
/* encode ResponseMessage */
ceph_start_encoding(&p, 1, 1,
buf_size - CEPH_ENCODING_START_BLK_LEN);
ceph_encode_32(&p, *result);
} else {
buf_size = 0;
}
ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
&rbd_dev->header_oloc, notify_id, cookie,
buf, buf_size);
if (ret)
rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
}
static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
u64 cookie)
{
dout("%s rbd_dev %p\n", __func__, rbd_dev);
__rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
}
static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
u64 notify_id, u64 cookie, s32 result)
{
dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
__rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
}
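/*
 * Watch callback: decode the NotifyMessage and dispatch on notify_op.
 * Every notification is acknowledged, with a ResponseMessage where
 * one is expected.
 */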
static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
u64 notifier_id, void *data, size_t data_len)
{
struct rbd_device *rbd_dev = arg;
void *p = data;
void *const end = p + data_len;
u8 struct_v = 0;
u32 len;
u32 notify_op;
int ret;
dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
__func__, rbd_dev, cookie, notify_id, data_len);
if (data_len) {
ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
&struct_v, &len);
if (ret) {
rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
ret);
return;
}
notify_op = ceph_decode_32(&p);
} else {
/* legacy notification for header updates */
notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
len = 0;
}
dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
switch (notify_op) {
case RBD_NOTIFY_OP_ACQUIRED_LOCK:
rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
break;
case RBD_NOTIFY_OP_RELEASED_LOCK:
rbd_handle_released_lock(rbd_dev, struct_v, &p);
rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
break;
case RBD_NOTIFY_OP_REQUEST_LOCK:
ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
if (ret <= 0)
rbd_acknowledge_notify_result(rbd_dev, notify_id,
cookie, ret);
else
rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
break;
case RBD_NOTIFY_OP_HEADER_UPDATE:
ret = rbd_dev_refresh(rbd_dev);
if (ret)
rbd_warn(rbd_dev, "refresh failed: %d", ret);
rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
break;
default:
if (rbd_is_lock_owner(rbd_dev))
rbd_acknowledge_notify_result(rbd_dev, notify_id,
cookie, -EOPNOTSUPP);
else
rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
break;
}
}
static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
static void rbd_watch_errcb(void *arg, u64 cookie, int err)
{
struct rbd_device *rbd_dev = arg;
rbd_warn(rbd_dev, "encountered watch error: %d", err);
down_write(&rbd_dev->lock_rwsem);
rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
up_write(&rbd_dev->lock_rwsem);
mutex_lock(&rbd_dev->watch_mutex);
if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
__rbd_unregister_watch(rbd_dev);
rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
}
mutex_unlock(&rbd_dev->watch_mutex);
}
/*
* watch_mutex must be locked
*/
static int __rbd_register_watch(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct ceph_osd_linger_request *handle;
rbd_assert(!rbd_dev->watch_handle);
dout("%s rbd_dev %p\n", __func__, rbd_dev);
handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
&rbd_dev->header_oloc, rbd_watch_cb,
rbd_watch_errcb, rbd_dev);
if (IS_ERR(handle))
return PTR_ERR(handle);
rbd_dev->watch_handle = handle;
return 0;
}
/*
* watch_mutex must be locked
*/
static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
int ret;
rbd_assert(rbd_dev->watch_handle);
dout("%s rbd_dev %p\n", __func__, rbd_dev);
ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
if (ret)
rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
rbd_dev->watch_handle = NULL;
}
static int rbd_register_watch(struct rbd_device *rbd_dev)
{
int ret;
mutex_lock(&rbd_dev->watch_mutex);
rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
ret = __rbd_register_watch(rbd_dev);
if (ret)
goto out;
rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
out:
mutex_unlock(&rbd_dev->watch_mutex);
return ret;
}
static void cancel_tasks_sync(struct rbd_device *rbd_dev)
{
dout("%s rbd_dev %p\n", __func__, rbd_dev);
cancel_work_sync(&rbd_dev->acquired_lock_work);
cancel_work_sync(&rbd_dev->released_lock_work);
cancel_delayed_work_sync(&rbd_dev->lock_dwork);
cancel_work_sync(&rbd_dev->unlock_work);
}
/*
* header_rwsem must not be held to avoid a deadlock with
* rbd_dev_refresh() when flushing notifies.
*/
static void rbd_unregister_watch(struct rbd_device *rbd_dev)
{
cancel_tasks_sync(rbd_dev);
mutex_lock(&rbd_dev->watch_mutex);
if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
__rbd_unregister_watch(rbd_dev);
rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
mutex_unlock(&rbd_dev->watch_mutex);
cancel_delayed_work_sync(&rbd_dev->watch_dwork);
ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
}
/*
* lock_rwsem must be held for write
*/
static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
char cookie[32];
int ret;
if (!rbd_quiesce_lock(rbd_dev))
return;
format_lock_cookie(rbd_dev, cookie);
ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
&rbd_dev->header_oloc, RBD_LOCK_NAME,
CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
RBD_LOCK_TAG, cookie);
if (ret) {
if (ret != -EOPNOTSUPP)
rbd_warn(rbd_dev, "failed to update lock cookie: %d",
ret);
/*
* Lock cookie cannot be updated on older OSDs, so do
* a manual release and queue an acquire.
*/
__rbd_release_lock(rbd_dev);
queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
} else {
__rbd_lock(rbd_dev, cookie);
wake_lock_waiters(rbd_dev, 0);
}
}
static void rbd_reregister_watch(struct work_struct *work)
{
struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
struct rbd_device, watch_dwork);
int ret;
dout("%s rbd_dev %p\n", __func__, rbd_dev);
mutex_lock(&rbd_dev->watch_mutex);
if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
mutex_unlock(&rbd_dev->watch_mutex);
return;
}
ret = __rbd_register_watch(rbd_dev);
if (ret) {
rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
if (ret != -EBLOCKLISTED && ret != -ENOENT) {
queue_delayed_work(rbd_dev->task_wq,
&rbd_dev->watch_dwork,
RBD_RETRY_DELAY);
mutex_unlock(&rbd_dev->watch_mutex);
return;
}
mutex_unlock(&rbd_dev->watch_mutex);
down_write(&rbd_dev->lock_rwsem);
wake_lock_waiters(rbd_dev, ret);
up_write(&rbd_dev->lock_rwsem);
return;
}
rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
mutex_unlock(&rbd_dev->watch_mutex);
down_write(&rbd_dev->lock_rwsem);
if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
rbd_reacquire_lock(rbd_dev);
up_write(&rbd_dev->lock_rwsem);
ret = rbd_dev_refresh(rbd_dev);
if (ret)
rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
}
/*
* Synchronous osd object method call. Returns the number of bytes
* returned in the outbound buffer, or a negative error code.
*/
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
struct ceph_object_id *oid,
struct ceph_object_locator *oloc,
const char *method_name,
const void *outbound,
size_t outbound_size,
void *inbound,
size_t inbound_size)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct page *req_page = NULL;
struct page *reply_page;
int ret;
	/*
	 * Method calls are ultimately read operations.  The result
	 * should be placed into the inbound buffer provided.  Callers
	 * also supply outbound data--parameters for the object
	 * method.  Currently if this is present it will be a
	 * snapshot id.
	 */
if (outbound) {
if (outbound_size > PAGE_SIZE)
return -E2BIG;
req_page = alloc_page(GFP_KERNEL);
if (!req_page)
return -ENOMEM;
memcpy(page_address(req_page), outbound, outbound_size);
}
reply_page = alloc_page(GFP_KERNEL);
if (!reply_page) {
if (req_page)
__free_page(req_page);
return -ENOMEM;
}
ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
CEPH_OSD_FLAG_READ, req_page, outbound_size,
&reply_page, &inbound_size);
if (!ret) {
memcpy(inbound, page_address(reply_page), inbound_size);
ret = inbound_size;
}
if (req_page)
__free_page(req_page);
__free_page(reply_page);
return ret;
}
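/*
 * Worker for a queued block layer request: validate it against the
 * mapping size, fill the image request and kick off its state
 * machine.
 */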
static void rbd_queue_workfn(struct work_struct *work)
{
struct rbd_img_request *img_request =
container_of(work, struct rbd_img_request, work);
struct rbd_device *rbd_dev = img_request->rbd_dev;
enum obj_operation_type op_type = img_request->op_type;
struct request *rq = blk_mq_rq_from_pdu(img_request);
u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
u64 length = blk_rq_bytes(rq);
u64 mapping_size;
int result;
/* Ignore/skip any zero-length requests */
if (!length) {
dout("%s: zero-length request\n", __func__);
result = 0;
goto err_img_request;
}
blk_mq_start_request(rq);
down_read(&rbd_dev->header_rwsem);
mapping_size = rbd_dev->mapping.size;
rbd_img_capture_header(img_request);
up_read(&rbd_dev->header_rwsem);
if (offset + length > mapping_size) {
rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
length, mapping_size);
result = -EIO;
goto err_img_request;
}
dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
img_request, obj_op_name(op_type), offset, length);
if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT)
result = rbd_img_fill_nodata(img_request, offset, length);
else
result = rbd_img_fill_from_bio(img_request, offset, length,
rq->bio);
if (result)
goto err_img_request;
rbd_img_handle_request(img_request, 0);
return;
err_img_request:
rbd_img_request_destroy(img_request);
if (result)
rbd_warn(rbd_dev, "%s %llx at %llx result %d",
obj_op_name(op_type), length, offset, result);
blk_mq_end_request(rq, errno_to_blk_status(result));
}
static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct rbd_device *rbd_dev = hctx->queue->queuedata;
struct rbd_img_request *img_req = blk_mq_rq_to_pdu(bd->rq);
enum obj_operation_type op_type;
switch (req_op(bd->rq)) {
case REQ_OP_DISCARD:
op_type = OBJ_OP_DISCARD;
break;
case REQ_OP_WRITE_ZEROES:
op_type = OBJ_OP_ZEROOUT;
break;
case REQ_OP_WRITE:
op_type = OBJ_OP_WRITE;
break;
case REQ_OP_READ:
op_type = OBJ_OP_READ;
break;
default:
rbd_warn(rbd_dev, "unknown req_op %d", req_op(bd->rq));
return BLK_STS_IOERR;
}
rbd_img_request_init(img_req, rbd_dev, op_type);
if (rbd_img_is_write(img_req)) {
if (rbd_is_ro(rbd_dev)) {
rbd_warn(rbd_dev, "%s on read-only mapping",
obj_op_name(img_req->op_type));
return BLK_STS_IOERR;
}
rbd_assert(!rbd_is_snap(rbd_dev));
}
INIT_WORK(&img_req->work, rbd_queue_workfn);
queue_work(rbd_wq, &img_req->work);
return BLK_STS_OK;
}
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
put_disk(rbd_dev->disk);
blk_mq_free_tag_set(&rbd_dev->tag_set);
rbd_dev->disk = NULL;
}
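/*
 * Synchronously read up to @buf_len bytes from the given object into
 * @buf.  Returns the number of bytes read or a negative error.
 */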
static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
struct ceph_object_id *oid,
struct ceph_object_locator *oloc,
void *buf, int buf_len)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct ceph_osd_request *req;
struct page **pages;
int num_pages = calc_pages_for(0, buf_len);
int ret;
req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
if (!req)
return -ENOMEM;
ceph_oid_copy(&req->r_base_oid, oid);
ceph_oloc_copy(&req->r_base_oloc, oloc);
req->r_flags = CEPH_OSD_FLAG_READ;
pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto out_req;
}
osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
true);
ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
if (ret)
goto out_req;
ceph_osdc_start_request(osdc, req);
ret = ceph_osdc_wait_request(osdc, req);
if (ret >= 0)
ceph_copy_from_page_vector(pages, buf, 0, ret);
out_req:
ceph_osdc_put_request(req);
return ret;
}
/*
* Read the complete header for the given rbd device. On successful
* return, the rbd_dev->header field will contain up-to-date
* information about the image.
*/
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
struct rbd_image_header_ondisk *ondisk = NULL;
u32 snap_count = 0;
u64 names_size = 0;
u32 want_count;
int ret;
/*
* The complete header will include an array of its 64-bit
* snapshot ids, followed by the names of those snapshots as
* a contiguous block of NUL-terminated strings. Note that
* the number of snapshots could change by the time we read
* it in, in which case we re-read it.
*/
do {
size_t size;
kfree(ondisk);
size = sizeof (*ondisk);
size += snap_count * sizeof (struct rbd_image_snap_ondisk);
size += names_size;
ondisk = kmalloc(size, GFP_KERNEL);
if (!ondisk)
return -ENOMEM;
ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
&rbd_dev->header_oloc, ondisk, size);
if (ret < 0)
goto out;
if ((size_t)ret < size) {
ret = -ENXIO;
rbd_warn(rbd_dev, "short header read (want %zd got %d)",
size, ret);
goto out;
}
if (!rbd_dev_ondisk_valid(ondisk)) {
ret = -ENXIO;
rbd_warn(rbd_dev, "invalid header");
goto out;
}
names_size = le64_to_cpu(ondisk->snap_names_len);
want_count = snap_count;
snap_count = le32_to_cpu(ondisk->snap_count);
} while (snap_count != want_count);
ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
kfree(ondisk);
return ret;
}
static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
sector_t size;
/*
* If EXISTS is not set, rbd_dev->disk may be NULL, so don't
* try to update its size. If REMOVING is set, updating size
* is just useless work since the device can't be opened.
*/
if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
!test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
dout("setting size to %llu sectors", (unsigned long long)size);
set_capacity_and_notify(rbd_dev->disk, size);
}
}
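/*
 * Re-read the image header (and parent info, if any) and propagate
 * a size change to the block device.
 */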
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
u64 mapping_size;
int ret;
down_write(&rbd_dev->header_rwsem);
mapping_size = rbd_dev->mapping.size;
ret = rbd_dev_header_info(rbd_dev);
if (ret)
goto out;
/*
* If there is a parent, see if it has disappeared due to the
* mapped image getting flattened.
*/
if (rbd_dev->parent) {
ret = rbd_dev_v2_parent_info(rbd_dev);
if (ret)
goto out;
}
rbd_assert(!rbd_is_snap(rbd_dev));
rbd_dev->mapping.size = rbd_dev->header.image_size;
out:
up_write(&rbd_dev->header_rwsem);
if (!ret && mapping_size != rbd_dev->mapping.size)
rbd_dev_update_size(rbd_dev);
return ret;
}
static const struct blk_mq_ops rbd_mq_ops = {
.queue_rq = rbd_queue_rq,
};
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
struct gendisk *disk;
struct request_queue *q;
unsigned int objset_bytes =
rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
int err;
memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
rbd_dev->tag_set.ops = &rbd_mq_ops;
rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
if (err)
return err;
disk = blk_mq_alloc_disk(&rbd_dev->tag_set, rbd_dev);
if (IS_ERR(disk)) {
err = PTR_ERR(disk);
goto out_tag_set;
}
q = disk->queue;
snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
rbd_dev->dev_id);
disk->major = rbd_dev->major;
disk->first_minor = rbd_dev->minor;
if (single_major)
disk->minors = (1 << RBD_SINGLE_MAJOR_PART_SHIFT);
else
disk->minors = RBD_MINORS_PER_MAJOR;
disk->fops = &rbd_bd_ops;
disk->private_data = rbd_dev;
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
q->limits.max_sectors = queue_max_hw_sectors(q);
blk_queue_max_segments(q, USHRT_MAX);
blk_queue_max_segment_size(q, UINT_MAX);
blk_queue_io_min(q, rbd_dev->opts->alloc_size);
blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
if (rbd_dev->opts->trim) {
q->limits.discard_granularity = rbd_dev->opts->alloc_size;
blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
}
if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
rbd_dev->disk = disk;
return 0;
out_tag_set:
blk_mq_free_tag_set(&rbd_dev->tag_set);
return err;
}
/*
 * sysfs
 */
static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
return container_of(dev, struct rbd_device, dev);
}
static ssize_t rbd_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "%llu\n",
(unsigned long long)rbd_dev->mapping.size);
}
static ssize_t rbd_features_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "0x%016llx\n", rbd_dev->header.features);
}
static ssize_t rbd_major_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
if (rbd_dev->major)
return sprintf(buf, "%d\n", rbd_dev->major);
return sprintf(buf, "(none)\n");
}
static ssize_t rbd_minor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "%d\n", rbd_dev->minor);
}
static ssize_t rbd_client_addr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
struct ceph_entity_addr *client_addr =
ceph_client_addr(rbd_dev->rbd_client->client);
return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
le32_to_cpu(client_addr->nonce));
}
static ssize_t rbd_client_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "client%lld\n",
ceph_client_gid(rbd_dev->rbd_client->client));
}
static ssize_t rbd_cluster_fsid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
}
static ssize_t rbd_config_info_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
return sprintf(buf, "%s\n", rbd_dev->config_info);
}
static ssize_t rbd_pool_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}
static ssize_t rbd_pool_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "%llu\n",
(unsigned long long) rbd_dev->spec->pool_id);
}
static ssize_t rbd_pool_ns_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
}
static ssize_t rbd_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
if (rbd_dev->spec->image_name)
return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
return sprintf(buf, "(unknown)\n");
}
static ssize_t rbd_image_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}
/*
* Shows the name of the currently-mapped snapshot (or
* RBD_SNAP_HEAD_NAME for the base image).
*/
static ssize_t rbd_snap_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}
static ssize_t rbd_snap_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
}
/*
* For a v2 image, shows the chain of parent images, separated by empty
* lines. For v1 images or if there is no parent, shows "(no parent
* image)".
*/
static ssize_t rbd_parent_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
ssize_t count = 0;
if (!rbd_dev->parent)
return sprintf(buf, "(no parent image)\n");
for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
struct rbd_spec *spec = rbd_dev->parent_spec;
count += sprintf(&buf[count], "%s"
"pool_id %llu\npool_name %s\n"
"pool_ns %s\n"
"image_id %s\nimage_name %s\n"
"snap_id %llu\nsnap_name %s\n"
"overlap %llu\n",
!count ? "" : "\n", /* first? */
spec->pool_id, spec->pool_name,
spec->pool_ns ?: "",
spec->image_id, spec->image_name ?: "(unknown)",
spec->snap_id, spec->snap_name,
rbd_dev->parent_overlap);
}
return count;
}
static ssize_t rbd_image_refresh(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t size)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
ret = rbd_dev_refresh(rbd_dev);
if (ret)
return ret;
return size;
}
static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);
static struct attribute *rbd_attrs[] = {
&dev_attr_size.attr,
&dev_attr_features.attr,
&dev_attr_major.attr,
&dev_attr_minor.attr,
&dev_attr_client_addr.attr,
&dev_attr_client_id.attr,
&dev_attr_cluster_fsid.attr,
&dev_attr_config_info.attr,
&dev_attr_pool.attr,
&dev_attr_pool_id.attr,
&dev_attr_pool_ns.attr,
&dev_attr_name.attr,
&dev_attr_image_id.attr,
&dev_attr_current_snap.attr,
&dev_attr_snap_id.attr,
&dev_attr_parent.attr,
&dev_attr_refresh.attr,
NULL
};
static struct attribute_group rbd_attr_group = {
.attrs = rbd_attrs,
};
static const struct attribute_group *rbd_attr_groups[] = {
&rbd_attr_group,
NULL
};
static void rbd_dev_release(struct device *dev);
static const struct device_type rbd_device_type = {
.name = "rbd",
.groups = rbd_attr_groups,
.release = rbd_dev_release,
};
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
kref_get(&spec->kref);
return spec;
}
static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
if (spec)
kref_put(&spec->kref, rbd_spec_free);
}
static struct rbd_spec *rbd_spec_alloc(void)
{
struct rbd_spec *spec;
spec = kzalloc(sizeof (*spec), GFP_KERNEL);
if (!spec)
return NULL;
spec->pool_id = CEPH_NOPOOL;
spec->snap_id = CEPH_NOSNAP;
kref_init(&spec->kref);
return spec;
}
static void rbd_spec_free(struct kref *kref)
{
struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
kfree(spec->pool_name);
kfree(spec->pool_ns);
kfree(spec->image_id);
kfree(spec->image_name);
kfree(spec->snap_name);
kfree(spec);
}
static void rbd_dev_free(struct rbd_device *rbd_dev)
{
WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
ceph_oid_destroy(&rbd_dev->header_oid);
ceph_oloc_destroy(&rbd_dev->header_oloc);
kfree(rbd_dev->config_info);
rbd_put_client(rbd_dev->rbd_client);
rbd_spec_put(rbd_dev->spec);
kfree(rbd_dev->opts);
kfree(rbd_dev);
}
static void rbd_dev_release(struct device *dev)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
bool need_put = !!rbd_dev->opts;
if (need_put) {
destroy_workqueue(rbd_dev->task_wq);
ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
}
rbd_dev_free(rbd_dev);
	/*
	 * This is racy, but way better than putting module_put()
	 * outside of the release callback.  The race window is pretty
	 * small, so doing something similar to dm (dm-builtin.c) is
	 * overkill.
	 */
if (need_put)
module_put(THIS_MODULE);
}
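/*
 * Allocate a new rbd_device and initialize its locks, work items,
 * lists and embedded struct device.
 */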
static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec)
{
struct rbd_device *rbd_dev;
rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
if (!rbd_dev)
return NULL;
spin_lock_init(&rbd_dev->lock);
INIT_LIST_HEAD(&rbd_dev->node);
init_rwsem(&rbd_dev->header_rwsem);
rbd_dev->header.data_pool_id = CEPH_NOPOOL;
ceph_oid_init(&rbd_dev->header_oid);
rbd_dev->header_oloc.pool = spec->pool_id;
if (spec->pool_ns) {
WARN_ON(!*spec->pool_ns);
rbd_dev->header_oloc.pool_ns =
ceph_find_or_create_string(spec->pool_ns,
strlen(spec->pool_ns));
}
mutex_init(&rbd_dev->watch_mutex);
rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
init_rwsem(&rbd_dev->lock_rwsem);
rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
spin_lock_init(&rbd_dev->lock_lists_lock);
INIT_LIST_HEAD(&rbd_dev->acquiring_list);
INIT_LIST_HEAD(&rbd_dev->running_list);
init_completion(&rbd_dev->acquire_wait);
init_completion(&rbd_dev->releasing_wait);
spin_lock_init(&rbd_dev->object_map_lock);
rbd_dev->dev.bus = &rbd_bus_type;
rbd_dev->dev.type = &rbd_device_type;
rbd_dev->dev.parent = &rbd_root_dev;
device_initialize(&rbd_dev->dev);
return rbd_dev;
}
/*
* Create a mapping rbd_dev.
*/
static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
struct rbd_spec *spec,
struct rbd_options *opts)
{
struct rbd_device *rbd_dev;
rbd_dev = __rbd_dev_create(spec);
if (!rbd_dev)
return NULL;
/* get an id and fill in device name */
rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
minor_to_rbd_dev_id(1 << MINORBITS),
GFP_KERNEL);
if (rbd_dev->dev_id < 0)
goto fail_rbd_dev;
sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
rbd_dev->name);
if (!rbd_dev->task_wq)
goto fail_dev_id;
/* we have a ref from do_rbd_add() */
__module_get(THIS_MODULE);
rbd_dev->rbd_client = rbdc;
rbd_dev->spec = spec;
rbd_dev->opts = opts;
dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
return rbd_dev;
fail_dev_id:
ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
fail_rbd_dev:
rbd_dev_free(rbd_dev);
return NULL;
}
static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
if (rbd_dev)
put_device(&rbd_dev->dev);
}
/*
* Get the size and object order for an image snapshot, or if
* snap_id is CEPH_NOSNAP, gets this information for the base
* image.
*/
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
u8 *order, u64 *snap_size)
{
__le64 snapid = cpu_to_le64(snap_id);
int ret;
struct {
u8 order;
__le64 size;
} __attribute__ ((packed)) size_buf = { 0 };
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
&rbd_dev->header_oloc, "get_size",
&snapid, sizeof(snapid),
&size_buf, sizeof(size_buf));
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
return ret;
if (ret < sizeof (size_buf))
return -ERANGE;
if (order) {
*order = size_buf.order;
dout(" order %u", (unsigned int)*order);
}
*snap_size = le64_to_cpu(size_buf.size);
dout(" snap_id 0x%016llx snap_size = %llu\n",
(unsigned long long)snap_id,
(unsigned long long)*snap_size);
return 0;
}
static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
&rbd_dev->header.obj_order,
&rbd_dev->header.image_size);
}
static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
size_t size;
void *reply_buf;
int ret;
void *p;
/* Response will be an encoded string, which includes a length */
size = sizeof(__le32) + RBD_OBJ_PREFIX_LEN_MAX;
reply_buf = kzalloc(size, GFP_KERNEL);
if (!reply_buf)
return -ENOMEM;
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
&rbd_dev->header_oloc, "get_object_prefix",
NULL, 0, reply_buf, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
goto out;
p = reply_buf;
rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
p + ret, NULL, GFP_NOIO);
ret = 0;
if (IS_ERR(rbd_dev->header.object_prefix)) {
ret = PTR_ERR(rbd_dev->header.object_prefix);
rbd_dev->header.object_prefix = NULL;
} else {
dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
}
out:
kfree(reply_buf);
return ret;
}
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
bool read_only, u64 *snap_features)
{
struct {
__le64 snap_id;
u8 read_only;
} features_in;
struct {
__le64 features;
__le64 incompat;
} __attribute__ ((packed)) features_buf = { 0 };
u64 unsup;
int ret;
features_in.snap_id = cpu_to_le64(snap_id);
features_in.read_only = read_only;
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
&rbd_dev->header_oloc, "get_features",
&features_in, sizeof(features_in),
&features_buf, sizeof(features_buf));
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
return ret;
if (ret < sizeof (features_buf))
return -ERANGE;
unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
if (unsup) {
rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
unsup);
return -ENXIO;
}
*snap_features = le64_to_cpu(features_buf.features);
dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
(unsigned long long)snap_id,
(unsigned long long)*snap_features,
(unsigned long long)le64_to_cpu(features_buf.incompat));
return 0;
}
static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
rbd_is_ro(rbd_dev),
&rbd_dev->header.features);
}
/*
* These are generic image flags, but since they are used only for
* object map, store them in rbd_dev->object_map_flags.
*
* For the same reason, this function is called only on object map
* (re)load and not on header refresh.
*/
static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev)
{
__le64 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
__le64 flags;
int ret;
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
&rbd_dev->header_oloc, "get_flags",
&snapid, sizeof(snapid),
&flags, sizeof(flags));
if (ret < 0)
return ret;
if (ret < sizeof(flags))
return -EBADMSG;
rbd_dev->object_map_flags = le64_to_cpu(flags);
return 0;
}
struct parent_image_info {
u64 pool_id;
const char *pool_ns;
const char *image_id;
u64 snap_id;
bool has_overlap;
u64 overlap;
};
/*
* The caller is responsible for @pii.
*/
static int decode_parent_image_spec(void **p, void *end,
struct parent_image_info *pii)
{
u8 struct_v;
u32 struct_len;
int ret;
ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
&struct_v, &struct_len);
if (ret)
return ret;
ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
if (IS_ERR(pii->pool_ns)) {
ret = PTR_ERR(pii->pool_ns);
pii->pool_ns = NULL;
return ret;
}
pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
if (IS_ERR(pii->image_id)) {
ret = PTR_ERR(pii->image_id);
pii->image_id = NULL;
return ret;
}
ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
return 0;
e_inval:
return -EINVAL;
}
static int __get_parent_info(struct rbd_device *rbd_dev,
struct page *req_page,
struct page *reply_page,
struct parent_image_info *pii)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
size_t reply_len = PAGE_SIZE;
void *p, *end;
int ret;
ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
"rbd", "parent_get", CEPH_OSD_FLAG_READ,
req_page, sizeof(u64), &reply_page, &reply_len);
if (ret)
return ret == -EOPNOTSUPP ? 1 : ret;
p = page_address(reply_page);
end = p + reply_len;
ret = decode_parent_image_spec(&p, end, pii);
if (ret)
return ret;
ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
"rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
req_page, sizeof(u64), &reply_page, &reply_len);
if (ret)
return ret;
p = page_address(reply_page);
end = p + reply_len;
ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
if (pii->has_overlap)
ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
return 0;
e_inval:
return -EINVAL;
}
/*
* The caller is responsible for @pii.
*/
static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
struct page *req_page,
struct page *reply_page,
struct parent_image_info *pii)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
size_t reply_len = PAGE_SIZE;
void *p, *end;
int ret;
ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
"rbd", "get_parent", CEPH_OSD_FLAG_READ,
req_page, sizeof(u64), &reply_page, &reply_len);
if (ret)
return ret;
p = page_address(reply_page);
end = p + reply_len;
ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
if (IS_ERR(pii->image_id)) {
ret = PTR_ERR(pii->image_id);
pii->image_id = NULL;
return ret;
}
ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
pii->has_overlap = true;
ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
return 0;
e_inval:
return -EINVAL;
}
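/*
 * The caller is responsible for @pii.  Tries the "parent_get" method
 * first and falls back to the legacy "get_parent" method on older
 * OSDs.
 */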
static int get_parent_info(struct rbd_device *rbd_dev,
struct parent_image_info *pii)
{
struct page *req_page, *reply_page;
void *p;
int ret;
req_page = alloc_page(GFP_KERNEL);
if (!req_page)
return -ENOMEM;
reply_page = alloc_page(GFP_KERNEL);
if (!reply_page) {
__free_page(req_page);
return -ENOMEM;
}
p = page_address(req_page);
ceph_encode_64(&p, rbd_dev->spec->snap_id);
ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
if (ret > 0)
ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
pii);
__free_page(req_page);
__free_page(reply_page);
return ret;
}
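/*
 * Fetch parent image info and update rbd_dev->parent_spec and
 * rbd_dev->parent_overlap accordingly.
 */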
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
struct rbd_spec *parent_spec;
struct parent_image_info pii = { 0 };
int ret;
parent_spec = rbd_spec_alloc();
if (!parent_spec)
return -ENOMEM;
ret = get_parent_info(rbd_dev, &pii);
if (ret)
goto out_err;
dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
__func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
pii.has_overlap, pii.overlap);
if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
		/*
		 * Either the parent never existed, or we have a
		 * record of it but the image got flattened so it no
		 * longer has a parent.  When the parent of a
		 * layered image disappears, we immediately set the
		 * overlap to 0.  The effect of this is that all new
		 * requests will be treated as if the image had no
		 * parent.
		 *
		 * If !pii.has_overlap, the parent image spec is not
		 * applicable.  It's there to avoid duplication in each
		 * snapshot record.
		 */
if (rbd_dev->parent_overlap) {
rbd_dev->parent_overlap = 0;
rbd_dev_parent_put(rbd_dev);
pr_info("%s: clone image has been flattened\n",
rbd_dev->disk->disk_name);
}
goto out; /* No parent? No problem. */
}
/* The ceph file layout needs to fit pool id in 32 bits */
ret = -EIO;
if (pii.pool_id > (u64)U32_MAX) {
rbd_warn(NULL, "parent pool id too large (%llu > %u)",
(unsigned long long)pii.pool_id, U32_MAX);
goto out_err;
}
	/*
	 * The parent won't change (except when the clone is
	 * flattened, which was handled above), so we only need to
	 * record the parent spec if we have not already done so.
	 */
if (!rbd_dev->parent_spec) {
parent_spec->pool_id = pii.pool_id;
if (pii.pool_ns && *pii.pool_ns) {
parent_spec->pool_ns = pii.pool_ns;
pii.pool_ns = NULL;
}
parent_spec->image_id = pii.image_id;
pii.image_id = NULL;
parent_spec->snap_id = pii.snap_id;
rbd_dev->parent_spec = parent_spec;
parent_spec = NULL; /* rbd_dev now owns this */
}
/*
* We always update the parent overlap. If it's zero we issue
* a warning, as we will proceed as if there was no parent.
*/
if (!pii.overlap) {
if (parent_spec) {
/* refresh, careful to warn just once */
if (rbd_dev->parent_overlap)
rbd_warn(rbd_dev,
"clone now standalone (overlap became 0)");
} else {
/* initial probe */
rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
}
}
rbd_dev->parent_overlap = pii.overlap;
out:
ret = 0;
out_err:
kfree(pii.pool_ns);
kfree(pii.image_id);
rbd_spec_put(parent_spec);
return ret;
}
static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
struct {
__le64 stripe_unit;
__le64 stripe_count;
} __attribute__ ((packed)) striping_info_buf = { 0 };
size_t size = sizeof (striping_info_buf);
void *p;
int ret;
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
&rbd_dev->header_oloc, "get_stripe_unit_count",
NULL, 0, &striping_info_buf, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
return ret;
if (ret < size)
return -ERANGE;
p = &striping_info_buf;
rbd_dev->header.stripe_unit = ceph_decode_64(&p);
rbd_dev->header.stripe_count = ceph_decode_64(&p);
return 0;
}
static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
{
__le64 data_pool_id;
int ret;
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
&rbd_dev->header_oloc, "get_data_pool",
NULL, 0, &data_pool_id, sizeof(data_pool_id));
if (ret < 0)
return ret;
if (ret < sizeof(data_pool_id))
return -EBADMSG;
rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
return 0;
}
static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
CEPH_DEFINE_OID_ONSTACK(oid);
size_t image_id_size;
char *image_id;
void *p;
void *end;
size_t size;
void *reply_buf = NULL;
size_t len = 0;
char *image_name = NULL;
int ret;
rbd_assert(!rbd_dev->spec->image_name);
len = strlen(rbd_dev->spec->image_id);
image_id_size = sizeof (__le32) + len;
image_id = kmalloc(image_id_size, GFP_KERNEL);
if (!image_id)
return NULL;
p = image_id;
end = image_id + image_id_size;
ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
reply_buf = kmalloc(size, GFP_KERNEL);
if (!reply_buf)
goto out;
ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
"dir_get_name", image_id, image_id_size,
reply_buf, size);
if (ret < 0)
goto out;
p = reply_buf;
end = reply_buf + ret;
image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
if (IS_ERR(image_name))
image_name = NULL;
else
dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
kfree(reply_buf);
kfree(image_id);
return image_name;
}
static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
struct ceph_snap_context *snapc = rbd_dev->header.snapc;
const char *snap_name;
u32 which = 0;
/* Skip over names until we find the one we are looking for */
snap_name = rbd_dev->header.snap_names;
while (which < snapc->num_snaps) {
if (!strcmp(name, snap_name))
return snapc->snaps[which];
snap_name += strlen(snap_name) + 1;
which++;
}
return CEPH_NOSNAP;
}
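/*
 * Editor's note: in a format 1 header, snap_names is one buffer of
 * consecutive NUL-terminated names, parallel to snapc->snaps, e.g.
 * "mon\0tue\0wed\0".  The loop above steps from name to name by
 * advancing strlen(name) + 1 bytes.  A standalone sketch of the same
 * walk over made-up data (not driver code):
 */
#if 0
	const char names[] = "mon\0tue\0wed";	/* three names, back to back */
	const char *n = names;
	unsigned int i;
	for (i = 0; i < 3; i++) {
		/* n points at the i-th name here */
		n += strlen(n) + 1;
	}
#endif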
static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
struct ceph_snap_context *snapc = rbd_dev->header.snapc;
u32 which;
bool found = false;
u64 snap_id;
for (which = 0; !found && which < snapc->num_snaps; which++) {
const char *snap_name;
snap_id = snapc->snaps[which];
snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
if (IS_ERR(snap_name)) {
/* ignore no-longer existing snapshots */
if (PTR_ERR(snap_name) == -ENOENT)
continue;
else
break;
}
found = !strcmp(name, snap_name);
kfree(snap_name);
}
return found ? snap_id : CEPH_NOSNAP;
}
/*
* Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
* no snapshot by that name is found, or if an error occurs.
*/
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
if (rbd_dev->image_format == 1)
return rbd_v1_snap_id_by_name(rbd_dev, name);
return rbd_v2_snap_id_by_name(rbd_dev, name);
}
/*
* An image being mapped will have everything but the snap id.
*/
static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
{
struct rbd_spec *spec = rbd_dev->spec;
rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
rbd_assert(spec->image_id && spec->image_name);
rbd_assert(spec->snap_name);
if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
u64 snap_id;
snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
if (snap_id == CEPH_NOSNAP)
return -ENOENT;
spec->snap_id = snap_id;
} else {
spec->snap_id = CEPH_NOSNAP;
}
return 0;
}
/*
* A parent image will have all ids but none of the names.
*
* All names in an rbd spec are dynamically allocated. It's OK if we
* can't figure out the name for an image id.
*/
static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_spec *spec = rbd_dev->spec;
const char *pool_name;
const char *image_name;
const char *snap_name;
int ret;
rbd_assert(spec->pool_id != CEPH_NOPOOL);
rbd_assert(spec->image_id);
rbd_assert(spec->snap_id != CEPH_NOSNAP);
/* Get the pool name; we have to make our own copy of this */
pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
if (!pool_name) {
rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
return -EIO;
}
pool_name = kstrdup(pool_name, GFP_KERNEL);
if (!pool_name)
return -ENOMEM;
/* Fetch the image name; tolerate failure here */
image_name = rbd_dev_image_name(rbd_dev);
if (!image_name)
rbd_warn(rbd_dev, "unable to get image name");
/* Fetch the snapshot name */
snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
if (IS_ERR(snap_name)) {
ret = PTR_ERR(snap_name);
goto out_err;
}
spec->pool_name = pool_name;
spec->image_name = image_name;
spec->snap_name = snap_name;
return 0;
out_err:
kfree(image_name);
kfree(pool_name);
return ret;
}
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
size_t size;
int ret;
void *reply_buf;
void *p;
void *end;
u64 seq;
u32 snap_count;
struct ceph_snap_context *snapc;
u32 i;
/*
* We'll need room for the seq value (maximum snapshot id),
* snapshot count, and array of that many snapshot ids.
* For now we have a fixed upper limit on the number we're
* prepared to receive.
*/
size = sizeof (__le64) + sizeof (__le32) +
RBD_MAX_SNAP_COUNT * sizeof (__le64);
reply_buf = kzalloc(size, GFP_KERNEL);
if (!reply_buf)
return -ENOMEM;
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
&rbd_dev->header_oloc, "get_snapcontext",
NULL, 0, reply_buf, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
goto out;
p = reply_buf;
end = reply_buf + ret;
ret = -ERANGE;
ceph_decode_64_safe(&p, end, seq, out);
ceph_decode_32_safe(&p, end, snap_count, out);
/*
* Make sure the reported number of snapshot ids wouldn't go
* beyond the end of our buffer. But before checking that,
* make sure the computed size of the snapshot context we
* allocate is representable in a size_t.
*/
if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
/ sizeof (u64)) {
ret = -EINVAL;
goto out;
}
if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
goto out;
ret = 0;
snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
if (!snapc) {
ret = -ENOMEM;
goto out;
}
snapc->seq = seq;
for (i = 0; i < snap_count; i++)
snapc->snaps[i] = ceph_decode_64(&p);
ceph_put_snap_context(rbd_dev->header.snapc);
rbd_dev->header.snapc = snapc;
dout(" snap context seq = %llu, snap_count = %u\n",
(unsigned long long)seq, (unsigned int)snap_count);
out:
kfree(reply_buf);
return ret;
}
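/*
 * Editor's note: an illustrative sketch (not driver code) of the
 * overflow check used above.  Computing header + count * element
 * directly can wrap around in a size_t, so the untrusted count is
 * bounded first; only then is the multiplication known to be safe.
 * The variable names are hypothetical.
 */
#if 0
	size_t hdr = sizeof(struct ceph_snap_context);
	u32 count = 0;	/* stand-in for the untrusted decoded value */
	if (count > (SIZE_MAX - hdr) / sizeof(u64))
		return -EINVAL;	/* hdr + count * sizeof(u64) would wrap */
	/* safe: hdr + count * sizeof(u64) fits in a size_t */
#endif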
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
u64 snap_id)
{
size_t size;
void *reply_buf;
__le64 snapid;
int ret;
void *p;
void *end;
char *snap_name;
size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
reply_buf = kmalloc(size, GFP_KERNEL);
if (!reply_buf)
return ERR_PTR(-ENOMEM);
snapid = cpu_to_le64(snap_id);
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
&rbd_dev->header_oloc, "get_snapshot_name",
&snapid, sizeof(snapid), reply_buf, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0) {
snap_name = ERR_PTR(ret);
goto out;
}
p = reply_buf;
end = reply_buf + ret;
snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
if (IS_ERR(snap_name))
goto out;
dout(" snap_id 0x%016llx snap_name = %s\n",
(unsigned long long)snap_id, snap_name);
out:
kfree(reply_buf);
return snap_name;
}
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
bool first_time = rbd_dev->header.object_prefix == NULL;
int ret;
ret = rbd_dev_v2_image_size(rbd_dev);
if (ret)
return ret;
if (first_time) {
ret = rbd_dev_v2_header_onetime(rbd_dev);
if (ret)
return ret;
}
ret = rbd_dev_v2_snap_context(rbd_dev);
if (ret && first_time) {
kfree(rbd_dev->header.object_prefix);
rbd_dev->header.object_prefix = NULL;
}
return ret;
}
static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
if (rbd_dev->image_format == 1)
return rbd_dev_v1_header_info(rbd_dev);
return rbd_dev_v2_header_info(rbd_dev);
}
/*
* Skips over white space at *buf, and updates *buf to point to the
* first found non-space character (if any). Returns the length of
* the token (string of non-white space characters) found. Note
* that *buf must be terminated with '\0'.
*/
static inline size_t next_token(const char **buf)
{
/*
* These are the characters that produce nonzero for
* isspace() in the "C" and "POSIX" locales.
*/
static const char spaces[] = " \f\n\r\t\v";
*buf += strspn(*buf, spaces); /* Find start of token */
return strcspn(*buf, spaces); /* Return token length */
}
/*
* Finds the next token in *buf, dynamically allocates a buffer big
* enough to hold a copy of it, and copies the token into the new
* buffer. The copy is guaranteed to be terminated with '\0'. Note
* that a duplicate buffer is created even for a zero-length token.
*
* Returns a pointer to the newly-allocated duplicate, or a null
* pointer if memory for the duplicate was not available. If
* the lenp argument is a non-null pointer, the length of the token
* (not including the '\0') is returned in *lenp.
*
* If successful, the *buf pointer will be updated to point beyond
* the end of the found token.
*
* Note: uses GFP_KERNEL for allocation.
*/
static inline char *dup_token(const char **buf, size_t *lenp)
{
char *dup;
size_t len;
len = next_token(buf);
dup = kmemdup(*buf, len + 1, GFP_KERNEL);
if (!dup)
return NULL;
*(dup + len) = '\0';
*buf += len;
if (lenp)
*lenp = len;
return dup;
}
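/*
 * Editor's note: a minimal usage sketch (not driver code) for
 * next_token()/dup_token() above, walking a space-separated buffer
 * the way rbd_add_parse_args() does.  The input string is made up.
 */
#if 0
	const char *buf = "rbd myimage mysnap";
	char *pool, *image;
	size_t len;
	pool = dup_token(&buf, NULL);	/* "rbd"; buf left just past the token */
	image = dup_token(&buf, &len);	/* "myimage", len == 7 */
	len = next_token(&buf);		/* 6; buf now points at "mysnap" */
	kfree(pool);
	kfree(image);
#endif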
static int rbd_parse_param(struct fs_parameter *param,
struct rbd_parse_opts_ctx *pctx)
{
struct rbd_options *opt = pctx->opts;
struct fs_parse_result result;
struct p_log log = {.prefix = "rbd"};
int token, ret;
ret = ceph_parse_param(param, pctx->copts, NULL);
if (ret != -ENOPARAM)
return ret;
token = __fs_parse(&log, rbd_parameters, param, &result);
dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
if (token < 0) {
if (token == -ENOPARAM)
return inval_plog(&log, "Unknown parameter '%s'",
param->key);
return token;
}
switch (token) {
case Opt_queue_depth:
if (result.uint_32 < 1)
goto out_of_range;
opt->queue_depth = result.uint_32;
break;
case Opt_alloc_size:
if (result.uint_32 < SECTOR_SIZE)
goto out_of_range;
if (!is_power_of_2(result.uint_32))
return inval_plog(&log, "alloc_size must be a power of 2");
opt->alloc_size = result.uint_32;
break;
case Opt_lock_timeout:
/* 0 is "wait forever" (i.e. infinite timeout) */
if (result.uint_32 > INT_MAX / 1000)
goto out_of_range;
opt->lock_timeout = msecs_to_jiffies(result.uint_32 * 1000);
break;
case Opt_pool_ns:
kfree(pctx->spec->pool_ns);
pctx->spec->pool_ns = param->string;
param->string = NULL;
break;
case Opt_compression_hint:
switch (result.uint_32) {
case Opt_compression_hint_none:
opt->alloc_hint_flags &=
~(CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE |
CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE);
break;
case Opt_compression_hint_compressible:
opt->alloc_hint_flags |=
CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
opt->alloc_hint_flags &=
~CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
break;
case Opt_compression_hint_incompressible:
opt->alloc_hint_flags |=
CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
opt->alloc_hint_flags &=
~CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
break;
default:
BUG();
}
break;
case Opt_read_only:
opt->read_only = true;
break;
case Opt_read_write:
opt->read_only = false;
break;
case Opt_lock_on_read:
opt->lock_on_read = true;
break;
case Opt_exclusive:
opt->exclusive = true;
break;
case Opt_notrim:
opt->trim = false;
break;
default:
BUG();
}
return 0;
out_of_range:
return inval_plog(&log, "%s out of range", param->key);
}
/*
* This duplicates most of generic_parse_monolithic(), untying it from
* fs_context and skipping standard superblock and security options.
*/
static int rbd_parse_options(char *options, struct rbd_parse_opts_ctx *pctx)
{
char *key;
int ret = 0;
dout("%s '%s'\n", __func__, options);
while ((key = strsep(&options, ",")) != NULL) {
if (*key) {
struct fs_parameter param = {
.key = key,
.type = fs_value_is_flag,
};
char *value = strchr(key, '=');
size_t v_len = 0;
if (value) {
if (value == key)
continue;
*value++ = 0;
v_len = strlen(value);
param.string = kmemdup_nul(value, v_len,
GFP_KERNEL);
if (!param.string)
return -ENOMEM;
param.type = fs_value_is_string;
}
param.size = v_len;
ret = rbd_parse_param(&param, pctx);
kfree(param.string);
if (ret)
break;
}
}
return ret;
}
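/*
 * Editor's note: an illustrative sketch (not driver code) of the
 * destructive strsep() splitting done above: the options string is
 * cut at each ',' and each piece is then cut once at its first '='.
 * The sample string is made up.
 */
#if 0
	char opts[] = "queue_depth=16,read_only";
	char *options = opts, *key, *value;
	while ((key = strsep(&options, ",")) != NULL) {
		value = strchr(key, '=');
		if (value)
			*value++ = '\0';
		/* 1st pass: key = "queue_depth", value = "16" */
		/* 2nd pass: key = "read_only",  value = NULL */
	}
#endif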
/*
* Parse the options provided for an "rbd add" (i.e., rbd image
* mapping) request. These arrive via a write to /sys/bus/rbd/add,
* and the data written is passed here via a NUL-terminated buffer.
* Returns 0 if successful or an error code otherwise.
*
* The information extracted from these options is recorded in
* the other parameters which return dynamically-allocated
* structures:
* ceph_opts
* The address of a pointer that will refer to a ceph options
* structure. Caller must release the returned pointer using
* ceph_destroy_options() when it is no longer needed.
* rbd_opts
* Address of an rbd options pointer. Fully initialized by
* this function; caller must release with kfree().
* spec
* Address of an rbd image specification pointer. Fully
* initialized by this function based on parsed options.
* Caller must release with rbd_spec_put().
*
* The options passed take this form:
* <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
* where:
* <mon_addrs>
* A comma-separated list of one or more monitor addresses.
* A monitor address is an ip address, optionally followed
* by a port number (separated by a colon).
* I.e.: ip1[:port1][,ip2[:port2]...]
* <options>
* A comma-separated list of ceph and/or rbd options.
* <pool_name>
* The name of the rados pool containing the rbd image.
* <image_name>
* The name of the image in that pool to map.
* <snap_name>
* An optional snapshot name. If provided, the mapping will
* present data from the image at the time that snapshot was
* created. The image head is used if no snapshot name is
* provided. Snapshot mappings are always read-only.
*/
static int rbd_add_parse_args(const char *buf,
struct ceph_options **ceph_opts,
struct rbd_options **opts,
struct rbd_spec **rbd_spec)
{
size_t len;
char *options;
const char *mon_addrs;
char *snap_name;
size_t mon_addrs_size;
struct rbd_parse_opts_ctx pctx = { 0 };
int ret;
/* The first four tokens are required */
len = next_token(&buf);
if (!len) {
rbd_warn(NULL, "no monitor address(es) provided");
return -EINVAL;
}
mon_addrs = buf;
mon_addrs_size = len;
buf += len;
ret = -EINVAL;
options = dup_token(&buf, NULL);
if (!options)
return -ENOMEM;
if (!*options) {
rbd_warn(NULL, "no options provided");
goto out_err;
}
pctx.spec = rbd_spec_alloc();
if (!pctx.spec)
goto out_mem;
pctx.spec->pool_name = dup_token(&buf, NULL);
if (!pctx.spec->pool_name)
goto out_mem;
if (!*pctx.spec->pool_name) {
rbd_warn(NULL, "no pool name provided");
goto out_err;
}
pctx.spec->image_name = dup_token(&buf, NULL);
if (!pctx.spec->image_name)
goto out_mem;
if (!*pctx.spec->image_name) {
rbd_warn(NULL, "no image name provided");
goto out_err;
}
/*
* Snapshot name is optional; default is to use "-"
* (indicating the head/no snapshot).
*/
len = next_token(&buf);
if (!len) {
buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
} else if (len > RBD_MAX_SNAP_NAME_LEN) {
ret = -ENAMETOOLONG;
goto out_err;
}
snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
if (!snap_name)
goto out_mem;
*(snap_name + len) = '\0';
pctx.spec->snap_name = snap_name;
pctx.copts = ceph_alloc_options();
if (!pctx.copts)
goto out_mem;
/* Initialize all rbd options to the defaults */
pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
if (!pctx.opts)
goto out_mem;
pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
pctx.opts->alloc_size = RBD_ALLOC_SIZE_DEFAULT;
pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
pctx.opts->trim = RBD_TRIM_DEFAULT;
ret = ceph_parse_mon_ips(mon_addrs, mon_addrs_size, pctx.copts, NULL,
',');
if (ret)
goto out_err;
ret = rbd_parse_options(options, &pctx);
if (ret)
goto out_err;
*ceph_opts = pctx.copts;
*opts = pctx.opts;
*rbd_spec = pctx.spec;
kfree(options);
return 0;
out_mem:
ret = -ENOMEM;
out_err:
kfree(pctx.opts);
ceph_destroy_options(pctx.copts);
rbd_spec_put(pctx.spec);
kfree(options);
return ret;
}
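/*
 * Editor's note: a hypothetical example of the buffer this function
 * parses, matching the <mon_addrs> <options> <pool_name> <image_name>
 * [<snap_name>] layout documented above (address and names made up):
 *
 *   echo "1.2.3.4:6789 name=admin,queue_depth=16 rbd myimage mysnap" \
 *       > /sys/bus/rbd/add
 *
 * Omitting the trailing snapshot name maps the image head ("-").
 */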
static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
{
down_write(&rbd_dev->lock_rwsem);
if (__rbd_is_lock_owner(rbd_dev))
__rbd_release_lock(rbd_dev);
up_write(&rbd_dev->lock_rwsem);
}
/*
* If the wait is interrupted, an error is returned even if the lock
* was successfully acquired. rbd_dev_image_unlock() will release it
* if needed.
*/
static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
{
long ret;
if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
if (!rbd_dev->opts->exclusive && !rbd_dev->opts->lock_on_read)
return 0;
rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
return -EINVAL;
}
if (rbd_is_ro(rbd_dev))
return 0;
rbd_assert(!rbd_is_lock_owner(rbd_dev));
queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
if (ret > 0) {
ret = rbd_dev->acquire_err;
} else {
cancel_delayed_work_sync(&rbd_dev->lock_dwork);
if (!ret)
ret = -ETIMEDOUT;
rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret);
}
if (ret)
return ret;
/*
* The lock may have been released by now, unless automatic lock
* transitions are disabled.
*/
rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev));
return 0;
}
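/*
 * Editor's note: the branches above follow the return convention of
 * wait_for_completion_killable_timeout(): > 0 means the completion
 * fired (remaining jiffies), 0 means the timeout expired, and < 0
 * (-ERESTARTSYS) means a fatal signal interrupted the wait.  A
 * condensed sketch of that mapping (not driver code):
 */
#if 0
	struct completion done;
	long ret;
	init_completion(&done);
	ret = wait_for_completion_killable_timeout(&done, HZ);
	if (ret > 0)
		ret = 0;		/* completed: consult the real status */
	else if (ret == 0)
		ret = -ETIMEDOUT;	/* timed out */
	/* else ret == -ERESTARTSYS: interrupted */
#endif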
/*
* An rbd format 2 image has a unique identifier, distinct from the
* name given to it by the user. Internally, that identifier is
* what's used to specify the names of objects related to the image.
*
* A special "rbd id" object is used to map an rbd image name to its
* id. If that object doesn't exist, then there is no v2 rbd image
* with the supplied name.
*
* This function will record the given rbd_dev's image_id field if
* it can be determined, and in that case will return 0. If any
* errors occur a negative errno will be returned and the rbd_dev's
* image_id field will be unchanged (and should be NULL).
*/
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
int ret;
size_t size;
CEPH_DEFINE_OID_ONSTACK(oid);
void *response;
char *image_id;
/*
* When probing a parent image, the image id is already
* known (and the image name likely is not). There's no
* need to fetch the image id again in this case. We
* do still need to set the image format though.
*/
if (rbd_dev->spec->image_id) {
rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
return 0;
}
/*
* First, see if the format 2 image id file exists, and if
* so, get the image's persistent id from it.
*/
ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
rbd_dev->spec->image_name);
if (ret)
return ret;
dout("rbd id object name is %s\n", oid.name);
/* Response will be an encoded string, which includes a length */
size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
response = kzalloc(size, GFP_NOIO);
if (!response) {
ret = -ENOMEM;
goto out;
}
/* If it doesn't exist we'll assume it's a format 1 image */
ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
"get_id", NULL, 0,
response, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret == -ENOENT) {
image_id = kstrdup("", GFP_KERNEL);
ret = image_id ? 0 : -ENOMEM;
if (!ret)
rbd_dev->image_format = 1;
} else if (ret >= 0) {
void *p = response;
image_id = ceph_extract_encoded_string(&p, p + ret,
NULL, GFP_NOIO);
ret = PTR_ERR_OR_ZERO(image_id);
if (!ret)
rbd_dev->image_format = 2;
}
if (!ret) {
rbd_dev->spec->image_id = image_id;
dout("image_id is %s\n", image_id);
}
out:
kfree(response);
ceph_oid_destroy(&oid);
return ret;
}
/*
* Undo whatever state changes are made by v1 or v2 header info
* call.
*/
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
struct rbd_image_header *header;
rbd_dev_parent_put(rbd_dev);
rbd_object_map_free(rbd_dev);
rbd_dev_mapping_clear(rbd_dev);
/* Free dynamic fields from the header, then zero it out */
header = &rbd_dev->header;
ceph_put_snap_context(header->snapc);
kfree(header->snap_sizes);
kfree(header->snap_names);
kfree(header->object_prefix);
memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
int ret;
ret = rbd_dev_v2_object_prefix(rbd_dev);
if (ret)
goto out_err;
/*
* Get and check the features for the image. Currently the
* features are assumed to never change.
*/
ret = rbd_dev_v2_features(rbd_dev);
if (ret)
goto out_err;
/* If the image supports fancy striping, get its parameters */
if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
ret = rbd_dev_v2_striping_info(rbd_dev);
if (ret < 0)
goto out_err;
}
if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
ret = rbd_dev_v2_data_pool(rbd_dev);
if (ret)
goto out_err;
}
rbd_init_layout(rbd_dev);
return 0;
out_err:
rbd_dev->header.features = 0;
kfree(rbd_dev->header.object_prefix);
rbd_dev->header.object_prefix = NULL;
return ret;
}
/*
* @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
* rbd_dev_image_probe() recursion depth, which means it's also the
* length of the already discovered part of the parent chain.
*/
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
{
struct rbd_device *parent = NULL;
int ret;
if (!rbd_dev->parent_spec)
return 0;
if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
pr_info("parent chain is too long (%d)\n", depth);
ret = -EINVAL;
goto out_err;
}
parent = __rbd_dev_create(rbd_dev->parent_spec);
if (!parent) {
ret = -ENOMEM;
goto out_err;
}
/*
* Images related by parent/child relationships always share
* rbd_client and spec/parent_spec, so bump their refcounts.
*/
parent->rbd_client = __rbd_get_client(rbd_dev->rbd_client);
parent->spec = rbd_spec_get(rbd_dev->parent_spec);
__set_bit(RBD_DEV_FLAG_READONLY, &parent->flags);
ret = rbd_dev_image_probe(parent, depth);
if (ret < 0)
goto out_err;
rbd_dev->parent = parent;
atomic_set(&rbd_dev->parent_ref, 1);
return 0;
out_err:
rbd_dev_unparent(rbd_dev);
rbd_dev_destroy(parent);
return ret;
}
static void rbd_dev_device_release(struct rbd_device *rbd_dev)
{
clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
rbd_free_disk(rbd_dev);
if (!single_major)
unregister_blkdev(rbd_dev->major, rbd_dev->name);
}
/*
* rbd_dev->header_rwsem must be locked for write and will be unlocked
* upon return.
*/
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
int ret;
/* Record our major and minor device numbers. */
if (!single_major) {
ret = register_blkdev(0, rbd_dev->name);
if (ret < 0)
goto err_out_unlock;
rbd_dev->major = ret;
rbd_dev->minor = 0;
} else {
rbd_dev->major = rbd_major;
rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
}
/* Set up the blkdev mapping. */
ret = rbd_init_disk(rbd_dev);
if (ret)
goto err_out_blkdev;
set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
set_disk_ro(rbd_dev->disk, rbd_is_ro(rbd_dev));
ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
if (ret)
goto err_out_disk;
set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
up_write(&rbd_dev->header_rwsem);
return 0;
err_out_disk:
rbd_free_disk(rbd_dev);
err_out_blkdev:
if (!single_major)
unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_unlock:
up_write(&rbd_dev->header_rwsem);
return ret;
}
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
struct rbd_spec *spec = rbd_dev->spec;
int ret;
/* Record the header object name for this rbd image. */
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
if (rbd_dev->image_format == 1)
ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
spec->image_name, RBD_SUFFIX);
else
ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
RBD_HEADER_PREFIX, spec->image_id);
return ret;
}
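/*
 * Editor's note: with the usual RBD_SUFFIX (".rbd") and
 * RBD_HEADER_PREFIX ("rbd_header.") values this yields, for example
 * (names hypothetical):
 *
 *   format 1, image "foo":         header object "foo.rbd"
 *   format 2, image id "101a2b3c": header object "rbd_header.101a2b3c"
 *
 * i.e. format 1 keys the header by image name, format 2 by image id.
 */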
static void rbd_print_dne(struct rbd_device *rbd_dev, bool is_snap)
{
if (!is_snap) {
pr_info("image %s/%s%s%s does not exist\n",
rbd_dev->spec->pool_name,
rbd_dev->spec->pool_ns ?: "",
rbd_dev->spec->pool_ns ? "/" : "",
rbd_dev->spec->image_name);
} else {
pr_info("snap %s/%s%s%s@%s does not exist\n",
rbd_dev->spec->pool_name,
rbd_dev->spec->pool_ns ?: "",
rbd_dev->spec->pool_ns ? "/" : "",
rbd_dev->spec->image_name,
rbd_dev->spec->snap_name);
}
}
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
if (!rbd_is_ro(rbd_dev))
rbd_unregister_watch(rbd_dev);
rbd_dev_unprobe(rbd_dev);
rbd_dev->image_format = 0;
kfree(rbd_dev->spec->image_id);
rbd_dev->spec->image_id = NULL;
}
/*
* Probe for the existence of the header object for the given rbd
* device. If this image is the one being mapped (i.e., not a
* parent), initiate a watch on its header object before using that
* object to get detailed information about the rbd image.
*
* On success, returns with header_rwsem held for write if called
* with @depth == 0.
*/
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
bool need_watch = !rbd_is_ro(rbd_dev);
int ret;
/*
* Get the id from the image id object. Unless there's an
* error, rbd_dev->spec->image_id will be filled in with
* a dynamically-allocated string, and rbd_dev->image_format
* will be set to either 1 or 2.
*/
ret = rbd_dev_image_id(rbd_dev);
if (ret)
return ret;
ret = rbd_dev_header_name(rbd_dev);
if (ret)
goto err_out_format;
if (need_watch) {
ret = rbd_register_watch(rbd_dev);
if (ret) {
if (ret == -ENOENT)
rbd_print_dne(rbd_dev, false);
goto err_out_format;
}
}
if (!depth)
down_write(&rbd_dev->header_rwsem);
ret = rbd_dev_header_info(rbd_dev);
if (ret) {
if (ret == -ENOENT && !need_watch)
rbd_print_dne(rbd_dev, false);
goto err_out_probe;
}
/*
* If this image is the one being mapped, we have pool name and
* id, image name and id, and snap name - need to fill snap id.
* Otherwise this is a parent image, identified by pool, image
* and snap ids - need to fill in names for those ids.
*/
if (!depth)
ret = rbd_spec_fill_snap_id(rbd_dev);
else
ret = rbd_spec_fill_names(rbd_dev);
if (ret) {
if (ret == -ENOENT)
rbd_print_dne(rbd_dev, true);
goto err_out_probe;
}
ret = rbd_dev_mapping_set(rbd_dev);
if (ret)
goto err_out_probe;
if (rbd_is_snap(rbd_dev) &&
(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) {
ret = rbd_object_map_load(rbd_dev);
if (ret)
goto err_out_probe;
}
if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
ret = rbd_dev_v2_parent_info(rbd_dev);
if (ret)
goto err_out_probe;
}
ret = rbd_dev_probe_parent(rbd_dev, depth);
if (ret)
goto err_out_probe;
dout("discovered format %u image, header name is %s\n",
rbd_dev->image_format, rbd_dev->header_oid.name);
return 0;
err_out_probe:
if (!depth)
up_write(&rbd_dev->header_rwsem);
if (need_watch)
rbd_unregister_watch(rbd_dev);
rbd_dev_unprobe(rbd_dev);
err_out_format:
rbd_dev->image_format = 0;
kfree(rbd_dev->spec->image_id);
rbd_dev->spec->image_id = NULL;
return ret;
}
static ssize_t do_rbd_add(const char *buf, size_t count)
{
struct rbd_device *rbd_dev = NULL;
struct ceph_options *ceph_opts = NULL;
struct rbd_options *rbd_opts = NULL;
struct rbd_spec *spec = NULL;
struct rbd_client *rbdc;
int rc;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (!try_module_get(THIS_MODULE))
return -ENODEV;
/* parse add command */
rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
if (rc < 0)
goto out;
rbdc = rbd_get_client(ceph_opts);
if (IS_ERR(rbdc)) {
rc = PTR_ERR(rbdc);
goto err_out_args;
}
/* pick the pool */
rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
if (rc < 0) {
if (rc == -ENOENT)
pr_info("pool %s does not exist\n", spec->pool_name);
goto err_out_client;
}
spec->pool_id = (u64)rc;
rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
if (!rbd_dev) {
rc = -ENOMEM;
goto err_out_client;
}
rbdc = NULL; /* rbd_dev now owns this */
spec = NULL; /* rbd_dev now owns this */
rbd_opts = NULL; /* rbd_dev now owns this */
/* if we are mapping a snapshot it will be a read-only mapping */
if (rbd_dev->opts->read_only ||
strcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME))
__set_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
if (!rbd_dev->config_info) {
rc = -ENOMEM;
goto err_out_rbd_dev;
}
rc = rbd_dev_image_probe(rbd_dev, 0);
if (rc < 0)
goto err_out_rbd_dev;
if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
rbd_warn(rbd_dev, "alloc_size adjusted to %u",
rbd_dev->layout.object_size);
rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
}
rc = rbd_dev_device_setup(rbd_dev);
if (rc)
goto err_out_image_probe;
rc = rbd_add_acquire_lock(rbd_dev);
if (rc)
goto err_out_image_lock;
/* Everything's ready. Announce the disk to the world. */
rc = device_add(&rbd_dev->dev);
if (rc)
goto err_out_image_lock;
rc = device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
if (rc)
goto err_out_cleanup_disk;
spin_lock(&rbd_dev_list_lock);
list_add_tail(&rbd_dev->node, &rbd_dev_list);
spin_unlock(&rbd_dev_list_lock);
pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
(unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
rbd_dev->header.features);
rc = count;
out:
module_put(THIS_MODULE);
return rc;
err_out_cleanup_disk:
rbd_free_disk(rbd_dev);
err_out_image_lock:
rbd_dev_image_unlock(rbd_dev);
rbd_dev_device_release(rbd_dev);
err_out_image_probe:
rbd_dev_image_release(rbd_dev);
err_out_rbd_dev:
rbd_dev_destroy(rbd_dev);
err_out_client:
rbd_put_client(rbdc);
err_out_args:
rbd_spec_put(spec);
kfree(rbd_opts);
goto out;
}
static ssize_t add_store(const struct bus_type *bus, const char *buf, size_t count)
{
if (single_major)
return -EINVAL;
return do_rbd_add(buf, count);
}
static ssize_t add_single_major_store(const struct bus_type *bus, const char *buf,
size_t count)
{
return do_rbd_add(buf, count);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
while (rbd_dev->parent) {
struct rbd_device *first = rbd_dev;
struct rbd_device *second = first->parent;
struct rbd_device *third;
/*
* Follow to the parent with no grandparent and
* remove it.
*/
while (second && (third = second->parent)) {
first = second;
second = third;
}
rbd_assert(second);
rbd_dev_image_release(second);
rbd_dev_destroy(second);
first->parent = NULL;
first->parent_overlap = 0;
rbd_assert(first->parent_spec);
rbd_spec_put(first->parent_spec);
first->parent_spec = NULL;
}
}
static ssize_t do_rbd_remove(const char *buf, size_t count)
{
struct rbd_device *rbd_dev = NULL;
int dev_id;
char opt_buf[6];
bool force = false;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
dev_id = -1;
opt_buf[0] = '\0';
sscanf(buf, "%d %5s", &dev_id, opt_buf);
if (dev_id < 0) {
pr_err("dev_id out of range\n");
return -EINVAL;
}
if (opt_buf[0] != '\0') {
if (!strcmp(opt_buf, "force")) {
force = true;
} else {
pr_err("bad remove option at '%s'\n", opt_buf);
return -EINVAL;
}
}
ret = -ENOENT;
spin_lock(&rbd_dev_list_lock);
list_for_each_entry(rbd_dev, &rbd_dev_list, node) {
if (rbd_dev->dev_id == dev_id) {
ret = 0;
break;
}
}
if (!ret) {
spin_lock_irq(&rbd_dev->lock);
if (rbd_dev->open_count && !force)
ret = -EBUSY;
else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
&rbd_dev->flags))
ret = -EINPROGRESS;
spin_unlock_irq(&rbd_dev->lock);
}
spin_unlock(&rbd_dev_list_lock);
if (ret)
return ret;
if (force) {
/*
* Prevent new IO from being queued and wait for existing
* IO to complete/fail.
*/
blk_mq_freeze_queue(rbd_dev->disk->queue);
blk_mark_disk_dead(rbd_dev->disk);
}
del_gendisk(rbd_dev->disk);
spin_lock(&rbd_dev_list_lock);
list_del_init(&rbd_dev->node);
spin_unlock(&rbd_dev_list_lock);
device_del(&rbd_dev->dev);
rbd_dev_image_unlock(rbd_dev);
rbd_dev_device_release(rbd_dev);
rbd_dev_image_release(rbd_dev);
rbd_dev_destroy(rbd_dev);
return count;
}
static ssize_t remove_store(const struct bus_type *bus, const char *buf, size_t count)
{
if (single_major)
return -EINVAL;
return do_rbd_remove(buf, count);
}
static ssize_t remove_single_major_store(const struct bus_type *bus, const char *buf,
size_t count)
{
return do_rbd_remove(buf, count);
}
/*
* create control files in sysfs
* /sys/bus/rbd/...
*/
static int __init rbd_sysfs_init(void)
{
int ret;
ret = device_register(&rbd_root_dev);
if (ret < 0) {
put_device(&rbd_root_dev);
return ret;
}
ret = bus_register(&rbd_bus_type);
if (ret < 0)
device_unregister(&rbd_root_dev);
return ret;
}
static void __exit rbd_sysfs_cleanup(void)
{
bus_unregister(&rbd_bus_type);
device_unregister(&rbd_root_dev);
}
static int __init rbd_slab_init(void)
{
rbd_assert(!rbd_img_request_cache);
rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
if (!rbd_img_request_cache)
return -ENOMEM;
rbd_assert(!rbd_obj_request_cache);
rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
if (!rbd_obj_request_cache)
goto out_err;
return 0;
out_err:
kmem_cache_destroy(rbd_img_request_cache);
rbd_img_request_cache = NULL;
return -ENOMEM;
}
static void rbd_slab_exit(void)
{
rbd_assert(rbd_obj_request_cache);
kmem_cache_destroy(rbd_obj_request_cache);
rbd_obj_request_cache = NULL;
rbd_assert(rbd_img_request_cache);
kmem_cache_destroy(rbd_img_request_cache);
rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
int rc;
if (!libceph_compatible(NULL)) {
rbd_warn(NULL, "libceph incompatibility (quitting)");
return -EINVAL;
}
rc = rbd_slab_init();
if (rc)
return rc;
/*
* The number of active work items is limited by the number of
* rbd devices * queue depth, so leave @max_active at default.
*/
rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
if (!rbd_wq) {
rc = -ENOMEM;
goto err_out_slab;
}
if (single_major) {
rbd_major = register_blkdev(0, RBD_DRV_NAME);
if (rbd_major < 0) {
rc = rbd_major;
goto err_out_wq;
}
}
rc = rbd_sysfs_init();
if (rc)
goto err_out_blkdev;
if (single_major)
pr_info("loaded (major %d)\n", rbd_major);
else
pr_info("loaded\n");
return 0;
err_out_blkdev:
if (single_major)
unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
destroy_workqueue(rbd_wq);
err_out_slab:
rbd_slab_exit();
return rc;
}
static void __exit rbd_exit(void)
{
ida_destroy(&rbd_dev_id_ida);
rbd_sysfs_cleanup();
if (single_major)
unregister_blkdev(rbd_major, RBD_DRV_NAME);
destroy_workqueue(rbd_wq);
rbd_slab_exit();
}
module_init(rbd_init);
module_exit(rbd_exit);
MODULE_AUTHOR("Alex Elder <[email protected]>");
MODULE_AUTHOR("Sage Weil <[email protected]>");
MODULE_AUTHOR("Yehuda Sadeh <[email protected]>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <[email protected]>");
MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/block/rbd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PS3 Disk Storage Driver
*
* Copyright (C) 2007 Sony Computer Entertainment Inc.
* Copyright 2007 Sony Corp.
*/
#include <linux/ata.h>
#include <linux/blk-mq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/lv1call.h>
#include <asm/ps3stor.h>
#include <asm/firmware.h>
#define DEVICE_NAME "ps3disk"
#define BOUNCE_SIZE (64*1024)
#define PS3DISK_MAX_DISKS 16
#define PS3DISK_MINORS 16
#define PS3DISK_NAME "ps3d%c"
struct ps3disk_private {
spinlock_t lock; /* Request queue spinlock */
struct blk_mq_tag_set tag_set;
struct gendisk *gendisk;
unsigned int blocking_factor;
struct request *req;
u64 raw_capacity;
unsigned char model[ATA_ID_PROD_LEN+1];
};
#define LV1_STORAGE_SEND_ATA_COMMAND (2)
#define LV1_STORAGE_ATA_HDDOUT (0x23)
struct lv1_ata_cmnd_block {
u16 features;
u16 sector_count;
u16 LBA_low;
u16 LBA_mid;
u16 LBA_high;
u8 device;
u8 command;
u32 is_ext;
u32 proto;
u32 in_out;
u32 size;
u64 buffer;
u32 arglen;
};
enum lv1_ata_proto {
NON_DATA_PROTO = 0,
PIO_DATA_IN_PROTO = 1,
PIO_DATA_OUT_PROTO = 2,
DMA_PROTO = 3
};
enum lv1_ata_in_out {
DIR_WRITE = 0, /* memory -> device */
DIR_READ = 1 /* device -> memory */
};
static int ps3disk_major;
static const struct block_device_operations ps3disk_fops = {
.owner = THIS_MODULE,
};
static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
struct request *req, int gather)
{
unsigned int offset = 0;
struct req_iterator iter;
struct bio_vec bvec;
rq_for_each_segment(bvec, req, iter) {
	if (gather)
		memcpy_from_bvec(dev->bounce_buf + offset, &bvec);
	else
		memcpy_to_bvec(&bvec, dev->bounce_buf + offset);
	offset += bvec.bv_len;	/* advance past the segment just copied */
}
}
static blk_status_t ps3disk_submit_request_sg(struct ps3_storage_device *dev,
struct request *req)
{
struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
int write = rq_data_dir(req), res;
const char *op = write ? "write" : "read";
u64 start_sector, sectors;
unsigned int region_id = dev->regions[dev->region_idx].id;
#ifdef DEBUG
unsigned int n = 0;
struct bio_vec bv;
struct req_iterator iter;
rq_for_each_segment(bv, req, iter)
n++;
dev_dbg(&dev->sbd.core,
"%s:%u: %s req has %u bvecs for %u sectors\n",
__func__, __LINE__, op, n, blk_rq_sectors(req));
#endif
start_sector = blk_rq_pos(req) * priv->blocking_factor;
sectors = blk_rq_sectors(req) * priv->blocking_factor;
dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n",
__func__, __LINE__, op, sectors, start_sector);
if (write) {
ps3disk_scatter_gather(dev, req, 1);
res = lv1_storage_write(dev->sbd.dev_id, region_id,
start_sector, sectors, 0,
dev->bounce_lpar, &dev->tag);
} else {
res = lv1_storage_read(dev->sbd.dev_id, region_id,
start_sector, sectors, 0,
dev->bounce_lpar, &dev->tag);
}
if (res) {
dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
__LINE__, op, res);
return BLK_STS_IOERR;
}
priv->req = req;
return BLK_STS_OK;
}
static blk_status_t ps3disk_submit_flush_request(struct ps3_storage_device *dev,
struct request *req)
{
struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
u64 res;
dev_dbg(&dev->sbd.core, "%s:%u: flush request\n", __func__, __LINE__);
res = lv1_storage_send_device_command(dev->sbd.dev_id,
LV1_STORAGE_ATA_HDDOUT, 0, 0, 0,
0, &dev->tag);
if (res) {
dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
__func__, __LINE__, res);
return BLK_STS_IOERR;
}
priv->req = req;
return BLK_STS_OK;
}
static blk_status_t ps3disk_do_request(struct ps3_storage_device *dev,
struct request *req)
{
dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
switch (req_op(req)) {
case REQ_OP_FLUSH:
return ps3disk_submit_flush_request(dev, req);
case REQ_OP_READ:
case REQ_OP_WRITE:
return ps3disk_submit_request_sg(dev, req);
default:
blk_dump_rq_flags(req, DEVICE_NAME " bad request");
return BLK_STS_IOERR;
}
}
static blk_status_t ps3disk_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request_queue *q = hctx->queue;
struct ps3_storage_device *dev = q->queuedata;
struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
blk_status_t ret;
blk_mq_start_request(bd->rq);
spin_lock_irq(&priv->lock);
ret = ps3disk_do_request(dev, bd->rq);
spin_unlock_irq(&priv->lock);
return ret;
}
static irqreturn_t ps3disk_interrupt(int irq, void *data)
{
struct ps3_storage_device *dev = data;
struct ps3disk_private *priv;
struct request *req;
int res, read;
blk_status_t error;
u64 tag, status;
const char *op;
res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
if (tag != dev->tag)
dev_err(&dev->sbd.core,
"%s:%u: tag mismatch, got %llx, expected %llx\n",
__func__, __LINE__, tag, dev->tag);
if (res) {
dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%llx\n",
__func__, __LINE__, res, status);
return IRQ_HANDLED;
}
priv = ps3_system_bus_get_drvdata(&dev->sbd);
req = priv->req;
if (!req) {
dev_dbg(&dev->sbd.core,
"%s:%u non-block layer request completed\n", __func__,
__LINE__);
dev->lv1_status = status;
complete(&dev->done);
return IRQ_HANDLED;
}
if (req_op(req) == REQ_OP_FLUSH) {
read = 0;
op = "flush";
} else {
read = !rq_data_dir(req);
op = read ? "read" : "write";
}
if (status) {
dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__,
__LINE__, op, status);
error = BLK_STS_IOERR;
} else {
dev_dbg(&dev->sbd.core, "%s:%u: %s completed\n", __func__,
__LINE__, op);
error = 0;
if (read)
ps3disk_scatter_gather(dev, req, 0);
}
spin_lock(&priv->lock);
priv->req = NULL;
blk_mq_end_request(req, error);
spin_unlock(&priv->lock);
blk_mq_run_hw_queues(priv->gendisk->queue, true);
return IRQ_HANDLED;
}
static int ps3disk_sync_cache(struct ps3_storage_device *dev)
{
u64 res;
dev_dbg(&dev->sbd.core, "%s:%u: sync cache\n", __func__, __LINE__);
res = ps3stor_send_command(dev, LV1_STORAGE_ATA_HDDOUT, 0, 0, 0, 0);
if (res) {
dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
__func__, __LINE__, res);
return -EIO;
}
return 0;
}
/* ATA helpers copied from drivers/ata/libata-core.c */
static void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
unsigned int i;
for (i = 0; i < buf_words; i++)
buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}
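/*
 * Editor's note: ATA IDENTIFY data is an array of little-endian u16
 * words, so on big-endian powerpc each word must be byte-swapped
 * before the ata_id_*() accessors are used; on little-endian builds
 * le16_to_cpu() is a no-op, hence the #ifdef.  A one-word sketch
 * (value made up):
 */
#if 0
	__le16 raw = cpu_to_le16(0x1234);	/* bytes 0x34 0x12 in memory */
	u16 host = le16_to_cpu(raw);		/* 0x1234 on any endianness */
#endif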
static u64 ata_id_n_sectors(const u16 *id)
{
if (ata_id_has_lba(id)) {
if (ata_id_has_lba48(id))
return ata_id_u64(id, 100);
else
return ata_id_u32(id, 60);
} else {
if (ata_id_current_chs_valid(id))
return ata_id_u32(id, 57);
else
return id[1] * id[3] * id[6];
}
}
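/*
 * Editor's note: the word offsets above follow the ATA IDENTIFY
 * layout: words 100-103 hold the 48-bit LBA sector count, words 60-61
 * the 28-bit LBA count, and words 1/3/6 the legacy CHS cylinders/
 * heads/sectors geometry.  ata_id_u64()/ata_id_u32() just stitch
 * adjacent 16-bit words together, roughly (sketch, not driver code):
 */
#if 0
	u16 id[256] = { 0 };	/* IDENTIFY data, ATA_ID_WORDS words */
	u64 lba48 = ((u64)id[103] << 48) | ((u64)id[102] << 32) |
		    ((u64)id[101] << 16) | (u64)id[100];
#endif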
static void ata_id_string(const u16 *id, unsigned char *s, unsigned int ofs,
unsigned int len)
{
unsigned int c;
while (len > 0) {
c = id[ofs] >> 8;
*s = c;
s++;
c = id[ofs] & 0xff;
*s = c;
s++;
ofs++;
len -= 2;
}
}
static void ata_id_c_string(const u16 *id, unsigned char *s, unsigned int ofs,
unsigned int len)
{
unsigned char *p;
WARN_ON(!(len & 1));
ata_id_string(id, s, ofs, len - 1);
p = s + strnlen(s, len - 1);
while (p > s && p[-1] == ' ')
p--;
*p = '\0';
}
static int ps3disk_identify(struct ps3_storage_device *dev)
{
struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
struct lv1_ata_cmnd_block ata_cmnd;
u16 *id = dev->bounce_buf;
u64 res;
dev_dbg(&dev->sbd.core, "%s:%u: identify disk\n", __func__, __LINE__);
memset(&ata_cmnd, 0, sizeof(struct lv1_ata_cmnd_block));
ata_cmnd.command = ATA_CMD_ID_ATA;
ata_cmnd.sector_count = 1;
ata_cmnd.size = ata_cmnd.arglen = ATA_ID_WORDS * 2;
ata_cmnd.buffer = dev->bounce_lpar;
ata_cmnd.proto = PIO_DATA_IN_PROTO;
ata_cmnd.in_out = DIR_READ;
res = ps3stor_send_command(dev, LV1_STORAGE_SEND_ATA_COMMAND,
ps3_mm_phys_to_lpar(__pa(&ata_cmnd)),
sizeof(ata_cmnd), ata_cmnd.buffer,
ata_cmnd.arglen);
if (res) {
dev_err(&dev->sbd.core, "%s:%u: identify disk failed 0x%llx\n",
__func__, __LINE__, res);
return -EIO;
}
swap_buf_le16(id, ATA_ID_WORDS);
/* All we're interested in are raw capacity and model name */
priv->raw_capacity = ata_id_n_sectors(id);
ata_id_c_string(id, priv->model, ATA_ID_PROD, sizeof(priv->model));
return 0;
}
static unsigned long ps3disk_mask;
static DEFINE_MUTEX(ps3disk_mask_mutex);
static const struct blk_mq_ops ps3disk_mq_ops = {
.queue_rq = ps3disk_queue_rq,
};
static int ps3disk_probe(struct ps3_system_bus_device *_dev)
{
struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
struct ps3disk_private *priv;
int error;
unsigned int devidx;
struct request_queue *queue;
struct gendisk *gendisk;
if (dev->blk_size < 512) {
dev_err(&dev->sbd.core,
"%s:%u: cannot handle block size %llu\n", __func__,
__LINE__, dev->blk_size);
return -EINVAL;
}
BUILD_BUG_ON(PS3DISK_MAX_DISKS > BITS_PER_LONG);
mutex_lock(&ps3disk_mask_mutex);
devidx = find_first_zero_bit(&ps3disk_mask, PS3DISK_MAX_DISKS);
if (devidx >= PS3DISK_MAX_DISKS) {
dev_err(&dev->sbd.core, "%s:%u: Too many disks\n", __func__,
__LINE__);
mutex_unlock(&ps3disk_mask_mutex);
return -ENOSPC;
}
__set_bit(devidx, &ps3disk_mask);
mutex_unlock(&ps3disk_mask_mutex);
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
error = -ENOMEM;
goto fail;
}
ps3_system_bus_set_drvdata(_dev, priv);
spin_lock_init(&priv->lock);
dev->bounce_size = BOUNCE_SIZE;
dev->bounce_buf = kmalloc(BOUNCE_SIZE, GFP_DMA);
if (!dev->bounce_buf) {
error = -ENOMEM;
goto fail_free_priv;
}
error = ps3stor_setup(dev, ps3disk_interrupt);
if (error)
goto fail_free_bounce;
ps3disk_identify(dev);
error = blk_mq_alloc_sq_tag_set(&priv->tag_set, &ps3disk_mq_ops, 1,
BLK_MQ_F_SHOULD_MERGE);
if (error)
goto fail_teardown;
gendisk = blk_mq_alloc_disk(&priv->tag_set, dev);
if (IS_ERR(gendisk)) {
dev_err(&dev->sbd.core, "%s:%u: blk_mq_alloc_disk failed\n",
__func__, __LINE__);
error = PTR_ERR(gendisk);
goto fail_free_tag_set;
}
queue = gendisk->queue;
blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9);
blk_queue_dma_alignment(queue, dev->blk_size-1);
blk_queue_logical_block_size(queue, dev->blk_size);
blk_queue_write_cache(queue, true, false);
blk_queue_max_segments(queue, -1);
blk_queue_max_segment_size(queue, dev->bounce_size);
priv->gendisk = gendisk;
gendisk->major = ps3disk_major;
gendisk->first_minor = devidx * PS3DISK_MINORS;
gendisk->minors = PS3DISK_MINORS;
gendisk->fops = &ps3disk_fops;
gendisk->private_data = dev;
snprintf(gendisk->disk_name, sizeof(gendisk->disk_name), PS3DISK_NAME,
devidx+'a');
priv->blocking_factor = dev->blk_size >> 9;
set_capacity(gendisk,
dev->regions[dev->region_idx].size*priv->blocking_factor);
dev_info(&dev->sbd.core,
"%s is a %s (%llu MiB total, %llu MiB for OtherOS)\n",
gendisk->disk_name, priv->model, priv->raw_capacity >> 11,
get_capacity(gendisk) >> 11);
error = device_add_disk(&dev->sbd.core, gendisk, NULL);
if (error)
goto fail_cleanup_disk;
return 0;
fail_cleanup_disk:
put_disk(gendisk);
fail_free_tag_set:
blk_mq_free_tag_set(&priv->tag_set);
fail_teardown:
ps3stor_teardown(dev);
fail_free_bounce:
kfree(dev->bounce_buf);
fail_free_priv:
kfree(priv);
ps3_system_bus_set_drvdata(_dev, NULL);
fail:
mutex_lock(&ps3disk_mask_mutex);
__clear_bit(devidx, &ps3disk_mask);
mutex_unlock(&ps3disk_mask_mutex);
return error;
}
static void ps3disk_remove(struct ps3_system_bus_device *_dev)
{
struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
mutex_lock(&ps3disk_mask_mutex);
__clear_bit(MINOR(disk_devt(priv->gendisk)) / PS3DISK_MINORS,
&ps3disk_mask);
mutex_unlock(&ps3disk_mask_mutex);
del_gendisk(priv->gendisk);
put_disk(priv->gendisk);
blk_mq_free_tag_set(&priv->tag_set);
dev_notice(&dev->sbd.core, "Synchronizing disk cache\n");
ps3disk_sync_cache(dev);
ps3stor_teardown(dev);
kfree(dev->bounce_buf);
kfree(priv);
ps3_system_bus_set_drvdata(_dev, NULL);
}
static struct ps3_system_bus_driver ps3disk = {
.match_id = PS3_MATCH_ID_STOR_DISK,
.core.name = DEVICE_NAME,
.core.owner = THIS_MODULE,
.probe = ps3disk_probe,
.remove = ps3disk_remove,
.shutdown = ps3disk_remove,
};
static int __init ps3disk_init(void)
{
int error;
if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
return -ENODEV;
error = register_blkdev(0, DEVICE_NAME);
if (error <= 0) {
printk(KERN_ERR "%s:%u: register_blkdev failed %d\n", __func__,
__LINE__, error);
return error;
}
ps3disk_major = error;
pr_info("%s:%u: registered block device major %d\n", __func__,
__LINE__, ps3disk_major);
error = ps3_system_bus_driver_register(&ps3disk);
if (error)
unregister_blkdev(ps3disk_major, DEVICE_NAME);
return error;
}
static void __exit ps3disk_exit(void)
{
ps3_system_bus_driver_unregister(&ps3disk);
unregister_blkdev(ps3disk_major, DEVICE_NAME);
}
module_init(ps3disk_init);
module_exit(ps3disk_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PS3 Disk Storage Driver");
MODULE_AUTHOR("Sony Corporation");
MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_DISK);
| linux-master | drivers/block/ps3disk.c |
/*
** z2ram - Amiga pseudo-driver to access 16bit-RAM in ZorroII space
** as a block device, to be used as a RAM disk or swap space
**
** Copyright (C) 1994 by Ingo Wilken ([email protected])
**
** ++Geert: support for zorro_unused_z2ram, better range checking
** ++roman: translate accesses via an array
** ++Milan: support for ChipRAM usage
** ++yambo: converted to 2.0 kernel
** ++yambo: modularized and support added for 3 minor devices including:
** MAJOR MINOR DESCRIPTION
** ----- ----- ----------------------------------------------
** 37 0 Use Zorro II and Chip ram
** 37 1 Use only Zorro II ram
** 37 2 Use only Chip ram
** 37 4-7 Use memory list entry 1-4 (first is 0)
** ++jskov: support for 1-4th memory list entry.
**
** Permission to use, copy, modify, and distribute this software and its
** documentation for any purpose and without fee is hereby granted, provided
** that the above copyright notice appear in all copies and that both that
** copyright notice and this permission notice appear in supporting
** documentation. This software is provided "as is" without express or
** implied warranty.
*/
#define DEVICE_NAME "Z2RAM"
#include <linux/major.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/pgtable.h>
#include <asm/setup.h>
#include <asm/amigahw.h>
#include <linux/zorro.h>
#define Z2MINOR_COMBINED (0)
#define Z2MINOR_Z2ONLY (1)
#define Z2MINOR_CHIPONLY (2)
#define Z2MINOR_MEMLIST1 (4)
#define Z2MINOR_MEMLIST2 (5)
#define Z2MINOR_MEMLIST3 (6)
#define Z2MINOR_MEMLIST4 (7)
#define Z2MINOR_COUNT (8) /* Move this down when adding a new minor */
#define Z2RAM_CHUNK1024 ( Z2RAM_CHUNKSIZE >> 10 )
static DEFINE_MUTEX(z2ram_mutex);
static u_long *z2ram_map = NULL;
static u_long z2ram_size = 0;
static int z2_count = 0;
static int chip_count = 0;
static int list_count = 0;
static int current_device = -1;
static DEFINE_SPINLOCK(z2ram_lock);
static struct gendisk *z2ram_gendisk[Z2MINOR_COUNT];
static blk_status_t z2_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request *req = bd->rq;
unsigned long start = blk_rq_pos(req) << 9;
unsigned long len = blk_rq_cur_bytes(req);
blk_mq_start_request(req);
if (start + len > z2ram_size) {
pr_err(DEVICE_NAME ": bad access: block=%llu, "
"count=%u\n",
(unsigned long long)blk_rq_pos(req),
blk_rq_cur_sectors(req));
return BLK_STS_IOERR;
}
spin_lock_irq(&z2ram_lock);
while (len) {
unsigned long addr = start & Z2RAM_CHUNKMASK;
unsigned long size = Z2RAM_CHUNKSIZE - addr;
void *buffer = bio_data(req->bio);
if (len < size)
size = len;
addr += z2ram_map[start >> Z2RAM_CHUNKSHIFT];
if (rq_data_dir(req) == READ)
memcpy(buffer, (char *)addr, size);
else
memcpy((char *)addr, buffer, size);
start += size;
len -= size;
}
spin_unlock_irq(&z2ram_lock);
blk_mq_end_request(req, BLK_STS_OK);
return BLK_STS_OK;
}
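/*
 * Editor's note: an illustrative sketch (not driver code) of the
 * address translation above.  The pseudo-disk is a list of
 * Z2RAM_CHUNKSIZE chunks whose kernel virtual base addresses live in
 * z2ram_map[]; a linear byte offset splits into a chunk index (high
 * bits) and an offset within that chunk (low bits), and each copy is
 * clamped so it never crosses a chunk boundary.
 */
#if 0
	unsigned long start = 0;	/* byte offset into the pseudo-disk */
	unsigned long addr = start & Z2RAM_CHUNKMASK;	/* offset in chunk */
	unsigned long room = Z2RAM_CHUNKSIZE - addr;	/* bytes to chunk end */
	void *mem = (void *)(z2ram_map[start >> Z2RAM_CHUNKSHIFT] + addr);
#endif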
static void get_z2ram(void)
{
int i;
for (i = 0; i < Z2RAM_SIZE / Z2RAM_CHUNKSIZE; i++) {
if (test_bit(i, zorro_unused_z2ram)) {
z2_count++;
z2ram_map[z2ram_size++] =
(unsigned long)ZTWO_VADDR(Z2RAM_START) +
(i << Z2RAM_CHUNKSHIFT);
clear_bit(i, zorro_unused_z2ram);
}
}
return;
}
static void get_chipram(void)
{
while (amiga_chip_avail() > (Z2RAM_CHUNKSIZE * 4)) {
chip_count++;
z2ram_map[z2ram_size] =
(u_long) amiga_chip_alloc(Z2RAM_CHUNKSIZE, "z2ram");
if (z2ram_map[z2ram_size] == 0) {
break;
}
z2ram_size++;
}
return;
}
static int z2_open(struct gendisk *disk, blk_mode_t mode)
{
int device = disk->first_minor;
int max_z2_map = (Z2RAM_SIZE / Z2RAM_CHUNKSIZE) * sizeof(z2ram_map[0]);
int max_chip_map = (amiga_chip_size / Z2RAM_CHUNKSIZE) *
sizeof(z2ram_map[0]);
int rc = -ENOMEM;
mutex_lock(&z2ram_mutex);
if (current_device != -1 && current_device != device) {
rc = -EBUSY;
goto err_out;
}
if (current_device == -1) {
z2_count = 0;
chip_count = 0;
list_count = 0;
z2ram_size = 0;
/* Use a specific list entry. */
if (device >= Z2MINOR_MEMLIST1 && device <= Z2MINOR_MEMLIST4) {
int index = device - Z2MINOR_MEMLIST1 + 1;
unsigned long size, paddr, vaddr;
if (index >= m68k_realnum_memory) {
printk(KERN_ERR DEVICE_NAME
": no such entry in z2ram_map\n");
goto err_out;
}
paddr = m68k_memory[index].addr;
size = m68k_memory[index].size & ~(Z2RAM_CHUNKSIZE - 1);
#ifdef __powerpc__
/* FIXME: ioremap doesn't build correct memory tables. */
{
vfree(vmalloc(size));
}
vaddr = (unsigned long)ioremap_wt(paddr, size);
#else
vaddr =
(unsigned long)z_remap_nocache_nonser(paddr, size);
#endif
z2ram_map =
kmalloc_array(size / Z2RAM_CHUNKSIZE,
sizeof(z2ram_map[0]), GFP_KERNEL);
if (z2ram_map == NULL) {
printk(KERN_ERR DEVICE_NAME
": cannot get mem for z2ram_map\n");
goto err_out;
}
while (size) {
z2ram_map[z2ram_size++] = vaddr;
size -= Z2RAM_CHUNKSIZE;
vaddr += Z2RAM_CHUNKSIZE;
list_count++;
}
if (z2ram_size != 0)
printk(KERN_INFO DEVICE_NAME
": using %iK List Entry %d Memory\n",
list_count * Z2RAM_CHUNK1024, index);
} else
switch (device) {
case Z2MINOR_COMBINED:
z2ram_map =
kmalloc(max_z2_map + max_chip_map,
GFP_KERNEL);
if (z2ram_map == NULL) {
printk(KERN_ERR DEVICE_NAME
": cannot get mem for z2ram_map\n");
goto err_out;
}
get_z2ram();
get_chipram();
if (z2ram_size != 0)
printk(KERN_INFO DEVICE_NAME
": using %iK Zorro II RAM and %iK Chip RAM (Total %dK)\n",
z2_count * Z2RAM_CHUNK1024,
chip_count * Z2RAM_CHUNK1024,
(z2_count +
chip_count) * Z2RAM_CHUNK1024);
break;
case Z2MINOR_Z2ONLY:
z2ram_map = kmalloc(max_z2_map, GFP_KERNEL);
if (!z2ram_map)
goto err_out;
get_z2ram();
if (z2ram_size != 0)
printk(KERN_INFO DEVICE_NAME
": using %iK of Zorro II RAM\n",
z2_count * Z2RAM_CHUNK1024);
break;
case Z2MINOR_CHIPONLY:
z2ram_map = kmalloc(max_chip_map, GFP_KERNEL);
if (!z2ram_map)
goto err_out;
get_chipram();
if (z2ram_size != 0)
printk(KERN_INFO DEVICE_NAME
": using %iK Chip RAM\n",
chip_count * Z2RAM_CHUNK1024);
break;
default:
rc = -ENODEV;
goto err_out;
}
if (z2ram_size == 0) {
printk(KERN_NOTICE DEVICE_NAME
": no unused ZII/Chip RAM found\n");
goto err_out_kfree;
}
current_device = device;
z2ram_size <<= Z2RAM_CHUNKSHIFT;
set_capacity(z2ram_gendisk[device], z2ram_size >> 9);
}
mutex_unlock(&z2ram_mutex);
return 0;
err_out_kfree:
kfree(z2ram_map);
err_out:
mutex_unlock(&z2ram_mutex);
return rc;
}
static void z2_release(struct gendisk *disk)
{
mutex_lock(&z2ram_mutex);
if (current_device == -1) {
mutex_unlock(&z2ram_mutex);
return;
}
mutex_unlock(&z2ram_mutex);
/*
* FIXME: unmap memory
*/
}
static const struct block_device_operations z2_fops = {
.owner = THIS_MODULE,
.open = z2_open,
.release = z2_release,
};
static struct blk_mq_tag_set tag_set;
static const struct blk_mq_ops z2_mq_ops = {
.queue_rq = z2_queue_rq,
};
static int z2ram_register_disk(int minor)
{
struct gendisk *disk;
int err;
disk = blk_mq_alloc_disk(&tag_set, NULL);
if (IS_ERR(disk))
return PTR_ERR(disk);
disk->major = Z2RAM_MAJOR;
disk->first_minor = minor;
disk->minors = 1;
disk->flags |= GENHD_FL_NO_PART;
disk->fops = &z2_fops;
if (minor)
sprintf(disk->disk_name, "z2ram%d", minor);
else
sprintf(disk->disk_name, "z2ram");
z2ram_gendisk[minor] = disk;
err = add_disk(disk);
if (err)
put_disk(disk);
return err;
}
static int __init z2_init(void)
{
int ret, i;
if (!MACH_IS_AMIGA)
return -ENODEV;
if (register_blkdev(Z2RAM_MAJOR, DEVICE_NAME))
return -EBUSY;
tag_set.ops = &z2_mq_ops;
tag_set.nr_hw_queues = 1;
tag_set.nr_maps = 1;
tag_set.queue_depth = 16;
tag_set.numa_node = NUMA_NO_NODE;
tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ret = blk_mq_alloc_tag_set(&tag_set);
if (ret)
goto out_unregister_blkdev;
for (i = 0; i < Z2MINOR_COUNT; i++) {
ret = z2ram_register_disk(i);
if (ret && i == 0)
goto out_free_tagset;
}
return 0;
out_free_tagset:
blk_mq_free_tag_set(&tag_set);
out_unregister_blkdev:
unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);
return ret;
}
static void __exit z2_exit(void)
{
int i, j;
unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);
for (i = 0; i < Z2MINOR_COUNT; i++) {
del_gendisk(z2ram_gendisk[i]);
put_disk(z2ram_gendisk[i]);
}
blk_mq_free_tag_set(&tag_set);
if (current_device != -1) {
i = 0;
for (j = 0; j < z2_count; j++) {
set_bit(i++, zorro_unused_z2ram);
}
for (j = 0; j < chip_count; j++) {
if (z2ram_map[i]) {
amiga_chip_free((void *)z2ram_map[i++]);
}
}
if (z2ram_map != NULL) {
kfree(z2ram_map);
}
}
return;
}
module_init(z2_init);
module_exit(z2_exit);
MODULE_LICENSE("GPL");
| linux-master | drivers/block/z2ram.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/block/ataflop.c
*
* Copyright (C) 1993 Greg Harp
* Atari Support by Bjoern Brauel, Roman Hodek
*
* Big cleanup Sep 11..14 1994 Roman Hodek:
* - Driver now works interrupt driven
* - Support for two drives; should work, but I cannot test that :-(
* - Reading is done in whole tracks and buffered to speed up things
* - Disk change detection and drive deselecting after motor-off
* similar to TOS
* - Autodetection of disk format (DD/HD); untested yet, because I
* don't have an HD drive :-(
*
* Fixes Nov 13 1994 Martin Schaller:
* - Autodetection works now
* - Support for 5 1/4'' disks
* - Removed drive type (unknown on atari)
* - Do seeks with 8 MHz
*
* Changes by Andreas Schwab:
* - After errors in multiple read mode try again reading single sectors
* (Feb 1995):
* - Clean up error handling
* - Set blk_size for proper size checking
* - Initialize track register when testing presence of floppy
* - Implement some ioctl's
*
* Changes by Torsten Lang:
* - When probing the floppies we should add the FDCCMDADD_H flag since
* the FDC will otherwise wait forever when no disk is inserted...
*
* ++ Freddi Aschwanden (fa) 20.9.95 fixes for medusa:
* - MFPDELAY() after each FDC access -> atari
* - more/other disk formats
* - DMA to the block buffer directly if we have a 32bit DMA
* - for medusa, the step rate is always 3ms
* - on medusa, use only cache_push()
* Roman:
* - Make disk format numbering independent from minors
* - Let user set max. supported drive type (speeds up format
* detection, saves buffer space)
*
* Roman 10/15/95:
* - implement some more ioctls
* - disk formatting
*
* Andreas 95/12/12:
* - increase gap size at start of track for HD/ED disks
*
* Michael (MSch) 11/07/96:
* - implemented FDSETPRM and FDDEFPRM ioctl
*
* Andreas (97/03/19):
* - implemented missing BLK* ioctls
*
* Things left to do:
* - Formatting
* - Maybe a better strategy for disk change detection (does anyone
* know one?)
*/
#include <linux/module.h>
#include <linux/fd.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/blk-mq.h>
#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/wait.h>
#include <asm/atariints.h>
#include <asm/atari_stdma.h>
#include <asm/atari_stram.h>
#define FD_MAX_UNITS 2
#undef DEBUG
static DEFINE_MUTEX(ataflop_mutex);
static struct request *fd_request;
/*
* WD1772 stuff
*/
/* register codes */
#define FDCSELREG_STP (0x80) /* command/status register */
#define FDCSELREG_TRA (0x82) /* track register */
#define FDCSELREG_SEC (0x84) /* sector register */
#define FDCSELREG_DTA (0x86) /* data register */
/* register names for FDC_READ/WRITE macros */
#define FDCREG_CMD 0
#define FDCREG_STATUS 0
#define FDCREG_TRACK 2
#define FDCREG_SECTOR 4
#define FDCREG_DATA 6
/* command opcodes */
#define FDCCMD_RESTORE (0x00) /* - */
#define FDCCMD_SEEK (0x10) /* | */
#define FDCCMD_STEP (0x20) /* | TYP 1 Commands */
#define FDCCMD_STIN (0x40) /* | */
#define FDCCMD_STOT (0x60) /* - */
#define FDCCMD_RDSEC (0x80) /* - TYP 2 Commands */
#define FDCCMD_WRSEC (0xa0) /* - " */
#define FDCCMD_RDADR (0xc0) /* - */
#define FDCCMD_RDTRA (0xe0) /* | TYP 3 Commands */
#define FDCCMD_WRTRA (0xf0) /* - */
#define FDCCMD_FORCI (0xd0) /* - TYP 4 Command */
/* command modifier bits */
#define FDCCMDADD_SR6 (0x00) /* step rate settings */
#define FDCCMDADD_SR12 (0x01)
#define FDCCMDADD_SR2 (0x02)
#define FDCCMDADD_SR3 (0x03)
#define FDCCMDADD_V (0x04) /* verify */
#define FDCCMDADD_H (0x08) /* wait for spin-up */
#define FDCCMDADD_U (0x10) /* update track register */
#define FDCCMDADD_M (0x10) /* multiple sector access */
#define FDCCMDADD_E (0x04) /* head settling flag */
#define FDCCMDADD_P (0x02) /* precompensation off */
#define FDCCMDADD_A0 (0x01) /* DAM flag */
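/*
 * A full command byte is the OR of an opcode and modifier bits, e.g.
 * FDC_WRITE(FDCREG_CMD, FDCCMD_RESTORE | FDCCMDADD_H | FDCSTEP_6)
 * spins the drive up and restores to track 0 at the 6 ms step rate;
 * this exact combination is used below in fd_test_drive_present().
 */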
/* status register bits */
#define FDCSTAT_MOTORON (0x80) /* motor on */
#define FDCSTAT_WPROT (0x40) /* write protected (FDCCMD_WR*) */
#define FDCSTAT_SPINUP (0x20) /* motor speed stable (Type I) */
#define FDCSTAT_DELDAM (0x20) /* sector has deleted DAM (Type II+III) */
#define FDCSTAT_RECNF (0x10) /* record not found */
#define FDCSTAT_CRC (0x08) /* CRC error */
#define FDCSTAT_TR00 (0x04) /* Track 00 flag (Type I) */
#define FDCSTAT_LOST (0x04) /* Lost Data (Type II+III) */
#define FDCSTAT_IDX (0x02) /* Index status (Type I) */
#define FDCSTAT_DRQ (0x02) /* DRQ status (Type II+III) */
#define FDCSTAT_BUSY (0x01) /* FDC is busy */
/* PSG Port A Bit Nr 0 .. Side Sel .. 0 -> Side 1 1 -> Side 2 */
#define DSKSIDE (0x01)
#define DSKDRVNONE (0x06)
#define DSKDRV0 (0x02)
#define DSKDRV1 (0x04)
/* step rates */
#define FDCSTEP_6 0x00
#define FDCSTEP_12 0x01
#define FDCSTEP_2 0x02
#define FDCSTEP_3 0x03
struct atari_format_descr {
int track; /* to be formatted */
int head; /* "" "" */
int sect_offset; /* offset of first sector */
};
/* Disk types: DD, HD, ED */
static struct atari_disk_type {
const char *name;
unsigned spt; /* sectors per track */
unsigned blocks; /* total number of blocks */
unsigned fdc_speed; /* fdc_speed setting */
unsigned stretch; /* track doubling ? */
} atari_disk_type[] = {
{ "d360", 9, 720, 0, 0}, /* 0: 360kB diskette */
{ "D360", 9, 720, 0, 1}, /* 1: 360kb in 720k or 1.2MB drive */
{ "D720", 9,1440, 0, 0}, /* 2: 720kb in 720k or 1.2MB drive */
{ "D820", 10,1640, 0, 0}, /* 3: DD disk with 82 tracks/10 sectors */
/* formats above are probed for type DD */
#define MAX_TYPE_DD 3
{ "h1200",15,2400, 3, 0}, /* 4: 1.2MB diskette */
{ "H1440",18,2880, 3, 0}, /* 5: 1.4 MB diskette (HD) */
{ "H1640",20,3280, 3, 0}, /* 6: 1.64MB diskette (fat HD) 82 tr 20 sec */
/* formats above are probed for types DD and HD */
#define MAX_TYPE_HD 6
{ "E2880",36,5760, 3, 0}, /* 7: 2.8 MB diskette (ED) */
{ "E3280",40,6560, 3, 0}, /* 8: 3.2 MB diskette (fat ED) 82 tr 40 sec */
/* formats above are probed for types DD, HD and ED */
#define MAX_TYPE_ED 8
/* types below are never autoprobed */
{ "H1680",21,3360, 3, 0}, /* 9: 1.68MB diskette (fat HD) 80 tr 21 sec */
{ "h410",10,820, 0, 1}, /* 10: 410k diskette 41 tr 10 sec, stretch */
{ "h1476",18,2952, 3, 0}, /* 11: 1.48MB diskette 82 tr 18 sec */
{ "H1722",21,3444, 3, 0}, /* 12: 1.72MB diskette 82 tr 21 sec */
{ "h420",10,840, 0, 1}, /* 13: 420k diskette 42 tr 10 sec, stretch */
{ "H830",10,1660, 0, 0}, /* 14: 820k diskette 83 tr 10 sec */
{ "h1494",18,2952, 3, 0}, /* 15: 1.49MB diskette 83 tr 18 sec */
{ "H1743",21,3486, 3, 0}, /* 16: 1.74MB diskette 83 tr 21 sec */
{ "h880",11,1760, 0, 0}, /* 17: 880k diskette 80 tr 11 sec */
{ "D1040",13,2080, 0, 0}, /* 18: 1.04MB diskette 80 tr 13 sec */
{ "D1120",14,2240, 0, 0}, /* 19: 1.12MB diskette 80 tr 14 sec */
{ "h1600",20,3200, 3, 0}, /* 20: 1.60MB diskette 80 tr 20 sec */
{ "H1760",22,3520, 3, 0}, /* 21: 1.76MB diskette 80 tr 22 sec */
{ "H1920",24,3840, 3, 0}, /* 22: 1.92MB diskette 80 tr 24 sec */
{ "E3200",40,6400, 3, 0}, /* 23: 3.2MB diskette 80 tr 40 sec */
{ "E3520",44,7040, 3, 0}, /* 24: 3.52MB diskette 80 tr 44 sec */
{ "E3840",48,7680, 3, 0}, /* 25: 3.84MB diskette 80 tr 48 sec */
{ "H1840",23,3680, 3, 0}, /* 26: 1.84MB diskette 80 tr 23 sec */
{ "D800",10,1600, 0, 0}, /* 27: 800k diskette 80 tr 10 sec */
};
static int StartDiskType[] = {
MAX_TYPE_DD,
MAX_TYPE_HD,
MAX_TYPE_ED
};
#define TYPE_DD 0
#define TYPE_HD 1
#define TYPE_ED 2
static int DriveType = TYPE_HD;
static DEFINE_SPINLOCK(ataflop_lock);
/* Array for translating minors into disk formats */
static struct {
int index;
unsigned drive_types;
} minor2disktype[] = {
{ 0, TYPE_DD }, /* 1: d360 */
{ 4, TYPE_HD }, /* 2: h1200 */
{ 1, TYPE_DD }, /* 3: D360 */
{ 2, TYPE_DD }, /* 4: D720 */
{ 1, TYPE_DD }, /* 5: h360 = D360 */
{ 2, TYPE_DD }, /* 6: h720 = D720 */
{ 5, TYPE_HD }, /* 7: H1440 */
{ 7, TYPE_ED }, /* 8: E2880 */
/* some PC formats :-) */
{ 8, TYPE_ED }, /* 9: E3280 <- was "CompaQ" == E2880 for PC */
{ 5, TYPE_HD }, /* 10: h1440 = H1440 */
{ 9, TYPE_HD }, /* 11: H1680 */
{ 10, TYPE_DD }, /* 12: h410 */
{ 3, TYPE_DD }, /* 13: H820 <- == D820, 82x10 */
{ 11, TYPE_HD }, /* 14: h1476 */
{ 12, TYPE_HD }, /* 15: H1722 */
{ 13, TYPE_DD }, /* 16: h420 */
{ 14, TYPE_DD }, /* 17: H830 */
{ 15, TYPE_HD }, /* 18: h1494 */
{ 16, TYPE_HD }, /* 19: H1743 */
{ 17, TYPE_DD }, /* 20: h880 */
{ 18, TYPE_DD }, /* 21: D1040 */
{ 19, TYPE_DD }, /* 22: D1120 */
{ 20, TYPE_HD }, /* 23: h1600 */
{ 21, TYPE_HD }, /* 24: H1760 */
{ 22, TYPE_HD }, /* 25: H1920 */
{ 23, TYPE_ED }, /* 26: E3200 */
{ 24, TYPE_ED }, /* 27: E3520 */
{ 25, TYPE_ED }, /* 28: E3840 */
{ 26, TYPE_HD }, /* 29: H1840 */
{ 27, TYPE_DD }, /* 30: D800 */
{ 6, TYPE_HD }, /* 31: H1640 <- was H1600 == h1600 for PC */
};
#define NUM_DISK_MINORS ARRAY_SIZE(minor2disktype)
/*
* Maximum disk size (in kilobytes). This default is used whenever the
* current disk size is unknown.
*/
#define MAX_DISK_SIZE 3280
/*
* MSch: User-provided type information. 'drive' points to
* the respective entry of this array. Set by FDSETPRM ioctls.
*/
static struct atari_disk_type user_params[FD_MAX_UNITS];
/*
* User-provided permanent type information. 'drive' points to
* the respective entry of this array. Set by FDDEFPRM ioctls,
* restored upon disk change by floppy_revalidate() if valid (as seen by
* default_params[].blocks > 0 - a bit in unit[].flags might be used for this?)
*/
static struct atari_disk_type default_params[FD_MAX_UNITS];
/* current info on each unit */
static struct atari_floppy_struct {
int connected; /* !=0 : drive is connected */
int autoprobe; /* !=0 : do autoprobe */
struct atari_disk_type *disktype; /* current type of disk */
int track; /* current head position or -1 if
unknown */
unsigned int steprate; /* steprate setting */
unsigned int wpstat; /* current state of WP signal (for
disk change detection) */
int flags; /* flags */
struct gendisk *disk[NUM_DISK_MINORS];
bool registered[NUM_DISK_MINORS];
int ref;
int type;
struct blk_mq_tag_set tag_set;
int error_count;
} unit[FD_MAX_UNITS];
#define UD unit[drive]
#define UDT unit[drive].disktype
#define SUD unit[SelectedDrive]
#define SUDT unit[SelectedDrive].disktype
#define FDC_READ(reg) ({ \
/* unsigned long __flags; */ \
unsigned short __val; \
/* local_irq_save(__flags); */ \
dma_wd.dma_mode_status = 0x80 | (reg); \
udelay(25); \
__val = dma_wd.fdc_acces_seccount; \
MFPDELAY(); \
/* local_irq_restore(__flags); */ \
__val & 0xff; \
})
#define FDC_WRITE(reg,val) \
do { \
/* unsigned long __flags; */ \
/* local_irq_save(__flags); */ \
dma_wd.dma_mode_status = 0x80 | (reg); \
udelay(25); \
dma_wd.fdc_acces_seccount = (val); \
MFPDELAY(); \
/* local_irq_restore(__flags); */ \
} while(0)
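/*
 * Illustrative use of the accessor macros (hypothetical snippet): the
 * register code first selects the WD1772 register through the ST-DMA
 * mode/status port, and the udelay(25) gives the FDC time to settle:
 *
 *	unsigned char status = FDC_READ(FDCREG_STATUS);
 *	if (status & FDCSTAT_BUSY)
 *		... FDC is still executing a command ...
 */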
/* Buffering variables:
* First, there is a DMA buffer in ST-RAM that is used for floppy DMA
* operations. Second, a track buffer is used to cache a whole track
* of the disk to save read operations. These are two separate buffers
* because that allows write operations without clearing the track buffer.
*/
static int MaxSectors[] = {
11, 22, 44
};
static int BufferSize[] = {
15*512, 30*512, 60*512
};
#define BUFFER_SIZE (BufferSize[DriveType])
unsigned char *DMABuffer; /* buffer for writes */
static unsigned long PhysDMABuffer; /* physical address */
static int UseTrackbuffer = -1; /* Do track buffering? */
module_param(UseTrackbuffer, int, 0);
unsigned char *TrackBuffer; /* buffer for reads */
static unsigned long PhysTrackBuffer; /* physical address */
static int BufferDrive, BufferSide, BufferTrack;
static int read_track; /* non-zero if we are reading whole tracks */
#define SECTOR_BUFFER(sec) (TrackBuffer + ((sec)-1)*512)
#define IS_BUFFERED(drive,side,track) \
(BufferDrive == (drive) && BufferSide == (side) && BufferTrack == (track))
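/*
 * Example: sector n of the buffered track lives at byte offset
 * (n-1)*512, so SECTOR_BUFFER(1) is the very start of TrackBuffer.
 */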
/*
* These are global variables, as that's the easiest way to give
* information to interrupts. They are the data used for the current
* request.
*/
static int SelectedDrive = 0;
static int ReqCmd, ReqBlock;
static int ReqSide, ReqTrack, ReqSector, ReqCnt;
static int HeadSettleFlag = 0;
static unsigned char *ReqData, *ReqBuffer;
static int MotorOn = 0, MotorOffTrys;
static int IsFormatting = 0, FormatError;
static int UserSteprate[FD_MAX_UNITS] = { -1, -1 };
module_param_array(UserSteprate, int, NULL, 0);
static DECLARE_COMPLETION(format_wait);
static unsigned long changed_floppies = 0xff, fake_change = 0;
#define CHECK_CHANGE_DELAY HZ/2
#define FD_MOTOR_OFF_DELAY (3*HZ)
#define FD_MOTOR_OFF_MAXTRY (10*20)
#define FLOPPY_TIMEOUT (6*HZ)
#define RECALIBRATE_ERRORS 4 /* After this many errors the drive
* will be recalibrated. */
#define MAX_ERRORS 8 /* After this many errors the driver
* will give up. */
/*
* The driver is trying to determine the correct media format
* while Probing is set. fd_rwsec_done() clears it after a
* successful access.
*/
static int Probing = 0;
/* This flag is set when a dummy seek is necessary to make the WP
* status bit accessible.
*/
static int NeedSeek = 0;
#ifdef DEBUG
#define DPRINT(a) printk a
#else
#define DPRINT(a)
#endif
/***************************** Prototypes *****************************/
static void fd_select_side( int side );
static void fd_select_drive( int drive );
static void fd_deselect( void );
static void fd_motor_off_timer(struct timer_list *unused);
static void check_change(struct timer_list *unused);
static irqreturn_t floppy_irq (int irq, void *dummy);
static void fd_error( void );
static int do_format(int drive, int type, struct atari_format_descr *desc);
static void do_fd_action( int drive );
static void fd_calibrate( void );
static void fd_calibrate_done( int status );
static void fd_seek( void );
static void fd_seek_done( int status );
static void fd_rwsec( void );
static void fd_readtrack_check(struct timer_list *unused);
static void fd_rwsec_done( int status );
static void fd_rwsec_done1(int status);
static void fd_writetrack( void );
static void fd_writetrack_done( int status );
static void fd_times_out(struct timer_list *unused);
static void finish_fdc( void );
static void finish_fdc_done( int dummy );
static void setup_req_params( int drive );
static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long param);
static void fd_probe( int drive );
static int fd_test_drive_present( int drive );
static void config_types( void );
static int floppy_open(struct gendisk *disk, blk_mode_t mode);
static void floppy_release(struct gendisk *disk);
/************************* End of Prototypes **************************/
static DEFINE_TIMER(motor_off_timer, fd_motor_off_timer);
static DEFINE_TIMER(readtrack_timer, fd_readtrack_check);
static DEFINE_TIMER(timeout_timer, fd_times_out);
static DEFINE_TIMER(fd_timer, check_change);
static void fd_end_request_cur(blk_status_t err)
{
DPRINT(("fd_end_request_cur(), bytes %d of %d\n",
blk_rq_cur_bytes(fd_request),
blk_rq_bytes(fd_request)));
if (!blk_update_request(fd_request, err,
blk_rq_cur_bytes(fd_request))) {
DPRINT(("calling __blk_mq_end_request()\n"));
__blk_mq_end_request(fd_request, err);
fd_request = NULL;
} else {
/* requeue rest of request */
DPRINT(("calling blk_mq_requeue_request()\n"));
blk_mq_requeue_request(fd_request, true);
fd_request = NULL;
}
}
static inline void start_motor_off_timer(void)
{
mod_timer(&motor_off_timer, jiffies + FD_MOTOR_OFF_DELAY);
MotorOffTrys = 0;
}
static inline void start_check_change_timer( void )
{
mod_timer(&fd_timer, jiffies + CHECK_CHANGE_DELAY);
}
static inline void start_timeout(void)
{
mod_timer(&timeout_timer, jiffies + FLOPPY_TIMEOUT);
}
static inline void stop_timeout(void)
{
del_timer(&timeout_timer);
}
/* Select the side to use. */
static void fd_select_side( int side )
{
unsigned long flags;
/* protect against various other ints mucking around with the PSG */
local_irq_save(flags);
sound_ym.rd_data_reg_sel = 14; /* Select PSG Port A */
sound_ym.wd_data = (side == 0) ? sound_ym.rd_data_reg_sel | 0x01 :
sound_ym.rd_data_reg_sel & 0xfe;
local_irq_restore(flags);
}
/* Select a drive, update the FDC's track register and set the correct
* clock speed for this disk's type.
*/
static void fd_select_drive( int drive )
{
unsigned long flags;
unsigned char tmp;
if (drive == SelectedDrive)
return;
/* protect against various other ints mucking around with the PSG */
local_irq_save(flags);
sound_ym.rd_data_reg_sel = 14; /* Select PSG Port A */
tmp = sound_ym.rd_data_reg_sel;
sound_ym.wd_data = (tmp | DSKDRVNONE) & ~(drive == 0 ? DSKDRV0 : DSKDRV1);
atari_dont_touch_floppy_select = 1;
local_irq_restore(flags);
/* restore track register to saved value */
FDC_WRITE( FDCREG_TRACK, UD.track );
udelay(25);
/* select 8/16 MHz */
if (UDT)
if (ATARIHW_PRESENT(FDCSPEED))
dma_wd.fdc_speed = UDT->fdc_speed;
SelectedDrive = drive;
}
/* Deselect both drives. */
static void fd_deselect( void )
{
unsigned long flags;
/* protect against various other ints mucking around with the PSG */
local_irq_save(flags);
atari_dont_touch_floppy_select = 0;
sound_ym.rd_data_reg_sel=14; /* Select PSG Port A */
sound_ym.wd_data = (sound_ym.rd_data_reg_sel |
(MACH_IS_FALCON ? 3 : 7)); /* no drives selected */
/* On Falcon, the drive B select line is used on the printer port, so
* leave it alone... */
SelectedDrive = -1;
local_irq_restore(flags);
}
/* This timer function deselects the drives when the FDC switched the
* motor off. The deselection cannot happen earlier because the FDC
* counts the index signals, which arrive only if one drive is selected.
*/
static void fd_motor_off_timer(struct timer_list *unused)
{
unsigned char status;
if (SelectedDrive < 0)
/* no drive selected, needn't deselect anyone */
return;
if (stdma_islocked())
goto retry;
status = FDC_READ( FDCREG_STATUS );
if (!(status & 0x80)) {
/* motor already turned off by FDC -> deselect drives */
MotorOn = 0;
fd_deselect();
return;
}
/* not yet off, try again */
retry:
/* Test again later; if tested too often, it seems there is no disk
* in the drive and the FDC will leave the motor on forever (or,
* at least until a disk is inserted). So we'll test only twice
* per second from then on...
*/
mod_timer(&motor_off_timer,
jiffies + (MotorOffTrys++ < FD_MOTOR_OFF_MAXTRY ? HZ/20 : HZ/2));
}
/* This function is repeatedly called to detect disk changes (as good
* as possible) and keep track of the current state of the write protection.
*/
static void check_change(struct timer_list *unused)
{
static int drive = 0;
unsigned long flags;
unsigned char old_porta;
int stat;
if (++drive > 1 || !UD.connected)
drive = 0;
/* protect against various other ints mucking around with the PSG */
local_irq_save(flags);
if (!stdma_islocked()) {
sound_ym.rd_data_reg_sel = 14;
old_porta = sound_ym.rd_data_reg_sel;
sound_ym.wd_data = (old_porta | DSKDRVNONE) &
~(drive == 0 ? DSKDRV0 : DSKDRV1);
stat = !!(FDC_READ( FDCREG_STATUS ) & FDCSTAT_WPROT);
sound_ym.wd_data = old_porta;
if (stat != UD.wpstat) {
DPRINT(( "wpstat[%d] = %d\n", drive, stat ));
UD.wpstat = stat;
set_bit (drive, &changed_floppies);
}
}
local_irq_restore(flags);
start_check_change_timer();
}
/* Handling of the Head Settling Flag: This flag should be set after each
* seek operation, because we don't use seeks with verify.
*/
static inline void set_head_settle_flag(void)
{
HeadSettleFlag = FDCCMDADD_E;
}
static inline int get_head_settle_flag(void)
{
int tmp = HeadSettleFlag;
HeadSettleFlag = 0;
return( tmp );
}
static inline void copy_buffer(void *from, void *to)
{
ulong *p1 = (ulong *)from, *p2 = (ulong *)to;
int cnt;
for (cnt = 512/4; cnt; cnt--)
*p2++ = *p1++;
}
/* General Interrupt Handling */
static void (*FloppyIRQHandler)( int status ) = NULL;
static irqreturn_t floppy_irq (int irq, void *dummy)
{
unsigned char status;
void (*handler)( int );
handler = xchg(&FloppyIRQHandler, NULL);
if (handler) {
nop();
status = FDC_READ( FDCREG_STATUS );
DPRINT(("FDC irq, status = %02x handler = %08lx\n",status,(unsigned long)handler));
handler( status );
}
else {
DPRINT(("FDC irq, no handler\n"));
}
return IRQ_HANDLED;
}
/* Error handling: If some error happened, retry some times, then
* recalibrate, then try again, and fail after MAX_ERRORS.
*/
static void fd_error( void )
{
if (IsFormatting) {
IsFormatting = 0;
FormatError = 1;
complete(&format_wait);
return;
}
if (!fd_request)
return;
unit[SelectedDrive].error_count++;
if (unit[SelectedDrive].error_count >= MAX_ERRORS) {
printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
fd_end_request_cur(BLK_STS_IOERR);
finish_fdc();
return;
}
else if (unit[SelectedDrive].error_count == RECALIBRATE_ERRORS) {
printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
if (SelectedDrive != -1)
SUD.track = -1;
}
/* need to re-run request to recalibrate */
atari_disable_irq( IRQ_MFP_FDC );
setup_req_params( SelectedDrive );
do_fd_action( SelectedDrive );
atari_enable_irq( IRQ_MFP_FDC );
}
#define SET_IRQ_HANDLER(proc) do { FloppyIRQHandler = (proc); } while(0)
/* ---------- Formatting ---------- */
#define FILL(n,val) \
do { \
memset( p, val, n ); \
p += n; \
} while(0)
static int do_format(int drive, int type, struct atari_format_descr *desc)
{
struct request_queue *q;
unsigned char *p;
int sect, nsect;
unsigned long flags;
int ret;
if (type) {
type--;
if (type >= NUM_DISK_MINORS ||
minor2disktype[type].drive_types > DriveType) {
finish_fdc();
return -EINVAL;
}
}
q = unit[drive].disk[type]->queue;
blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
local_irq_save(flags);
stdma_lock(floppy_irq, NULL);
atari_turnon_irq( IRQ_MFP_FDC ); /* should be already, just to be sure */
local_irq_restore(flags);
if (type) {
type = minor2disktype[type].index;
UDT = &atari_disk_type[type];
}
if (!UDT || desc->track >= UDT->blocks/UDT->spt/2 || desc->head >= 2) {
finish_fdc();
ret = -EINVAL;
goto out;
}
nsect = UDT->spt;
p = TrackBuffer;
/* The track buffer is used for the raw track data, so its
contents become invalid! */
BufferDrive = -1;
/* stop deselect timer */
del_timer( &motor_off_timer );
FILL( 60 * (nsect / 9), 0x4e );
for( sect = 0; sect < nsect; ++sect ) {
FILL( 12, 0 );
FILL( 3, 0xf5 );
*p++ = 0xfe;
*p++ = desc->track;
*p++ = desc->head;
*p++ = (nsect + sect - desc->sect_offset) % nsect + 1;
*p++ = 2;
*p++ = 0xf7;
FILL( 22, 0x4e );
FILL( 12, 0 );
FILL( 3, 0xf5 );
*p++ = 0xfb;
FILL( 512, 0xe5 );
*p++ = 0xf7;
FILL( 40, 0x4e );
}
FILL( TrackBuffer+BUFFER_SIZE-p, 0x4e );
IsFormatting = 1;
FormatError = 0;
ReqTrack = desc->track;
ReqSide = desc->head;
do_fd_action( drive );
wait_for_completion(&format_wait);
finish_fdc();
ret = FormatError ? -EIO : 0;
out:
blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);
return ret;
}
/* do_fd_action() is the general procedure for a fd request: All
* required parameter settings (drive select, side select, track
* position) are checked and set if needed. For each of these
* parameters and the actual reading or writing exist two functions:
* one that starts the setting (or skips it if possible) and one
* callback for the "done" interrupt. Each done func calls the next
* set function to propagate the request down to fd_rwsec_done().
*/
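/*
 * Illustrative chain for a plain read with the head already on the
 * right track (hypothetical, error-free case):
 *
 *	do_fd_action() -> fd_rwsec()          program DMA, issue FDCCMD_RDSEC
 *	  ... FDC interrupt ...
 *	fd_rwsec_done() -> fd_rwsec_done1()   check status, copy data out
 *	  -> fd_end_request_cur(BLK_STS_OK) -> finish_fdc()
 */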
static void do_fd_action( int drive )
{
DPRINT(("do_fd_action\n"));
if (UseTrackbuffer && !IsFormatting) {
repeat:
if (IS_BUFFERED( drive, ReqSide, ReqTrack )) {
if (ReqCmd == READ) {
copy_buffer( SECTOR_BUFFER(ReqSector), ReqData );
if (++ReqCnt < blk_rq_cur_sectors(fd_request)) {
/* read next sector */
setup_req_params( drive );
goto repeat;
}
else {
/* all sectors finished */
fd_end_request_cur(BLK_STS_OK);
finish_fdc();
return;
}
}
else {
/* cmd == WRITE, pay attention to track buffer
* consistency! */
copy_buffer( ReqData, SECTOR_BUFFER(ReqSector) );
}
}
}
if (SelectedDrive != drive)
fd_select_drive( drive );
if (UD.track == -1)
fd_calibrate();
else if (UD.track != ReqTrack << UDT->stretch)
fd_seek();
else if (IsFormatting)
fd_writetrack();
else
fd_rwsec();
}
/* Seek to track 0 if the current track is unknown */
static void fd_calibrate( void )
{
if (SUD.track >= 0) {
fd_calibrate_done( 0 );
return;
}
if (ATARIHW_PRESENT(FDCSPEED))
dma_wd.fdc_speed = 0; /* always seek with 8 MHz */
DPRINT(("fd_calibrate\n"));
SET_IRQ_HANDLER( fd_calibrate_done );
/* we can't verify, since the speed may be incorrect */
FDC_WRITE( FDCREG_CMD, FDCCMD_RESTORE | SUD.steprate );
NeedSeek = 1;
MotorOn = 1;
start_timeout();
/* wait for IRQ */
}
static void fd_calibrate_done( int status )
{
DPRINT(("fd_calibrate_done()\n"));
stop_timeout();
/* set the correct speed now */
if (ATARIHW_PRESENT(FDCSPEED))
dma_wd.fdc_speed = SUDT->fdc_speed;
if (status & FDCSTAT_RECNF) {
printk(KERN_ERR "fd%d: restore failed\n", SelectedDrive );
fd_error();
}
else {
SUD.track = 0;
fd_seek();
}
}
/* Seek the drive to the requested track. The drive must have been
* calibrated at some point before this.
*/
static void fd_seek( void )
{
if (SUD.track == ReqTrack << SUDT->stretch) {
fd_seek_done( 0 );
return;
}
if (ATARIHW_PRESENT(FDCSPEED)) {
dma_wd.fdc_speed = 0; /* always seek with 8 MHz */
MFPDELAY();
}
DPRINT(("fd_seek() to track %d\n",ReqTrack));
FDC_WRITE( FDCREG_DATA, ReqTrack << SUDT->stretch);
udelay(25);
SET_IRQ_HANDLER( fd_seek_done );
FDC_WRITE( FDCREG_CMD, FDCCMD_SEEK | SUD.steprate );
MotorOn = 1;
set_head_settle_flag();
start_timeout();
/* wait for IRQ */
}
static void fd_seek_done( int status )
{
DPRINT(("fd_seek_done()\n"));
stop_timeout();
/* set the correct speed */
if (ATARIHW_PRESENT(FDCSPEED))
dma_wd.fdc_speed = SUDT->fdc_speed;
if (status & FDCSTAT_RECNF) {
printk(KERN_ERR "fd%d: seek error (to track %d)\n",
SelectedDrive, ReqTrack );
/* we don't know exactly which track we are on now! */
SUD.track = -1;
fd_error();
}
else {
SUD.track = ReqTrack << SUDT->stretch;
NeedSeek = 0;
if (IsFormatting)
fd_writetrack();
else
fd_rwsec();
}
}
/* This does the actual reading/writing after positioning the head
* over the correct track.
*/
static int MultReadInProgress = 0;
static void fd_rwsec( void )
{
unsigned long paddr, flags;
unsigned int rwflag, old_motoron;
unsigned int track;
DPRINT(("fd_rwsec(), Sec=%d, Access=%c\n",ReqSector, ReqCmd == WRITE ? 'w' : 'r' ));
if (ReqCmd == WRITE) {
if (ATARIHW_PRESENT(EXTD_DMA)) {
paddr = virt_to_phys(ReqData);
}
else {
copy_buffer( ReqData, DMABuffer );
paddr = PhysDMABuffer;
}
dma_cache_maintenance( paddr, 512, 1 );
rwflag = 0x100;
}
else {
if (read_track)
paddr = PhysTrackBuffer;
else
paddr = ATARIHW_PRESENT(EXTD_DMA) ?
virt_to_phys(ReqData) : PhysDMABuffer;
rwflag = 0;
}
fd_select_side( ReqSide );
/* Start sector of this operation */
FDC_WRITE( FDCREG_SECTOR, read_track ? 1 : ReqSector );
MFPDELAY();
/* Cheat for track if stretch != 0 */
if (SUDT->stretch) {
track = FDC_READ( FDCREG_TRACK);
MFPDELAY();
FDC_WRITE( FDCREG_TRACK, track >> SUDT->stretch);
}
udelay(25);
/* Setup DMA */
local_irq_save(flags);
dma_wd.dma_lo = (unsigned char)paddr;
MFPDELAY();
paddr >>= 8;
dma_wd.dma_md = (unsigned char)paddr;
MFPDELAY();
paddr >>= 8;
if (ATARIHW_PRESENT(EXTD_DMA))
st_dma_ext_dmahi = (unsigned short)paddr;
else
dma_wd.dma_hi = (unsigned char)paddr;
MFPDELAY();
local_irq_restore(flags);
/* Clear FIFO and switch DMA to correct mode */
dma_wd.dma_mode_status = 0x90 | rwflag;
MFPDELAY();
dma_wd.dma_mode_status = 0x90 | (rwflag ^ 0x100);
MFPDELAY();
dma_wd.dma_mode_status = 0x90 | rwflag;
MFPDELAY();
/* How many sectors for DMA */
dma_wd.fdc_acces_seccount = read_track ? SUDT->spt : 1;
udelay(25);
/* Start operation */
dma_wd.dma_mode_status = FDCSELREG_STP | rwflag;
udelay(25);
SET_IRQ_HANDLER( fd_rwsec_done );
dma_wd.fdc_acces_seccount =
(get_head_settle_flag() |
(rwflag ? FDCCMD_WRSEC : (FDCCMD_RDSEC | (read_track ? FDCCMDADD_M : 0))));
old_motoron = MotorOn;
MotorOn = 1;
NeedSeek = 1;
/* wait for interrupt */
if (read_track) {
/* If reading a whole track, wait about one disk rotation and
* then check if all sectors are read. The FDC will even
* search for the first non-existent sector and need 1 sec to
* recognise that it isn't present :-(
*/
MultReadInProgress = 1;
mod_timer(&readtrack_timer,
/* 1 rot. + 5 rot.s if motor was off */
jiffies + HZ/5 + (old_motoron ? 0 : HZ));
}
start_timeout();
}
static void fd_readtrack_check(struct timer_list *unused)
{
unsigned long flags, addr, addr2;
local_irq_save(flags);
if (!MultReadInProgress) {
/* This prevents a race condition that could arise if the
* interrupt is triggered while the calling of this timer
* callback function takes place. The IRQ function then has
* already cleared 'MultReadInProgress' when flow of control
* gets here.
*/
local_irq_restore(flags);
return;
}
/* get the current DMA address */
/* ++ f.a. read twice to avoid being fooled by switcher */
addr = 0;
do {
addr2 = addr;
addr = dma_wd.dma_lo & 0xff;
MFPDELAY();
addr |= (dma_wd.dma_md & 0xff) << 8;
MFPDELAY();
if (ATARIHW_PRESENT( EXTD_DMA ))
addr |= (st_dma_ext_dmahi & 0xffff) << 16;
else
addr |= (dma_wd.dma_hi & 0xff) << 16;
MFPDELAY();
} while(addr != addr2);
if (addr >= PhysTrackBuffer + SUDT->spt*512) {
/* already read enough data, force an FDC interrupt to stop
* the read operation
*/
SET_IRQ_HANDLER( NULL );
MultReadInProgress = 0;
local_irq_restore(flags);
DPRINT(("fd_readtrack_check(): done\n"));
FDC_WRITE( FDCREG_CMD, FDCCMD_FORCI );
udelay(25);
/* No error until now -- the FDC would have interrupted
* otherwise!
*/
fd_rwsec_done1(0);
}
else {
/* not yet finished, wait another tenth rotation */
local_irq_restore(flags);
DPRINT(("fd_readtrack_check(): not yet finished\n"));
mod_timer(&readtrack_timer, jiffies + HZ/5/10);
}
}
static void fd_rwsec_done( int status )
{
DPRINT(("fd_rwsec_done()\n"));
if (read_track) {
del_timer(&readtrack_timer);
if (!MultReadInProgress)
return;
MultReadInProgress = 0;
}
fd_rwsec_done1(status);
}
static void fd_rwsec_done1(int status)
{
unsigned int track;
stop_timeout();
/* Correct the track if stretch != 0 */
if (SUDT->stretch) {
track = FDC_READ( FDCREG_TRACK);
MFPDELAY();
FDC_WRITE( FDCREG_TRACK, track << SUDT->stretch);
}
if (!UseTrackbuffer) {
dma_wd.dma_mode_status = 0x90;
MFPDELAY();
if (!(dma_wd.dma_mode_status & 0x01)) {
printk(KERN_ERR "fd%d: DMA error\n", SelectedDrive );
goto err_end;
}
}
MFPDELAY();
if (ReqCmd == WRITE && (status & FDCSTAT_WPROT)) {
printk(KERN_NOTICE "fd%d: is write protected\n", SelectedDrive );
goto err_end;
}
if ((status & FDCSTAT_RECNF) &&
/* RECNF is no error after a multiple read when the FDC
searched for a non-existent sector! */
!(read_track && FDC_READ(FDCREG_SECTOR) > SUDT->spt)) {
if (Probing) {
if (SUDT > atari_disk_type) {
if (SUDT[-1].blocks > ReqBlock) {
/* try another disk type */
SUDT--;
set_capacity(unit[SelectedDrive].disk[0],
SUDT->blocks);
} else
Probing = 0;
}
else {
if (SUD.flags & FTD_MSG)
printk(KERN_INFO "fd%d: Auto-detected floppy type %s\n",
SelectedDrive, SUDT->name );
Probing=0;
}
} else {
/* record not found, but not probing. Maybe stretch wrong ? Restart probing */
if (SUD.autoprobe) {
SUDT = atari_disk_type + StartDiskType[DriveType];
set_capacity(unit[SelectedDrive].disk[0],
SUDT->blocks);
Probing = 1;
}
}
if (Probing) {
if (ATARIHW_PRESENT(FDCSPEED)) {
dma_wd.fdc_speed = SUDT->fdc_speed;
MFPDELAY();
}
setup_req_params( SelectedDrive );
BufferDrive = -1;
do_fd_action( SelectedDrive );
return;
}
printk(KERN_ERR "fd%d: sector %d not found (side %d, track %d)\n",
SelectedDrive, FDC_READ (FDCREG_SECTOR), ReqSide, ReqTrack );
goto err_end;
}
if (status & FDCSTAT_CRC) {
printk(KERN_ERR "fd%d: CRC error (side %d, track %d, sector %d)\n",
SelectedDrive, ReqSide, ReqTrack, FDC_READ (FDCREG_SECTOR) );
goto err_end;
}
if (status & FDCSTAT_LOST) {
printk(KERN_ERR "fd%d: lost data (side %d, track %d, sector %d)\n",
SelectedDrive, ReqSide, ReqTrack, FDC_READ (FDCREG_SECTOR) );
goto err_end;
}
Probing = 0;
if (ReqCmd == READ) {
if (!read_track) {
void *addr;
addr = ATARIHW_PRESENT( EXTD_DMA ) ? ReqData : DMABuffer;
dma_cache_maintenance( virt_to_phys(addr), 512, 0 );
if (!ATARIHW_PRESENT( EXTD_DMA ))
copy_buffer (addr, ReqData);
} else {
dma_cache_maintenance( PhysTrackBuffer, MaxSectors[DriveType] * 512, 0 );
BufferDrive = SelectedDrive;
BufferSide = ReqSide;
BufferTrack = ReqTrack;
copy_buffer (SECTOR_BUFFER (ReqSector), ReqData);
}
}
if (++ReqCnt < blk_rq_cur_sectors(fd_request)) {
/* read next sector */
setup_req_params( SelectedDrive );
do_fd_action( SelectedDrive );
}
else {
/* all sectors finished */
fd_end_request_cur(BLK_STS_OK);
finish_fdc();
}
return;
err_end:
BufferDrive = -1;
fd_error();
}
static void fd_writetrack( void )
{
unsigned long paddr, flags;
unsigned int track;
DPRINT(("fd_writetrack() Tr=%d Si=%d\n", ReqTrack, ReqSide ));
paddr = PhysTrackBuffer;
dma_cache_maintenance( paddr, BUFFER_SIZE, 1 );
fd_select_side( ReqSide );
/* Cheat for track if stretch != 0 */
if (SUDT->stretch) {
track = FDC_READ( FDCREG_TRACK);
MFPDELAY();
FDC_WRITE(FDCREG_TRACK,track >> SUDT->stretch);
}
udelay(40);
/* Setup DMA */
local_irq_save(flags);
dma_wd.dma_lo = (unsigned char)paddr;
MFPDELAY();
paddr >>= 8;
dma_wd.dma_md = (unsigned char)paddr;
MFPDELAY();
paddr >>= 8;
if (ATARIHW_PRESENT( EXTD_DMA ))
st_dma_ext_dmahi = (unsigned short)paddr;
else
dma_wd.dma_hi = (unsigned char)paddr;
MFPDELAY();
local_irq_restore(flags);
/* Clear FIFO and switch DMA to correct mode */
dma_wd.dma_mode_status = 0x190;
MFPDELAY();
dma_wd.dma_mode_status = 0x90;
MFPDELAY();
dma_wd.dma_mode_status = 0x190;
MFPDELAY();
/* How many sectors for DMA */
dma_wd.fdc_acces_seccount = BUFFER_SIZE/512;
udelay(40);
/* Start operation */
dma_wd.dma_mode_status = FDCSELREG_STP | 0x100;
udelay(40);
SET_IRQ_HANDLER( fd_writetrack_done );
dma_wd.fdc_acces_seccount = FDCCMD_WRTRA | get_head_settle_flag();
MotorOn = 1;
start_timeout();
/* wait for interrupt */
}
static void fd_writetrack_done( int status )
{
DPRINT(("fd_writetrack_done()\n"));
stop_timeout();
if (status & FDCSTAT_WPROT) {
printk(KERN_NOTICE "fd%d: is write protected\n", SelectedDrive );
goto err_end;
}
if (status & FDCSTAT_LOST) {
printk(KERN_ERR "fd%d: lost data (side %d, track %d)\n",
SelectedDrive, ReqSide, ReqTrack );
goto err_end;
}
complete(&format_wait);
return;
err_end:
fd_error();
}
static void fd_times_out(struct timer_list *unused)
{
atari_disable_irq( IRQ_MFP_FDC );
if (!FloppyIRQHandler) goto end; /* int occurred after timer was fired, but
* before we came here... */
SET_IRQ_HANDLER( NULL );
/* If the timeout occurred while the readtrack_check timer was
* active, we need to cancel it, else bad things will happen */
if (UseTrackbuffer)
del_timer( &readtrack_timer );
FDC_WRITE( FDCREG_CMD, FDCCMD_FORCI );
udelay( 25 );
printk(KERN_ERR "floppy timeout\n" );
fd_error();
end:
atari_enable_irq( IRQ_MFP_FDC );
}
/* The (noop) seek operation here is needed to make the WP bit in the
* FDC status register accessible for check_change. If the last disk
* operation was a RDSEC, this bit would always read as 0
* no matter what :-( To save time, the seek goes to the track we're
* already on.
*/
static void finish_fdc( void )
{
if (!NeedSeek || !stdma_is_locked_by(floppy_irq)) {
finish_fdc_done( 0 );
}
else {
DPRINT(("finish_fdc: dummy seek started\n"));
FDC_WRITE (FDCREG_DATA, SUD.track);
SET_IRQ_HANDLER( finish_fdc_done );
FDC_WRITE (FDCREG_CMD, FDCCMD_SEEK);
MotorOn = 1;
start_timeout();
/* we must wait for the IRQ here, because the ST-DMA
is released immediately afterwards and the interrupt
may be delivered to the wrong driver. */
}
}
static void finish_fdc_done( int dummy )
{
unsigned long flags;
DPRINT(("finish_fdc_done entered\n"));
stop_timeout();
NeedSeek = 0;
if (timer_pending(&fd_timer) && time_before(fd_timer.expires, jiffies + 5))
/* If the check for a disk change is done too early after this
* last seek command, the WP bit still reads wrong :-((
*/
mod_timer(&fd_timer, jiffies + 5);
else
start_check_change_timer();
start_motor_off_timer();
local_irq_save(flags);
if (stdma_is_locked_by(floppy_irq))
stdma_release();
local_irq_restore(flags);
DPRINT(("finish_fdc() finished\n"));
}
/* The detection of disk changes is a dark chapter in Atari history :-(
* Because the "Drive ready" signal isn't present in the Atari
* hardware, one has to rely on the "Write Protect". This works fine,
* as long as no write protected disks are used. TOS solves this
* problem by introducing tri-state logic ("maybe changed") and
* looking at the serial number in block 0. This isn't possible for
* Linux, since the floppy driver can't make assumptions about the
* filesystem used on the disk and thus the contents of block 0. I've
* chosen the method to always say "The disk was changed" if it is
* unsure whether it was. This implies that every open or mount
* invalidates the disk buffers if you work with write protected
* disks. But at least this is better than working with incorrect data
* due to unrecognised disk changes.
*/
static unsigned int floppy_check_events(struct gendisk *disk,
unsigned int clearing)
{
struct atari_floppy_struct *p = disk->private_data;
unsigned int drive = p - unit;
if (test_bit (drive, &fake_change)) {
/* simulated change (e.g. after formatting) */
return DISK_EVENT_MEDIA_CHANGE;
}
if (test_bit (drive, &changed_floppies)) {
/* surely changed (the WP signal changed at least once) */
return DISK_EVENT_MEDIA_CHANGE;
}
if (UD.wpstat) {
/* WP is on -> could be changed: to be sure, buffers should be
* invalidated...
*/
return DISK_EVENT_MEDIA_CHANGE;
}
return 0;
}
static int floppy_revalidate(struct gendisk *disk)
{
struct atari_floppy_struct *p = disk->private_data;
unsigned int drive = p - unit;
if (test_bit(drive, &changed_floppies) ||
test_bit(drive, &fake_change) || !p->disktype) {
if (UD.flags & FTD_MSG)
printk(KERN_ERR "floppy: clear format %p!\n", UDT);
BufferDrive = -1;
clear_bit(drive, &fake_change);
clear_bit(drive, &changed_floppies);
/* MSch: clearing geometry makes sense only for autoprobe
formats, for 'permanent user-defined' parameter:
restore default_params[] here if flagged valid! */
if (default_params[drive].blocks == 0)
UDT = NULL;
else
UDT = &default_params[drive];
}
return 0;
}
/* This sets up the global variables describing the current request. */
static void setup_req_params( int drive )
{
int block = ReqBlock + ReqCnt;
ReqTrack = block / UDT->spt;
ReqSector = block - ReqTrack * UDT->spt + 1;
ReqSide = ReqTrack & 1;
ReqTrack >>= 1;
ReqData = ReqBuffer + 512 * ReqCnt;
if (UseTrackbuffer)
read_track = (ReqCmd == READ && unit[drive].error_count == 0);
else
read_track = 0;
DPRINT(("Request params: Si=%d Tr=%d Se=%d Data=%08lx\n",ReqSide,
ReqTrack, ReqSector, (unsigned long)ReqData ));
}
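/*
 * Worked example (hypothetical 9-sectors-per-track format, stretch 0):
 * for absolute block 19, ReqTrack = 19/9 = 2 and ReqSector =
 * 19 - 2*9 + 1 = 2; ReqSide = 2 & 1 = 0 and the cylinder becomes
 * 2 >> 1 = 1, i.e. sector 2 on side 0 of cylinder 1.
 */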
static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct atari_floppy_struct *floppy = bd->rq->q->disk->private_data;
int drive = floppy - unit;
int type = floppy->type;
DPRINT(("Queue request: drive %d type %d sectors %d of %d last %d\n",
drive, type, blk_rq_cur_sectors(bd->rq),
blk_rq_sectors(bd->rq), bd->last));
spin_lock_irq(&ataflop_lock);
if (fd_request) {
spin_unlock_irq(&ataflop_lock);
return BLK_STS_DEV_RESOURCE;
}
if (!stdma_try_lock(floppy_irq, NULL)) {
spin_unlock_irq(&ataflop_lock);
return BLK_STS_RESOURCE;
}
fd_request = bd->rq;
unit[drive].error_count = 0;
blk_mq_start_request(fd_request);
atari_disable_irq( IRQ_MFP_FDC );
IsFormatting = 0;
if (!UD.connected) {
/* drive not connected */
printk(KERN_ERR "Unknown Device: fd%d\n", drive );
fd_end_request_cur(BLK_STS_IOERR);
stdma_release();
goto out;
}
if (type == 0) {
if (!UDT) {
Probing = 1;
UDT = atari_disk_type + StartDiskType[DriveType];
set_capacity(bd->rq->q->disk, UDT->blocks);
UD.autoprobe = 1;
}
}
else {
/* user supplied disk type */
if (--type >= NUM_DISK_MINORS) {
printk(KERN_WARNING "fd%d: invalid disk format", drive );
fd_end_request_cur(BLK_STS_IOERR);
stdma_release();
goto out;
}
if (minor2disktype[type].drive_types > DriveType) {
printk(KERN_WARNING "fd%d: unsupported disk format", drive );
fd_end_request_cur(BLK_STS_IOERR);
stdma_release();
goto out;
}
type = minor2disktype[type].index;
UDT = &atari_disk_type[type];
set_capacity(bd->rq->q->disk, UDT->blocks);
UD.autoprobe = 0;
}
/* stop deselect timer */
del_timer( &motor_off_timer );
ReqCnt = 0;
ReqCmd = rq_data_dir(fd_request);
ReqBlock = blk_rq_pos(fd_request);
ReqBuffer = bio_data(fd_request->bio);
setup_req_params( drive );
do_fd_action( drive );
atari_enable_irq( IRQ_MFP_FDC );
out:
spin_unlock_irq(&ataflop_lock);
return BLK_STS_OK;
}
static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long param)
{
struct gendisk *disk = bdev->bd_disk;
struct atari_floppy_struct *floppy = disk->private_data;
int drive = floppy - unit;
int type = floppy->type;
struct atari_format_descr fmt_desc;
struct atari_disk_type *dtp;
struct floppy_struct getprm;
int settype;
struct floppy_struct setprm;
void __user *argp = (void __user *)param;
switch (cmd) {
case FDGETPRM:
if (type) {
if (--type >= NUM_DISK_MINORS)
return -ENODEV;
if (minor2disktype[type].drive_types > DriveType)
return -ENODEV;
type = minor2disktype[type].index;
dtp = &atari_disk_type[type];
if (UD.flags & FTD_MSG)
printk (KERN_ERR "floppy%d: found dtp %p name %s!\n",
drive, dtp, dtp->name);
}
else {
if (!UDT)
return -ENXIO;
else
dtp = UDT;
}
memset((void *)&getprm, 0, sizeof(getprm));
getprm.size = dtp->blocks;
getprm.sect = dtp->spt;
getprm.head = 2;
getprm.track = dtp->blocks/dtp->spt/2;
getprm.stretch = dtp->stretch;
if (copy_to_user(argp, &getprm, sizeof(getprm)))
return -EFAULT;
return 0;
}
switch (cmd) {
case FDSETPRM:
case FDDEFPRM:
/*
* MSch 7/96: simple 'set geometry' case: just set the
* 'default' device params (minor == 0).
* Currently, the drive geometry is cleared after each
* disk change and subsequent revalidate()! simple
* implementation of FDDEFPRM: save geometry from a
* FDDEFPRM call and restore it in floppy_revalidate() !
*/
/* get the parameters from user space */
if (floppy->ref != 1 && floppy->ref != -1)
return -EBUSY;
if (copy_from_user(&setprm, argp, sizeof(setprm)))
return -EFAULT;
/*
* first of all: check for floppy change and revalidate,
* or the next access will revalidate - and clear UDT :-(
*/
if (floppy_check_events(disk, 0))
floppy_revalidate(disk);
if (UD.flags & FTD_MSG)
printk (KERN_INFO "floppy%d: setting size %d spt %d str %d!\n",
drive, setprm.size, setprm.sect, setprm.stretch);
/* what if type > 0 here? Overwrite specified entry ? */
if (type) {
/* refuse to re-set a predefined type for now */
finish_fdc();
return -EINVAL;
}
/*
* type == 0: first look for a matching entry in the type list,
* and set the UD.disktype field to use the predefined entry.
* TODO: add user-defined format to head of autoprobe list ?
* Useful to include the user-type for future autodetection!
*/
for (settype = 0; settype < NUM_DISK_MINORS; settype++) {
int setidx = 0;
if (minor2disktype[settype].drive_types > DriveType) {
/* skip this one, invalid for drive ... */
continue;
}
setidx = minor2disktype[settype].index;
dtp = &atari_disk_type[setidx];
/* found matching entry ?? */
if ( dtp->blocks == setprm.size
&& dtp->spt == setprm.sect
&& dtp->stretch == setprm.stretch ) {
if (UD.flags & FTD_MSG)
printk (KERN_INFO "floppy%d: setting %s %p!\n",
drive, dtp->name, dtp);
UDT = dtp;
set_capacity(disk, UDT->blocks);
if (cmd == FDDEFPRM) {
/* save settings as permanent default type */
default_params[drive].name = dtp->name;
default_params[drive].spt = dtp->spt;
default_params[drive].blocks = dtp->blocks;
default_params[drive].fdc_speed = dtp->fdc_speed;
default_params[drive].stretch = dtp->stretch;
}
return 0;
}
}
/* no matching disk type found above - setting user_params */
if (cmd == FDDEFPRM) {
/* set permanent type */
dtp = &default_params[drive];
} else
/* set user type (reset by disk change!) */
dtp = &user_params[drive];
dtp->name = "user format";
dtp->blocks = setprm.size;
dtp->spt = setprm.sect;
if (setprm.sect > 14)
dtp->fdc_speed = 3;
else
dtp->fdc_speed = 0;
dtp->stretch = setprm.stretch;
if (UD.flags & FTD_MSG)
printk (KERN_INFO "floppy%d: blk %d spt %d str %d!\n",
drive, dtp->blocks, dtp->spt, dtp->stretch);
/* sanity check */
if (setprm.track != dtp->blocks/dtp->spt/2 ||
setprm.head != 2) {
finish_fdc();
return -EINVAL;
}
UDT = dtp;
set_capacity(disk, UDT->blocks);
return 0;
case FDMSGON:
UD.flags |= FTD_MSG;
return 0;
case FDMSGOFF:
UD.flags &= ~FTD_MSG;
return 0;
case FDSETEMSGTRESH:
return -EINVAL;
case FDFMTBEG:
return 0;
case FDFMTTRK:
if (floppy->ref != 1 && floppy->ref != -1)
return -EBUSY;
if (copy_from_user(&fmt_desc, argp, sizeof(fmt_desc)))
return -EFAULT;
return do_format(drive, type, &fmt_desc);
case FDCLRPRM:
UDT = NULL;
/* MSch: invalidate default_params */
default_params[drive].blocks = 0;
set_capacity(disk, MAX_DISK_SIZE * 2);
fallthrough;
case FDFMTEND:
case FDFLUSH:
/* invalidate the buffer track to force a reread */
BufferDrive = -1;
set_bit(drive, &fake_change);
if (disk_check_media_change(disk))
floppy_revalidate(disk);
return 0;
default:
return -EINVAL;
}
}
static int fd_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
int ret;
mutex_lock(&ataflop_mutex);
ret = fd_locked_ioctl(bdev, mode, cmd, arg);
mutex_unlock(&ataflop_mutex);
return ret;
}
/* Initialize the 'unit' variable for drive 'drive' */
static void __init fd_probe( int drive )
{
UD.connected = 0;
UDT = NULL;
if (!fd_test_drive_present( drive ))
return;
UD.connected = 1;
UD.track = 0;
switch( UserSteprate[drive] ) {
case 2:
UD.steprate = FDCSTEP_2;
break;
case 3:
UD.steprate = FDCSTEP_3;
break;
case 6:
UD.steprate = FDCSTEP_6;
break;
case 12:
UD.steprate = FDCSTEP_12;
break;
default: /* should be -1 for "not set by user" */
if (ATARIHW_PRESENT( FDCSPEED ) || MACH_IS_MEDUSA)
UD.steprate = FDCSTEP_3;
else
UD.steprate = FDCSTEP_6;
break;
}
MotorOn = 1; /* from probe restore operation! */
}
/* This function tests the physical presence of a floppy drive (not
* whether a disk is inserted). This is done by issuing a restore
* command, waiting max. 2 seconds (that should be enough to move the
* head across the whole disk) and looking at the state of the "TR00"
* signal. This should now be raised if there is a drive connected
* (and there is no hardware failure :-) Otherwise, the drive is
* declared absent.
*/
static int __init fd_test_drive_present( int drive )
{
unsigned long timeout;
unsigned char status;
int ok;
if (drive >= (MACH_IS_FALCON ? 1 : 2)) return( 0 );
fd_select_drive( drive );
/* disable interrupt temporarily */
atari_turnoff_irq( IRQ_MFP_FDC );
FDC_WRITE (FDCREG_TRACK, 0xff00);
FDC_WRITE( FDCREG_CMD, FDCCMD_RESTORE | FDCCMDADD_H | FDCSTEP_6 );
timeout = jiffies + 2*HZ+HZ/2;
while (time_before(jiffies, timeout))
if (!(st_mfp.par_dt_reg & 0x20))
break;
status = FDC_READ( FDCREG_STATUS );
ok = (status & FDCSTAT_TR00) != 0;
/* force interrupt to abort restore operation (FDC would try
* about 50 seconds!) */
FDC_WRITE( FDCREG_CMD, FDCCMD_FORCI );
udelay(500);
status = FDC_READ( FDCREG_STATUS );
udelay(20);
if (ok) {
/* dummy seek command to make WP bit accessible */
FDC_WRITE( FDCREG_DATA, 0 );
FDC_WRITE( FDCREG_CMD, FDCCMD_SEEK );
while( st_mfp.par_dt_reg & 0x20 )
;
status = FDC_READ( FDCREG_STATUS );
}
atari_turnon_irq( IRQ_MFP_FDC );
return( ok );
}
/* Look how many and which kind of drives are connected. If there are
* floppies, additionally start the disk-change and motor-off timers.
*/
static void __init config_types( void )
{
int drive, cnt = 0;
/* for probing drives, set the FDC speed to 8 MHz */
if (ATARIHW_PRESENT(FDCSPEED))
dma_wd.fdc_speed = 0;
printk(KERN_INFO "Probing floppy drive(s):\n");
for( drive = 0; drive < FD_MAX_UNITS; drive++ ) {
fd_probe( drive );
if (UD.connected) {
printk(KERN_INFO "fd%d\n", drive);
++cnt;
}
}
if (FDC_READ( FDCREG_STATUS ) & FDCSTAT_BUSY) {
/* If FDC is still busy from probing, give it another FORCI
* command to abort the operation. If this isn't done, the FDC
* will interrupt later and its IRQ line stays low, because
* the status register isn't read. And this will block any
* interrupts on this IRQ line :-(
*/
FDC_WRITE( FDCREG_CMD, FDCCMD_FORCI );
udelay(500);
FDC_READ( FDCREG_STATUS );
udelay(20);
}
if (cnt > 0) {
start_motor_off_timer();
if (cnt == 1) fd_select_drive( 0 );
start_check_change_timer();
}
}
/*
* floppy_open checks for aliasing (/dev/fd0 can be the same as
* /dev/PS0 etc), and disallows simultaneous access to the same
* drive with different device numbers.
*/
static int floppy_open(struct gendisk *disk, blk_mode_t mode)
{
struct atari_floppy_struct *p = disk->private_data;
int type = disk->first_minor >> 2;
DPRINT(("fd_open: type=%d\n",type));
if (p->ref && p->type != type)
return -EBUSY;
if (p->ref == -1 || (p->ref && mode & BLK_OPEN_EXCL))
return -EBUSY;
if (mode & BLK_OPEN_EXCL)
p->ref = -1;
else
p->ref++;
p->type = type;
if (mode & BLK_OPEN_NDELAY)
return 0;
if (mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) {
if (disk_check_media_change(disk))
floppy_revalidate(disk);
if (mode & BLK_OPEN_WRITE) {
if (p->wpstat) {
if (p->ref < 0)
p->ref = 0;
else
p->ref--;
return -EROFS;
}
}
}
return 0;
}
static int floppy_unlocked_open(struct gendisk *disk, blk_mode_t mode)
{
int ret;
mutex_lock(&ataflop_mutex);
ret = floppy_open(disk, mode);
mutex_unlock(&ataflop_mutex);
return ret;
}
static void floppy_release(struct gendisk *disk)
{
struct atari_floppy_struct *p = disk->private_data;
mutex_lock(&ataflop_mutex);
if (p->ref < 0)
p->ref = 0;
else if (!p->ref--) {
printk(KERN_ERR "floppy_release with fd_ref == 0");
p->ref = 0;
}
mutex_unlock(&ataflop_mutex);
}
static const struct block_device_operations floppy_fops = {
.owner = THIS_MODULE,
.open = floppy_unlocked_open,
.release = floppy_release,
.ioctl = fd_ioctl,
.check_events = floppy_check_events,
};
static const struct blk_mq_ops ataflop_mq_ops = {
.queue_rq = ataflop_queue_rq,
};
static int ataflop_alloc_disk(unsigned int drive, unsigned int type)
{
struct gendisk *disk;
disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL);
if (IS_ERR(disk))
return PTR_ERR(disk);
disk->major = FLOPPY_MAJOR;
disk->first_minor = drive + (type << 2);
disk->minors = 1;
sprintf(disk->disk_name, "fd%d", drive);
disk->fops = &floppy_fops;
disk->flags |= GENHD_FL_NO_PART;
disk->events = DISK_EVENT_MEDIA_CHANGE;
disk->private_data = &unit[drive];
set_capacity(disk, MAX_DISK_SIZE * 2);
unit[drive].disk[type] = disk;
return 0;
}
static void ataflop_probe(dev_t dev)
{
int drive = MINOR(dev) & 3;
int type = MINOR(dev) >> 2;
if (type)
type--;
if (drive >= FD_MAX_UNITS || type >= NUM_DISK_MINORS)
return;
if (unit[drive].disk[type])
return;
if (ataflop_alloc_disk(drive, type))
return;
if (add_disk(unit[drive].disk[type]))
goto cleanup_disk;
unit[drive].registered[type] = true;
return;
cleanup_disk:
put_disk(unit[drive].disk[type]);
unit[drive].disk[type] = NULL;
}
static void atari_floppy_cleanup(void)
{
int i;
int type;
for (i = 0; i < FD_MAX_UNITS; i++) {
for (type = 0; type < NUM_DISK_MINORS; type++) {
if (!unit[i].disk[type])
continue;
del_gendisk(unit[i].disk[type]);
put_disk(unit[i].disk[type]);
}
blk_mq_free_tag_set(&unit[i].tag_set);
}
del_timer_sync(&fd_timer);
atari_stram_free(DMABuffer);
}
static void atari_cleanup_floppy_disk(struct atari_floppy_struct *fs)
{
int type;
for (type = 0; type < NUM_DISK_MINORS; type++) {
if (!fs->disk[type])
continue;
if (fs->registered[type])
del_gendisk(fs->disk[type]);
put_disk(fs->disk[type]);
}
blk_mq_free_tag_set(&fs->tag_set);
}
static int __init atari_floppy_init (void)
{
int i;
int ret;
if (!MACH_IS_ATARI)
/* Amiga, Mac, ... don't have Atari-compatible floppy :-) */
return -ENODEV;
for (i = 0; i < FD_MAX_UNITS; i++) {
memset(&unit[i].tag_set, 0, sizeof(unit[i].tag_set));
unit[i].tag_set.ops = &ataflop_mq_ops;
unit[i].tag_set.nr_hw_queues = 1;
unit[i].tag_set.nr_maps = 1;
unit[i].tag_set.queue_depth = 2;
unit[i].tag_set.numa_node = NUMA_NO_NODE;
unit[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ret = blk_mq_alloc_tag_set(&unit[i].tag_set);
if (ret)
goto err;
ret = ataflop_alloc_disk(i, 0);
if (ret) {
blk_mq_free_tag_set(&unit[i].tag_set);
goto err;
}
}
if (UseTrackbuffer < 0)
/* not set by user -> use default: for now, we turn
track buffering off for all Medusas, though it
could be used with ones that have a counter
card. But the test is too hard :-( */
UseTrackbuffer = !MACH_IS_MEDUSA;
/* initialize variables */
SelectedDrive = -1;
BufferDrive = -1;
DMABuffer = atari_stram_alloc(BUFFER_SIZE+512, "ataflop");
if (!DMABuffer) {
printk(KERN_ERR "atari_floppy_init: cannot get dma buffer\n");
ret = -ENOMEM;
goto err;
}
TrackBuffer = DMABuffer + 512;
PhysDMABuffer = atari_stram_to_phys(DMABuffer);
PhysTrackBuffer = virt_to_phys(TrackBuffer);
BufferDrive = BufferSide = BufferTrack = -1;
for (i = 0; i < FD_MAX_UNITS; i++) {
unit[i].track = -1;
unit[i].flags = 0;
ret = add_disk(unit[i].disk[0]);
if (ret)
goto err_out_dma;
unit[i].registered[0] = true;
}
printk(KERN_INFO "Atari floppy driver: max. %cD, %strack buffering\n",
DriveType == 0 ? 'D' : DriveType == 1 ? 'H' : 'E',
UseTrackbuffer ? "" : "no ");
config_types();
ret = __register_blkdev(FLOPPY_MAJOR, "fd", ataflop_probe);
if (ret) {
printk(KERN_ERR "atari_floppy_init: cannot register block device\n");
atari_floppy_cleanup();
}
return ret;
err_out_dma:
atari_stram_free(DMABuffer);
err:
while (--i >= 0)
atari_cleanup_floppy_disk(&unit[i]);
return ret;
}
#ifndef MODULE
static int __init atari_floppy_setup(char *str)
{
int ints[3 + FD_MAX_UNITS];
int i;
if (!MACH_IS_ATARI)
return 0;
str = get_options(str, 3 + FD_MAX_UNITS, ints);
if (ints[0] < 1) {
printk(KERN_ERR "ataflop_setup: no arguments!\n" );
return 0;
}
else if (ints[0] > 2+FD_MAX_UNITS) {
printk(KERN_ERR "ataflop_setup: too many arguments\n" );
}
if (ints[1] < 0 || ints[1] > 2)
printk(KERN_ERR "ataflop_setup: bad drive type\n" );
else
DriveType = ints[1];
if (ints[0] >= 2)
UseTrackbuffer = (ints[2] > 0);
for( i = 3; i <= ints[0] && i-3 < FD_MAX_UNITS; ++i ) {
if (ints[i] != 2 && ints[i] != 3 && ints[i] != 6 && ints[i] != 12)
printk(KERN_ERR "ataflop_setup: bad steprate\n" );
else
UserSteprate[i-3] = ints[i];
}
return 1;
}
__setup("floppy=", atari_floppy_setup);
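/*
 * Example (illustrative): booting with "floppy=1,1,3" selects drive
 * type HD, enables track buffering and sets a 3 ms step rate for
 * unit 0, per the option parsing above.
 */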
#endif
static void __exit atari_floppy_exit(void)
{
unregister_blkdev(FLOPPY_MAJOR, "fd");
atari_floppy_cleanup();
}
module_init(atari_floppy_init)
module_exit(atari_floppy_exit)
MODULE_LICENSE("GPL");
| linux-master | drivers/block/ataflop.c |
/*
* Copyright (C) 2000 Jens Axboe <[email protected]>
* Copyright (C) 2001-2004 Peter Osterlund <[email protected]>
* Copyright (C) 2006 Thomas Maier <[email protected]>
*
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*
* Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
* DVD-RAM devices.
*
* Theory of operation:
*
* At the lowest level, there is the standard driver for the CD/DVD device,
* such as drivers/scsi/sr.c. This driver can handle read and write requests,
* but it doesn't know anything about the special restrictions that apply to
* packet writing. One restriction is that write requests must be aligned to
* packet boundaries on the physical media, and the size of a write request
* must be equal to the packet size. Another restriction is that a
* GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
* command, if the previous command was a write.
*
* The purpose of the packet writing driver is to hide these restrictions from
* higher layers, such as file systems, and present a block device that can be
* randomly read and written using 2kB-sized blocks.
*
* The lowest layer in the packet writing driver is the packet I/O scheduler.
* Its data is defined by the struct packet_iosched and includes two bio
* queues with pending read and write requests. These queues are processed
* by the pkt_iosched_process_queue() function. The write requests in this
* queue are already properly aligned and sized. This layer is responsible for
* issuing the flush cache commands and scheduling the I/O in a good order.
*
* The next layer transforms unaligned write requests to aligned writes. This
* transformation requires reading missing pieces of data from the underlying
* block device, assembling the pieces to full packets and queuing them to the
* packet I/O scheduler.
*
* At the top layer there is a custom ->submit_bio function that forwards
* read requests directly to the iosched queue and puts write requests in the
* unaligned write queue. A kernel thread performs the necessary read
* gathering to convert the unaligned writes to aligned writes and then feeds
* them to the packet I/O scheduler.
*
*************************************************************************/
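/*
 * Sketch of the write path described above (illustrative):
 *
 *	submit_bio(WRITE) -> unaligned write queue
 *	  -> kernel thread read-gathers the missing pieces
 *	  -> full, aligned packet -> packet I/O scheduler
 *	  -> underlying CD/DVD driver (e.g. drivers/scsi/sr.c)
 */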
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/backing-dev.h>
#include <linux/compat.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nospec.h>
#include <linux/pktcdvd.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
#include <asm/unaligned.h>
#define DRIVER_NAME "pktcdvd"
#define MAX_SPEED 0xffff
static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
static int pktdev_major;
static int write_congestion_on = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex; /* Serialize open/close/setup/teardown */
static mempool_t psd_pool;
static struct bio_set pkt_bio_set;
/* /sys/class/pktcdvd */
static struct class class_pktcdvd;
static struct dentry *pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */
/* forward declaration */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);
static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
{
return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
}
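/*
 * Example (hypothetical values): with pd->offset == 0 and a packet
 * size of 128 sectors (64 KiB), get_zone() maps sector 300 down to
 * zone 256, the first sector of the packet containing it.
 */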
/**********************************************************
* sysfs interface for pktcdvd
* by (C) 2006 Thomas Maier <[email protected]>
/sys/class/pktcdvd/pktcdvd[0-7]/
stat/reset
stat/packets_started
stat/packets_finished
stat/kb_written
stat/kb_read
stat/kb_read_gather
write_queue/size
write_queue/congestion_off
write_queue/congestion_on
**********************************************************/
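/*
 * Illustrative shell usage (paths as laid out above):
 *	cat /sys/class/pktcdvd/pktcdvd0/stat/kb_written
 *	echo 1 > /sys/class/pktcdvd/pktcdvd0/stat/reset
 */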
static ssize_t packets_started_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pktcdvd_device *pd = dev_get_drvdata(dev);
return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started);
}
static DEVICE_ATTR_RO(packets_started);
static ssize_t packets_finished_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pktcdvd_device *pd = dev_get_drvdata(dev);
return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended);
}
static DEVICE_ATTR_RO(packets_finished);
static ssize_t kb_written_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pktcdvd_device *pd = dev_get_drvdata(dev);
return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1);
}
static DEVICE_ATTR_RO(kb_written);
static ssize_t kb_read_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pktcdvd_device *pd = dev_get_drvdata(dev);
return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1);
}
static DEVICE_ATTR_RO(kb_read);
static ssize_t kb_read_gather_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pktcdvd_device *pd = dev_get_drvdata(dev);
return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1);
}
static DEVICE_ATTR_RO(kb_read_gather);
static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
struct pktcdvd_device *pd = dev_get_drvdata(dev);
if (len > 0) {
pd->stats.pkt_started = 0;
pd->stats.pkt_ended = 0;
pd->stats.secs_w = 0;
pd->stats.secs_rg = 0;
pd->stats.secs_r = 0;
}
return len;
}
static DEVICE_ATTR_WO(reset);
static struct attribute *pkt_stat_attrs[] = {
&dev_attr_packets_finished.attr,
&dev_attr_packets_started.attr,
&dev_attr_kb_read.attr,
&dev_attr_kb_written.attr,
&dev_attr_kb_read_gather.attr,
&dev_attr_reset.attr,
NULL,
};
static const struct attribute_group pkt_stat_group = {
.name = "stat",
.attrs = pkt_stat_attrs,
};
static ssize_t size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pktcdvd_device *pd = dev_get_drvdata(dev);
int n;
spin_lock(&pd->lock);
n = sysfs_emit(buf, "%d\n", pd->bio_queue_size);
spin_unlock(&pd->lock);
return n;
}
static DEVICE_ATTR_RO(size);
static void init_write_congestion_marks(int *lo, int *hi)
{
if (*hi > 0) {
*hi = max(*hi, 500);
*hi = min(*hi, 1000000);
if (*lo <= 0)
*lo = *hi - 100;
else {
*lo = min(*lo, *hi - 100);
*lo = max(*lo, 100);
}
} else {
*hi = -1;
*lo = -1;
}
}
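/*
 * Clamping example (illustrative): a request of lo=0, hi=200 becomes
 * hi=500 (raised to the minimum), lo=400 (hi - 100); lo=50000,
 * hi=2000000 becomes hi=1000000 (capped), lo=50000 (unchanged). Any
 * hi <= 0 disables congestion handling by setting both marks to -1.
 */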
static ssize_t congestion_off_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pktcdvd_device *pd = dev_get_drvdata(dev);
int n;
spin_lock(&pd->lock);
n = sysfs_emit(buf, "%d\n", pd->write_congestion_off);
spin_unlock(&pd->lock);
return n;
}
static ssize_t congestion_off_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct pktcdvd_device *pd = dev_get_drvdata(dev);
int val, ret;
ret = kstrtoint(buf, 10, &val);
if (ret)
return ret;
spin_lock(&pd->lock);
pd->write_congestion_off = val;
init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on);
spin_unlock(&pd->lock);
return len;
}
static DEVICE_ATTR_RW(congestion_off);
static ssize_t congestion_on_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pktcdvd_device *pd = dev_get_drvdata(dev);
int n;
spin_lock(&pd->lock);
n = sysfs_emit(buf, "%d\n", pd->write_congestion_on);
spin_unlock(&pd->lock);
return n;
}
static ssize_t congestion_on_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct pktcdvd_device *pd = dev_get_drvdata(dev);
int val, ret;
ret = kstrtoint(buf, 10, &val);
if (ret)
return ret;
spin_lock(&pd->lock);
pd->write_congestion_on = val;
init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on);
spin_unlock(&pd->lock);
return len;
}
static DEVICE_ATTR_RW(congestion_on);
static struct attribute *pkt_wq_attrs[] = {
&dev_attr_congestion_on.attr,
&dev_attr_congestion_off.attr,
&dev_attr_size.attr,
NULL,
};
static const struct attribute_group pkt_wq_group = {
.name = "write_queue",
.attrs = pkt_wq_attrs,
};
static const struct attribute_group *pkt_groups[] = {
&pkt_stat_group,
&pkt_wq_group,
NULL,
};
static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
if (class_is_registered(&class_pktcdvd)) {
pd->dev = device_create_with_groups(&class_pktcdvd, NULL,
MKDEV(0, 0), pd, pkt_groups,
"%s", pd->disk->disk_name);
if (IS_ERR(pd->dev))
pd->dev = NULL;
}
}
static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
{
	if (class_is_registered(&class_pktcdvd) && pd->dev)
		device_unregister(pd->dev);
}
/********************************************************************
 *
 * /sys/class/pktcdvd/
 *    add           map block device
 *    remove        unmap packet dev
 *    device_map    show mappings
 *
 *******************************************************************/
static ssize_t device_map_show(const struct class *c, const struct class_attribute *attr,
char *data)
{
int n = 0;
int idx;
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
for (idx = 0; idx < MAX_WRITERS; idx++) {
struct pktcdvd_device *pd = pkt_devs[idx];
if (!pd)
continue;
n += sysfs_emit_at(data, n, "%s %u:%u %u:%u\n",
pd->disk->disk_name,
MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
MAJOR(pd->bdev->bd_dev),
MINOR(pd->bdev->bd_dev));
}
mutex_unlock(&ctl_mutex);
return n;
}
static CLASS_ATTR_RO(device_map);
static ssize_t add_store(const struct class *c, const struct class_attribute *attr,
const char *buf, size_t count)
{
unsigned int major, minor;
if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
/* pkt_setup_dev() expects caller to hold reference to self */
if (!try_module_get(THIS_MODULE))
return -ENODEV;
pkt_setup_dev(MKDEV(major, minor), NULL);
module_put(THIS_MODULE);
return count;
}
return -EINVAL;
}
static CLASS_ATTR_WO(add);
static ssize_t remove_store(const struct class *c, const struct class_attribute *attr,
const char *buf, size_t count)
{
unsigned int major, minor;
if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
pkt_remove_dev(MKDEV(major, minor));
return count;
}
return -EINVAL;
}
static CLASS_ATTR_WO(remove);
static struct attribute *class_pktcdvd_attrs[] = {
&class_attr_add.attr,
&class_attr_remove.attr,
&class_attr_device_map.attr,
NULL,
};
ATTRIBUTE_GROUPS(class_pktcdvd);
static struct class class_pktcdvd = {
.name = DRIVER_NAME,
.class_groups = class_pktcdvd_groups,
};
static int pkt_sysfs_init(void)
{
/*
* create control files in sysfs
* /sys/class/pktcdvd/...
*/
return class_register(&class_pktcdvd);
}
static void pkt_sysfs_cleanup(void)
{
class_unregister(&class_pktcdvd);
}
/********************************************************************
 *
 * entries in debugfs
 *
 *  /sys/kernel/debug/pktcdvd[0-7]/
 *     info
 *
 *******************************************************************/
static void pkt_count_states(struct pktcdvd_device *pd, int *states)
{
struct packet_data *pkt;
int i;
for (i = 0; i < PACKET_NUM_STATES; i++)
states[i] = 0;
spin_lock(&pd->cdrw.active_list_lock);
list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
states[pkt->state]++;
}
spin_unlock(&pd->cdrw.active_list_lock);
}
static int pkt_seq_show(struct seq_file *m, void *p)
{
struct pktcdvd_device *pd = m->private;
char *msg;
int states[PACKET_NUM_STATES];
seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name, pd->bdev);
seq_printf(m, "\nSettings:\n");
seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
if (pd->settings.write_type == 0)
msg = "Packet";
else
msg = "Unknown";
seq_printf(m, "\twrite type:\t\t%s\n", msg);
seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
msg = "Mode 1";
else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
msg = "Mode 2";
else
msg = "Unknown";
seq_printf(m, "\tblock mode:\t\t%s\n", msg);
seq_printf(m, "\nStatistics:\n");
seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
seq_printf(m, "\nMisc:\n");
seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
seq_printf(m, "\nQueue state:\n");
seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", pd->current_sector);
pkt_count_states(pd, states);
seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
states[0], states[1], states[2], states[3], states[4], states[5]);
seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
pd->write_congestion_off,
pd->write_congestion_on);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(pkt_seq);
static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
{
if (!pkt_debugfs_root)
return;
pd->dfs_d_root = debugfs_create_dir(pd->disk->disk_name, pkt_debugfs_root);
if (!pd->dfs_d_root)
return;
pd->dfs_f_info = debugfs_create_file("info", 0444, pd->dfs_d_root,
pd, &pkt_seq_fops);
}
static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
{
if (!pkt_debugfs_root)
return;
debugfs_remove(pd->dfs_f_info);
debugfs_remove(pd->dfs_d_root);
pd->dfs_f_info = NULL;
pd->dfs_d_root = NULL;
}
static void pkt_debugfs_init(void)
{
pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
}
static void pkt_debugfs_cleanup(void)
{
debugfs_remove(pkt_debugfs_root);
pkt_debugfs_root = NULL;
}
/* ----------------------------------------------------------*/
static void pkt_bio_finished(struct pktcdvd_device *pd)
{
struct device *ddev = disk_to_dev(pd->disk);
BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
dev_dbg(ddev, "queue empty\n");
atomic_set(&pd->iosched.attention, 1);
wake_up(&pd->wqueue);
}
}
/*
* Allocate a packet_data struct
*/
static struct packet_data *pkt_alloc_packet_data(int frames)
{
int i;
struct packet_data *pkt;
pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
if (!pkt)
goto no_pkt;
pkt->frames = frames;
pkt->w_bio = bio_kmalloc(frames, GFP_KERNEL);
if (!pkt->w_bio)
goto no_bio;
for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
if (!pkt->pages[i])
goto no_page;
}
spin_lock_init(&pkt->lock);
bio_list_init(&pkt->orig_bios);
for (i = 0; i < frames; i++) {
pkt->r_bios[i] = bio_kmalloc(1, GFP_KERNEL);
if (!pkt->r_bios[i])
goto no_rd_bio;
}
return pkt;
no_rd_bio:
for (i = 0; i < frames; i++)
kfree(pkt->r_bios[i]);
no_page:
for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
if (pkt->pages[i])
__free_page(pkt->pages[i]);
kfree(pkt->w_bio);
no_bio:
kfree(pkt);
no_pkt:
return NULL;
}
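/*
 * Layout note (descriptive, assuming FRAMES_PER_PAGE is
 * PAGE_SIZE / CD_FRAMESIZE as in pktcdvd.h): the payload lives in
 * frames / FRAMES_PER_PAGE pages; with 4 KiB pages each page holds two
 * CD_FRAMESIZE (2048-byte) frames, so a 32-frame packet needs 16 zeroed
 * pages. One bio per frame is also preallocated for the read-gather
 * phase, plus w_bio for the final packet write.
 */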
/*
* Free a packet_data struct
*/
static void pkt_free_packet_data(struct packet_data *pkt)
{
int i;
for (i = 0; i < pkt->frames; i++)
kfree(pkt->r_bios[i]);
for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
__free_page(pkt->pages[i]);
kfree(pkt->w_bio);
kfree(pkt);
}
static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
{
struct packet_data *pkt, *next;
BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));
list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
pkt_free_packet_data(pkt);
}
INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
}
static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
{
struct packet_data *pkt;
BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));
while (nr_packets > 0) {
pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
if (!pkt) {
pkt_shrink_pktlist(pd);
return 0;
}
pkt->id = nr_packets;
pkt->pd = pd;
list_add(&pkt->list, &pd->cdrw.pkt_free_list);
nr_packets--;
}
return 1;
}
static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
{
struct rb_node *n = rb_next(&node->rb_node);
if (!n)
return NULL;
return rb_entry(n, struct pkt_rb_node, rb_node);
}
static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
rb_erase(&node->rb_node, &pd->bio_queue);
mempool_free(node, &pd->rb_pool);
pd->bio_queue_size--;
BUG_ON(pd->bio_queue_size < 0);
}
/*
* Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
*/
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
{
struct rb_node *n = pd->bio_queue.rb_node;
struct rb_node *next;
struct pkt_rb_node *tmp;
if (!n) {
BUG_ON(pd->bio_queue_size > 0);
return NULL;
}
for (;;) {
tmp = rb_entry(n, struct pkt_rb_node, rb_node);
if (s <= tmp->bio->bi_iter.bi_sector)
next = n->rb_left;
else
next = n->rb_right;
if (!next)
break;
n = next;
}
if (s > tmp->bio->bi_iter.bi_sector) {
tmp = pkt_rbtree_next(tmp);
if (!tmp)
return NULL;
}
BUG_ON(s > tmp->bio->bi_iter.bi_sector);
return tmp;
}
/*
* Insert a node into the pd->bio_queue rb tree.
*/
static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
struct rb_node **p = &pd->bio_queue.rb_node;
struct rb_node *parent = NULL;
sector_t s = node->bio->bi_iter.bi_sector;
struct pkt_rb_node *tmp;
while (*p) {
parent = *p;
tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
if (s < tmp->bio->bi_iter.bi_sector)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
rb_link_node(&node->rb_node, parent, p);
rb_insert_color(&node->rb_node, &pd->bio_queue);
pd->bio_queue_size++;
}
/*
* Send a packet_command to the underlying block device and
* wait for completion.
*/
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
struct request_queue *q = bdev_get_queue(pd->bdev);
struct scsi_cmnd *scmd;
struct request *rq;
int ret = 0;
rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
scmd = blk_mq_rq_to_pdu(rq);
if (cgc->buflen) {
ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
GFP_NOIO);
if (ret)
goto out;
}
scmd->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
memcpy(scmd->cmnd, cgc->cmd, CDROM_PACKET_SIZE);
rq->timeout = 60*HZ;
if (cgc->quiet)
rq->rq_flags |= RQF_QUIET;
blk_execute_rq(rq, false);
if (scmd->result)
ret = -EIO;
out:
blk_mq_free_request(rq);
return ret;
}
static const char *sense_key_string(__u8 index)
{
static const char * const info[] = {
"No sense", "Recovered error", "Not ready",
"Medium error", "Hardware error", "Illegal request",
"Unit attention", "Data protect", "Blank check",
};
return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
}
/*
* A generic sense dump / resolve mechanism should be implemented across
* all ATAPI + SCSI devices.
*/
static void pkt_dump_sense(struct pktcdvd_device *pd,
struct packet_command *cgc)
{
struct device *ddev = disk_to_dev(pd->disk);
struct scsi_sense_hdr *sshdr = cgc->sshdr;
if (sshdr)
dev_err(ddev, "%*ph - sense %02x.%02x.%02x (%s)\n",
CDROM_PACKET_SIZE, cgc->cmd,
sshdr->sense_key, sshdr->asc, sshdr->ascq,
sense_key_string(sshdr->sense_key));
else
dev_err(ddev, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}
/*
* flush the drive cache to media
*/
static int pkt_flush_cache(struct pktcdvd_device *pd)
{
struct packet_command cgc;
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.cmd[0] = GPCMD_FLUSH_CACHE;
cgc.quiet = 1;
/*
 * the IMMED bit -- we default to not setting it; although setting it
 * would allow a much faster close, leaving it clear is safer
 */
#if 0
cgc.cmd[1] = 1 << 1;
#endif
return pkt_generic_packet(pd, &cgc);
}
/*
 * speeds are given in kB/s, as expected by the SET CD SPEED command;
 * MAX_SPEED (0xffff) requests the drive's maximum
 */
static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
unsigned write_speed, unsigned read_speed)
{
struct packet_command cgc;
struct scsi_sense_hdr sshdr;
int ret;
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.sshdr = &sshdr;
cgc.cmd[0] = GPCMD_SET_SPEED;
put_unaligned_be16(read_speed, &cgc.cmd[2]);
put_unaligned_be16(write_speed, &cgc.cmd[4]);
ret = pkt_generic_packet(pd, &cgc);
if (ret)
pkt_dump_sense(pd, &cgc);
return ret;
}
/*
* Queue a bio for processing by the low-level CD device. Must be called
* from process context.
*/
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
spin_lock(&pd->iosched.lock);
if (bio_data_dir(bio) == READ)
bio_list_add(&pd->iosched.read_queue, bio);
else
bio_list_add(&pd->iosched.write_queue, bio);
spin_unlock(&pd->iosched.lock);
atomic_set(&pd->iosched.attention, 1);
wake_up(&pd->wqueue);
}
/*
* Process the queued read/write requests. This function handles special
* requirements for CDRW drives:
* - A cache flush command must be inserted before a read request if the
* previous request was a write.
* - Switching between reading and writing is slow, so don't do it more often
* than necessary.
* - Optimize for throughput at the expense of latency. This means that streaming
* writes will never be interrupted by a read, but if the drive has to seek
* before the next write, switch to reading instead if there are any pending
* read requests.
* - Set the read speed according to current usage pattern. When only reading
* from the device, it's best to use the highest possible read speed, but
* when switching often between reading and writing, it's better to have the
* same read and write speeds.
*/
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{
struct device *ddev = disk_to_dev(pd->disk);
if (atomic_read(&pd->iosched.attention) == 0)
return;
atomic_set(&pd->iosched.attention, 0);
for (;;) {
struct bio *bio;
int reads_queued, writes_queued;
spin_lock(&pd->iosched.lock);
reads_queued = !bio_list_empty(&pd->iosched.read_queue);
writes_queued = !bio_list_empty(&pd->iosched.write_queue);
spin_unlock(&pd->iosched.lock);
if (!reads_queued && !writes_queued)
break;
if (pd->iosched.writing) {
int need_write_seek = 1;
spin_lock(&pd->iosched.lock);
bio = bio_list_peek(&pd->iosched.write_queue);
spin_unlock(&pd->iosched.lock);
if (bio && (bio->bi_iter.bi_sector ==
pd->iosched.last_write))
need_write_seek = 0;
if (need_write_seek && reads_queued) {
if (atomic_read(&pd->cdrw.pending_bios) > 0) {
dev_dbg(ddev, "write, waiting\n");
break;
}
pkt_flush_cache(pd);
pd->iosched.writing = 0;
}
} else {
if (!reads_queued && writes_queued) {
if (atomic_read(&pd->cdrw.pending_bios) > 0) {
dev_dbg(ddev, "read, waiting\n");
break;
}
pd->iosched.writing = 1;
}
}
spin_lock(&pd->iosched.lock);
if (pd->iosched.writing)
bio = bio_list_pop(&pd->iosched.write_queue);
else
bio = bio_list_pop(&pd->iosched.read_queue);
spin_unlock(&pd->iosched.lock);
if (!bio)
continue;
if (bio_data_dir(bio) == READ)
pd->iosched.successive_reads +=
bio->bi_iter.bi_size >> 10;
else {
pd->iosched.successive_reads = 0;
pd->iosched.last_write = bio_end_sector(bio);
}
if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
if (pd->read_speed == pd->write_speed) {
pd->read_speed = MAX_SPEED;
pkt_set_speed(pd, pd->write_speed, pd->read_speed);
}
} else {
if (pd->read_speed != pd->write_speed) {
pd->read_speed = pd->write_speed;
pkt_set_speed(pd, pd->write_speed, pd->read_speed);
}
}
atomic_inc(&pd->cdrw.pending_bios);
submit_bio_noacct(bio);
}
}
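/*
 * Speed heuristic note (descriptive): once HI_SPEED_SWITCH kilobytes
 * have been read with no intervening write, the loop above raises the
 * read speed to MAX_SPEED; the next write clears successive_reads and
 * drops the read speed back to match the write speed.
 */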
/*
* Special care is needed if the underlying block device has a small
* max_phys_segments value.
*/
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
struct device *ddev = disk_to_dev(pd->disk);
if ((pd->settings.size << 9) / CD_FRAMESIZE <= queue_max_segments(q)) {
/*
* The cdrom device can handle one segment/frame
*/
clear_bit(PACKET_MERGE_SEGS, &pd->flags);
return 0;
}
if ((pd->settings.size << 9) / PAGE_SIZE <= queue_max_segments(q)) {
/*
* We can handle this case at the expense of some extra memory
* copies during write operations
*/
set_bit(PACKET_MERGE_SEGS, &pd->flags);
return 0;
}
dev_err(ddev, "cdrom max_phys_segments too small\n");
return -EIO;
}
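/*
 * Worked numbers (illustrative): a 32-frame packet spans
 * (128 << 9) = 65536 bytes, i.e. 32 frames of CD_FRAMESIZE (2048). With
 * queue_max_segments(q) >= 32 every frame gets its own segment; with
 * 4 KiB pages, >= 16 segments still works via PACKET_MERGE_SEGS at the
 * cost of extra copies; anything smaller fails with -EIO.
 */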
static void pkt_end_io_read(struct bio *bio)
{
struct packet_data *pkt = bio->bi_private;
struct pktcdvd_device *pd = pkt->pd;
BUG_ON(!pd);
dev_dbg(disk_to_dev(pd->disk), "bio=%p sec0=%llx sec=%llx err=%d\n",
bio, pkt->sector, bio->bi_iter.bi_sector, bio->bi_status);
if (bio->bi_status)
atomic_inc(&pkt->io_errors);
bio_uninit(bio);
if (atomic_dec_and_test(&pkt->io_wait)) {
atomic_inc(&pkt->run_sm);
wake_up(&pd->wqueue);
}
pkt_bio_finished(pd);
}
static void pkt_end_io_packet_write(struct bio *bio)
{
struct packet_data *pkt = bio->bi_private;
struct pktcdvd_device *pd = pkt->pd;
BUG_ON(!pd);
dev_dbg(disk_to_dev(pd->disk), "id=%d, err=%d\n", pkt->id, bio->bi_status);
pd->stats.pkt_ended++;
bio_uninit(bio);
pkt_bio_finished(pd);
atomic_dec(&pkt->io_wait);
atomic_inc(&pkt->run_sm);
wake_up(&pd->wqueue);
}
/*
* Schedule reads for the holes in a packet
*/
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
struct device *ddev = disk_to_dev(pd->disk);
int frames_read = 0;
struct bio *bio;
int f;
char written[PACKET_MAX_SIZE];
BUG_ON(bio_list_empty(&pkt->orig_bios));
atomic_set(&pkt->io_wait, 0);
atomic_set(&pkt->io_errors, 0);
/*
* Figure out which frames we need to read before we can write.
*/
memset(written, 0, sizeof(written));
spin_lock(&pkt->lock);
bio_list_for_each(bio, &pkt->orig_bios) {
int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
(CD_FRAMESIZE >> 9);
int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
BUG_ON(first_frame < 0);
BUG_ON(first_frame + num_frames > pkt->frames);
for (f = first_frame; f < first_frame + num_frames; f++)
written[f] = 1;
}
spin_unlock(&pkt->lock);
if (pkt->cache_valid) {
dev_dbg(ddev, "zone %llx cached\n", pkt->sector);
goto out_account;
}
/*
* Schedule reads for missing parts of the packet.
*/
for (f = 0; f < pkt->frames; f++) {
int p, offset;
if (written[f])
continue;
bio = pkt->r_bios[f];
bio_init(bio, pd->bdev, bio->bi_inline_vecs, 1, REQ_OP_READ);
bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
bio->bi_end_io = pkt_end_io_read;
bio->bi_private = pkt;
p = (f * CD_FRAMESIZE) / PAGE_SIZE;
offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
dev_dbg(ddev, "Adding frame %d, page:%p offs:%d\n", f,
pkt->pages[p], offset);
if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
BUG();
atomic_inc(&pkt->io_wait);
pkt_queue_bio(pd, bio);
frames_read++;
}
out_account:
dev_dbg(ddev, "need %d frames for zone %llx\n", frames_read, pkt->sector);
pd->stats.pkt_started++;
pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
}
/*
* Find a packet matching zone, or the least recently used packet if
* there is no match.
*/
static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
{
struct packet_data *pkt;
list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
list_del_init(&pkt->list);
if (pkt->sector != zone)
pkt->cache_valid = 0;
return pkt;
}
}
BUG();
return NULL;
}
static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
if (pkt->cache_valid) {
list_add(&pkt->list, &pd->cdrw.pkt_free_list);
} else {
list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
}
}
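/*
 * LRU note (descriptive): packets whose cache is still valid are
 * returned to the head of the free list so pkt_get_packet_data() can
 * match them by zone and reuse the cached frames; invalidated packets
 * go to the tail, which is where the eviction fallback is taken from.
 */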
static inline void pkt_set_state(struct device *ddev, struct packet_data *pkt,
enum packet_data_state state)
{
static const char *state_name[] = {
"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
};
enum packet_data_state old_state = pkt->state;
dev_dbg(ddev, "pkt %2d : s=%6llx %s -> %s\n",
pkt->id, pkt->sector, state_name[old_state], state_name[state]);
pkt->state = state;
}
/*
* Scan the work queue to see if we can start a new packet.
* returns non-zero if any work was done.
*/
static int pkt_handle_queue(struct pktcdvd_device *pd)
{
struct device *ddev = disk_to_dev(pd->disk);
struct packet_data *pkt, *p;
struct bio *bio = NULL;
sector_t zone = 0; /* Suppress gcc warning */
struct pkt_rb_node *node, *first_node;
struct rb_node *n;
atomic_set(&pd->scan_queue, 0);
if (list_empty(&pd->cdrw.pkt_free_list)) {
dev_dbg(ddev, "no pkt\n");
return 0;
}
/*
* Try to find a zone we are not already working on.
*/
spin_lock(&pd->lock);
first_node = pkt_rbtree_find(pd, pd->current_sector);
if (!first_node) {
n = rb_first(&pd->bio_queue);
if (n)
first_node = rb_entry(n, struct pkt_rb_node, rb_node);
}
node = first_node;
while (node) {
bio = node->bio;
zone = get_zone(bio->bi_iter.bi_sector, pd);
list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
if (p->sector == zone) {
bio = NULL;
goto try_next_bio;
}
}
break;
try_next_bio:
node = pkt_rbtree_next(node);
if (!node) {
n = rb_first(&pd->bio_queue);
if (n)
node = rb_entry(n, struct pkt_rb_node, rb_node);
}
if (node == first_node)
node = NULL;
}
spin_unlock(&pd->lock);
if (!bio) {
dev_dbg(ddev, "no bio\n");
return 0;
}
pkt = pkt_get_packet_data(pd, zone);
pd->current_sector = zone + pd->settings.size;
pkt->sector = zone;
BUG_ON(pkt->frames != pd->settings.size >> 2);
pkt->write_size = 0;
/*
* Scan work queue for bios in the same zone and link them
* to this packet.
*/
spin_lock(&pd->lock);
dev_dbg(ddev, "looking for zone %llx\n", zone);
while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
sector_t tmp = get_zone(node->bio->bi_iter.bi_sector, pd);
bio = node->bio;
dev_dbg(ddev, "found zone=%llx\n", tmp);
if (tmp != zone)
break;
pkt_rbtree_erase(pd, node);
spin_lock(&pkt->lock);
bio_list_add(&pkt->orig_bios, bio);
pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
spin_unlock(&pkt->lock);
}
	/* check the write congestion marks, and if the bio queue size has
	 * dropped below the "off" mark, wake up any waiters
	 */
if (pd->congested &&
pd->bio_queue_size <= pd->write_congestion_off) {
pd->congested = false;
wake_up_var(&pd->congested);
}
spin_unlock(&pd->lock);
pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
pkt_set_state(ddev, pkt, PACKET_WAITING_STATE);
atomic_set(&pkt->run_sm, 1);
spin_lock(&pd->cdrw.active_list_lock);
list_add(&pkt->list, &pd->cdrw.pkt_active_list);
spin_unlock(&pd->cdrw.active_list_lock);
return 1;
}
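/*
 * Scan note (descriptive): the zone search above behaves like a one-way
 * elevator. It starts at pd->current_sector, walks the rb tree towards
 * higher sectors, wraps around to rb_first() at the end, and gives up
 * once it is back at the first candidate, skipping any zone that
 * already has an active packet.
 */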
/**
* bio_list_copy_data - copy contents of data buffers from one chain of bios to
* another
 * @dst: destination bio list
 * @src: source bio list
*
* Stops when it reaches the end of either the @src list or @dst list - that is,
* copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
* bios).
*/
static void bio_list_copy_data(struct bio *dst, struct bio *src)
{
struct bvec_iter src_iter = src->bi_iter;
struct bvec_iter dst_iter = dst->bi_iter;
while (1) {
if (!src_iter.bi_size) {
src = src->bi_next;
if (!src)
break;
src_iter = src->bi_iter;
}
if (!dst_iter.bi_size) {
dst = dst->bi_next;
if (!dst)
break;
dst_iter = dst->bi_iter;
}
bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
}
}
/*
* Assemble a bio to write one packet and queue the bio for processing
* by the underlying block device.
*/
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
struct device *ddev = disk_to_dev(pd->disk);
int f;
bio_init(pkt->w_bio, pd->bdev, pkt->w_bio->bi_inline_vecs, pkt->frames,
REQ_OP_WRITE);
pkt->w_bio->bi_iter.bi_sector = pkt->sector;
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
pkt->w_bio->bi_private = pkt;
/* XXX: locking? */
for (f = 0; f < pkt->frames; f++) {
struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
BUG();
}
dev_dbg(ddev, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
/*
* Fill-in bvec with data from orig_bios.
*/
spin_lock(&pkt->lock);
bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);
pkt_set_state(ddev, pkt, PACKET_WRITE_WAIT_STATE);
spin_unlock(&pkt->lock);
dev_dbg(ddev, "Writing %d frames for zone %llx\n", pkt->write_size, pkt->sector);
if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
pkt->cache_valid = 1;
else
pkt->cache_valid = 0;
/* Start the write request */
atomic_set(&pkt->io_wait, 1);
pkt_queue_bio(pd, pkt->w_bio);
}
static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
{
struct bio *bio;
if (status)
pkt->cache_valid = 0;
/* Finish all bios corresponding to this packet */
while ((bio = bio_list_pop(&pkt->orig_bios))) {
bio->bi_status = status;
bio_endio(bio);
}
}
static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
{
struct device *ddev = disk_to_dev(pd->disk);
dev_dbg(ddev, "pkt %d\n", pkt->id);
for (;;) {
switch (pkt->state) {
case PACKET_WAITING_STATE:
if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
return;
pkt->sleep_time = 0;
pkt_gather_data(pd, pkt);
pkt_set_state(ddev, pkt, PACKET_READ_WAIT_STATE);
break;
case PACKET_READ_WAIT_STATE:
if (atomic_read(&pkt->io_wait) > 0)
return;
if (atomic_read(&pkt->io_errors) > 0) {
pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE);
} else {
pkt_start_write(pd, pkt);
}
break;
case PACKET_WRITE_WAIT_STATE:
if (atomic_read(&pkt->io_wait) > 0)
return;
if (!pkt->w_bio->bi_status) {
pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE);
} else {
pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE);
}
break;
case PACKET_RECOVERY_STATE:
dev_dbg(ddev, "No recovery possible\n");
pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE);
break;
case PACKET_FINISHED_STATE:
pkt_finish_packet(pkt, pkt->w_bio->bi_status);
return;
default:
BUG();
break;
}
}
}
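/*
 * State flow (descriptive): WAITING gathers bios until the packet is
 * full or sleep_time runs out, READ_WAIT waits for the read-gather
 * completions, and WRITE_WAIT waits for the packet write to finish.
 * Errors divert to RECOVERY, which cannot actually recover and simply
 * moves the packet on to FINISHED.
 */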
static void pkt_handle_packets(struct pktcdvd_device *pd)
{
struct device *ddev = disk_to_dev(pd->disk);
struct packet_data *pkt, *next;
/*
* Run state machine for active packets
*/
list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
if (atomic_read(&pkt->run_sm) > 0) {
atomic_set(&pkt->run_sm, 0);
pkt_run_state_machine(pd, pkt);
}
}
/*
* Move no longer active packets to the free list
*/
spin_lock(&pd->cdrw.active_list_lock);
list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
if (pkt->state == PACKET_FINISHED_STATE) {
list_del(&pkt->list);
pkt_put_packet_data(pd, pkt);
pkt_set_state(ddev, pkt, PACKET_IDLE_STATE);
atomic_set(&pd->scan_queue, 1);
}
}
spin_unlock(&pd->cdrw.active_list_lock);
}
/*
* kcdrwd is woken up when writes have been queued for one of our
* registered devices
*/
static int kcdrwd(void *foobar)
{
struct pktcdvd_device *pd = foobar;
struct device *ddev = disk_to_dev(pd->disk);
struct packet_data *pkt;
int states[PACKET_NUM_STATES];
long min_sleep_time, residue;
set_user_nice(current, MIN_NICE);
set_freezable();
for (;;) {
DECLARE_WAITQUEUE(wait, current);
/*
* Wait until there is something to do
*/
add_wait_queue(&pd->wqueue, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
/* Check if we need to run pkt_handle_queue */
if (atomic_read(&pd->scan_queue) > 0)
goto work_to_do;
/* Check if we need to run the state machine for some packet */
list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
if (atomic_read(&pkt->run_sm) > 0)
goto work_to_do;
}
/* Check if we need to process the iosched queues */
if (atomic_read(&pd->iosched.attention) != 0)
goto work_to_do;
/* Otherwise, go to sleep */
pkt_count_states(pd, states);
dev_dbg(ddev, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
states[0], states[1], states[2], states[3], states[4], states[5]);
min_sleep_time = MAX_SCHEDULE_TIMEOUT;
list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
min_sleep_time = pkt->sleep_time;
}
dev_dbg(ddev, "sleeping\n");
residue = schedule_timeout(min_sleep_time);
dev_dbg(ddev, "wake up\n");
/* make swsusp happy with our thread */
try_to_freeze();
list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
if (!pkt->sleep_time)
continue;
pkt->sleep_time -= min_sleep_time - residue;
if (pkt->sleep_time <= 0) {
pkt->sleep_time = 0;
atomic_inc(&pkt->run_sm);
}
}
if (kthread_should_stop())
break;
}
work_to_do:
set_current_state(TASK_RUNNING);
remove_wait_queue(&pd->wqueue, &wait);
if (kthread_should_stop())
break;
/*
* if pkt_handle_queue returns true, we can queue
* another request.
*/
while (pkt_handle_queue(pd))
;
/*
* Handle packet state machine
*/
pkt_handle_packets(pd);
/*
* Handle iosched queues
*/
pkt_iosched_process_queue(pd);
}
return 0;
}
static void pkt_print_settings(struct pktcdvd_device *pd)
{
dev_info(disk_to_dev(pd->disk), "%s packets, %u blocks, Mode-%c disc\n",
pd->settings.fp ? "Fixed" : "Variable",
pd->settings.size >> 2,
		 pd->settings.block_mode == PACKET_BLOCK_MODE1 ? '1' : '2');
}
static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
{
memset(cgc->cmd, 0, sizeof(cgc->cmd));
cgc->cmd[0] = GPCMD_MODE_SENSE_10;
cgc->cmd[2] = page_code | (page_control << 6);
put_unaligned_be16(cgc->buflen, &cgc->cmd[7]);
cgc->data_direction = CGC_DATA_READ;
return pkt_generic_packet(pd, cgc);
}
static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
{
memset(cgc->cmd, 0, sizeof(cgc->cmd));
memset(cgc->buffer, 0, 2);
cgc->cmd[0] = GPCMD_MODE_SELECT_10;
cgc->cmd[1] = 0x10; /* PF */
put_unaligned_be16(cgc->buflen, &cgc->cmd[7]);
cgc->data_direction = CGC_DATA_WRITE;
return pkt_generic_packet(pd, cgc);
}
static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
{
struct packet_command cgc;
int ret;
/* set up command and get the disc info */
init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
cgc.cmd[0] = GPCMD_READ_DISC_INFO;
cgc.cmd[8] = cgc.buflen = 2;
cgc.quiet = 1;
ret = pkt_generic_packet(pd, &cgc);
if (ret)
return ret;
/* not all drives have the same disc_info length, so requeue
* packet with the length the drive tells us it can supply
*/
cgc.buflen = be16_to_cpu(di->disc_information_length) +
sizeof(di->disc_information_length);
if (cgc.buflen > sizeof(disc_information))
cgc.buflen = sizeof(disc_information);
cgc.cmd[8] = cgc.buflen;
return pkt_generic_packet(pd, &cgc);
}
static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
{
struct packet_command cgc;
int ret;
init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
cgc.cmd[1] = type & 3;
put_unaligned_be16(track, &cgc.cmd[4]);
cgc.cmd[8] = 8;
cgc.quiet = 1;
ret = pkt_generic_packet(pd, &cgc);
if (ret)
return ret;
cgc.buflen = be16_to_cpu(ti->track_information_length) +
sizeof(ti->track_information_length);
if (cgc.buflen > sizeof(track_information))
cgc.buflen = sizeof(track_information);
cgc.cmd[8] = cgc.buflen;
return pkt_generic_packet(pd, &cgc);
}
static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
long *last_written)
{
disc_information di;
track_information ti;
__u32 last_track;
int ret;
ret = pkt_get_disc_info(pd, &di);
if (ret)
return ret;
last_track = (di.last_track_msb << 8) | di.last_track_lsb;
ret = pkt_get_track_info(pd, last_track, 1, &ti);
if (ret)
return ret;
/* if this track is blank, try the previous. */
if (ti.blank) {
last_track--;
ret = pkt_get_track_info(pd, last_track, 1, &ti);
if (ret)
return ret;
}
/* if last recorded field is valid, return it. */
if (ti.lra_v) {
*last_written = be32_to_cpu(ti.last_rec_address);
} else {
/* make it up instead */
*last_written = be32_to_cpu(ti.track_start) +
be32_to_cpu(ti.track_size);
if (ti.free_blocks)
*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
}
return 0;
}
/*
* write mode select package based on pd->settings
*/
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
{
struct device *ddev = disk_to_dev(pd->disk);
struct packet_command cgc;
struct scsi_sense_hdr sshdr;
write_param_page *wp;
char buffer[128];
int ret, size;
/* doesn't apply to DVD+RW or DVD-RAM */
if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
return 0;
memset(buffer, 0, sizeof(buffer));
init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
cgc.sshdr = &sshdr;
ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
if (ret) {
pkt_dump_sense(pd, &cgc);
return ret;
}
size = 2 + get_unaligned_be16(&buffer[0]);
pd->mode_offset = get_unaligned_be16(&buffer[6]);
if (size > sizeof(buffer))
size = sizeof(buffer);
/*
* now get it all
*/
init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
cgc.sshdr = &sshdr;
ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
if (ret) {
pkt_dump_sense(pd, &cgc);
return ret;
}
/*
* write page is offset header + block descriptor length
*/
wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
wp->fp = pd->settings.fp;
wp->track_mode = pd->settings.track_mode;
wp->write_type = pd->settings.write_type;
wp->data_block_type = pd->settings.block_mode;
wp->multi_session = 0;
#ifdef PACKET_USE_LS
wp->link_size = 7;
wp->ls_v = 1;
#endif
if (wp->data_block_type == PACKET_BLOCK_MODE1) {
wp->session_format = 0;
wp->subhdr2 = 0x20;
} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
wp->session_format = 0x20;
wp->subhdr2 = 8;
#if 0
wp->mcn[0] = 0x80;
memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
#endif
} else {
/*
* paranoia
*/
dev_err(ddev, "write mode wrong %d\n", wp->data_block_type);
return 1;
}
wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
cgc.buflen = cgc.cmd[8] = size;
ret = pkt_mode_select(pd, &cgc);
if (ret) {
pkt_dump_sense(pd, &cgc);
return ret;
}
pkt_print_settings(pd);
return 0;
}
/*
* 1 -- we can write to this track, 0 -- we can't
*/
static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
{
struct device *ddev = disk_to_dev(pd->disk);
switch (pd->mmc3_profile) {
case 0x1a: /* DVD+RW */
case 0x12: /* DVD-RAM */
/* The track is always writable on DVD+RW/DVD-RAM */
return 1;
default:
break;
}
if (!ti->packet || !ti->fp)
return 0;
/*
* "good" settings as per Mt Fuji.
*/
if (ti->rt == 0 && ti->blank == 0)
return 1;
if (ti->rt == 0 && ti->blank == 1)
return 1;
if (ti->rt == 1 && ti->blank == 0)
return 1;
dev_err(ddev, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
return 0;
}
/*
* 1 -- we can write to this disc, 0 -- we can't
*/
static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
{
struct device *ddev = disk_to_dev(pd->disk);
switch (pd->mmc3_profile) {
case 0x0a: /* CD-RW */
case 0xffff: /* MMC3 not supported */
break;
case 0x1a: /* DVD+RW */
case 0x13: /* DVD-RW */
case 0x12: /* DVD-RAM */
return 1;
default:
dev_dbg(ddev, "Wrong disc profile (%x)\n", pd->mmc3_profile);
return 0;
}
	/*
	 * for disc type 0xff we should probably reserve a new track,
	 * but it is not certain; this is probably best left to user space.
	 */
if (di->disc_type == 0xff) {
dev_notice(ddev, "unknown disc - no track?\n");
return 0;
}
if (di->disc_type != 0x20 && di->disc_type != 0) {
dev_err(ddev, "wrong disc type (%x)\n", di->disc_type);
return 0;
}
if (di->erasable == 0) {
dev_err(ddev, "disc not erasable\n");
return 0;
}
if (di->border_status == PACKET_SESSION_RESERVED) {
dev_err(ddev, "can't write to last track (reserved)\n");
return 0;
}
return 1;
}
static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
{
struct device *ddev = disk_to_dev(pd->disk);
struct packet_command cgc;
unsigned char buf[12];
disc_information di;
track_information ti;
int ret, track;
init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
cgc.cmd[8] = 8;
ret = pkt_generic_packet(pd, &cgc);
pd->mmc3_profile = ret ? 0xffff : get_unaligned_be16(&buf[6]);
memset(&di, 0, sizeof(disc_information));
memset(&ti, 0, sizeof(track_information));
ret = pkt_get_disc_info(pd, &di);
if (ret) {
dev_err(ddev, "failed get_disc\n");
return ret;
}
if (!pkt_writable_disc(pd, &di))
return -EROFS;
pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
ret = pkt_get_track_info(pd, track, 1, &ti);
if (ret) {
dev_err(ddev, "failed get_track\n");
return ret;
}
if (!pkt_writable_track(pd, &ti)) {
dev_err(ddev, "can't write to this track\n");
return -EROFS;
}
/*
* we keep packet size in 512 byte units, makes it easier to
* deal with request calculations.
*/
pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
if (pd->settings.size == 0) {
dev_notice(ddev, "detected zero packet size!\n");
return -ENXIO;
}
if (pd->settings.size > PACKET_MAX_SECTORS) {
dev_err(ddev, "packet size is too big\n");
return -EROFS;
}
pd->settings.fp = ti.fp;
pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
if (ti.nwa_v) {
pd->nwa = be32_to_cpu(ti.next_writable);
set_bit(PACKET_NWA_VALID, &pd->flags);
}
/*
* in theory we could use lra on -RW media as well and just zero
* blocks that haven't been written yet, but in practice that
* is just a no-go. we'll use that for -R, naturally.
*/
if (ti.lra_v) {
pd->lra = be32_to_cpu(ti.last_rec_address);
set_bit(PACKET_LRA_VALID, &pd->flags);
} else {
pd->lra = 0xffffffff;
set_bit(PACKET_LRA_VALID, &pd->flags);
}
/*
* fine for now
*/
pd->settings.link_loss = 7;
pd->settings.write_type = 0; /* packet */
pd->settings.track_mode = ti.track_mode;
/*
* mode1 or mode2 disc
*/
switch (ti.data_mode) {
case PACKET_MODE1:
pd->settings.block_mode = PACKET_BLOCK_MODE1;
break;
case PACKET_MODE2:
pd->settings.block_mode = PACKET_BLOCK_MODE2;
break;
default:
dev_err(ddev, "unknown data mode\n");
return -EROFS;
}
return 0;
}
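/*
 * Unit note (illustrative): settings.size is kept in 512-byte sectors,
 * so a 64 KiB fixed packet reads back as fixed_packet_size == 32 frames
 * and settings.size == 32 << 2 == 128 sectors; pkt->frames is then
 * settings.size >> 2 == 32 frames of CD_FRAMESIZE (2048) bytes each.
 */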
/*
* enable/disable write caching on drive
*/
static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd)
{
struct device *ddev = disk_to_dev(pd->disk);
struct packet_command cgc;
struct scsi_sense_hdr sshdr;
unsigned char buf[64];
bool set = IS_ENABLED(CONFIG_CDROM_PKTCDVD_WCACHE);
int ret;
init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
cgc.sshdr = &sshdr;
cgc.buflen = pd->mode_offset + 12;
/*
* caching mode page might not be there, so quiet this command
*/
cgc.quiet = 1;
ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
if (ret)
return ret;
/*
* use drive write caching -- we need deferred error handling to be
* able to successfully recover with this option (drive will return good
* status as soon as the cdb is validated).
*/
buf[pd->mode_offset + 10] |= (set << 2);
cgc.buflen = cgc.cmd[8] = 2 + get_unaligned_be16(&buf[0]);
ret = pkt_mode_select(pd, &cgc);
if (ret) {
dev_err(ddev, "write caching control failed\n");
pkt_dump_sense(pd, &cgc);
	} else if (set)
dev_notice(ddev, "enabled write caching\n");
return ret;
}
static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
{
struct packet_command cgc;
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
cgc.cmd[4] = lockflag ? 1 : 0;
return pkt_generic_packet(pd, &cgc);
}
/*
* Returns drive maximum write speed
*/
static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
unsigned *write_speed)
{
struct packet_command cgc;
struct scsi_sense_hdr sshdr;
unsigned char buf[256+18];
unsigned char *cap_buf;
int ret, offset;
cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
cgc.sshdr = &sshdr;
ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
if (ret) {
cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
sizeof(struct mode_page_header);
ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
if (ret) {
pkt_dump_sense(pd, &cgc);
return ret;
}
}
offset = 20; /* Obsoleted field, used by older drives */
if (cap_buf[1] >= 28)
offset = 28; /* Current write speed selected */
if (cap_buf[1] >= 30) {
/* If the drive reports at least one "Logical Unit Write
* Speed Performance Descriptor Block", use the information
* in the first block. (contains the highest speed)
*/
int num_spdb = get_unaligned_be16(&cap_buf[30]);
if (num_spdb > 0)
offset = 34;
}
*write_speed = get_unaligned_be16(&cap_buf[offset]);
return 0;
}
/* These tables are from cdrecord - I don't have the Orange Book */
/* standard speed CD-RW (1-4x) */
static char clv_to_speed[16] = {
/* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* high speed CD-RW (up to 10x) */
static char hs_clv_to_speed[16] = {
/* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* ultra high speed CD-RW */
static char us_clv_to_speed[16] = {
/* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
};
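/*
 * Unit note (descriptive): the tables map the 4-bit ATIP A1 speed code
 * to an x-factor (e.g. 4 means 4x). For CD, 1x corresponds to 176.4 kB/s
 * of raw data (2352 bytes x 75 sectors/s), which matches the "* 177"
 * x-factor-to-kB/s conversion in pkt_open_write() below.
 */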
/*
* reads the maximum media speed from ATIP
*/
static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
unsigned *speed)
{
struct device *ddev = disk_to_dev(pd->disk);
struct packet_command cgc;
struct scsi_sense_hdr sshdr;
unsigned char buf[64];
unsigned int size, st, sp;
int ret;
init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
cgc.sshdr = &sshdr;
cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
cgc.cmd[1] = 2;
cgc.cmd[2] = 4; /* READ ATIP */
cgc.cmd[8] = 2;
ret = pkt_generic_packet(pd, &cgc);
if (ret) {
pkt_dump_sense(pd, &cgc);
return ret;
}
size = 2 + get_unaligned_be16(&buf[0]);
if (size > sizeof(buf))
size = sizeof(buf);
init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
cgc.sshdr = &sshdr;
cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
cgc.cmd[1] = 2;
cgc.cmd[2] = 4;
cgc.cmd[8] = size;
ret = pkt_generic_packet(pd, &cgc);
if (ret) {
pkt_dump_sense(pd, &cgc);
return ret;
}
if (!(buf[6] & 0x40)) {
dev_notice(ddev, "disc type is not CD-RW\n");
return 1;
}
if (!(buf[6] & 0x4)) {
dev_notice(ddev, "A1 values on media are not valid, maybe not CDRW?\n");
return 1;
}
st = (buf[6] >> 3) & 0x7; /* disc sub-type */
sp = buf[16] & 0xf; /* max speed from ATIP A1 field */
/* Info from cdrecord */
switch (st) {
case 0: /* standard speed */
*speed = clv_to_speed[sp];
break;
case 1: /* high speed */
*speed = hs_clv_to_speed[sp];
break;
case 2: /* ultra high speed */
*speed = us_clv_to_speed[sp];
break;
default:
dev_notice(ddev, "unknown disc sub-type %d\n", st);
return 1;
}
if (*speed) {
dev_info(ddev, "maximum media speed: %d\n", *speed);
return 0;
} else {
dev_notice(ddev, "unknown speed %d for sub-type %d\n", sp, st);
return 1;
}
}
static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
{
struct device *ddev = disk_to_dev(pd->disk);
struct packet_command cgc;
struct scsi_sense_hdr sshdr;
int ret;
dev_dbg(ddev, "Performing OPC\n");
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.sshdr = &sshdr;
cgc.timeout = 60*HZ;
cgc.cmd[0] = GPCMD_SEND_OPC;
cgc.cmd[1] = 1;
ret = pkt_generic_packet(pd, &cgc);
if (ret)
pkt_dump_sense(pd, &cgc);
return ret;
}
static int pkt_open_write(struct pktcdvd_device *pd)
{
struct device *ddev = disk_to_dev(pd->disk);
int ret;
unsigned int write_speed, media_write_speed, read_speed;
ret = pkt_probe_settings(pd);
if (ret) {
dev_dbg(ddev, "failed probe\n");
return ret;
}
ret = pkt_set_write_settings(pd);
if (ret) {
dev_notice(ddev, "failed saving write settings\n");
return -EIO;
}
pkt_write_caching(pd);
ret = pkt_get_max_speed(pd, &write_speed);
if (ret)
write_speed = 16 * 177;
switch (pd->mmc3_profile) {
case 0x13: /* DVD-RW */
case 0x1a: /* DVD+RW */
case 0x12: /* DVD-RAM */
dev_notice(ddev, "write speed %ukB/s\n", write_speed);
break;
default:
ret = pkt_media_speed(pd, &media_write_speed);
if (ret)
media_write_speed = 16;
write_speed = min(write_speed, media_write_speed * 177);
dev_notice(ddev, "write speed %ux\n", write_speed / 176);
break;
}
read_speed = write_speed;
ret = pkt_set_speed(pd, write_speed, read_speed);
if (ret) {
dev_notice(ddev, "couldn't set write speed\n");
return -EIO;
}
pd->write_speed = write_speed;
pd->read_speed = read_speed;
ret = pkt_perform_opc(pd);
if (ret)
dev_notice(ddev, "Optimum Power Calibration failed\n");
return 0;
}
/*
* called at open time.
*/
static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
{
struct device *ddev = disk_to_dev(pd->disk);
int ret;
long lba;
struct request_queue *q;
struct block_device *bdev;
/*
* We need to re-open the cdrom device without O_NONBLOCK to be able
* to read/write from/to it. It is already opened in O_NONBLOCK mode
* so open should not fail.
*/
bdev = blkdev_get_by_dev(pd->bdev->bd_dev, BLK_OPEN_READ, pd, NULL);
if (IS_ERR(bdev)) {
ret = PTR_ERR(bdev);
goto out;
}
ret = pkt_get_last_written(pd, &lba);
if (ret) {
dev_err(ddev, "pkt_get_last_written failed\n");
goto out_putdev;
}
set_capacity(pd->disk, lba << 2);
set_capacity_and_notify(pd->bdev->bd_disk, lba << 2);
q = bdev_get_queue(pd->bdev);
if (write) {
ret = pkt_open_write(pd);
if (ret)
goto out_putdev;
/*
* Some CDRW drives can not handle writes larger than one packet,
* even if the size is a multiple of the packet size.
*/
blk_queue_max_hw_sectors(q, pd->settings.size);
set_bit(PACKET_WRITABLE, &pd->flags);
} else {
pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
clear_bit(PACKET_WRITABLE, &pd->flags);
}
ret = pkt_set_segment_merging(pd, q);
if (ret)
goto out_putdev;
if (write) {
if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
dev_err(ddev, "not enough memory for buffers\n");
ret = -ENOMEM;
goto out_putdev;
}
dev_info(ddev, "%lukB available on disc\n", lba << 1);
}
return 0;
out_putdev:
blkdev_put(bdev, pd);
out:
return ret;
}
/*
* called when the device is closed. makes sure that the device flushes
* the internal cache before we close.
*/
static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
{
struct device *ddev = disk_to_dev(pd->disk);
if (flush && pkt_flush_cache(pd))
dev_notice(ddev, "not flushing cache\n");
pkt_lock_door(pd, 0);
pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
blkdev_put(pd->bdev, pd);
pkt_shrink_pktlist(pd);
}
static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
if (dev_minor >= MAX_WRITERS)
return NULL;
dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
return pkt_devs[dev_minor];
}
static int pkt_open(struct gendisk *disk, blk_mode_t mode)
{
struct pktcdvd_device *pd = NULL;
int ret;
mutex_lock(&pktcdvd_mutex);
mutex_lock(&ctl_mutex);
pd = pkt_find_dev_from_minor(disk->first_minor);
if (!pd) {
ret = -ENODEV;
goto out;
}
BUG_ON(pd->refcnt < 0);
pd->refcnt++;
if (pd->refcnt > 1) {
if ((mode & BLK_OPEN_WRITE) &&
!test_bit(PACKET_WRITABLE, &pd->flags)) {
ret = -EBUSY;
goto out_dec;
}
} else {
ret = pkt_open_dev(pd, mode & BLK_OPEN_WRITE);
if (ret)
goto out_dec;
/*
* needed here as well, since ext2 (among others) may change
* the blocksize at mount time
*/
set_blocksize(disk->part0, CD_FRAMESIZE);
}
mutex_unlock(&ctl_mutex);
mutex_unlock(&pktcdvd_mutex);
return 0;
out_dec:
pd->refcnt--;
out:
mutex_unlock(&ctl_mutex);
mutex_unlock(&pktcdvd_mutex);
return ret;
}
static void pkt_release(struct gendisk *disk)
{
struct pktcdvd_device *pd = disk->private_data;
mutex_lock(&pktcdvd_mutex);
mutex_lock(&ctl_mutex);
pd->refcnt--;
BUG_ON(pd->refcnt < 0);
if (pd->refcnt == 0) {
int flush = test_bit(PACKET_WRITABLE, &pd->flags);
pkt_release_dev(pd, flush);
}
mutex_unlock(&ctl_mutex);
mutex_unlock(&pktcdvd_mutex);
}
static void pkt_end_io_read_cloned(struct bio *bio)
{
struct packet_stacked_data *psd = bio->bi_private;
struct pktcdvd_device *pd = psd->pd;
psd->bio->bi_status = bio->bi_status;
bio_put(bio);
bio_endio(psd->bio);
mempool_free(psd, &psd_pool);
pkt_bio_finished(pd);
}
static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
{
struct bio *cloned_bio =
bio_alloc_clone(pd->bdev, bio, GFP_NOIO, &pkt_bio_set);
struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
psd->pd = pd;
psd->bio = bio;
cloned_bio->bi_private = psd;
cloned_bio->bi_end_io = pkt_end_io_read_cloned;
pd->stats.secs_r += bio_sectors(bio);
pkt_queue_bio(pd, cloned_bio);
}
static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
{
struct pktcdvd_device *pd = q->queuedata;
sector_t zone;
struct packet_data *pkt;
int was_empty, blocked_bio;
struct pkt_rb_node *node;
zone = get_zone(bio->bi_iter.bi_sector, pd);
/*
* If we find a matching packet in state WAITING or READ_WAIT, we can
* just append this bio to that packet.
*/
spin_lock(&pd->cdrw.active_list_lock);
blocked_bio = 0;
list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
if (pkt->sector == zone) {
spin_lock(&pkt->lock);
if ((pkt->state == PACKET_WAITING_STATE) ||
(pkt->state == PACKET_READ_WAIT_STATE)) {
bio_list_add(&pkt->orig_bios, bio);
pkt->write_size +=
bio->bi_iter.bi_size / CD_FRAMESIZE;
if ((pkt->write_size >= pkt->frames) &&
(pkt->state == PACKET_WAITING_STATE)) {
atomic_inc(&pkt->run_sm);
wake_up(&pd->wqueue);
}
spin_unlock(&pkt->lock);
spin_unlock(&pd->cdrw.active_list_lock);
return;
} else {
blocked_bio = 1;
}
spin_unlock(&pkt->lock);
}
}
spin_unlock(&pd->cdrw.active_list_lock);
/*
* Test if there is enough room left in the bio work queue
* (queue size >= congestion on mark).
* If not, wait till the work queue size is below the congestion off mark.
*/
spin_lock(&pd->lock);
if (pd->write_congestion_on > 0
&& pd->bio_queue_size >= pd->write_congestion_on) {
struct wait_bit_queue_entry wqe;
init_wait_var_entry(&wqe, &pd->congested, 0);
for (;;) {
prepare_to_wait_event(__var_waitqueue(&pd->congested),
&wqe.wq_entry,
TASK_UNINTERRUPTIBLE);
if (pd->bio_queue_size <= pd->write_congestion_off)
break;
pd->congested = true;
spin_unlock(&pd->lock);
schedule();
spin_lock(&pd->lock);
}
}
spin_unlock(&pd->lock);
/*
* No matching packet found. Store the bio in the work queue.
*/
node = mempool_alloc(&pd->rb_pool, GFP_NOIO);
node->bio = bio;
spin_lock(&pd->lock);
BUG_ON(pd->bio_queue_size < 0);
was_empty = (pd->bio_queue_size == 0);
pkt_rbtree_insert(pd, node);
spin_unlock(&pd->lock);
/*
* Wake up the worker thread.
*/
atomic_set(&pd->scan_queue, 1);
if (was_empty) {
/* This wake_up is required for correct operation */
wake_up(&pd->wqueue);
} else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
/*
* This wake up is not required for correct operation,
* but improves performance in some cases.
*/
wake_up(&pd->wqueue);
}
}
static void pkt_submit_bio(struct bio *bio)
{
struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata;
struct device *ddev = disk_to_dev(pd->disk);
struct bio *split;
bio = bio_split_to_limits(bio);
if (!bio)
return;
dev_dbg(ddev, "start = %6llx stop = %6llx\n",
bio->bi_iter.bi_sector, bio_end_sector(bio));
/*
* Clone READ bios so we can have our own bi_end_io callback.
*/
if (bio_data_dir(bio) == READ) {
pkt_make_request_read(pd, bio);
return;
}
if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
dev_notice(ddev, "WRITE for ro device (%llu)\n", bio->bi_iter.bi_sector);
goto end_io;
}
if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
dev_err(ddev, "wrong bio size\n");
goto end_io;
}
do {
sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
if (last_zone != zone) {
BUG_ON(last_zone != zone + pd->settings.size);
split = bio_split(bio, last_zone -
bio->bi_iter.bi_sector,
GFP_NOIO, &pkt_bio_set);
bio_chain(split, bio);
} else {
split = bio;
}
pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);
} while (split != bio);
return;
end_io:
bio_io_error(bio);
}
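/*
 * Splitting example (illustrative): with 128-sector zones, a WRITE of
 * sectors 120..135 straddles a zone boundary; the loop above splits it
 * at sector 128, chains the 120..127 part to the parent bio, and goes
 * round once more to queue the 128..135 remainder in its own zone.
 */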
static void pkt_init_queue(struct pktcdvd_device *pd)
{
struct request_queue *q = pd->disk->queue;
blk_queue_logical_block_size(q, CD_FRAMESIZE);
blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
q->queuedata = pd;
}
static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
struct device *ddev = disk_to_dev(pd->disk);
int i;
struct block_device *bdev;
struct scsi_device *sdev;
if (pd->pkt_dev == dev) {
dev_err(ddev, "recursive setup not allowed\n");
return -EBUSY;
}
for (i = 0; i < MAX_WRITERS; i++) {
struct pktcdvd_device *pd2 = pkt_devs[i];
if (!pd2)
continue;
if (pd2->bdev->bd_dev == dev) {
dev_err(ddev, "%pg already setup\n", pd2->bdev);
return -EBUSY;
}
if (pd2->pkt_dev == dev) {
dev_err(ddev, "can't chain pktcdvd devices\n");
return -EBUSY;
}
}
bdev = blkdev_get_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY, NULL,
NULL);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
sdev = scsi_device_from_queue(bdev->bd_disk->queue);
if (!sdev) {
blkdev_put(bdev, NULL);
return -EINVAL;
}
put_device(&sdev->sdev_gendev);
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
pd->bdev = bdev;
set_blocksize(bdev, CD_FRAMESIZE);
pkt_init_queue(pd);
atomic_set(&pd->cdrw.pending_bios, 0);
pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->disk->disk_name);
if (IS_ERR(pd->cdrw.thread)) {
dev_err(ddev, "can't start kernel thread\n");
goto out_mem;
}
proc_create_single_data(pd->disk->disk_name, 0, pkt_proc, pkt_seq_show, pd);
dev_notice(ddev, "writer mapped to %pg\n", bdev);
return 0;
out_mem:
blkdev_put(bdev, NULL);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
return -ENOMEM;
}
static int pkt_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
struct pktcdvd_device *pd = bdev->bd_disk->private_data;
struct device *ddev = disk_to_dev(pd->disk);
int ret;
dev_dbg(ddev, "cmd %x, dev %d:%d\n", cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
mutex_lock(&pktcdvd_mutex);
switch (cmd) {
case CDROMEJECT:
/*
* The door gets locked when the device is opened, so we
* have to unlock it or else the eject command fails.
*/
if (pd->refcnt == 1)
pkt_lock_door(pd, 0);
fallthrough;
/*
* forward selected CDROM ioctls to CD-ROM, for UDF
*/
case CDROMMULTISESSION:
case CDROMREADTOCENTRY:
case CDROM_LAST_WRITTEN:
case CDROM_SEND_PACKET:
case SCSI_IOCTL_SEND_COMMAND:
if (!bdev->bd_disk->fops->ioctl)
ret = -ENOTTY;
else
ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
break;
default:
dev_dbg(ddev, "Unknown ioctl (%x)\n", cmd);
ret = -ENOTTY;
}
mutex_unlock(&pktcdvd_mutex);
return ret;
}
static unsigned int pkt_check_events(struct gendisk *disk,
unsigned int clearing)
{
struct pktcdvd_device *pd = disk->private_data;
struct gendisk *attached_disk;
if (!pd)
return 0;
if (!pd->bdev)
return 0;
attached_disk = pd->bdev->bd_disk;
if (!attached_disk || !attached_disk->fops->check_events)
return 0;
return attached_disk->fops->check_events(attached_disk, clearing);
}
static char *pkt_devnode(struct gendisk *disk, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name);
}
static const struct block_device_operations pktcdvd_ops = {
.owner = THIS_MODULE,
.submit_bio = pkt_submit_bio,
.open = pkt_open,
.release = pkt_release,
.ioctl = pkt_ioctl,
.compat_ioctl = blkdev_compat_ptr_ioctl,
.check_events = pkt_check_events,
.devnode = pkt_devnode,
};
/*
* Set up mapping from pktcdvd device to CD-ROM device.
*/
static int pkt_setup_dev(dev_t dev, dev_t *pkt_dev)
{
int idx;
int ret = -ENOMEM;
struct pktcdvd_device *pd;
struct gendisk *disk;
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
for (idx = 0; idx < MAX_WRITERS; idx++)
if (!pkt_devs[idx])
break;
if (idx == MAX_WRITERS) {
pr_err("max %d writers supported\n", MAX_WRITERS);
ret = -EBUSY;
goto out_mutex;
}
pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
if (!pd)
goto out_mutex;
ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE,
sizeof(struct pkt_rb_node));
if (ret)
goto out_mem;
INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
spin_lock_init(&pd->cdrw.active_list_lock);
spin_lock_init(&pd->lock);
spin_lock_init(&pd->iosched.lock);
bio_list_init(&pd->iosched.read_queue);
bio_list_init(&pd->iosched.write_queue);
init_waitqueue_head(&pd->wqueue);
pd->bio_queue = RB_ROOT;
pd->write_congestion_on = write_congestion_on;
pd->write_congestion_off = write_congestion_off;
ret = -ENOMEM;
disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
goto out_mem;
pd->disk = disk;
disk->major = pktdev_major;
disk->first_minor = idx;
disk->minors = 1;
disk->fops = &pktcdvd_ops;
disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
snprintf(disk->disk_name, sizeof(disk->disk_name), DRIVER_NAME"%d", idx);
disk->private_data = pd;
pd->pkt_dev = MKDEV(pktdev_major, idx);
ret = pkt_new_dev(pd, dev);
if (ret)
goto out_mem2;
/* inherit events of the host device */
disk->events = pd->bdev->bd_disk->events;
ret = add_disk(disk);
if (ret)
goto out_mem2;
pkt_sysfs_dev_new(pd);
pkt_debugfs_dev_new(pd);
pkt_devs[idx] = pd;
if (pkt_dev)
*pkt_dev = pd->pkt_dev;
mutex_unlock(&ctl_mutex);
return 0;
out_mem2:
put_disk(disk);
out_mem:
mempool_exit(&pd->rb_pool);
kfree(pd);
out_mutex:
mutex_unlock(&ctl_mutex);
pr_err("setup of pktcdvd device failed\n");
return ret;
}
/*
* Tear down mapping from pktcdvd device to CD-ROM device.
*/
static int pkt_remove_dev(dev_t pkt_dev)
{
struct pktcdvd_device *pd;
struct device *ddev;
int idx;
int ret = 0;
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
for (idx = 0; idx < MAX_WRITERS; idx++) {
pd = pkt_devs[idx];
if (pd && (pd->pkt_dev == pkt_dev))
break;
}
if (idx == MAX_WRITERS) {
pr_debug("dev not setup\n");
ret = -ENXIO;
goto out;
}
if (pd->refcnt > 0) {
ret = -EBUSY;
goto out;
}
ddev = disk_to_dev(pd->disk);
if (!IS_ERR(pd->cdrw.thread))
kthread_stop(pd->cdrw.thread);
pkt_devs[idx] = NULL;
pkt_debugfs_dev_remove(pd);
pkt_sysfs_dev_remove(pd);
blkdev_put(pd->bdev, NULL);
remove_proc_entry(pd->disk->disk_name, pkt_proc);
dev_notice(ddev, "writer unmapped\n");
del_gendisk(pd->disk);
put_disk(pd->disk);
mempool_exit(&pd->rb_pool);
kfree(pd);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
out:
mutex_unlock(&ctl_mutex);
return ret;
}
static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
{
struct pktcdvd_device *pd;
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
if (pd) {
ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
} else {
ctrl_cmd->dev = 0;
ctrl_cmd->pkt_dev = 0;
}
ctrl_cmd->num_devices = MAX_WRITERS;
mutex_unlock(&ctl_mutex);
}
static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
struct pkt_ctrl_command ctrl_cmd;
int ret = 0;
dev_t pkt_dev = 0;
if (cmd != PACKET_CTRL_CMD)
return -ENOTTY;
if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
return -EFAULT;
switch (ctrl_cmd.command) {
case PKT_CTRL_CMD_SETUP:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
break;
case PKT_CTRL_CMD_TEARDOWN:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
break;
case PKT_CTRL_CMD_STATUS:
pkt_get_status(&ctrl_cmd);
break;
default:
return -ENOTTY;
}
if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
return -EFAULT;
return ret;
}
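/*
 * Illustration only, not part of the driver: a minimal user-space sketch
 * of the PACKET_CTRL_CMD handshake handled above, modelled on how a tool
 * such as pktsetup might map a drive. It assumes <linux/pktcdvd.h>
 * exports struct pkt_ctrl_command and the PKT_CTRL_CMD_* values, and
 * mirrors the kernel's new_encode_dev() bit layout.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <linux/pktcdvd.h>

static __u32 encode_dev(dev_t dev)
{
__u32 ma = major(dev), mi = minor(dev);

/* same bit layout as the kernel's new_encode_dev() */
return (mi & 0xff) | (ma << 8) | ((mi & ~0xff) << 12);
}

int main(void)
{
struct pkt_ctrl_command c = { .command = PKT_CTRL_CMD_SETUP };
struct stat st;
int fd = open("/dev/pktcdvd/control", O_RDWR);

if (fd < 0 || stat("/dev/sr0", &st) < 0)
return 1;
c.dev = encode_dev(st.st_rdev);
/* requires CAP_SYS_ADMIN, cf. pkt_ctl_ioctl() above */
if (ioctl(fd, PACKET_CTRL_CMD, &c) < 0)
return 1;
printf("mapped as pktcdvd unit with dev_t %u\n", c.pkt_dev);
return 0;
}
#endif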
#ifdef CONFIG_COMPAT
static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static const struct file_operations pkt_ctl_fops = {
.open = nonseekable_open,
.unlocked_ioctl = pkt_ctl_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = pkt_ctl_compat_ioctl,
#endif
.owner = THIS_MODULE,
.llseek = no_llseek,
};
static struct miscdevice pkt_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = DRIVER_NAME,
.nodename = "pktcdvd/control",
.fops = &pkt_ctl_fops
};
static int __init pkt_init(void)
{
int ret;
mutex_init(&ctl_mutex);
ret = mempool_init_kmalloc_pool(&psd_pool, PSD_POOL_SIZE,
sizeof(struct packet_stacked_data));
if (ret)
return ret;
ret = bioset_init(&pkt_bio_set, BIO_POOL_SIZE, 0, 0);
if (ret) {
mempool_exit(&psd_pool);
return ret;
}
ret = register_blkdev(pktdev_major, DRIVER_NAME);
if (ret < 0) {
pr_err("unable to register block device\n");
goto out2;
}
if (!pktdev_major)
pktdev_major = ret;
ret = pkt_sysfs_init();
if (ret)
goto out;
pkt_debugfs_init();
ret = misc_register(&pkt_misc);
if (ret) {
pr_err("unable to register misc device\n");
goto out_misc;
}
pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);
return 0;
out_misc:
pkt_debugfs_cleanup();
pkt_sysfs_cleanup();
out:
unregister_blkdev(pktdev_major, DRIVER_NAME);
out2:
mempool_exit(&psd_pool);
bioset_exit(&pkt_bio_set);
return ret;
}
static void __exit pkt_exit(void)
{
remove_proc_entry("driver/"DRIVER_NAME, NULL);
misc_deregister(&pkt_misc);
pkt_debugfs_cleanup();
pkt_sysfs_cleanup();
unregister_blkdev(pktdev_major, DRIVER_NAME);
mempool_exit(&psd_pool);
bioset_exit(&pkt_bio_set);
}
MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
MODULE_AUTHOR("Jens Axboe <[email protected]>");
MODULE_LICENSE("GPL");
module_init(pkt_init);
module_exit(pkt_exit);
| linux-master | drivers/block/pktcdvd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ram backed block device driver.
*
* Copyright (C) 2007 Nick Piggin
* Copyright (C) 2007 Novell Inc.
*
* Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
* of their respective owners.
*/
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/xarray.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
/*
* Each block ramdisk device has an xarray, brd_pages, that stores the
* pages containing the block device's contents. A brd page's ->index is
* its offset in PAGE_SIZE units. This is similar to, but in no way
* connected with, the kernel's pagecache or buffer cache (which sit above
* our block device).
*/
struct brd_device {
int brd_number;
struct gendisk *brd_disk;
struct list_head brd_list;
/*
* Backing store of pages. This is the contents of the block device.
*/
struct xarray brd_pages;
u64 brd_nr_pages;
};
/*
* Look up and return a brd's page for a given sector.
*/
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
pgoff_t idx;
struct page *page;
idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
page = xa_load(&brd->brd_pages, idx);
BUG_ON(page && page->index != idx);
return page;
}
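/*
* Worked example (assuming 4 KiB pages, so PAGE_SECTORS_SHIFT == 3):
* sectors 0..7 map to page index 0, sectors 8..15 to index 1, and so on;
* e.g. sector 9 >> 3 == 1, so it is served from the second brd page.
*/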
/*
* Insert a new page for a given sector, if one does not already exist.
*/
static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp)
{
pgoff_t idx;
struct page *page, *cur;
int ret = 0;
page = brd_lookup_page(brd, sector);
if (page)
return 0;
page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM);
if (!page)
return -ENOMEM;
xa_lock(&brd->brd_pages);
idx = sector >> PAGE_SECTORS_SHIFT;
page->index = idx;
cur = __xa_cmpxchg(&brd->brd_pages, idx, NULL, page, gfp);
if (unlikely(cur)) {
__free_page(page);
ret = xa_err(cur);
if (!ret && (cur->index != idx))
ret = -EIO;
} else {
brd->brd_nr_pages++;
}
xa_unlock(&brd->brd_pages);
return ret;
}
/*
* Free all backing store pages and xarray. This must only be called when
* there are no other users of the device.
*/
static void brd_free_pages(struct brd_device *brd)
{
struct page *page;
pgoff_t idx;
xa_for_each(&brd->brd_pages, idx, page) {
__free_page(page);
cond_resched();
}
xa_destroy(&brd->brd_pages);
}
/*
* copy_to_brd_setup must be called before copy_to_brd. It may sleep.
*/
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n,
gfp_t gfp)
{
unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
size_t copy;
int ret;
copy = min_t(size_t, n, PAGE_SIZE - offset);
ret = brd_insert_page(brd, sector, gfp);
if (ret)
return ret;
if (copy < n) {
sector += copy >> SECTOR_SHIFT;
ret = brd_insert_page(brd, sector, gfp);
}
return ret;
}
/*
* Copy n bytes from src to the brd starting at sector. Does not sleep.
*/
static void copy_to_brd(struct brd_device *brd, const void *src,
sector_t sector, size_t n)
{
struct page *page;
void *dst;
unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
size_t copy;
copy = min_t(size_t, n, PAGE_SIZE - offset);
page = brd_lookup_page(brd, sector);
BUG_ON(!page);
dst = kmap_atomic(page);
memcpy(dst + offset, src, copy);
kunmap_atomic(dst);
if (copy < n) {
src += copy;
sector += copy >> SECTOR_SHIFT;
copy = n - copy;
page = brd_lookup_page(brd, sector);
BUG_ON(!page);
dst = kmap_atomic(page);
memcpy(dst, src, copy);
kunmap_atomic(dst);
}
}
/*
* Copy n bytes to dst from the brd starting at sector. Does not sleep.
*/
static void copy_from_brd(void *dst, struct brd_device *brd,
sector_t sector, size_t n)
{
struct page *page;
void *src;
unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
size_t copy;
copy = min_t(size_t, n, PAGE_SIZE - offset);
page = brd_lookup_page(brd, sector);
if (page) {
src = kmap_atomic(page);
memcpy(dst, src + offset, copy);
kunmap_atomic(src);
} else
memset(dst, 0, copy);
if (copy < n) {
dst += copy;
sector += copy >> SECTOR_SHIFT;
copy = n - copy;
page = brd_lookup_page(brd, sector);
if (page) {
src = kmap_atomic(page);
memcpy(dst, src, copy);
kunmap_atomic(src);
} else
memset(dst, 0, copy);
}
}
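/*
* Worked example of the two-step copy above (assuming 4 KiB pages): a
* 1 KiB transfer starting at sector 7 has offset == 3584, so only the
* first 512 bytes fit in that page (PAGE_SIZE - offset); the remaining
* 512 bytes come from (or are zero-filled for) the page holding sector 8.
*/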
/*
* Process a single bvec of a bio.
*/
static int brd_do_bvec(struct brd_device *brd, struct page *page,
unsigned int len, unsigned int off, blk_opf_t opf,
sector_t sector)
{
void *mem;
int err = 0;
if (op_is_write(opf)) {
/*
* Must use NOIO because we don't want to recurse back into the
* block or filesystem layers from page reclaim.
*/
gfp_t gfp = opf & REQ_NOWAIT ? GFP_NOWAIT : GFP_NOIO;
err = copy_to_brd_setup(brd, sector, len, gfp);
if (err)
goto out;
}
mem = kmap_atomic(page);
if (!op_is_write(opf)) {
copy_from_brd(mem + off, brd, sector, len);
flush_dcache_page(page);
} else {
flush_dcache_page(page);
copy_to_brd(brd, mem + off, sector, len);
}
kunmap_atomic(mem);
out:
return err;
}
static void brd_submit_bio(struct bio *bio)
{
struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
sector_t sector = bio->bi_iter.bi_sector;
struct bio_vec bvec;
struct bvec_iter iter;
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
int err;
/* Don't support un-aligned buffer */
WARN_ON_ONCE((bvec.bv_offset & (SECTOR_SIZE - 1)) ||
(len & (SECTOR_SIZE - 1)));
err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
bio->bi_opf, sector);
if (err) {
if (err == -ENOMEM && bio->bi_opf & REQ_NOWAIT) {
bio_wouldblock_error(bio);
return;
}
bio_io_error(bio);
return;
}
sector += len >> SECTOR_SHIFT;
}
bio_endio(bio);
}
static const struct block_device_operations brd_fops = {
.owner = THIS_MODULE,
.submit_bio = brd_submit_bio,
};
/*
* And now the modules code and kernel interface.
*/
static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, 0444);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
unsigned long rd_size = CONFIG_BLK_DEV_RAM_SIZE;
module_param(rd_size, ulong, 0444);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
static int max_part = 1;
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Num Minors to reserve between devices");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");
#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
rd_size = simple_strtol(str, NULL, 0);
return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif
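/*
* Usage example (hypothetical values): booting with "ramdisk_size=65536"
* on the kernel command line makes each /dev/ramN device 64 MiB; when brd
* is built as a module, "modprobe brd rd_size=65536" achieves the same.
*/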
/*
* The device scheme is derived from loop.c. Keep them in synch where possible
* (should share code eventually).
*/
static LIST_HEAD(brd_devices);
static struct dentry *brd_debugfs_dir;
static int brd_alloc(int i)
{
struct brd_device *brd;
struct gendisk *disk;
char buf[DISK_NAME_LEN];
int err = -ENOMEM;
list_for_each_entry(brd, &brd_devices, brd_list)
if (brd->brd_number == i)
return -EEXIST;
brd = kzalloc(sizeof(*brd), GFP_KERNEL);
if (!brd)
return -ENOMEM;
brd->brd_number = i;
list_add_tail(&brd->brd_list, &brd_devices);
xa_init(&brd->brd_pages);
snprintf(buf, DISK_NAME_LEN, "ram%d", i);
if (!IS_ERR_OR_NULL(brd_debugfs_dir))
debugfs_create_u64(buf, 0444, brd_debugfs_dir,
&brd->brd_nr_pages);
disk = brd->brd_disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
goto out_free_dev;
disk->major = RAMDISK_MAJOR;
disk->first_minor = i * max_part;
disk->minors = max_part;
disk->fops = &brd_fops;
disk->private_data = brd;
strscpy(disk->disk_name, buf, DISK_NAME_LEN);
set_capacity(disk, rd_size * 2);
/*
* This is so fdisk will align partitions on 4k, because of
* direct_access API needing 4k alignment, returning a PFN
* (This is only a problem on very small devices <= 4M,
* otherwise fdisk will align on 1M. Regardless this call
* is harmless)
*/
blk_queue_physical_block_size(disk->queue, PAGE_SIZE);
/* Tell the block layer that this is not a rotational device */
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, disk->queue);
blk_queue_flag_set(QUEUE_FLAG_NOWAIT, disk->queue);
err = add_disk(disk);
if (err)
goto out_cleanup_disk;
return 0;
out_cleanup_disk:
put_disk(disk);
out_free_dev:
list_del(&brd->brd_list);
kfree(brd);
return err;
}
static void brd_probe(dev_t dev)
{
brd_alloc(MINOR(dev) / max_part);
}
static void brd_cleanup(void)
{
struct brd_device *brd, *next;
debugfs_remove_recursive(brd_debugfs_dir);
list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
del_gendisk(brd->brd_disk);
put_disk(brd->brd_disk);
brd_free_pages(brd);
list_del(&brd->brd_list);
kfree(brd);
}
}
static inline void brd_check_and_reset_par(void)
{
if (unlikely(!max_part))
max_part = 1;
/*
* Make sure 'max_part' divides (1U << MINORBITS) exactly; otherwise it
* is possible to get the same dev_t when adding partitions.
*/
if ((1U << MINORBITS) % max_part != 0)
max_part = 1UL << fls(max_part);
if (max_part > DISK_MAX_PARTS) {
pr_info("brd: max_part can't be larger than %d, reset max_part = %d.\n",
DISK_MAX_PARTS, DISK_MAX_PARTS);
max_part = DISK_MAX_PARTS;
}
}
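/*
* Worked example: max_part == 3 does not divide 1U << MINORBITS evenly,
* so it is rounded up to the next power of two (1UL << fls(3) == 4);
* max_part == 300 would become 512 and then be clamped to DISK_MAX_PARTS.
*/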
static int __init brd_init(void)
{
int err, i;
brd_check_and_reset_par();
brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
for (i = 0; i < rd_nr; i++) {
err = brd_alloc(i);
if (err)
goto out_free;
}
/*
* The brd module now has a feature to instantiate the underlying device
* structure on demand, provided that the device node is accessed.
*
* (1) if rd_nr is specified, create that many upfront; otherwise
* it defaults to CONFIG_BLK_DEV_RAM_COUNT.
* (2) Users can further extend brd devices by creating device nodes
* themselves and having the kernel automatically instantiate the
* actual device on demand. Example:
* mknod /path/devnod_name b 1 X # 1 is the rd major
* fdisk -l /path/devnod_name
* If (X / max_part) was not already created it will be created
* dynamically.
*/
if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe)) {
err = -EIO;
goto out_free;
}
pr_info("brd: module loaded\n");
return 0;
out_free:
brd_cleanup();
pr_info("brd: module NOT loaded !!!\n");
return err;
}
static void __exit brd_exit(void)
{
unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
brd_cleanup();
pr_info("brd: module unloaded\n");
}
module_init(brd_init);
module_exit(brd_exit);
| linux-master | drivers/block/brd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/block/floppy.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1993, 1994 Alain Knaff
* Copyright (C) 1998 Alan Cox
*/
/*
* 02.12.91 - Changed to static variables to indicate need for reset
* and recalibrate. This makes some things easier (output_byte reset
* checking etc), and means less interrupt jumping in case of errors,
* so the code is hopefully easier to understand.
*/
/*
* This file is certainly a mess. I've tried my best to get it working,
* but I don't like programming floppies, and I have only one anyway.
* Urgel. I should check for more errors, and do more graceful error
* recovery. Seems there are problems with several drives. I've tried to
* correct them. No promises.
*/
/*
* As with hd.c, all routines within this file can (and will) be called
* by interrupts, so extreme caution is needed. A hardware interrupt
* handler may not sleep, or a kernel panic will happen. Thus I cannot
* call "floppy-on" directly, but have to set a special timer interrupt
* etc.
*/
/*
* 28.02.92 - made track-buffering routines, based on the routines written
* by [email protected] (Lawrence Foard). Linus.
*/
/*
* Automatic floppy-detection and formatting written by Werner Almesberger
* ([email protected]), who also corrected some problems with
* the floppy-change signal detection.
*/
/*
* 1992/7/22 -- Hennus Bergman: Added better error reporting, fixed
* FDC data overrun bug, added some preliminary stuff for vertical
* recording support.
*
* 1992/9/17: Added DMA allocation & DMA functions. -- hhb.
*
* TODO: Errors are still not counted properly.
*/
/* 1992/9/20
* Modifications for ``Sector Shifting'' by Rob Hooft ([email protected])
* modeled after the freeware MS-DOS program fdformat/88 V1.8 by
* Christoph H. Hochstätter.
* I have fixed the shift values to the ones I always use. Maybe a new
* ioctl() should be created to be able to modify them.
* There is a bug in the driver that makes it impossible to format a
* floppy as the first thing after bootup.
*/
/*
* 1993/4/29 -- Linus -- cleaned up the timer handling in the kernel, and
* this helped the floppy driver as well. Much cleaner, and still seems to
* work.
*/
/* 1994/6/24 --bbroad-- added the floppy table entries and made
* minor modifications to allow 2.88 floppies to be run.
*/
/* 1994/7/13 -- Paul Vojta -- modified the probing code to allow three or more
* disk types.
*/
/*
* 1994/8/8 -- Alain Knaff -- Switched to fdpatch driver: Support for bigger
* format bug fixes, but unfortunately some new bugs too...
*/
/* 1994/9/17 -- Koen Holtman -- added logging of physical floppy write
* errors to allow safe writing by specialized programs.
*/
/* 1995/4/24 -- Dan Fandrich -- added support for Commodore 1581 3.5" disks
* by defining bit 1 of the "stretch" parameter to mean put sectors on the
* opposite side of the disk, leaving the sector IDs alone (i.e. Commodore's
* drives are "upside-down").
*/
/*
* 1995/8/26 -- Andreas Busse -- added Mips support.
*/
/*
* 1995/10/18 -- Ralf Baechle -- Portability cleanup; move machine dependent
* features to asm/floppy.h.
*/
/*
* 1998/1/21 -- Richard Gooch <[email protected]> -- devfs support
*/
/*
* 1998/05/07 -- Russell King -- More portability cleanups; moved definition of
* interrupt and dma channel to asm/floppy.h. Cleaned up some formatting &
* use of '0' for NULL.
*/
/*
* 1998/06/07 -- Alan Cox -- Merged the 2.0.34 fixes for resource allocation
* failures.
*/
/*
* 1998/09/20 -- David Weinehall -- Added slow-down code for buggy PS/2-drives.
*/
/*
* 1999/08/13 -- Paul Slootman -- floppy stopped working on Alpha after 24
* days, 6 hours, 32 minutes and 32 seconds (i.e. MAXINT jiffies; ints were
* being used to store jiffies, which are unsigned longs).
*/
/*
* 2000/08/28 -- Arnaldo Carvalho de Melo <[email protected]>
* - get rid of check_region
* - s/suser/capable/
*/
/*
* 2001/08/26 -- Paul Gortmaker - fix insmod oops on machines with no
* floppy controller (lingering task on list after module is gone... boom.)
*/
/*
* 2002/02/07 -- Anton Altaparmakov - Fix io ports reservation to correct range
* (0x3f2-0x3f5, 0x3f7). This fix is a bit of a hack but the proper fix
* requires many non-obvious changes in arch dependent code.
*/
/* 2003/07/28 -- Daniele Bellucci <[email protected]>.
* Better audit of register_blkdev.
*/
#define REALLY_SLOW_IO
#define DEBUGT 2
#define DPRINT(format, args...) \
pr_info("floppy%d: " format, current_drive, ##args)
#define DCL_DEBUG /* debug disk change line */
#ifdef DCL_DEBUG
#define debug_dcl(test, fmt, args...) \
do { if ((test) & FD_DEBUG) DPRINT(fmt, ##args); } while (0)
#else
#define debug_dcl(test, fmt, args...) \
do { if (0) DPRINT(fmt, ##args); } while (0)
#endif
/* do print messages for unexpected interrupts */
static int print_unex = 1;
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/fdreg.h>
#include <linux/fd.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/string.h>
#include <linux/jiffies.h>
#include <linux/fcntl.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h> /* CMOS defines */
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/major.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/async.h>
#include <linux/compat.h>
/*
* PS/2 floppies have much slower step rates than regular floppies.
* It has been recommended to use about 1/4 of the default speed
* in some more extreme cases.
*/
static DEFINE_MUTEX(floppy_mutex);
static int slow_floppy;
#include <asm/dma.h>
#include <asm/irq.h>
static int FLOPPY_IRQ = 6;
static int FLOPPY_DMA = 2;
static int can_use_virtual_dma = 2;
/* =======
* can use virtual DMA:
* 0 = use of virtual DMA disallowed by config
* 1 = use of virtual DMA prescribed by config
* 2 = no virtual DMA preference configured. By default try hard DMA,
* but fall back on virtual DMA when not enough memory available
*/
static int use_virtual_dma;
/* =======
* use virtual DMA
* 0 using hard DMA
* 1 using virtual DMA
* This variable is set to virtual when a DMA mem problem arises, and
* reset back in floppy_grab_irq_and_dma.
* It is not safe to reset it in other circumstances, because the floppy
* driver may have several buffers in use at once, and we currently do
* not record each buffer's capabilities.
*/
static DEFINE_SPINLOCK(floppy_lock);
static unsigned short virtual_dma_port = 0x3f0;
irqreturn_t floppy_interrupt(int irq, void *dev_id);
static int set_dor(int fdc, char mask, char data);
#define K_64 0x10000 /* 64KB */
/* the following is the mask of allowed drives. By default units 2 and
* 3 of both floppy controllers are disabled, because switching on the
* motor of these drives causes system hangs on some PCI computers. drive
* 0 is the low bit (0x1), and drive 7 is the high bit (0x80). Bits are on if
* a drive is allowed.
*
* NOTE: This must come before we include the arch floppy header because
* some ports reference this variable from there. -DaveM
*/
static int allowed_drive_mask = 0x33;
#include <asm/floppy.h>
static int irqdma_allocated;
#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/cdrom.h> /* for the compatibility eject ioctl */
#include <linux/completion.h>
static LIST_HEAD(floppy_reqs);
static struct request *current_req;
static int set_next_request(void);
#ifndef fd_get_dma_residue
#define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA)
#endif
/* Dma Memory related stuff */
#ifndef fd_dma_mem_free
#define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
#endif
#ifndef fd_dma_mem_alloc
#define fd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL, get_order(size))
#endif
#ifndef fd_cacheflush
#define fd_cacheflush(addr, size) /* nothing... */
#endif
static inline void fallback_on_nodma_alloc(char **addr, size_t l)
{
#ifdef FLOPPY_CAN_FALLBACK_ON_NODMA
if (*addr)
return; /* we have the memory */
if (can_use_virtual_dma != 2)
return; /* no fallback allowed */
pr_info("DMA memory shortage. Temporarily falling back on virtual DMA\n");
*addr = (char *)nodma_mem_alloc(l);
#else
return;
#endif
}
/* End dma memory related stuff */
static unsigned long fake_change;
static bool initialized;
#define ITYPE(x) (((x) >> 2) & 0x1f)
#define TOMINOR(x) (((x) & 3) | (((x) & 4) << 5))
#define UNIT(x) ((x) & 0x03) /* drive on fdc */
#define FDC(x) (((x) & 0x04) >> 2) /* fdc of drive */
/* reverse mapping from unit and fdc to drive */
#define REVDRIVE(fdc, unit) ((unit) + ((fdc) << 2))
#define PH_HEAD(floppy, head) (((((floppy)->stretch & 2) >> 1) ^ head) << 2)
#define STRETCH(floppy) ((floppy)->stretch & FD_STRETCH)
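/*
* Worked examples for the encodings above (hypothetical values):
* UNIT()/FDC() split a drive number, so drive 5 is unit 1 on fdc 1 and
* TOMINOR(5) == 129 (unit in bits 0-1, fdc select in bit 7). ITYPE()
* decodes the format type from a device minor: ITYPE(28) == 7, i.e.
* floppy_type entry 7 ("H1440") on drive 0.
*/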
/* read/write commands */
#define COMMAND 0
#define DR_SELECT 1
#define TRACK 2
#define HEAD 3
#define SECTOR 4
#define SIZECODE 5
#define SECT_PER_TRACK 6
#define GAP 7
#define SIZECODE2 8
#define NR_RW 9
/* format commands */
#define F_SIZECODE 2
#define F_SECT_PER_TRACK 3
#define F_GAP 4
#define F_FILL 5
#define NR_F 6
/*
* Maximum disk size (in kilobytes).
* This default is used whenever the current disk size is unknown.
* [Now it is rather a minimum]
*/
#define MAX_DISK_SIZE 4 /* 3984 */
/*
* globals used by 'result()'
*/
static unsigned char reply_buffer[FD_RAW_REPLY_SIZE];
static int inr; /* size of reply buffer, when called from interrupt */
#define ST0 0
#define ST1 1
#define ST2 2
#define ST3 0 /* result of GETSTATUS */
#define R_TRACK 3
#define R_HEAD 4
#define R_SECTOR 5
#define R_SIZECODE 6
#define SEL_DLY (2 * HZ / 100)
/*
* this struct defines the different floppy drive types.
*/
static struct {
struct floppy_drive_params params;
const char *name; /* name printed while booting */
} default_drive_params[] = {
/* NOTE: the time values in jiffies should be in msec!
CMOS drive type
| Maximum data rate supported by drive type
| | Head load time, msec
| | | Head unload time, msec (not used)
| | | | Step rate interval, usec
| | | | | Time needed for spinup time (jiffies)
| | | | | | Timeout for spinning down (jiffies)
| | | | | | | Spindown offset (where disk stops)
| | | | | | | | Select delay
| | | | | | | | | RPS
| | | | | | | | | | Max number of tracks
| | | | | | | | | | | Interrupt timeout
| | | | | | | | | | | | Max nonintlv. sectors
| | | | | | | | | | | | | -Max Errors- flags */
{{0, 500, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 80, 3*HZ, 20, {3,1,2,0,2}, 0,
0, { 7, 4, 8, 2, 1, 5, 3,10}, 3*HZ/2, 0 }, "unknown" },
{{1, 300, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 40, 3*HZ, 17, {3,1,2,0,2}, 0,
0, { 1, 0, 0, 0, 0, 0, 0, 0}, 3*HZ/2, 1 }, "360K PC" }, /*5 1/4 360 KB PC*/
{{2, 500, 16, 16, 6000, 4*HZ/10, 3*HZ, 14, SEL_DLY, 6, 83, 3*HZ, 17, {3,1,2,0,2}, 0,
0, { 2, 5, 6,23,10,20,12, 0}, 3*HZ/2, 2 }, "1.2M" }, /*5 1/4 HD AT*/
{{3, 250, 16, 16, 3000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0,
0, { 4,22,21,30, 3, 0, 0, 0}, 3*HZ/2, 4 }, "720k" }, /*3 1/2 DD*/
{{4, 500, 16, 16, 4000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0,
0, { 7, 4,25,22,31,21,29,11}, 3*HZ/2, 7 }, "1.44M" }, /*3 1/2 HD*/
{{5, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0,
0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M AMI BIOS" }, /*3 1/2 ED*/
{{6, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0,
0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M" } /*3 1/2 ED*/
/* | --autodetected formats--- | | |
* read_track | | Name printed when booting
* | Native format
* Frequency of disk change checks */
};
static struct floppy_drive_params drive_params[N_DRIVE];
static struct floppy_drive_struct drive_state[N_DRIVE];
static struct floppy_write_errors write_errors[N_DRIVE];
static struct timer_list motor_off_timer[N_DRIVE];
static struct blk_mq_tag_set tag_sets[N_DRIVE];
static struct gendisk *opened_disk[N_DRIVE];
static DEFINE_MUTEX(open_lock);
static struct floppy_raw_cmd *raw_cmd, default_raw_cmd;
/*
* This struct defines the different floppy types.
*
* Bit 0 of 'stretch' tells if the tracks need to be doubled for some
* types (e.g. 360kB diskette in 1.2MB drive, etc.). Bit 1 of 'stretch'
* tells if the disk is in Commodore 1581 format, which means side 0 sectors
* are located on side 1 of the disk but with a side 0 ID, and vice-versa.
* This is the same as the Sharp MZ-80 5.25" CP/M disk format, except that the
* 1581's logical side 0 is on physical side 1, whereas the Sharp's logical
* side 0 is on physical side 0 (but with the misnamed sector IDs).
* 'stretch' should probably be renamed to something more general, like
* 'options'.
*
* Bits 2 through 9 of 'stretch' tell the number of the first sector.
* The LSB (bit 2) is flipped. For most disks, the first sector
* is 1 (represented by 0x00<<2). For some CP/M and music sampler
* disks (such as Ensoniq EPS 16plus) it is 0 (represented as 0x01<<2).
* For Amstrad CPC disks it is 0xC1 (represented as 0xC0<<2).
*
* Other parameters should be self-explanatory (see also setfdprm(8)).
*/
/*
Size
| Sectors per track
| | Head
| | | Tracks
| | | | Stretch
| | | | | Gap 1 size
| | | | | | Data rate, | 0x40 for perp
| | | | | | | Spec1 (stepping rate, head unload
| | | | | | | | /fmt gap (gap2) */
static struct floppy_struct floppy_type[32] = {
{ 0, 0,0, 0,0,0x00,0x00,0x00,0x00,NULL }, /* 0 no testing */
{ 720, 9,2,40,0,0x2A,0x02,0xDF,0x50,"d360" }, /* 1 360KB PC */
{ 2400,15,2,80,0,0x1B,0x00,0xDF,0x54,"h1200" }, /* 2 1.2MB AT */
{ 720, 9,1,80,0,0x2A,0x02,0xDF,0x50,"D360" }, /* 3 360KB SS 3.5" */
{ 1440, 9,2,80,0,0x2A,0x02,0xDF,0x50,"D720" }, /* 4 720KB 3.5" */
{ 720, 9,2,40,1,0x23,0x01,0xDF,0x50,"h360" }, /* 5 360KB AT */
{ 1440, 9,2,80,0,0x23,0x01,0xDF,0x50,"h720" }, /* 6 720KB AT */
{ 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,"H1440" }, /* 7 1.44MB 3.5" */
{ 5760,36,2,80,0,0x1B,0x43,0xAF,0x54,"E2880" }, /* 8 2.88MB 3.5" */
{ 6240,39,2,80,0,0x1B,0x43,0xAF,0x28,"E3120" }, /* 9 3.12MB 3.5" */
{ 2880,18,2,80,0,0x25,0x00,0xDF,0x02,"h1440" }, /* 10 1.44MB 5.25" */
{ 3360,21,2,80,0,0x1C,0x00,0xCF,0x0C,"H1680" }, /* 11 1.68MB 3.5" */
{ 820,10,2,41,1,0x25,0x01,0xDF,0x2E,"h410" }, /* 12 410KB 5.25" */
{ 1640,10,2,82,0,0x25,0x02,0xDF,0x2E,"H820" }, /* 13 820KB 3.5" */
{ 2952,18,2,82,0,0x25,0x00,0xDF,0x02,"h1476" }, /* 14 1.48MB 5.25" */
{ 3444,21,2,82,0,0x25,0x00,0xDF,0x0C,"H1722" }, /* 15 1.72MB 3.5" */
{ 840,10,2,42,1,0x25,0x01,0xDF,0x2E,"h420" }, /* 16 420KB 5.25" */
{ 1660,10,2,83,0,0x25,0x02,0xDF,0x2E,"H830" }, /* 17 830KB 3.5" */
{ 2988,18,2,83,0,0x25,0x00,0xDF,0x02,"h1494" }, /* 18 1.49MB 5.25" */
{ 3486,21,2,83,0,0x25,0x00,0xDF,0x0C,"H1743" }, /* 19 1.74 MB 3.5" */
{ 1760,11,2,80,0,0x1C,0x09,0xCF,0x00,"h880" }, /* 20 880KB 5.25" */
{ 2080,13,2,80,0,0x1C,0x01,0xCF,0x00,"D1040" }, /* 21 1.04MB 3.5" */
{ 2240,14,2,80,0,0x1C,0x19,0xCF,0x00,"D1120" }, /* 22 1.12MB 3.5" */
{ 3200,20,2,80,0,0x1C,0x20,0xCF,0x2C,"h1600" }, /* 23 1.6MB 5.25" */
{ 3520,22,2,80,0,0x1C,0x08,0xCF,0x2e,"H1760" }, /* 24 1.76MB 3.5" */
{ 3840,24,2,80,0,0x1C,0x20,0xCF,0x00,"H1920" }, /* 25 1.92MB 3.5" */
{ 6400,40,2,80,0,0x25,0x5B,0xCF,0x00,"E3200" }, /* 26 3.20MB 3.5" */
{ 7040,44,2,80,0,0x25,0x5B,0xCF,0x00,"E3520" }, /* 27 3.52MB 3.5" */
{ 7680,48,2,80,0,0x25,0x63,0xCF,0x00,"E3840" }, /* 28 3.84MB 3.5" */
{ 3680,23,2,80,0,0x1C,0x10,0xCF,0x00,"H1840" }, /* 29 1.84MB 3.5" */
{ 1600,10,2,80,0,0x25,0x02,0xDF,0x2E,"D800" }, /* 30 800KB 3.5" */
{ 3200,20,2,80,0,0x1C,0x00,0xCF,0x2C,"H1600" }, /* 31 1.6MB 3.5" */
};
static struct gendisk *disks[N_DRIVE][ARRAY_SIZE(floppy_type)];
#define SECTSIZE (_FD_SECTSIZE(*floppy))
/* Auto-detection: Disk type used until the next media change occurs. */
static struct floppy_struct *current_type[N_DRIVE];
/*
* User-provided type information. current_type points to
* the respective entry of this array.
*/
static struct floppy_struct user_params[N_DRIVE];
static sector_t floppy_sizes[256];
static char floppy_device_name[] = "floppy";
/*
* The driver is trying to determine the correct media format
* while probing is set. rw_interrupt() clears it after a
* successful access.
*/
static int probing;
/* Synchronization of FDC access. */
#define FD_COMMAND_NONE -1
#define FD_COMMAND_ERROR 2
#define FD_COMMAND_OKAY 3
static volatile int command_status = FD_COMMAND_NONE;
static unsigned long fdc_busy;
static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
static DECLARE_WAIT_QUEUE_HEAD(command_done);
/* errors encountered on the current (or last) request */
static int floppy_errors;
/* Format request descriptor. */
static struct format_descr format_req;
/*
* Rate is 0 for 500kb/s, 1 for 300kbps, 2 for 250kbps
* Spec1 is 0xSH, where S is stepping rate (F=1ms, E=2ms, D=3ms etc),
* H is head unload time (1=16ms, 2=32ms, etc)
*/
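/*
* Worked example of the encoding above: spec1 == 0xC1 selects a stepping
* rate code of 0xC (4 ms, counting down from F == 1 ms) and a head unload
* time code of 1 (16 ms).
*/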
/*
* Track buffer
* Because these are written to by the DMA controller, they must
* not contain a 64k byte boundary crossing, or data will be
* corrupted/lost.
*/
static char *floppy_track_buffer;
static int max_buffer_sectors;
typedef void (*done_f)(int);
static const struct cont_t {
void (*interrupt)(void);
/* this is called after the interrupt of the
* main command */
void (*redo)(void); /* this is called to retry the operation */
void (*error)(void); /* this is called to tally an error */
done_f done; /* this is called to say if the operation has
* succeeded/failed */
} *cont;
static void floppy_ready(void);
static void floppy_start(void);
static void process_fd_request(void);
static void recalibrate_floppy(void);
static void floppy_shutdown(struct work_struct *);
static int floppy_request_regions(int);
static void floppy_release_regions(int);
static int floppy_grab_irq_and_dma(void);
static void floppy_release_irq_and_dma(void);
/*
* The "reset" variable should be tested whenever an interrupt is scheduled,
* after the commands have been sent. This is to ensure that the driver doesn't
* get wedged when the interrupt doesn't come because of a failed command.
* reset doesn't need to be tested before sending commands, because
* output_byte is automatically disabled when reset is set.
*/
static void reset_fdc(void);
static int floppy_revalidate(struct gendisk *disk);
/*
* These are global variables, as that's the easiest way to give
* information to interrupts. They are the data used for the current
* request.
*/
#define NO_TRACK -1
#define NEED_1_RECAL -2
#define NEED_2_RECAL -3
static atomic_t usage_count = ATOMIC_INIT(0);
/* buffer related variables */
static int buffer_track = -1;
static int buffer_drive = -1;
static int buffer_min = -1;
static int buffer_max = -1;
/* fdc related variables, should end up in a struct */
static struct floppy_fdc_state fdc_state[N_FDC];
static int current_fdc; /* current fdc */
static struct workqueue_struct *floppy_wq;
static struct floppy_struct *_floppy = floppy_type;
static unsigned char current_drive;
static long current_count_sectors;
static unsigned char fsector_t; /* sector in track */
static unsigned char in_sector_offset; /* offset within physical sector,
* expressed in units of 512 bytes */
static inline unsigned char fdc_inb(int fdc, int reg)
{
return fd_inb(fdc_state[fdc].address, reg);
}
static inline void fdc_outb(unsigned char value, int fdc, int reg)
{
fd_outb(value, fdc_state[fdc].address, reg);
}
static inline bool drive_no_geom(int drive)
{
return !current_type[drive] && !ITYPE(drive_state[drive].fd_device);
}
#ifndef fd_eject
static inline int fd_eject(int drive)
{
return -EINVAL;
}
#endif
/*
* Debugging
* =========
*/
#ifdef DEBUGT
static long unsigned debugtimer;
static inline void set_debugt(void)
{
debugtimer = jiffies;
}
static inline void debugt(const char *func, const char *msg)
{
if (drive_params[current_drive].flags & DEBUGT)
pr_info("%s:%s dtime=%lu\n", func, msg, jiffies - debugtimer);
}
#else
static inline void set_debugt(void) { }
static inline void debugt(const char *func, const char *msg) { }
#endif /* DEBUGT */
static DECLARE_DELAYED_WORK(fd_timeout, floppy_shutdown);
static const char *timeout_message;
static void is_alive(const char *func, const char *message)
{
/* this routine checks whether the floppy driver is "alive" */
if (test_bit(0, &fdc_busy) && command_status < 2 &&
!delayed_work_pending(&fd_timeout)) {
DPRINT("%s: timeout handler died. %s\n", func, message);
}
}
static void (*do_floppy)(void) = NULL;
#define OLOGSIZE 20
static void (*lasthandler)(void);
static unsigned long interruptjiffies;
static unsigned long resultjiffies;
static int resultsize;
static unsigned long lastredo;
static struct output_log {
unsigned char data;
unsigned char status;
unsigned long jiffies;
} output_log[OLOGSIZE];
static int output_log_pos;
#define MAXTIMEOUT -2
static void __reschedule_timeout(int drive, const char *message)
{
unsigned long delay;
if (drive < 0 || drive >= N_DRIVE) {
delay = 20UL * HZ;
drive = 0;
} else
delay = drive_params[drive].timeout;
mod_delayed_work(floppy_wq, &fd_timeout, delay);
if (drive_params[drive].flags & FD_DEBUG)
DPRINT("reschedule timeout %s\n", message);
timeout_message = message;
}
static void reschedule_timeout(int drive, const char *message)
{
unsigned long flags;
spin_lock_irqsave(&floppy_lock, flags);
__reschedule_timeout(drive, message);
spin_unlock_irqrestore(&floppy_lock, flags);
}
#define INFBOUND(a, b) (a) = max_t(int, a, b)
#define SUPBOUND(a, b) (a) = min_t(int, a, b)
/*
* Bottom half floppy driver.
* ==========================
*
* This part of the file contains the code talking directly to the hardware,
* and also the main service loop (seek-configure-spinup-command)
*/
/*
* disk change.
* This routine is responsible for maintaining the FD_DISK_CHANGE flag,
* and the last_checked date.
*
* last_checked is the date of the last check which showed 'no disk change'
* FD_DISK_CHANGE is set under two conditions:
* 1. The floppy has been changed after some i/o to that floppy already
* took place.
* 2. No floppy disk is in the drive. This is done in order to ensure that
* requests are quickly flushed in case there is no disk in the drive. It
* follows that FD_DISK_CHANGE can only be cleared if there is a disk in
* the drive.
*
* For 1., maxblock is observed. Maxblock is 0 if no i/o has taken place yet.
* For 2., FD_DISK_NEWCHANGE is watched. FD_DISK_NEWCHANGE is cleared on
* each seek. If a disk is present, the disk change line should also be
* cleared on each seek. Thus, if FD_DISK_NEWCHANGE is clear, but the disk
* change line is set, this means either that no disk is in the drive, or
* that it has been removed since the last seek.
*
* This means that we really have a third possibility too:
* The floppy has been changed after the last seek.
*/
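/*
* A rough summary of the cases described above (DCL == raw disk change
* line):
*
* FD_DISK_NEWCHANGE DCL meaning
* set set media changed after the last seek
* clear set no disk, or disk removed since the last seek
* any clear disk present, unchanged since the last check
*/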
static int disk_change(int drive)
{
int fdc = FDC(drive);
if (time_before(jiffies, drive_state[drive].select_date + drive_params[drive].select_delay))
DPRINT("WARNING disk change called early\n");
if (!(fdc_state[fdc].dor & (0x10 << UNIT(drive))) ||
(fdc_state[fdc].dor & 3) != UNIT(drive) || fdc != FDC(drive)) {
DPRINT("probing disk change on unselected drive\n");
DPRINT("drive=%d fdc=%d dor=%x\n", drive, FDC(drive),
(unsigned int)fdc_state[fdc].dor);
}
debug_dcl(drive_params[drive].flags,
"checking disk change line for drive %d\n", drive);
debug_dcl(drive_params[drive].flags, "jiffies=%lu\n", jiffies);
debug_dcl(drive_params[drive].flags, "disk change line=%x\n",
fdc_inb(fdc, FD_DIR) & 0x80);
debug_dcl(drive_params[drive].flags, "flags=%lx\n",
drive_state[drive].flags);
if (drive_params[drive].flags & FD_BROKEN_DCL)
return test_bit(FD_DISK_CHANGED_BIT,
&drive_state[drive].flags);
if ((fdc_inb(fdc, FD_DIR) ^ drive_params[drive].flags) & 0x80) {
set_bit(FD_VERIFY_BIT, &drive_state[drive].flags);
/* verify write protection */
if (drive_state[drive].maxblock) /* mark it changed */
set_bit(FD_DISK_CHANGED_BIT,
&drive_state[drive].flags);
/* invalidate its geometry */
if (drive_state[drive].keep_data >= 0) {
if ((drive_params[drive].flags & FTD_MSG) &&
current_type[drive] != NULL)
DPRINT("Disk type is undefined after disk change\n");
current_type[drive] = NULL;
floppy_sizes[TOMINOR(drive)] = MAX_DISK_SIZE << 1;
}
return 1;
} else {
drive_state[drive].last_checked = jiffies;
clear_bit(FD_DISK_NEWCHANGE_BIT, &drive_state[drive].flags);
}
return 0;
}
static inline int is_selected(int dor, int unit)
{
return ((dor & (0x10 << unit)) && (dor & 3) == unit);
}
static bool is_ready_state(int status)
{
int state = status & (STATUS_READY | STATUS_DIR | STATUS_DMA);
return state == STATUS_READY;
}
static int set_dor(int fdc, char mask, char data)
{
unsigned char unit;
unsigned char drive;
unsigned char newdor;
unsigned char olddor;
if (fdc_state[fdc].address == -1)
return -1;
olddor = fdc_state[fdc].dor;
newdor = (olddor & mask) | data;
if (newdor != olddor) {
unit = olddor & 0x3;
if (is_selected(olddor, unit) && !is_selected(newdor, unit)) {
drive = REVDRIVE(fdc, unit);
debug_dcl(drive_params[drive].flags,
"calling disk change from set_dor\n");
disk_change(drive);
}
fdc_state[fdc].dor = newdor;
fdc_outb(newdor, fdc, FD_DOR);
unit = newdor & 0x3;
if (!is_selected(olddor, unit) && is_selected(newdor, unit)) {
drive = REVDRIVE(fdc, unit);
drive_state[drive].select_date = jiffies;
}
}
return olddor;
}
static void twaddle(int fdc, int drive)
{
if (drive_params[drive].select_delay)
return;
fdc_outb(fdc_state[fdc].dor & ~(0x10 << UNIT(drive)),
fdc, FD_DOR);
fdc_outb(fdc_state[fdc].dor, fdc, FD_DOR);
drive_state[drive].select_date = jiffies;
}
/*
* Reset all driver information about the specified fdc.
* This is needed after a reset, and after a raw command.
*/
static void reset_fdc_info(int fdc, int mode)
{
int drive;
fdc_state[fdc].spec1 = fdc_state[fdc].spec2 = -1;
fdc_state[fdc].need_configure = 1;
fdc_state[fdc].perp_mode = 1;
fdc_state[fdc].rawcmd = 0;
for (drive = 0; drive < N_DRIVE; drive++)
if (FDC(drive) == fdc &&
(mode || drive_state[drive].track != NEED_1_RECAL))
drive_state[drive].track = NEED_2_RECAL;
}
/*
* selects the fdc and drive, and enables the fdc's input/dma.
* Both current_drive and current_fdc are changed to match the new drive.
*/
static void set_fdc(int drive)
{
unsigned int fdc;
if (drive < 0 || drive >= N_DRIVE) {
pr_info("bad drive value %d\n", drive);
return;
}
fdc = FDC(drive);
if (fdc >= N_FDC) {
pr_info("bad fdc value\n");
return;
}
set_dor(fdc, ~0, 8);
#if N_FDC > 1
set_dor(1 - fdc, ~8, 0);
#endif
if (fdc_state[fdc].rawcmd == 2)
reset_fdc_info(fdc, 1);
if (fdc_inb(fdc, FD_STATUS) != STATUS_READY)
fdc_state[fdc].reset = 1;
current_drive = drive;
current_fdc = fdc;
}
/*
* locks the driver.
* Both current_drive and current_fdc are changed to match the new drive.
*/
static int lock_fdc(int drive)
{
if (WARN(atomic_read(&usage_count) == 0,
"Trying to lock fdc while usage count=0\n"))
return -1;
if (wait_event_interruptible(fdc_wait, !test_and_set_bit(0, &fdc_busy)))
return -EINTR;
command_status = FD_COMMAND_NONE;
reschedule_timeout(drive, "lock fdc");
set_fdc(drive);
return 0;
}
/* unlocks the driver */
static void unlock_fdc(void)
{
if (!test_bit(0, &fdc_busy))
DPRINT("FDC access conflict!\n");
raw_cmd = NULL;
command_status = FD_COMMAND_NONE;
cancel_delayed_work(&fd_timeout);
do_floppy = NULL;
cont = NULL;
clear_bit(0, &fdc_busy);
wake_up(&fdc_wait);
}
/* switches the motor off after a given timeout */
static void motor_off_callback(struct timer_list *t)
{
unsigned long nr = t - motor_off_timer;
unsigned char mask = ~(0x10 << UNIT(nr));
if (WARN_ON_ONCE(nr >= N_DRIVE))
return;
set_dor(FDC(nr), mask, 0);
}
/* schedules motor off */
static void floppy_off(unsigned int drive)
{
unsigned long volatile delta;
int fdc = FDC(drive);
if (!(fdc_state[fdc].dor & (0x10 << UNIT(drive))))
return;
del_timer(motor_off_timer + drive);
/* make spindle stop in a position which minimizes spinup time
* next time */
if (drive_params[drive].rps) {
delta = jiffies - drive_state[drive].first_read_date + HZ -
drive_params[drive].spindown_offset;
delta = ((delta * drive_params[drive].rps) % HZ) / drive_params[drive].rps;
motor_off_timer[drive].expires =
jiffies + drive_params[drive].spindown - delta;
}
add_timer(motor_off_timer + drive);
}
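/*
* Worked example of the phase math above (hypothetical numbers): with
* rps == 5 and HZ == 100 a revolution takes HZ / rps == 20 jiffies, and
* ((delta * rps) % HZ) / rps reduces "delta" modulo that period, so the
* motor is switched off at (nearly) the same spindle angle each time,
* minimizing the next spinup wait.
*/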
/*
* cycle through all N_DRIVE floppy drives, for disk change testing.
* stopping at current drive. This is done before any long operation, to
* be sure to have up to date disk change information.
*/
static void scandrives(void)
{
int i;
int drive;
int saved_drive;
if (drive_params[current_drive].select_delay)
return;
saved_drive = current_drive;
for (i = 0; i < N_DRIVE; i++) {
drive = (saved_drive + i + 1) % N_DRIVE;
if (drive_state[drive].fd_ref == 0 || drive_params[drive].select_delay != 0)
continue; /* skip closed drives */
set_fdc(drive);
if (!(set_dor(current_fdc, ~3, UNIT(drive) | (0x10 << UNIT(drive))) &
(0x10 << UNIT(drive))))
/* switch the motor off again, if it was off to
* begin with */
set_dor(current_fdc, ~(0x10 << UNIT(drive)), 0);
}
set_fdc(saved_drive);
}
static void empty(void)
{
}
static void (*floppy_work_fn)(void);
static void floppy_work_workfn(struct work_struct *work)
{
floppy_work_fn();
}
static DECLARE_WORK(floppy_work, floppy_work_workfn);
static void schedule_bh(void (*handler)(void))
{
WARN_ON(work_pending(&floppy_work));
floppy_work_fn = handler;
queue_work(floppy_wq, &floppy_work);
}
static void (*fd_timer_fn)(void) = NULL;
static void fd_timer_workfn(struct work_struct *work)
{
fd_timer_fn();
}
static DECLARE_DELAYED_WORK(fd_timer, fd_timer_workfn);
static void cancel_activity(void)
{
do_floppy = NULL;
cancel_delayed_work(&fd_timer);
cancel_work_sync(&floppy_work);
}
/* this function makes sure that the disk stays in the drive during the
* transfer */
static void fd_watchdog(void)
{
debug_dcl(drive_params[current_drive].flags,
"calling disk change from watchdog\n");
if (disk_change(current_drive)) {
DPRINT("disk removed during i/o\n");
cancel_activity();
cont->done(0);
reset_fdc();
} else {
cancel_delayed_work(&fd_timer);
fd_timer_fn = fd_watchdog;
queue_delayed_work(floppy_wq, &fd_timer, HZ / 10);
}
}
static void main_command_interrupt(void)
{
cancel_delayed_work(&fd_timer);
cont->interrupt();
}
/* waits for a delay (spinup or select) to pass */
static int fd_wait_for_completion(unsigned long expires,
void (*function)(void))
{
if (fdc_state[current_fdc].reset) {
reset_fdc(); /* do the reset during the sleep to save time;
* if we don't need to sleep, it's a good
* occasion anyway */
return 1;
}
if (time_before(jiffies, expires)) {
cancel_delayed_work(&fd_timer);
fd_timer_fn = function;
queue_delayed_work(floppy_wq, &fd_timer, expires - jiffies);
return 1;
}
return 0;
}
static void setup_DMA(void)
{
unsigned long f;
if (raw_cmd->length == 0) {
print_hex_dump(KERN_INFO, "zero dma transfer size: ",
DUMP_PREFIX_NONE, 16, 1,
raw_cmd->fullcmd, raw_cmd->cmd_count, false);
cont->done(0);
fdc_state[current_fdc].reset = 1;
return;
}
if (((unsigned long)raw_cmd->kernel_data) % 512) {
pr_info("non aligned address: %p\n", raw_cmd->kernel_data);
cont->done(0);
fdc_state[current_fdc].reset = 1;
return;
}
f = claim_dma_lock();
fd_disable_dma();
#ifdef fd_dma_setup
if (fd_dma_setup(raw_cmd->kernel_data, raw_cmd->length,
(raw_cmd->flags & FD_RAW_READ) ?
DMA_MODE_READ : DMA_MODE_WRITE,
fdc_state[current_fdc].address) < 0) {
release_dma_lock(f);
cont->done(0);
fdc_state[current_fdc].reset = 1;
return;
}
release_dma_lock(f);
#else
fd_clear_dma_ff();
fd_cacheflush(raw_cmd->kernel_data, raw_cmd->length);
fd_set_dma_mode((raw_cmd->flags & FD_RAW_READ) ?
DMA_MODE_READ : DMA_MODE_WRITE);
fd_set_dma_addr(raw_cmd->kernel_data);
fd_set_dma_count(raw_cmd->length);
virtual_dma_port = fdc_state[current_fdc].address;
fd_enable_dma();
release_dma_lock(f);
#endif
}
static void show_floppy(int fdc);
/* waits until the fdc becomes ready */
static int wait_til_ready(int fdc)
{
int status;
int counter;
if (fdc_state[fdc].reset)
return -1;
for (counter = 0; counter < 10000; counter++) {
status = fdc_inb(fdc, FD_STATUS);
if (status & STATUS_READY)
return status;
}
if (initialized) {
DPRINT("Getstatus times out (%x) on fdc %d\n", status, fdc);
show_floppy(fdc);
}
fdc_state[fdc].reset = 1;
return -1;
}
/* sends a command byte to the fdc */
static int output_byte(int fdc, char byte)
{
int status = wait_til_ready(fdc);
if (status < 0)
return -1;
if (is_ready_state(status)) {
fdc_outb(byte, fdc, FD_DATA);
output_log[output_log_pos].data = byte;
output_log[output_log_pos].status = status;
output_log[output_log_pos].jiffies = jiffies;
output_log_pos = (output_log_pos + 1) % OLOGSIZE;
return 0;
}
fdc_state[fdc].reset = 1;
if (initialized) {
DPRINT("Unable to send byte %x to FDC. Fdc=%x Status=%x\n",
byte, fdc, status);
show_floppy(fdc);
}
return -1;
}
/* gets the response from the fdc */
static int result(int fdc)
{
int i;
int status = 0;
for (i = 0; i < FD_RAW_REPLY_SIZE; i++) {
status = wait_til_ready(fdc);
if (status < 0)
break;
status &= STATUS_DIR | STATUS_READY | STATUS_BUSY | STATUS_DMA;
if ((status & ~STATUS_BUSY) == STATUS_READY) {
resultjiffies = jiffies;
resultsize = i;
return i;
}
if (status == (STATUS_DIR | STATUS_READY | STATUS_BUSY))
reply_buffer[i] = fdc_inb(fdc, FD_DATA);
else
break;
}
if (initialized) {
DPRINT("get result error. Fdc=%d Last status=%x Read bytes=%d\n",
fdc, status, i);
show_floppy(fdc);
}
fdc_state[fdc].reset = 1;
return -1;
}
#define MORE_OUTPUT -2
/* does the fdc need more output? */
static int need_more_output(int fdc)
{
int status = wait_til_ready(fdc);
if (status < 0)
return -1;
if (is_ready_state(status))
return MORE_OUTPUT;
return result(fdc);
}
/* Set perpendicular mode as required, based on data rate, if supported.
* The 82077 is now tested. A 1 Mbps data rate is only possible with the 82077-1.
*/
static void perpendicular_mode(int fdc)
{
unsigned char perp_mode;
if (raw_cmd->rate & 0x40) {
switch (raw_cmd->rate & 3) {
case 0:
perp_mode = 2;
break;
case 3:
perp_mode = 3;
break;
default:
DPRINT("Invalid data rate for perpendicular mode!\n");
cont->done(0);
fdc_state[fdc].reset = 1;
/*
* convenient way to return to
* redo without too much hassle
* (deep stack et al.)
*/
return;
}
} else
perp_mode = 0;
if (fdc_state[fdc].perp_mode == perp_mode)
return;
if (fdc_state[fdc].version >= FDC_82077_ORIG) {
output_byte(fdc, FD_PERPENDICULAR);
output_byte(fdc, perp_mode);
fdc_state[fdc].perp_mode = perp_mode;
} else if (perp_mode) {
DPRINT("perpendicular mode not supported by this FDC.\n");
}
} /* perpendicular_mode */
static int fifo_depth = 0xa;
static int no_fifo;
static int fdc_configure(int fdc)
{
/* Turn on FIFO */
output_byte(fdc, FD_CONFIGURE);
if (need_more_output(fdc) != MORE_OUTPUT)
return 0;
output_byte(fdc, 0);
output_byte(fdc, 0x10 | (no_fifo & 0x20) | (fifo_depth & 0xf));
output_byte(fdc, 0); /* pre-compensation from track 0 upwards */
return 1;
}
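/*
* For reference (assuming the 82077 CONFIGURE bit layout): in the second
* parameter byte above, 0x10 disables internal drive polling, 0x20 would
* disable the FIFO (the "no_fifo" option), and the low nibble is the FIFO
* threshold taken from fifo_depth (default 0xa).
*/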
#define NOMINAL_DTR 500
/* Issue a "SPECIFY" command to set the step rate time, head unload time,
* head load time, and DMA disable flag to values needed by floppy.
*
* The value "dtr" is the data transfer rate in Kbps. It is needed
* to account for the data rate-based scaling done by the 82072 and 82077
* FDC types. This parameter is ignored for other types of FDCs (i.e.
* 8272a).
*
* Note that changing the data transfer rate has a (probably deleterious)
* effect on the parameters subject to scaling for 82072/82077 FDCs, so
* fdc_specify is called again after each data transfer rate
* change.
*
* srt: 1000 to 16000 in microseconds
* hut: 16 to 240 milliseconds
* hlt: 2 to 254 milliseconds
*
* These values are rounded up to the next highest available delay time.
*/
static void fdc_specify(int fdc, int drive)
{
unsigned char spec1;
unsigned char spec2;
unsigned long srt;
unsigned long hlt;
unsigned long hut;
unsigned long dtr = NOMINAL_DTR;
unsigned long scale_dtr = NOMINAL_DTR;
int hlt_max_code = 0x7f;
int hut_max_code = 0xf;
if (fdc_state[fdc].need_configure &&
fdc_state[fdc].version >= FDC_82072A) {
fdc_configure(fdc);
fdc_state[fdc].need_configure = 0;
}
switch (raw_cmd->rate & 0x03) {
case 3:
dtr = 1000;
break;
case 1:
dtr = 300;
if (fdc_state[fdc].version >= FDC_82078) {
/* choose the default rate table, not the one
* where 1 = 2 Mbps */
output_byte(fdc, FD_DRIVESPEC);
if (need_more_output(fdc) == MORE_OUTPUT) {
output_byte(fdc, UNIT(drive));
output_byte(fdc, 0xc0);
}
}
break;
case 2:
dtr = 250;
break;
}
if (fdc_state[fdc].version >= FDC_82072) {
scale_dtr = dtr;
hlt_max_code = 0x00; /* 0==256msec*dtr0/dtr (not linear!) */
hut_max_code = 0x0; /* 0==256msec*dtr0/dtr (not linear!) */
}
/* Convert step rate from microseconds to milliseconds and 4 bits */
srt = 16 - DIV_ROUND_UP(drive_params[drive].srt * scale_dtr / 1000,
NOMINAL_DTR);
if (slow_floppy)
srt = srt / 4;
SUPBOUND(srt, 0xf);
INFBOUND(srt, 0);
hlt = DIV_ROUND_UP(drive_params[drive].hlt * scale_dtr / 2,
NOMINAL_DTR);
if (hlt < 0x01)
hlt = 0x01;
else if (hlt > 0x7f)
hlt = hlt_max_code;
hut = DIV_ROUND_UP(drive_params[drive].hut * scale_dtr / 16,
NOMINAL_DTR);
if (hut < 0x1)
hut = 0x1;
else if (hut > 0xf)
hut = hut_max_code;
spec1 = (srt << 4) | hut;
spec2 = (hlt << 1) | (use_virtual_dma & 1);
/* If these parameters did not change, just return with success */
if (fdc_state[fdc].spec1 != spec1 ||
fdc_state[fdc].spec2 != spec2) {
/* Go ahead and set spec1 and spec2 */
output_byte(fdc, FD_SPECIFY);
output_byte(fdc, fdc_state[fdc].spec1 = spec1);
output_byte(fdc, fdc_state[fdc].spec2 = spec2);
}
} /* fdc_specify */
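/*
* Worked example (hypothetical drive parameters): with srt == 8000 us and
* hut == 16 ms at the 500 kbps reference rate (scale_dtr == NOMINAL_DTR),
* srt becomes 16 - DIV_ROUND_UP(8000 * 500 / 1000, 500) == 8 and hut
* becomes DIV_ROUND_UP(16 * 500 / 16, 500) == 1, so spec1 == 0x81.
*/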
/* Set the FDC's data transfer rate on behalf of the specified drive.
* NOTE: with 82072/82077 FDCs, changing the data rate requires a reissue
* of the specify command (i.e. using the fdc_specify function).
*/
static int fdc_dtr(void)
{
/* If data rate not already set to desired value, set it. */
if ((raw_cmd->rate & 3) == fdc_state[current_fdc].dtr)
return 0;
/* Set dtr */
fdc_outb(raw_cmd->rate & 3, current_fdc, FD_DCR);
/* TODO: some FDC/drive combinations (C&T 82C711 with TEAC 1.2MB)
* need a stabilization period of several milliseconds to be
* enforced after data rate changes before R/W operations.
* Pause 5 msec to avoid trouble. (Needs to be 2 jiffies)
*/
fdc_state[current_fdc].dtr = raw_cmd->rate & 3;
return fd_wait_for_completion(jiffies + 2UL * HZ / 100, floppy_ready);
} /* fdc_dtr */
static void tell_sector(void)
{
pr_cont(": track %d, head %d, sector %d, size %d",
reply_buffer[R_TRACK], reply_buffer[R_HEAD],
reply_buffer[R_SECTOR],
reply_buffer[R_SIZECODE]);
} /* tell_sector */
static void print_errors(void)
{
DPRINT("");
if (reply_buffer[ST0] & ST0_ECE) {
pr_cont("Recalibrate failed!");
} else if (reply_buffer[ST2] & ST2_CRC) {
pr_cont("data CRC error");
tell_sector();
} else if (reply_buffer[ST1] & ST1_CRC) {
pr_cont("CRC error");
tell_sector();
} else if ((reply_buffer[ST1] & (ST1_MAM | ST1_ND)) ||
(reply_buffer[ST2] & ST2_MAM)) {
if (!probing) {
pr_cont("sector not found");
tell_sector();
} else
pr_cont("probe failed...");
} else if (reply_buffer[ST2] & ST2_WC) { /* seek error */
pr_cont("wrong cylinder");
} else if (reply_buffer[ST2] & ST2_BC) { /* cylinder marked as bad */
pr_cont("bad cylinder");
} else {
pr_cont("unknown error. ST[0..2] are: 0x%x 0x%x 0x%x",
reply_buffer[ST0], reply_buffer[ST1],
reply_buffer[ST2]);
tell_sector();
}
pr_cont("\n");
}
/*
* OK, this error interpreting routine is called after a
* DMA read/write has succeeded
* or failed, so we check the results, and copy any buffers.
* hhb: Added better error reporting.
* ak: Made this into a separate routine.
*/
static int interpret_errors(void)
{
char bad;
if (inr != 7) {
DPRINT("-- FDC reply error\n");
fdc_state[current_fdc].reset = 1;
return 1;
}
/* check IC to find cause of interrupt */
switch (reply_buffer[ST0] & ST0_INTR) {
case 0x40: /* error occurred during command execution */
if (reply_buffer[ST1] & ST1_EOC)
return 0; /* occurs with pseudo-DMA */
bad = 1;
if (reply_buffer[ST1] & ST1_WP) {
DPRINT("Drive is write protected\n");
clear_bit(FD_DISK_WRITABLE_BIT,
&drive_state[current_drive].flags);
cont->done(0);
bad = 2;
} else if (reply_buffer[ST1] & ST1_ND) {
set_bit(FD_NEED_TWADDLE_BIT,
&drive_state[current_drive].flags);
} else if (reply_buffer[ST1] & ST1_OR) {
if (drive_params[current_drive].flags & FTD_MSG)
DPRINT("Over/Underrun - retrying\n");
bad = 0;
} else if (floppy_errors >= drive_params[current_drive].max_errors.reporting) {
print_errors();
}
if (reply_buffer[ST2] & ST2_WC || reply_buffer[ST2] & ST2_BC)
/* wrong cylinder => recal */
drive_state[current_drive].track = NEED_2_RECAL;
return bad;
case 0x80: /* invalid command given */
DPRINT("Invalid FDC command given!\n");
cont->done(0);
return 2;
case 0xc0:
DPRINT("Abnormal termination caused by polling\n");
cont->error();
return 2;
default: /* (0) Normal command termination */
return 0;
}
}
/*
* This routine is called when everything should be correctly set up
* for the transfer (i.e. floppy motor is on, the correct floppy is
* selected, and the head is sitting on the right track).
*/
static void setup_rw_floppy(void)
{
int i;
int r;
int flags;
unsigned long ready_date;
void (*function)(void);
flags = raw_cmd->flags;
if (flags & (FD_RAW_READ | FD_RAW_WRITE))
flags |= FD_RAW_INTR;
if ((flags & FD_RAW_SPIN) && !(flags & FD_RAW_NO_MOTOR)) {
ready_date = drive_state[current_drive].spinup_date + drive_params[current_drive].spinup;
/* If spinup will take a long time, rerun scandrives
* again just before spinup completion. Beware that
* after scandrives, we must again wait for selection.
*/
if (time_after(ready_date, jiffies + drive_params[current_drive].select_delay)) {
ready_date -= drive_params[current_drive].select_delay;
function = floppy_start;
} else
function = setup_rw_floppy;
/* wait until the floppy is spinning fast enough */
if (fd_wait_for_completion(ready_date, function))
return;
}
if ((flags & FD_RAW_READ) || (flags & FD_RAW_WRITE))
setup_DMA();
if (flags & FD_RAW_INTR)
do_floppy = main_command_interrupt;
r = 0;
for (i = 0; i < raw_cmd->cmd_count; i++)
r |= output_byte(current_fdc, raw_cmd->fullcmd[i]);
debugt(__func__, "rw_command");
if (r) {
cont->error();
reset_fdc();
return;
}
if (!(flags & FD_RAW_INTR)) {
inr = result(current_fdc);
cont->interrupt();
} else if (flags & FD_RAW_NEED_DISK)
fd_watchdog();
}
static int blind_seek;
/*
* This is the routine called after every seek (or recalibrate) interrupt
* from the floppy controller.
*/
static void seek_interrupt(void)
{
debugt(__func__, "");
if (inr != 2 || (reply_buffer[ST0] & 0xF8) != 0x20) {
DPRINT("seek failed\n");
drive_state[current_drive].track = NEED_2_RECAL;
cont->error();
cont->redo();
return;
}
if (drive_state[current_drive].track >= 0 &&
drive_state[current_drive].track != reply_buffer[ST1] &&
!blind_seek) {
debug_dcl(drive_params[current_drive].flags,
"clearing NEWCHANGE flag because of effective seek\n");
debug_dcl(drive_params[current_drive].flags, "jiffies=%lu\n",
jiffies);
clear_bit(FD_DISK_NEWCHANGE_BIT,
&drive_state[current_drive].flags);
/* effective seek */
drive_state[current_drive].select_date = jiffies;
}
drive_state[current_drive].track = reply_buffer[ST1];
floppy_ready();
}
static void check_wp(int fdc, int drive)
{
if (test_bit(FD_VERIFY_BIT, &drive_state[drive].flags)) {
/* check write protection */
output_byte(fdc, FD_GETSTATUS);
output_byte(fdc, UNIT(drive));
if (result(fdc) != 1) {
fdc_state[fdc].reset = 1;
return;
}
clear_bit(FD_VERIFY_BIT, &drive_state[drive].flags);
clear_bit(FD_NEED_TWADDLE_BIT,
&drive_state[drive].flags);
debug_dcl(drive_params[drive].flags,
"checking whether disk is write protected\n");
debug_dcl(drive_params[drive].flags, "wp=%x\n",
reply_buffer[ST3] & 0x40);
if (!(reply_buffer[ST3] & 0x40))
set_bit(FD_DISK_WRITABLE_BIT,
&drive_state[drive].flags);
else
clear_bit(FD_DISK_WRITABLE_BIT,
&drive_state[drive].flags);
}
}
static void seek_floppy(void)
{
int track;
blind_seek = 0;
debug_dcl(drive_params[current_drive].flags,
"calling disk change from %s\n", __func__);
if (!test_bit(FD_DISK_NEWCHANGE_BIT, &drive_state[current_drive].flags) &&
disk_change(current_drive) && (raw_cmd->flags & FD_RAW_NEED_DISK)) {
/* the media changed flag should be cleared after the seek.
* If it isn't, this means that there is really no disk in
* the drive.
*/
set_bit(FD_DISK_CHANGED_BIT,
&drive_state[current_drive].flags);
cont->done(0);
cont->redo();
return;
}
if (drive_state[current_drive].track <= NEED_1_RECAL) {
recalibrate_floppy();
return;
} else if (test_bit(FD_DISK_NEWCHANGE_BIT, &drive_state[current_drive].flags) &&
(raw_cmd->flags & FD_RAW_NEED_DISK) &&
(drive_state[current_drive].track <= NO_TRACK || drive_state[current_drive].track == raw_cmd->track)) {
/* we seek to clear the media-changed condition. Does anybody
* know a more elegant way, which works on all drives? */
if (raw_cmd->track)
track = raw_cmd->track - 1;
else {
if (drive_params[current_drive].flags & FD_SILENT_DCL_CLEAR) {
set_dor(current_fdc, ~(0x10 << UNIT(current_drive)), 0);
blind_seek = 1;
raw_cmd->flags |= FD_RAW_NEED_SEEK;
}
track = 1;
}
} else {
check_wp(current_fdc, current_drive);
if (raw_cmd->track != drive_state[current_drive].track &&
(raw_cmd->flags & FD_RAW_NEED_SEEK))
track = raw_cmd->track;
else {
setup_rw_floppy();
return;
}
}
do_floppy = seek_interrupt;
output_byte(current_fdc, FD_SEEK);
output_byte(current_fdc, UNIT(current_drive));
if (output_byte(current_fdc, track) < 0) {
reset_fdc();
return;
}
debugt(__func__, "");
}
static void recal_interrupt(void)
{
debugt(__func__, "");
if (inr != 2)
fdc_state[current_fdc].reset = 1;
else if (reply_buffer[ST0] & ST0_ECE) {
switch (drive_state[current_drive].track) {
case NEED_1_RECAL:
debugt(__func__, "need 1 recal");
			/* after a second recalibrate, we still haven't
			 * reached track 0. Probably there is no drive;
			 * raise an error and let the caller give up. */
cont->error();
cont->redo();
return;
case NEED_2_RECAL:
debugt(__func__, "need 2 recal");
/* If we already did a recalibrate,
* and we are not at track 0, this
* means we have moved. (The only way
* not to move at recalibration is to
* be already at track 0.) Clear the
* new change flag */
debug_dcl(drive_params[current_drive].flags,
"clearing NEWCHANGE flag because of second recalibrate\n");
clear_bit(FD_DISK_NEWCHANGE_BIT,
&drive_state[current_drive].flags);
drive_state[current_drive].select_date = jiffies;
fallthrough;
default:
debugt(__func__, "default");
/* Recalibrate moves the head by at
* most 80 steps. If after one
			 * recalibrate we haven't reached
* track 0, this might mean that we
* started beyond track 80. Try
* again. */
drive_state[current_drive].track = NEED_1_RECAL;
break;
}
} else
drive_state[current_drive].track = reply_buffer[ST1];
floppy_ready();
}
static void print_result(char *message, int inr)
{
int i;
DPRINT("%s ", message);
if (inr >= 0)
for (i = 0; i < inr; i++)
pr_cont("repl[%d]=%x ", i, reply_buffer[i]);
pr_cont("\n");
}
/* interrupt handler. Note that this can be called externally on the Sparc */
irqreturn_t floppy_interrupt(int irq, void *dev_id)
{
int do_print;
unsigned long f;
void (*handler)(void) = do_floppy;
lasthandler = handler;
interruptjiffies = jiffies;
f = claim_dma_lock();
fd_disable_dma();
release_dma_lock(f);
do_floppy = NULL;
if (current_fdc >= N_FDC || fdc_state[current_fdc].address == -1) {
/* we don't even know which FDC is the culprit */
pr_info("DOR0=%x\n", fdc_state[0].dor);
pr_info("floppy interrupt on bizarre fdc %d\n", current_fdc);
pr_info("handler=%ps\n", handler);
is_alive(__func__, "bizarre fdc");
return IRQ_NONE;
}
fdc_state[current_fdc].reset = 0;
/* We have to clear the reset flag here, because apparently on boxes
* with level triggered interrupts (PS/2, Sparc, ...), it is needed to
* emit SENSEI's to clear the interrupt line. And fdc_state[fdc].reset
* blocks the emission of the SENSEI's.
* It is OK to emit floppy commands because we are in an interrupt
* handler here, and thus we have to fear no interference of other
* activity.
*/
do_print = !handler && print_unex && initialized;
inr = result(current_fdc);
if (do_print)
print_result("unexpected interrupt", inr);
if (inr == 0) {
int max_sensei = 4;
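		/*
		 * After a controller reset, drive polling latches a
		 * pending interrupt status for each of the four units,
		 * so up to four SENSE INTERRUPT STATUS commands may be
		 * needed to drain them.  ST0 & 0x83 keeps the unit-select
		 * bits plus the top interrupt-code bit, so the loop stops
		 * once the status for the selected drive comes back.
		 */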
do {
output_byte(current_fdc, FD_SENSEI);
inr = result(current_fdc);
if (do_print)
print_result("sensei", inr);
max_sensei--;
} while ((reply_buffer[ST0] & 0x83) != UNIT(current_drive) &&
inr == 2 && max_sensei);
}
if (!handler) {
fdc_state[current_fdc].reset = 1;
return IRQ_NONE;
}
schedule_bh(handler);
is_alive(__func__, "normal interrupt end");
/* FIXME! Was it really for us? */
return IRQ_HANDLED;
}
static void recalibrate_floppy(void)
{
debugt(__func__, "");
do_floppy = recal_interrupt;
output_byte(current_fdc, FD_RECALIBRATE);
if (output_byte(current_fdc, UNIT(current_drive)) < 0)
reset_fdc();
}
/*
* Must do 4 FD_SENSEIs after reset because of ``drive polling''.
*/
static void reset_interrupt(void)
{
debugt(__func__, "");
result(current_fdc); /* get the status ready for set_fdc */
if (fdc_state[current_fdc].reset) {
pr_info("reset set in interrupt, calling %ps\n", cont->error);
cont->error(); /* a reset just after a reset. BAD! */
}
cont->redo();
}
/*
* reset is done by pulling bit 2 of DOR low for a while (old FDCs),
* or by setting the self clearing bit 7 of STATUS (newer FDCs).
* This WILL trigger an interrupt, causing the handlers in the current
* cont's ->redo() to be called via reset_interrupt().
*/
static void reset_fdc(void)
{
unsigned long flags;
do_floppy = reset_interrupt;
fdc_state[current_fdc].reset = 0;
reset_fdc_info(current_fdc, 0);
/* Pseudo-DMA may intercept 'reset finished' interrupt. */
/* Irrelevant for systems with true DMA (i386). */
flags = claim_dma_lock();
fd_disable_dma();
release_dma_lock(flags);
if (fdc_state[current_fdc].version >= FDC_82072A)
fdc_outb(0x80 | (fdc_state[current_fdc].dtr & 3),
current_fdc, FD_STATUS);
else {
fdc_outb(fdc_state[current_fdc].dor & ~0x04, current_fdc, FD_DOR);
udelay(FD_RESET_DELAY);
fdc_outb(fdc_state[current_fdc].dor, current_fdc, FD_DOR);
}
}
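/*
 * For reference (added note): in the DOR, bits 0-1 select the drive,
 * bit 2 is the active-low controller reset, bit 3 gates DMA and IRQ,
 * and bits 4-7 are the per-drive motor enables -- hence the legacy
 * reset path above pulses "dor & ~0x04".
 */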
static void show_floppy(int fdc)
{
int i;
pr_info("\n");
pr_info("floppy driver state\n");
pr_info("-------------------\n");
pr_info("now=%lu last interrupt=%lu diff=%lu last called handler=%ps\n",
jiffies, interruptjiffies, jiffies - interruptjiffies,
lasthandler);
pr_info("timeout_message=%s\n", timeout_message);
pr_info("last output bytes:\n");
for (i = 0; i < OLOGSIZE; i++)
pr_info("%2x %2x %lu\n",
output_log[(i + output_log_pos) % OLOGSIZE].data,
output_log[(i + output_log_pos) % OLOGSIZE].status,
output_log[(i + output_log_pos) % OLOGSIZE].jiffies);
pr_info("last result at %lu\n", resultjiffies);
pr_info("last redo_fd_request at %lu\n", lastredo);
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1,
reply_buffer, resultsize, true);
pr_info("status=%x\n", fdc_inb(fdc, FD_STATUS));
pr_info("fdc_busy=%lu\n", fdc_busy);
if (do_floppy)
pr_info("do_floppy=%ps\n", do_floppy);
if (work_pending(&floppy_work))
pr_info("floppy_work.func=%ps\n", floppy_work.func);
if (delayed_work_pending(&fd_timer))
pr_info("delayed work.function=%p expires=%ld\n",
fd_timer.work.func,
fd_timer.timer.expires - jiffies);
if (delayed_work_pending(&fd_timeout))
pr_info("timer_function=%p expires=%ld\n",
fd_timeout.work.func,
fd_timeout.timer.expires - jiffies);
pr_info("cont=%p\n", cont);
pr_info("current_req=%p\n", current_req);
pr_info("command_status=%d\n", command_status);
pr_info("\n");
}
static void floppy_shutdown(struct work_struct *arg)
{
unsigned long flags;
if (initialized)
show_floppy(current_fdc);
cancel_activity();
flags = claim_dma_lock();
fd_disable_dma();
release_dma_lock(flags);
/* avoid dma going to a random drive after shutdown */
if (initialized)
DPRINT("floppy timeout called\n");
fdc_state[current_fdc].reset = 1;
if (cont) {
cont->done(0);
cont->redo(); /* this will recall reset when needed */
} else {
pr_info("no cont in shutdown!\n");
process_fd_request();
}
is_alive(__func__, "");
}
/* start motor, check media-changed condition and write protection */
static int start_motor(void (*function)(void))
{
int mask;
int data;
mask = 0xfc;
data = UNIT(current_drive);
if (!(raw_cmd->flags & FD_RAW_NO_MOTOR)) {
if (!(fdc_state[current_fdc].dor & (0x10 << UNIT(current_drive)))) {
set_debugt();
			/* no read yet since this motor start */
drive_state[current_drive].first_read_date = 0;
/* note motor start time if motor is not yet running */
drive_state[current_drive].spinup_date = jiffies;
data |= (0x10 << UNIT(current_drive));
}
} else if (fdc_state[current_fdc].dor & (0x10 << UNIT(current_drive)))
mask &= ~(0x10 << UNIT(current_drive));
/* starts motor and selects floppy */
del_timer(motor_off_timer + current_drive);
set_dor(current_fdc, mask, data);
/* wait_for_completion also schedules reset if needed. */
return fd_wait_for_completion(drive_state[current_drive].select_date + drive_params[current_drive].select_delay,
function);
}
static void floppy_ready(void)
{
if (fdc_state[current_fdc].reset) {
reset_fdc();
return;
}
if (start_motor(floppy_ready))
return;
if (fdc_dtr())
return;
debug_dcl(drive_params[current_drive].flags,
"calling disk change from floppy_ready\n");
if (!(raw_cmd->flags & FD_RAW_NO_MOTOR) &&
disk_change(current_drive) && !drive_params[current_drive].select_delay)
twaddle(current_fdc, current_drive); /* this clears the dcl on certain
* drive/controller combinations */
#ifdef fd_chose_dma_mode
if ((raw_cmd->flags & FD_RAW_READ) || (raw_cmd->flags & FD_RAW_WRITE)) {
unsigned long flags = claim_dma_lock();
fd_chose_dma_mode(raw_cmd->kernel_data, raw_cmd->length);
release_dma_lock(flags);
}
#endif
if (raw_cmd->flags & (FD_RAW_NEED_SEEK | FD_RAW_NEED_DISK)) {
perpendicular_mode(current_fdc);
fdc_specify(current_fdc, current_drive); /* must be done here because of hut, hlt ... */
seek_floppy();
} else {
if ((raw_cmd->flags & FD_RAW_READ) ||
(raw_cmd->flags & FD_RAW_WRITE))
fdc_specify(current_fdc, current_drive);
setup_rw_floppy();
}
}
static void floppy_start(void)
{
reschedule_timeout(current_drive, "floppy start");
scandrives();
debug_dcl(drive_params[current_drive].flags,
"setting NEWCHANGE in floppy_start\n");
set_bit(FD_DISK_NEWCHANGE_BIT, &drive_state[current_drive].flags);
floppy_ready();
}
/*
* ========================================================================
* here ends the bottom half. Exported routines are:
* floppy_start, floppy_off, floppy_ready, lock_fdc, unlock_fdc, set_fdc,
* start_motor, reset_fdc, reset_fdc_info, interpret_errors.
 * Initialization also uses output_byte, result, set_dor and
 * floppy_interrupt.
* ========================================================================
*/
/*
* General purpose continuations.
* ==============================
*/
static void do_wakeup(void)
{
reschedule_timeout(MAXTIMEOUT, "do wakeup");
cont = NULL;
command_status += 2;
wake_up(&command_done);
}
static const struct cont_t wakeup_cont = {
.interrupt = empty,
.redo = do_wakeup,
.error = empty,
.done = (done_f)empty
};
static const struct cont_t intr_cont = {
.interrupt = empty,
.redo = process_fd_request,
.error = empty,
.done = (done_f)empty
};
/* schedules handler, waiting for completion. May be interrupted, will then
* return -EINTR, in which case the driver will automatically be unlocked.
*/
static int wait_til_done(void (*handler)(void), bool interruptible)
{
int ret;
schedule_bh(handler);
if (interruptible)
wait_event_interruptible(command_done, command_status >= 2);
else
wait_event(command_done, command_status >= 2);
if (command_status < 2) {
cancel_activity();
cont = &intr_cont;
reset_fdc();
return -EINTR;
}
if (fdc_state[current_fdc].reset)
command_status = FD_COMMAND_ERROR;
if (command_status == FD_COMMAND_OKAY)
ret = 0;
else
ret = -EIO;
command_status = FD_COMMAND_NONE;
return ret;
}
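/*
 * Added note on the handshake above: the scheduled handler eventually
 * installs wakeup_cont, whose ->redo() (do_wakeup) adds 2 to
 * command_status and wakes command_done.  If an interruptible wait
 * returns early, command_status is still below 2, so the command is
 * cancelled, the FDC is reset and -EINTR is returned.
 */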
static void generic_done(int result)
{
command_status = result;
cont = &wakeup_cont;
}
static void generic_success(void)
{
cont->done(1);
}
static void generic_failure(void)
{
cont->done(0);
}
static void success_and_wakeup(void)
{
generic_success();
cont->redo();
}
/*
* formatting and rw support.
* ==========================
*/
static int next_valid_format(int drive)
{
int probed_format;
probed_format = drive_state[drive].probed_format;
while (1) {
if (probed_format >= FD_AUTODETECT_SIZE ||
!drive_params[drive].autodetect[probed_format]) {
drive_state[drive].probed_format = 0;
return 1;
}
if (floppy_type[drive_params[drive].autodetect[probed_format]].sect) {
drive_state[drive].probed_format = probed_format;
return 0;
}
probed_format++;
}
}
static void bad_flp_intr(void)
{
int err_count;
if (probing) {
drive_state[current_drive].probed_format++;
if (!next_valid_format(current_drive))
return;
}
err_count = ++floppy_errors;
INFBOUND(write_errors[current_drive].badness, err_count);
if (err_count > drive_params[current_drive].max_errors.abort)
cont->done(0);
if (err_count > drive_params[current_drive].max_errors.reset)
fdc_state[current_fdc].reset = 1;
else if (err_count > drive_params[current_drive].max_errors.recal)
drive_state[current_drive].track = NEED_2_RECAL;
}
static void set_floppy(int drive)
{
int type = ITYPE(drive_state[drive].fd_device);
if (type)
_floppy = floppy_type + type;
else
_floppy = current_type[drive];
}
/*
* formatting support.
* ===================
*/
static void format_interrupt(void)
{
switch (interpret_errors()) {
case 1:
cont->error();
break;
case 2:
break;
case 0:
cont->done(1);
}
cont->redo();
}
#define FM_MODE(x, y) ((y) & ~(((x)->rate & 0x80) >> 1))
#define CT(x) ((x) | 0xc0)
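/*
 * Added note: FM_MODE() clears the MFM bit (0x40) of a command byte when
 * bit 7 of the rate field requests FM (single-density) recording, since
 * (rate & 0x80) >> 1 is exactly that command bit.  CT() forces the MT
 * (0x80) and MFM (0x40) bits back on, undoing FM_MODE() and the
 * multi-track stripping in virtualdmabug_workaround(), so command bytes
 * compare equal to the canonical FD_READ/FD_WRITE values.
 */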
static void setup_format_params(int track)
{
int n;
int il;
int count;
int head_shift;
int track_shift;
struct fparm {
unsigned char track, head, sect, size;
} *here = (struct fparm *)floppy_track_buffer;
raw_cmd = &default_raw_cmd;
raw_cmd->track = track;
raw_cmd->flags = (FD_RAW_WRITE | FD_RAW_INTR | FD_RAW_SPIN |
FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK);
raw_cmd->rate = _floppy->rate & 0x43;
raw_cmd->cmd_count = NR_F;
raw_cmd->cmd[COMMAND] = FM_MODE(_floppy, FD_FORMAT);
raw_cmd->cmd[DR_SELECT] = UNIT(current_drive) + PH_HEAD(_floppy, format_req.head);
raw_cmd->cmd[F_SIZECODE] = FD_SIZECODE(_floppy);
raw_cmd->cmd[F_SECT_PER_TRACK] = _floppy->sect << 2 >> raw_cmd->cmd[F_SIZECODE];
raw_cmd->cmd[F_GAP] = _floppy->fmt_gap;
raw_cmd->cmd[F_FILL] = FD_FILL_BYTE;
raw_cmd->kernel_data = floppy_track_buffer;
raw_cmd->length = 4 * raw_cmd->cmd[F_SECT_PER_TRACK];
if (!raw_cmd->cmd[F_SECT_PER_TRACK])
return;
/* allow for about 30ms for data transport per track */
head_shift = (raw_cmd->cmd[F_SECT_PER_TRACK] + 5) / 6;
/* a ``cylinder'' is two tracks plus a little stepping time */
track_shift = 2 * head_shift + 3;
/* position of logical sector 1 on this track */
n = (track_shift * format_req.track + head_shift * format_req.head)
% raw_cmd->cmd[F_SECT_PER_TRACK];
/* determine interleave */
il = 1;
if (_floppy->fmt_gap < 0x22)
il++;
/* initialize field */
for (count = 0; count < raw_cmd->cmd[F_SECT_PER_TRACK]; ++count) {
here[count].track = format_req.track;
here[count].head = format_req.head;
here[count].sect = 0;
here[count].size = raw_cmd->cmd[F_SIZECODE];
}
/* place logical sectors */
for (count = 1; count <= raw_cmd->cmd[F_SECT_PER_TRACK]; ++count) {
here[n].sect = count;
n = (n + il) % raw_cmd->cmd[F_SECT_PER_TRACK];
if (here[n].sect) { /* sector busy, find next free sector */
++n;
if (n >= raw_cmd->cmd[F_SECT_PER_TRACK]) {
n -= raw_cmd->cmd[F_SECT_PER_TRACK];
while (here[n].sect)
++n;
}
}
}
if (_floppy->stretch & FD_SECTBASEMASK) {
for (count = 0; count < raw_cmd->cmd[F_SECT_PER_TRACK]; count++)
here[count].sect += FD_SECTBASE(_floppy) - 1;
}
}
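/*
 * Worked example (illustrative): formatting a 1.44M track with 512-byte
 * sectors gives F_SIZECODE = 2 and F_SECT_PER_TRACK = 18 << 2 >> 2 = 18.
 * Then head_shift = (18 + 5) / 6 = 3 and track_shift = 9, so track 2,
 * head 1 places logical sector 1 at slot n = (9 * 2 + 3 * 1) % 18 = 3,
 * and with fmt_gap >= 0x22 the interleave stays 1: the slots around the
 * track read 16, 17, 18, 1, 2, ..., 15, skewed so that sequential reads
 * crossing head and track boundaries don't lose a full revolution.
 */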
static void redo_format(void)
{
buffer_track = -1;
setup_format_params(format_req.track << STRETCH(_floppy));
floppy_start();
debugt(__func__, "queue format request");
}
static const struct cont_t format_cont = {
.interrupt = format_interrupt,
.redo = redo_format,
.error = bad_flp_intr,
.done = generic_done
};
static int do_format(int drive, struct format_descr *tmp_format_req)
{
int ret;
if (lock_fdc(drive))
return -EINTR;
set_floppy(drive);
if (!_floppy ||
_floppy->track > drive_params[current_drive].tracks ||
tmp_format_req->track >= _floppy->track ||
tmp_format_req->head >= _floppy->head ||
(_floppy->sect << 2) % (1 << FD_SIZECODE(_floppy)) ||
!_floppy->fmt_gap) {
process_fd_request();
return -EINVAL;
}
format_req = *tmp_format_req;
cont = &format_cont;
floppy_errors = 0;
ret = wait_til_done(redo_format, true);
if (ret == -EINTR)
return -EINTR;
process_fd_request();
return ret;
}
/*
* Buffer read/write and support
* =============================
*/
static void floppy_end_request(struct request *req, blk_status_t error)
{
unsigned int nr_sectors = current_count_sectors;
unsigned int drive = (unsigned long)req->q->disk->private_data;
/* current_count_sectors can be zero if transfer failed */
if (error)
nr_sectors = blk_rq_cur_sectors(req);
if (blk_update_request(req, error, nr_sectors << 9))
return;
__blk_mq_end_request(req, error);
/* We're done with the request */
floppy_off(drive);
current_req = NULL;
}
/* Completion handler for the current request. Can handle physical sectors
 * which are smaller than a logical buffer. */
static void request_done(int uptodate)
{
struct request *req = current_req;
int block;
char msg[sizeof("request done ") + sizeof(int) * 3];
probing = 0;
snprintf(msg, sizeof(msg), "request done %d", uptodate);
reschedule_timeout(MAXTIMEOUT, msg);
if (!req) {
pr_info("floppy.c: no request in request_done\n");
return;
}
if (uptodate) {
/* maintain values for invalidation on geometry
* change */
block = current_count_sectors + blk_rq_pos(req);
INFBOUND(drive_state[current_drive].maxblock, block);
if (block > _floppy->sect)
drive_state[current_drive].maxtrack = 1;
floppy_end_request(req, 0);
} else {
if (rq_data_dir(req) == WRITE) {
/* record write error information */
write_errors[current_drive].write_errors++;
if (write_errors[current_drive].write_errors == 1) {
write_errors[current_drive].first_error_sector = blk_rq_pos(req);
write_errors[current_drive].first_error_generation = drive_state[current_drive].generation;
}
write_errors[current_drive].last_error_sector = blk_rq_pos(req);
write_errors[current_drive].last_error_generation = drive_state[current_drive].generation;
}
floppy_end_request(req, BLK_STS_IOERR);
}
}
/* Interrupt handler evaluating the result of the r/w operation */
static void rw_interrupt(void)
{
int eoc;
int ssize;
int heads;
int nr_sectors;
if (reply_buffer[R_HEAD] >= 2) {
		/* some Toshiba floppy controllers occasionally seem to
* return bogus interrupts after read/write operations, which
* can be recognized by a bad head number (>= 2) */
return;
}
if (!drive_state[current_drive].first_read_date)
drive_state[current_drive].first_read_date = jiffies;
ssize = DIV_ROUND_UP(1 << raw_cmd->cmd[SIZECODE], 4);
if (reply_buffer[ST1] & ST1_EOC)
eoc = 1;
else
eoc = 0;
if (raw_cmd->cmd[COMMAND] & 0x80)
heads = 2;
else
heads = 1;
nr_sectors = (((reply_buffer[R_TRACK] - raw_cmd->cmd[TRACK]) * heads +
reply_buffer[R_HEAD] - raw_cmd->cmd[HEAD]) * raw_cmd->cmd[SECT_PER_TRACK] +
reply_buffer[R_SECTOR] - raw_cmd->cmd[SECTOR] + eoc) << raw_cmd->cmd[SIZECODE] >> 2;
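	/*
	 * Worked example for the formula above (illustrative): an MT read
	 * with 18 sectors per track and SIZECODE 2 that starts at track 5,
	 * head 0, sector 17 and whose reported end position is track 5,
	 * head 1, sector 3 transferred ((5-5)*2 + 1-0)*18 + 3 - 17 + 0 = 4
	 * sectors; the final << SIZECODE >> 2 rescales to 512-byte units
	 * (a factor of exactly 1 here).
	 */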
if (nr_sectors / ssize >
DIV_ROUND_UP(in_sector_offset + current_count_sectors, ssize)) {
DPRINT("long rw: %x instead of %lx\n",
nr_sectors, current_count_sectors);
pr_info("rs=%d s=%d\n", reply_buffer[R_SECTOR],
raw_cmd->cmd[SECTOR]);
pr_info("rh=%d h=%d\n", reply_buffer[R_HEAD],
raw_cmd->cmd[HEAD]);
pr_info("rt=%d t=%d\n", reply_buffer[R_TRACK],
raw_cmd->cmd[TRACK]);
pr_info("heads=%d eoc=%d\n", heads, eoc);
pr_info("spt=%d st=%d ss=%d\n",
raw_cmd->cmd[SECT_PER_TRACK], fsector_t, ssize);
pr_info("in_sector_offset=%d\n", in_sector_offset);
}
nr_sectors -= in_sector_offset;
INFBOUND(nr_sectors, 0);
SUPBOUND(current_count_sectors, nr_sectors);
switch (interpret_errors()) {
case 2:
cont->redo();
return;
case 1:
if (!current_count_sectors) {
cont->error();
cont->redo();
return;
}
break;
case 0:
if (!current_count_sectors) {
cont->redo();
return;
}
current_type[current_drive] = _floppy;
floppy_sizes[TOMINOR(current_drive)] = _floppy->size;
break;
}
if (probing) {
if (drive_params[current_drive].flags & FTD_MSG)
DPRINT("Auto-detected floppy type %s in fd%d\n",
_floppy->name, current_drive);
current_type[current_drive] = _floppy;
floppy_sizes[TOMINOR(current_drive)] = _floppy->size;
probing = 0;
}
if (CT(raw_cmd->cmd[COMMAND]) != FD_READ) {
/* transfer directly from buffer */
cont->done(1);
} else {
buffer_track = raw_cmd->track;
buffer_drive = current_drive;
INFBOUND(buffer_max, nr_sectors + fsector_t);
}
cont->redo();
}
/* Compute the maximal transfer size */
static int transfer_size(int ssize, int max_sector, int max_size)
{
SUPBOUND(max_sector, fsector_t + max_size);
/* alignment */
max_sector -= (max_sector % _floppy->sect) % ssize;
/* transfer size, beginning not aligned */
current_count_sectors = max_sector - fsector_t;
return max_sector;
}
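/*
 * Worked example (illustrative): with 1024-byte physical sectors
 * (ssize = 2), a 14-sector track and fsector_t = 5, a request for 8 more
 * sectors first caps max_sector at 13; (13 % 14) % 2 == 1 then trims it
 * to 12 so the transfer ends on a physical-sector boundary, and
 * current_count_sectors becomes 12 - 5 = 7.
 */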
/*
* Move data from/to the track buffer to/from the buffer cache.
*/
static void copy_buffer(int ssize, int max_sector, int max_sector_2)
{
int remaining; /* number of transferred 512-byte sectors */
struct bio_vec bv;
char *dma_buffer;
int size;
struct req_iterator iter;
max_sector = transfer_size(ssize,
min(max_sector, max_sector_2),
blk_rq_sectors(current_req));
if (current_count_sectors <= 0 && CT(raw_cmd->cmd[COMMAND]) == FD_WRITE &&
buffer_max > fsector_t + blk_rq_sectors(current_req))
current_count_sectors = min_t(int, buffer_max - fsector_t,
blk_rq_sectors(current_req));
remaining = current_count_sectors << 9;
if (remaining > blk_rq_bytes(current_req) && CT(raw_cmd->cmd[COMMAND]) == FD_WRITE) {
DPRINT("in copy buffer\n");
pr_info("current_count_sectors=%ld\n", current_count_sectors);
pr_info("remaining=%d\n", remaining >> 9);
pr_info("current_req->nr_sectors=%u\n",
blk_rq_sectors(current_req));
pr_info("current_req->current_nr_sectors=%u\n",
blk_rq_cur_sectors(current_req));
pr_info("max_sector=%d\n", max_sector);
pr_info("ssize=%d\n", ssize);
}
buffer_max = max(max_sector, buffer_max);
dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9);
size = blk_rq_cur_bytes(current_req);
rq_for_each_segment(bv, current_req, iter) {
if (!remaining)
break;
size = bv.bv_len;
SUPBOUND(size, remaining);
if (dma_buffer + size >
floppy_track_buffer + (max_buffer_sectors << 10) ||
dma_buffer < floppy_track_buffer) {
DPRINT("buffer overrun in copy buffer %d\n",
(int)((floppy_track_buffer - dma_buffer) >> 9));
pr_info("fsector_t=%d buffer_min=%d\n",
fsector_t, buffer_min);
pr_info("current_count_sectors=%ld\n",
current_count_sectors);
if (CT(raw_cmd->cmd[COMMAND]) == FD_READ)
pr_info("read\n");
if (CT(raw_cmd->cmd[COMMAND]) == FD_WRITE)
pr_info("write\n");
break;
}
if (CT(raw_cmd->cmd[COMMAND]) == FD_READ)
memcpy_to_bvec(&bv, dma_buffer);
else
memcpy_from_bvec(dma_buffer, &bv);
remaining -= size;
dma_buffer += size;
}
if (remaining) {
if (remaining > 0)
max_sector -= remaining >> 9;
DPRINT("weirdness: remaining %d\n", remaining >> 9);
}
}
/* work around a bug in pseudo DMA
* (on some FDCs) pseudo DMA does not stop when the CPU stops
* sending data. Hence we need a different way to signal the
* transfer length: We use raw_cmd->cmd[SECT_PER_TRACK]. Unfortunately, this
* does not work with MT, hence we can only transfer one head at
* a time
*/
static void virtualdmabug_workaround(void)
{
int hard_sectors;
int end_sector;
if (CT(raw_cmd->cmd[COMMAND]) == FD_WRITE) {
raw_cmd->cmd[COMMAND] &= ~0x80; /* switch off multiple track mode */
hard_sectors = raw_cmd->length >> (7 + raw_cmd->cmd[SIZECODE]);
end_sector = raw_cmd->cmd[SECTOR] + hard_sectors - 1;
if (end_sector > raw_cmd->cmd[SECT_PER_TRACK]) {
pr_info("too many sectors %d > %d\n",
end_sector, raw_cmd->cmd[SECT_PER_TRACK]);
return;
}
raw_cmd->cmd[SECT_PER_TRACK] = end_sector;
/* make sure raw_cmd->cmd[SECT_PER_TRACK]
* points to end of transfer */
}
}
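/*
 * Worked example (illustrative): a 3-sector write of 512-byte sectors
 * (SIZECODE = 2) has raw_cmd->length = 1536, so hard_sectors =
 * 1536 >> (7 + 2) = 3; starting at SECTOR = 5 the transfer ends at
 * sector 7, and SECT_PER_TRACK is rewritten to 7 so the controller
 * stops there even after the CPU side of the pseudo-DMA has quit.
 */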
/*
 * Formulate a read/write request.
 * This routine decides where to load the data (directly into the buffer,
 * or into the temporary floppy area), and how much data to load (the size
 * of the buffer, the whole track, or a single sector).
* All floppy_track_buffer handling goes in here. If we ever add track buffer
* allocation on the fly, it should be done here. No other part should need
* modification.
*/
static int make_raw_rw_request(void)
{
int aligned_sector_t;
int max_sector;
int max_size;
int tracksize;
int ssize;
if (WARN(max_buffer_sectors == 0, "VFS: Block I/O scheduled on unopened device\n"))
return 0;
set_fdc((long)current_req->q->disk->private_data);
raw_cmd = &default_raw_cmd;
raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK;
raw_cmd->cmd_count = NR_RW;
if (rq_data_dir(current_req) == READ) {
raw_cmd->flags |= FD_RAW_READ;
raw_cmd->cmd[COMMAND] = FM_MODE(_floppy, FD_READ);
} else if (rq_data_dir(current_req) == WRITE) {
raw_cmd->flags |= FD_RAW_WRITE;
raw_cmd->cmd[COMMAND] = FM_MODE(_floppy, FD_WRITE);
} else {
DPRINT("%s: unknown command\n", __func__);
return 0;
}
max_sector = _floppy->sect * _floppy->head;
raw_cmd->cmd[TRACK] = (int)blk_rq_pos(current_req) / max_sector;
fsector_t = (int)blk_rq_pos(current_req) % max_sector;
if (_floppy->track && raw_cmd->cmd[TRACK] >= _floppy->track) {
if (blk_rq_cur_sectors(current_req) & 1) {
current_count_sectors = 1;
return 1;
} else
return 0;
}
raw_cmd->cmd[HEAD] = fsector_t / _floppy->sect;
if (((_floppy->stretch & (FD_SWAPSIDES | FD_SECTBASEMASK)) ||
test_bit(FD_NEED_TWADDLE_BIT, &drive_state[current_drive].flags)) &&
fsector_t < _floppy->sect)
max_sector = _floppy->sect;
/* 2M disks have phantom sectors on the first track */
if ((_floppy->rate & FD_2M) && (!raw_cmd->cmd[TRACK]) && (!raw_cmd->cmd[HEAD])) {
max_sector = 2 * _floppy->sect / 3;
if (fsector_t >= max_sector) {
current_count_sectors =
min_t(int, _floppy->sect - fsector_t,
blk_rq_sectors(current_req));
return 1;
}
raw_cmd->cmd[SIZECODE] = 2;
} else
raw_cmd->cmd[SIZECODE] = FD_SIZECODE(_floppy);
raw_cmd->rate = _floppy->rate & 0x43;
if ((_floppy->rate & FD_2M) &&
(raw_cmd->cmd[TRACK] || raw_cmd->cmd[HEAD]) && raw_cmd->rate == 2)
raw_cmd->rate = 1;
if (raw_cmd->cmd[SIZECODE])
raw_cmd->cmd[SIZECODE2] = 0xff;
else
raw_cmd->cmd[SIZECODE2] = 0x80;
raw_cmd->track = raw_cmd->cmd[TRACK] << STRETCH(_floppy);
raw_cmd->cmd[DR_SELECT] = UNIT(current_drive) + PH_HEAD(_floppy, raw_cmd->cmd[HEAD]);
raw_cmd->cmd[GAP] = _floppy->gap;
ssize = DIV_ROUND_UP(1 << raw_cmd->cmd[SIZECODE], 4);
raw_cmd->cmd[SECT_PER_TRACK] = _floppy->sect << 2 >> raw_cmd->cmd[SIZECODE];
raw_cmd->cmd[SECTOR] = ((fsector_t % _floppy->sect) << 2 >> raw_cmd->cmd[SIZECODE]) +
FD_SECTBASE(_floppy);
/* tracksize describes the size which can be filled up with sectors
* of size ssize.
*/
tracksize = _floppy->sect - _floppy->sect % ssize;
if (tracksize < _floppy->sect) {
raw_cmd->cmd[SECT_PER_TRACK]++;
if (tracksize <= fsector_t % _floppy->sect)
raw_cmd->cmd[SECTOR]--;
/* if we are beyond tracksize, fill up using smaller sectors */
while (tracksize <= fsector_t % _floppy->sect) {
while (tracksize + ssize > _floppy->sect) {
raw_cmd->cmd[SIZECODE]--;
ssize >>= 1;
}
raw_cmd->cmd[SECTOR]++;
raw_cmd->cmd[SECT_PER_TRACK]++;
tracksize += ssize;
}
max_sector = raw_cmd->cmd[HEAD] * _floppy->sect + tracksize;
} else if (!raw_cmd->cmd[TRACK] && !raw_cmd->cmd[HEAD] && !(_floppy->rate & FD_2M) && probing) {
max_sector = _floppy->sect;
} else if (!raw_cmd->cmd[HEAD] && CT(raw_cmd->cmd[COMMAND]) == FD_WRITE) {
/* for virtual DMA bug workaround */
max_sector = _floppy->sect;
}
in_sector_offset = (fsector_t % _floppy->sect) % ssize;
aligned_sector_t = fsector_t - in_sector_offset;
max_size = blk_rq_sectors(current_req);
if ((raw_cmd->track == buffer_track) &&
(current_drive == buffer_drive) &&
(fsector_t >= buffer_min) && (fsector_t < buffer_max)) {
/* data already in track buffer */
if (CT(raw_cmd->cmd[COMMAND]) == FD_READ) {
copy_buffer(1, max_sector, buffer_max);
return 1;
}
} else if (in_sector_offset || blk_rq_sectors(current_req) < ssize) {
if (CT(raw_cmd->cmd[COMMAND]) == FD_WRITE) {
unsigned int sectors;
sectors = fsector_t + blk_rq_sectors(current_req);
if (sectors > ssize && sectors < ssize + ssize)
max_size = ssize + ssize;
else
max_size = ssize;
}
raw_cmd->flags &= ~FD_RAW_WRITE;
raw_cmd->flags |= FD_RAW_READ;
raw_cmd->cmd[COMMAND] = FM_MODE(_floppy, FD_READ);
}
if (CT(raw_cmd->cmd[COMMAND]) == FD_READ)
max_size = max_sector; /* unbounded */
/* claim buffer track if needed */
if (buffer_track != raw_cmd->track || /* bad track */
buffer_drive != current_drive || /* bad drive */
fsector_t > buffer_max ||
fsector_t < buffer_min ||
((CT(raw_cmd->cmd[COMMAND]) == FD_READ ||
(!in_sector_offset && blk_rq_sectors(current_req) >= ssize)) &&
max_sector > 2 * max_buffer_sectors + buffer_min &&
max_size + fsector_t > 2 * max_buffer_sectors + buffer_min)) {
/* not enough space */
buffer_track = -1;
buffer_drive = current_drive;
buffer_max = buffer_min = aligned_sector_t;
}
raw_cmd->kernel_data = floppy_track_buffer +
((aligned_sector_t - buffer_min) << 9);
if (CT(raw_cmd->cmd[COMMAND]) == FD_WRITE) {
/* copy write buffer to track buffer.
* if we get here, we know that the write
* is either aligned or the data already in the buffer
* (buffer will be overwritten) */
if (in_sector_offset && buffer_track == -1)
DPRINT("internal error offset !=0 on write\n");
buffer_track = raw_cmd->track;
buffer_drive = current_drive;
copy_buffer(ssize, max_sector,
2 * max_buffer_sectors + buffer_min);
} else
transfer_size(ssize, max_sector,
2 * max_buffer_sectors + buffer_min -
aligned_sector_t);
/* round up current_count_sectors to get dma xfer size */
raw_cmd->length = in_sector_offset + current_count_sectors;
raw_cmd->length = ((raw_cmd->length - 1) | (ssize - 1)) + 1;
raw_cmd->length <<= 9;
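	/*
	 * Worked example for the rounding above (illustrative): with
	 * ssize = 2 (1024-byte sectors), in_sector_offset = 1 and
	 * current_count_sectors = 2, length = ((3 - 1) | 1) + 1 = 4
	 * sectors, i.e. 2048 bytes after the << 9.
	 */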
if ((raw_cmd->length < current_count_sectors << 9) ||
(CT(raw_cmd->cmd[COMMAND]) == FD_WRITE &&
(aligned_sector_t + (raw_cmd->length >> 9) > buffer_max ||
aligned_sector_t < buffer_min)) ||
raw_cmd->length % (128 << raw_cmd->cmd[SIZECODE]) ||
raw_cmd->length <= 0 || current_count_sectors <= 0) {
DPRINT("fractionary current count b=%lx s=%lx\n",
raw_cmd->length, current_count_sectors);
pr_info("addr=%d, length=%ld\n",
(int)((raw_cmd->kernel_data -
floppy_track_buffer) >> 9),
current_count_sectors);
pr_info("st=%d ast=%d mse=%d msi=%d\n",
fsector_t, aligned_sector_t, max_sector, max_size);
pr_info("ssize=%x SIZECODE=%d\n", ssize, raw_cmd->cmd[SIZECODE]);
pr_info("command=%x SECTOR=%d HEAD=%d, TRACK=%d\n",
raw_cmd->cmd[COMMAND], raw_cmd->cmd[SECTOR],
raw_cmd->cmd[HEAD], raw_cmd->cmd[TRACK]);
pr_info("buffer drive=%d\n", buffer_drive);
pr_info("buffer track=%d\n", buffer_track);
pr_info("buffer_min=%d\n", buffer_min);
pr_info("buffer_max=%d\n", buffer_max);
return 0;
}
if (raw_cmd->kernel_data < floppy_track_buffer ||
current_count_sectors < 0 ||
raw_cmd->length < 0 ||
raw_cmd->kernel_data + raw_cmd->length >
floppy_track_buffer + (max_buffer_sectors << 10)) {
DPRINT("buffer overrun in schedule dma\n");
pr_info("fsector_t=%d buffer_min=%d current_count=%ld\n",
fsector_t, buffer_min, raw_cmd->length >> 9);
pr_info("current_count_sectors=%ld\n",
current_count_sectors);
if (CT(raw_cmd->cmd[COMMAND]) == FD_READ)
pr_info("read\n");
if (CT(raw_cmd->cmd[COMMAND]) == FD_WRITE)
pr_info("write\n");
return 0;
}
if (raw_cmd->length == 0) {
DPRINT("zero dma transfer attempted from make_raw_request\n");
return 0;
}
virtualdmabug_workaround();
return 2;
}
static int set_next_request(void)
{
current_req = list_first_entry_or_null(&floppy_reqs, struct request,
queuelist);
if (current_req) {
floppy_errors = 0;
		list_del_init(&current_req->queuelist);
return 1;
}
return 0;
}
/* Starts or continues processing request. Will automatically unlock the
* driver at end of request.
*/
static void redo_fd_request(void)
{
int drive;
int tmp;
lastredo = jiffies;
if (current_drive < N_DRIVE)
floppy_off(current_drive);
do_request:
if (!current_req) {
int pending;
spin_lock_irq(&floppy_lock);
pending = set_next_request();
spin_unlock_irq(&floppy_lock);
if (!pending) {
do_floppy = NULL;
unlock_fdc();
return;
}
}
drive = (long)current_req->q->disk->private_data;
set_fdc(drive);
reschedule_timeout(current_drive, "redo fd request");
set_floppy(drive);
raw_cmd = &default_raw_cmd;
raw_cmd->flags = 0;
if (start_motor(redo_fd_request))
return;
disk_change(current_drive);
if (test_bit(current_drive, &fake_change) ||
test_bit(FD_DISK_CHANGED_BIT, &drive_state[current_drive].flags)) {
DPRINT("disk absent or changed during operation\n");
request_done(0);
goto do_request;
}
if (!_floppy) { /* Autodetection */
if (!probing) {
drive_state[current_drive].probed_format = 0;
if (next_valid_format(current_drive)) {
DPRINT("no autodetectable formats\n");
_floppy = NULL;
request_done(0);
goto do_request;
}
}
probing = 1;
_floppy = floppy_type + drive_params[current_drive].autodetect[drive_state[current_drive].probed_format];
} else
probing = 0;
tmp = make_raw_rw_request();
if (tmp < 2) {
request_done(tmp);
goto do_request;
}
if (test_bit(FD_NEED_TWADDLE_BIT, &drive_state[current_drive].flags))
twaddle(current_fdc, current_drive);
schedule_bh(floppy_start);
debugt(__func__, "queue fd request");
return;
}
static const struct cont_t rw_cont = {
.interrupt = rw_interrupt,
.redo = redo_fd_request,
.error = bad_flp_intr,
.done = request_done
};
/* schedule the request and automatically unlock the driver on completion */
static void process_fd_request(void)
{
cont = &rw_cont;
schedule_bh(redo_fd_request);
}
static blk_status_t floppy_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
blk_mq_start_request(bd->rq);
if (WARN(max_buffer_sectors == 0,
"VFS: %s called on non-open device\n", __func__))
return BLK_STS_IOERR;
if (WARN(atomic_read(&usage_count) == 0,
"warning: usage count=0, current_req=%p sect=%ld flags=%llx\n",
current_req, (long)blk_rq_pos(current_req),
(__force unsigned long long) current_req->cmd_flags))
return BLK_STS_IOERR;
if (test_and_set_bit(0, &fdc_busy)) {
		/* fdc busy, this new request will be handled when the
		 * current one is done */
is_alive(__func__, "old request running");
return BLK_STS_RESOURCE;
}
spin_lock_irq(&floppy_lock);
list_add_tail(&bd->rq->queuelist, &floppy_reqs);
spin_unlock_irq(&floppy_lock);
command_status = FD_COMMAND_NONE;
__reschedule_timeout(MAXTIMEOUT, "fd_request");
set_fdc(0);
process_fd_request();
is_alive(__func__, "");
return BLK_STS_OK;
}
static const struct cont_t poll_cont = {
.interrupt = success_and_wakeup,
.redo = floppy_ready,
.error = generic_failure,
.done = generic_done
};
static int poll_drive(bool interruptible, int flag)
{
/* no auto-sense, just clear dcl */
raw_cmd = &default_raw_cmd;
raw_cmd->flags = flag;
raw_cmd->track = 0;
raw_cmd->cmd_count = 0;
cont = &poll_cont;
debug_dcl(drive_params[current_drive].flags,
"setting NEWCHANGE in poll_drive\n");
set_bit(FD_DISK_NEWCHANGE_BIT, &drive_state[current_drive].flags);
return wait_til_done(floppy_ready, interruptible);
}
/*
* User triggered reset
* ====================
*/
static void reset_intr(void)
{
pr_info("weird, reset interrupt called\n");
}
static const struct cont_t reset_cont = {
.interrupt = reset_intr,
.redo = success_and_wakeup,
.error = generic_failure,
.done = generic_done
};
/*
* Resets the FDC connected to drive <drive>.
* Both current_drive and current_fdc are changed to match the new drive.
*/
static int user_reset_fdc(int drive, int arg, bool interruptible)
{
int ret;
if (lock_fdc(drive))
return -EINTR;
if (arg == FD_RESET_ALWAYS)
fdc_state[current_fdc].reset = 1;
if (fdc_state[current_fdc].reset) {
/* note: reset_fdc will take care of unlocking the driver
* on completion.
*/
cont = &reset_cont;
ret = wait_til_done(reset_fdc, interruptible);
if (ret == -EINTR)
return -EINTR;
}
process_fd_request();
return 0;
}
/*
* Misc Ioctl's and support
* ========================
*/
static inline int fd_copyout(void __user *param, const void *address,
unsigned long size)
{
return copy_to_user(param, address, size) ? -EFAULT : 0;
}
static inline int fd_copyin(void __user *param, void *address,
unsigned long size)
{
return copy_from_user(address, param, size) ? -EFAULT : 0;
}
static const char *drive_name(int type, int drive)
{
struct floppy_struct *floppy;
if (type)
floppy = floppy_type + type;
else {
if (drive_params[drive].native_format)
floppy = floppy_type + drive_params[drive].native_format;
else
return "(null)";
}
if (floppy->name)
return floppy->name;
else
return "(null)";
}
#ifdef CONFIG_BLK_DEV_FD_RAWCMD
/* raw commands */
static void raw_cmd_done(int flag)
{
if (!flag) {
raw_cmd->flags |= FD_RAW_FAILURE;
raw_cmd->flags |= FD_RAW_HARDFAILURE;
} else {
raw_cmd->reply_count = inr;
if (raw_cmd->reply_count > FD_RAW_REPLY_SIZE)
raw_cmd->reply_count = 0;
memcpy(raw_cmd->reply, reply_buffer, raw_cmd->reply_count);
if (raw_cmd->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
unsigned long flags;
flags = claim_dma_lock();
raw_cmd->length = fd_get_dma_residue();
release_dma_lock(flags);
}
if ((raw_cmd->flags & FD_RAW_SOFTFAILURE) &&
(!raw_cmd->reply_count || (raw_cmd->reply[0] & 0xc0)))
raw_cmd->flags |= FD_RAW_FAILURE;
if (disk_change(current_drive))
raw_cmd->flags |= FD_RAW_DISK_CHANGE;
else
raw_cmd->flags &= ~FD_RAW_DISK_CHANGE;
if (raw_cmd->flags & FD_RAW_NO_MOTOR_AFTER)
motor_off_callback(&motor_off_timer[current_drive]);
if (raw_cmd->next &&
(!(raw_cmd->flags & FD_RAW_FAILURE) ||
!(raw_cmd->flags & FD_RAW_STOP_IF_FAILURE)) &&
((raw_cmd->flags & FD_RAW_FAILURE) ||
!(raw_cmd->flags & FD_RAW_STOP_IF_SUCCESS))) {
raw_cmd = raw_cmd->next;
return;
}
}
generic_done(flag);
}
static const struct cont_t raw_cmd_cont = {
.interrupt = success_and_wakeup,
.redo = floppy_start,
.error = generic_failure,
.done = raw_cmd_done
};
static int raw_cmd_copyout(int cmd, void __user *param,
struct floppy_raw_cmd *ptr)
{
int ret;
while (ptr) {
struct floppy_raw_cmd cmd = *ptr;
cmd.next = NULL;
cmd.kernel_data = NULL;
ret = copy_to_user(param, &cmd, sizeof(cmd));
if (ret)
return -EFAULT;
param += sizeof(struct floppy_raw_cmd);
if ((ptr->flags & FD_RAW_READ) && ptr->buffer_length) {
if (ptr->length >= 0 &&
ptr->length <= ptr->buffer_length) {
long length = ptr->buffer_length - ptr->length;
ret = fd_copyout(ptr->data, ptr->kernel_data,
length);
if (ret)
return ret;
}
}
ptr = ptr->next;
}
return 0;
}
static void raw_cmd_free(struct floppy_raw_cmd **ptr)
{
struct floppy_raw_cmd *next;
struct floppy_raw_cmd *this;
this = *ptr;
*ptr = NULL;
while (this) {
if (this->buffer_length) {
fd_dma_mem_free((unsigned long)this->kernel_data,
this->buffer_length);
this->buffer_length = 0;
}
next = this->next;
kfree(this);
this = next;
}
}
#define MAX_LEN (1UL << MAX_ORDER << PAGE_SHIFT)
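/*
 * MAX_LEN is the largest physically contiguous buffer the page allocator
 * will hand out; e.g. with MAX_ORDER == 10 and 4 KiB pages that is 4 MiB
 * (the exact value depends on the architecture and kernel version).
 */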
static int raw_cmd_copyin(int cmd, void __user *param,
struct floppy_raw_cmd **rcmd)
{
struct floppy_raw_cmd *ptr;
int ret;
*rcmd = NULL;
loop:
ptr = kmalloc(sizeof(struct floppy_raw_cmd), GFP_KERNEL);
if (!ptr)
return -ENOMEM;
*rcmd = ptr;
ret = copy_from_user(ptr, param, sizeof(*ptr));
ptr->next = NULL;
ptr->buffer_length = 0;
ptr->kernel_data = NULL;
if (ret)
return -EFAULT;
param += sizeof(struct floppy_raw_cmd);
if (ptr->cmd_count > FD_RAW_CMD_FULLSIZE)
return -EINVAL;
memset(ptr->reply, 0, FD_RAW_REPLY_SIZE);
ptr->resultcode = 0;
if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
if (ptr->length <= 0 || ptr->length > MAX_LEN)
return -EINVAL;
ptr->kernel_data = (char *)fd_dma_mem_alloc(ptr->length);
fallback_on_nodma_alloc(&ptr->kernel_data, ptr->length);
if (!ptr->kernel_data)
return -ENOMEM;
ptr->buffer_length = ptr->length;
}
if (ptr->flags & FD_RAW_WRITE) {
ret = fd_copyin(ptr->data, ptr->kernel_data, ptr->length);
if (ret)
return ret;
}
if (ptr->flags & FD_RAW_MORE) {
rcmd = &(ptr->next);
ptr->rate &= 0x43;
goto loop;
}
return 0;
}
static int raw_cmd_ioctl(int cmd, void __user *param)
{
struct floppy_raw_cmd *my_raw_cmd;
int drive;
int ret2;
int ret;
if (fdc_state[current_fdc].rawcmd <= 1)
fdc_state[current_fdc].rawcmd = 1;
for (drive = 0; drive < N_DRIVE; drive++) {
if (FDC(drive) != current_fdc)
continue;
if (drive == current_drive) {
if (drive_state[drive].fd_ref > 1) {
fdc_state[current_fdc].rawcmd = 2;
break;
}
} else if (drive_state[drive].fd_ref) {
fdc_state[current_fdc].rawcmd = 2;
break;
}
}
if (fdc_state[current_fdc].reset)
return -EIO;
ret = raw_cmd_copyin(cmd, param, &my_raw_cmd);
if (ret) {
raw_cmd_free(&my_raw_cmd);
return ret;
}
raw_cmd = my_raw_cmd;
cont = &raw_cmd_cont;
ret = wait_til_done(floppy_start, true);
debug_dcl(drive_params[current_drive].flags,
"calling disk change from raw_cmd ioctl\n");
if (ret != -EINTR && fdc_state[current_fdc].reset)
ret = -EIO;
drive_state[current_drive].track = NO_TRACK;
ret2 = raw_cmd_copyout(cmd, param, my_raw_cmd);
if (!ret)
ret = ret2;
raw_cmd_free(&my_raw_cmd);
return ret;
}
static int floppy_raw_cmd_ioctl(int type, int drive, int cmd,
void __user *param)
{
int ret;
pr_warn_once("Note: FDRAWCMD is deprecated and will be removed from the kernel in the near future.\n");
if (type)
return -EINVAL;
if (lock_fdc(drive))
return -EINTR;
set_floppy(drive);
ret = raw_cmd_ioctl(cmd, param);
if (ret == -EINTR)
return -EINTR;
process_fd_request();
return ret;
}
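/*
 * Userspace sketch (illustrative only, not part of the driver): issuing
 * a SENSE DRIVE STATUS through the deprecated FDRAWCMD ioctl might look
 * like this, error handling omitted:
 *
 *	struct floppy_raw_cmd cmd = { 0 };
 *	cmd.cmd[0] = FD_GETSTATUS;	// no result-phase interrupt
 *	cmd.cmd[1] = 0;			// unit/head select byte
 *	cmd.cmd_count = 2;
 *	ioctl(fd, FDRAWCMD, &cmd);	// fd: an open /dev/fd0
 *	// cmd.reply[0] now holds ST3 for the selected drive
 */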
#else /* CONFIG_BLK_DEV_FD_RAWCMD */
static int floppy_raw_cmd_ioctl(int type, int drive, int cmd,
void __user *param)
{
return -EOPNOTSUPP;
}
#endif
static int invalidate_drive(struct gendisk *disk)
{
/* invalidate the buffer track to force a reread */
set_bit((long)disk->private_data, &fake_change);
process_fd_request();
if (disk_check_media_change(disk))
floppy_revalidate(disk);
return 0;
}
static int set_geometry(unsigned int cmd, struct floppy_struct *g,
int drive, int type, struct block_device *bdev)
{
int cnt;
/* sanity checking for parameters. */
if ((int)g->sect <= 0 ||
(int)g->head <= 0 ||
/* check for overflow in max_sector */
(int)(g->sect * g->head) <= 0 ||
/* check for zero in raw_cmd->cmd[F_SECT_PER_TRACK] */
(unsigned char)((g->sect << 2) >> FD_SIZECODE(g)) == 0 ||
g->track <= 0 || g->track > drive_params[drive].tracks >> STRETCH(g) ||
/* check if reserved bits are set */
(g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_SECTBASEMASK)) != 0)
return -EINVAL;
if (type) {
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
mutex_lock(&open_lock);
if (lock_fdc(drive)) {
mutex_unlock(&open_lock);
return -EINTR;
}
floppy_type[type] = *g;
floppy_type[type].name = "user format";
for (cnt = type << 2; cnt < (type << 2) + 4; cnt++)
floppy_sizes[cnt] = floppy_sizes[cnt + 0x80] =
floppy_type[type].size + 1;
process_fd_request();
for (cnt = 0; cnt < N_DRIVE; cnt++) {
struct gendisk *disk = opened_disk[cnt];
if (!disk || ITYPE(drive_state[cnt].fd_device) != type)
continue;
disk_force_media_change(disk);
}
mutex_unlock(&open_lock);
} else {
int oldStretch;
if (lock_fdc(drive))
return -EINTR;
if (cmd != FDDEFPRM) {
			/* notice a disk change immediately, else
			 * the new settings would be lost right away */
if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
return -EINTR;
}
oldStretch = g->stretch;
user_params[drive] = *g;
if (buffer_drive == drive)
SUPBOUND(buffer_max, user_params[drive].sect);
current_type[drive] = &user_params[drive];
floppy_sizes[drive] = user_params[drive].size;
if (cmd == FDDEFPRM)
drive_state[current_drive].keep_data = -1;
else
drive_state[current_drive].keep_data = 1;
/* invalidation. Invalidate only when needed, i.e.
* when there are already sectors in the buffer cache
* whose number will change. This is useful, because
* mtools often changes the geometry of the disk after
* looking at the boot block */
if (drive_state[current_drive].maxblock > user_params[drive].sect ||
drive_state[current_drive].maxtrack ||
((user_params[drive].sect ^ oldStretch) &
(FD_SWAPSIDES | FD_SECTBASEMASK)))
invalidate_drive(bdev->bd_disk);
else
process_fd_request();
}
return 0;
}
/* handle obsolete ioctl's */
static unsigned int ioctl_table[] = {
FDCLRPRM,
FDSETPRM,
FDDEFPRM,
FDGETPRM,
FDMSGON,
FDMSGOFF,
FDFMTBEG,
FDFMTTRK,
FDFMTEND,
FDSETEMSGTRESH,
FDFLUSH,
FDSETMAXERRS,
FDGETMAXERRS,
FDGETDRVTYP,
FDSETDRVPRM,
FDGETDRVPRM,
FDGETDRVSTAT,
FDPOLLDRVSTAT,
FDRESET,
FDGETFDCSTAT,
FDWERRORCLR,
FDWERRORGET,
FDRAWCMD,
FDEJECT,
FDTWADDLE
};
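/*
 * Added note: normalize_ioctl() below matches on the low 16 bits (magic
 * 0x02 plus command number) and substitutes the canonical table entry,
 * so pre-_IOC-encoded command numbers from old binaries keep working;
 * e.g. a bare 0x0204 is upgraded to the full FDGETPRM encoding, while a
 * caller-declared size larger than the canonical one is rejected.
 */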
static int normalize_ioctl(unsigned int *cmd, int *size)
{
int i;
for (i = 0; i < ARRAY_SIZE(ioctl_table); i++) {
if ((*cmd & 0xffff) == (ioctl_table[i] & 0xffff)) {
*size = _IOC_SIZE(*cmd);
*cmd = ioctl_table[i];
if (*size > _IOC_SIZE(*cmd)) {
pr_info("ioctl not yet supported\n");
return -EFAULT;
}
return 0;
}
}
return -EINVAL;
}
static int get_floppy_geometry(int drive, int type, struct floppy_struct **g)
{
if (type)
*g = &floppy_type[type];
else {
if (lock_fdc(drive))
return -EINTR;
if (poll_drive(false, 0) == -EINTR)
return -EINTR;
process_fd_request();
*g = current_type[drive];
}
if (!*g)
return -ENODEV;
return 0;
}
static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
int drive = (long)bdev->bd_disk->private_data;
int type = ITYPE(drive_state[drive].fd_device);
struct floppy_struct *g;
int ret;
ret = get_floppy_geometry(drive, type, &g);
if (ret)
return ret;
geo->heads = g->head;
geo->sectors = g->sect;
geo->cylinders = g->track;
return 0;
}
static bool valid_floppy_drive_params(const short autodetect[FD_AUTODETECT_SIZE],
int native_format)
{
size_t floppy_type_size = ARRAY_SIZE(floppy_type);
size_t i = 0;
for (i = 0; i < FD_AUTODETECT_SIZE; ++i) {
if (autodetect[i] < 0 ||
autodetect[i] >= floppy_type_size)
return false;
}
if (native_format < 0 || native_format >= floppy_type_size)
return false;
return true;
}
static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long param)
{
int drive = (long)bdev->bd_disk->private_data;
int type = ITYPE(drive_state[drive].fd_device);
int ret;
int size;
union inparam {
struct floppy_struct g; /* geometry */
struct format_descr f;
struct floppy_max_errors max_errors;
struct floppy_drive_params dp;
} inparam; /* parameters coming from user space */
const void *outparam; /* parameters passed back to user space */
/* convert compatibility eject ioctls into floppy eject ioctl.
* We do this in order to provide a means to eject floppy disks before
* installing the new fdutils package */
if (cmd == CDROMEJECT || /* CD-ROM eject */
cmd == 0x6470) { /* SunOS floppy eject */
DPRINT("obsolete eject ioctl\n");
DPRINT("please use floppycontrol --eject\n");
cmd = FDEJECT;
}
if (!((cmd & 0xff00) == 0x0200))
return -EINVAL;
/* convert the old style command into a new style command */
ret = normalize_ioctl(&cmd, &size);
if (ret)
return ret;
/* permission checks */
if (((cmd & 0x40) &&
!(mode & (BLK_OPEN_WRITE | BLK_OPEN_WRITE_IOCTL))) ||
((cmd & 0x80) && !capable(CAP_SYS_ADMIN)))
return -EPERM;
if (WARN_ON(size < 0 || size > sizeof(inparam)))
return -EINVAL;
/* copyin */
memset(&inparam, 0, sizeof(inparam));
if (_IOC_DIR(cmd) & _IOC_WRITE) {
ret = fd_copyin((void __user *)param, &inparam, size);
if (ret)
return ret;
}
switch (cmd) {
case FDEJECT:
if (drive_state[drive].fd_ref != 1)
/* somebody else has this drive open */
return -EBUSY;
if (lock_fdc(drive))
return -EINTR;
/* do the actual eject. Fails on
* non-Sparc architectures */
ret = fd_eject(UNIT(drive));
set_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags);
set_bit(FD_VERIFY_BIT, &drive_state[drive].flags);
process_fd_request();
return ret;
case FDCLRPRM:
if (lock_fdc(drive))
return -EINTR;
current_type[drive] = NULL;
floppy_sizes[drive] = MAX_DISK_SIZE << 1;
drive_state[drive].keep_data = 0;
return invalidate_drive(bdev->bd_disk);
case FDSETPRM:
case FDDEFPRM:
return set_geometry(cmd, &inparam.g, drive, type, bdev);
case FDGETPRM:
ret = get_floppy_geometry(drive, type,
(struct floppy_struct **)&outparam);
if (ret)
return ret;
memcpy(&inparam.g, outparam,
offsetof(struct floppy_struct, name));
outparam = &inparam.g;
break;
case FDMSGON:
drive_params[drive].flags |= FTD_MSG;
return 0;
case FDMSGOFF:
drive_params[drive].flags &= ~FTD_MSG;
return 0;
case FDFMTBEG:
if (lock_fdc(drive))
return -EINTR;
if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
return -EINTR;
ret = drive_state[drive].flags;
process_fd_request();
if (ret & FD_VERIFY)
return -ENODEV;
if (!(ret & FD_DISK_WRITABLE))
return -EROFS;
return 0;
case FDFMTTRK:
if (drive_state[drive].fd_ref != 1)
return -EBUSY;
return do_format(drive, &inparam.f);
case FDFMTEND:
case FDFLUSH:
if (lock_fdc(drive))
return -EINTR;
return invalidate_drive(bdev->bd_disk);
case FDSETEMSGTRESH:
drive_params[drive].max_errors.reporting = (unsigned short)(param & 0x0f);
return 0;
case FDGETMAXERRS:
outparam = &drive_params[drive].max_errors;
break;
case FDSETMAXERRS:
drive_params[drive].max_errors = inparam.max_errors;
break;
case FDGETDRVTYP:
outparam = drive_name(type, drive);
SUPBOUND(size, strlen((const char *)outparam) + 1);
break;
case FDSETDRVPRM:
if (!valid_floppy_drive_params(inparam.dp.autodetect,
inparam.dp.native_format))
return -EINVAL;
drive_params[drive] = inparam.dp;
break;
case FDGETDRVPRM:
outparam = &drive_params[drive];
break;
case FDPOLLDRVSTAT:
if (lock_fdc(drive))
return -EINTR;
if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
return -EINTR;
process_fd_request();
fallthrough;
case FDGETDRVSTAT:
outparam = &drive_state[drive];
break;
case FDRESET:
return user_reset_fdc(drive, (int)param, true);
case FDGETFDCSTAT:
outparam = &fdc_state[FDC(drive)];
break;
case FDWERRORCLR:
memset(&write_errors[drive], 0, sizeof(write_errors[drive]));
return 0;
case FDWERRORGET:
outparam = &write_errors[drive];
break;
case FDRAWCMD:
return floppy_raw_cmd_ioctl(type, drive, cmd, (void __user *)param);
case FDTWADDLE:
if (lock_fdc(drive))
return -EINTR;
twaddle(current_fdc, current_drive);
process_fd_request();
return 0;
default:
return -EINVAL;
}
if (_IOC_DIR(cmd) & _IOC_READ)
return fd_copyout((void __user *)param, outparam, size);
return 0;
}
static int fd_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long param)
{
int ret;
mutex_lock(&floppy_mutex);
ret = fd_locked_ioctl(bdev, mode, cmd, param);
mutex_unlock(&floppy_mutex);
return ret;
}
#ifdef CONFIG_COMPAT
struct compat_floppy_drive_params {
char cmos;
compat_ulong_t max_dtr;
compat_ulong_t hlt;
compat_ulong_t hut;
compat_ulong_t srt;
compat_ulong_t spinup;
compat_ulong_t spindown;
unsigned char spindown_offset;
unsigned char select_delay;
unsigned char rps;
unsigned char tracks;
compat_ulong_t timeout;
unsigned char interleave_sect;
struct floppy_max_errors max_errors;
char flags;
char read_track;
short autodetect[FD_AUTODETECT_SIZE];
compat_int_t checkfreq;
compat_int_t native_format;
};
struct compat_floppy_drive_struct {
signed char flags;
compat_ulong_t spinup_date;
compat_ulong_t select_date;
compat_ulong_t first_read_date;
short probed_format;
short track;
short maxblock;
short maxtrack;
compat_int_t generation;
compat_int_t keep_data;
compat_int_t fd_ref;
compat_int_t fd_device;
compat_int_t last_checked;
compat_caddr_t dmabuf;
compat_int_t bufblocks;
};
struct compat_floppy_fdc_state {
compat_int_t spec1;
compat_int_t spec2;
compat_int_t dtr;
unsigned char version;
unsigned char dor;
compat_ulong_t address;
unsigned int rawcmd:2;
unsigned int reset:1;
unsigned int need_configure:1;
unsigned int perp_mode:2;
unsigned int has_fifo:1;
unsigned int driver_version;
unsigned char track[4];
};
struct compat_floppy_write_errors {
unsigned int write_errors;
compat_ulong_t first_error_sector;
compat_int_t first_error_generation;
compat_ulong_t last_error_sector;
compat_int_t last_error_generation;
compat_uint_t badness;
};
#define FDSETPRM32 _IOW(2, 0x42, struct compat_floppy_struct)
#define FDDEFPRM32 _IOW(2, 0x43, struct compat_floppy_struct)
#define FDSETDRVPRM32 _IOW(2, 0x90, struct compat_floppy_drive_params)
#define FDGETDRVPRM32 _IOR(2, 0x11, struct compat_floppy_drive_params)
#define FDGETDRVSTAT32 _IOR(2, 0x12, struct compat_floppy_drive_struct)
#define FDPOLLDRVSTAT32 _IOR(2, 0x13, struct compat_floppy_drive_struct)
#define FDGETFDCSTAT32 _IOR(2, 0x15, struct compat_floppy_fdc_state)
#define FDWERRORGET32 _IOR(2, 0x17, struct compat_floppy_write_errors)
static int compat_set_geometry(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, struct compat_floppy_struct __user *arg)
{
struct floppy_struct v;
int drive, type;
int err;
BUILD_BUG_ON(offsetof(struct floppy_struct, name) !=
offsetof(struct compat_floppy_struct, name));
if (!(mode & (BLK_OPEN_WRITE | BLK_OPEN_WRITE_IOCTL)))
return -EPERM;
memset(&v, 0, sizeof(struct floppy_struct));
if (copy_from_user(&v, arg, offsetof(struct floppy_struct, name)))
return -EFAULT;
mutex_lock(&floppy_mutex);
drive = (long)bdev->bd_disk->private_data;
type = ITYPE(drive_state[drive].fd_device);
err = set_geometry(cmd == FDSETPRM32 ? FDSETPRM : FDDEFPRM,
&v, drive, type, bdev);
mutex_unlock(&floppy_mutex);
return err;
}
static int compat_get_prm(int drive,
struct compat_floppy_struct __user *arg)
{
struct compat_floppy_struct v;
struct floppy_struct *p;
int err;
memset(&v, 0, sizeof(v));
mutex_lock(&floppy_mutex);
err = get_floppy_geometry(drive, ITYPE(drive_state[drive].fd_device),
&p);
if (err) {
mutex_unlock(&floppy_mutex);
return err;
}
memcpy(&v, p, offsetof(struct floppy_struct, name));
mutex_unlock(&floppy_mutex);
if (copy_to_user(arg, &v, sizeof(struct compat_floppy_struct)))
return -EFAULT;
return 0;
}
static int compat_setdrvprm(int drive,
struct compat_floppy_drive_params __user *arg)
{
struct compat_floppy_drive_params v;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (copy_from_user(&v, arg, sizeof(struct compat_floppy_drive_params)))
return -EFAULT;
if (!valid_floppy_drive_params(v.autodetect, v.native_format))
return -EINVAL;
mutex_lock(&floppy_mutex);
drive_params[drive].cmos = v.cmos;
drive_params[drive].max_dtr = v.max_dtr;
drive_params[drive].hlt = v.hlt;
drive_params[drive].hut = v.hut;
drive_params[drive].srt = v.srt;
drive_params[drive].spinup = v.spinup;
drive_params[drive].spindown = v.spindown;
drive_params[drive].spindown_offset = v.spindown_offset;
drive_params[drive].select_delay = v.select_delay;
drive_params[drive].rps = v.rps;
drive_params[drive].tracks = v.tracks;
drive_params[drive].timeout = v.timeout;
drive_params[drive].interleave_sect = v.interleave_sect;
drive_params[drive].max_errors = v.max_errors;
drive_params[drive].flags = v.flags;
drive_params[drive].read_track = v.read_track;
memcpy(drive_params[drive].autodetect, v.autodetect,
sizeof(v.autodetect));
drive_params[drive].checkfreq = v.checkfreq;
drive_params[drive].native_format = v.native_format;
mutex_unlock(&floppy_mutex);
return 0;
}
static int compat_getdrvprm(int drive,
struct compat_floppy_drive_params __user *arg)
{
struct compat_floppy_drive_params v;
memset(&v, 0, sizeof(struct compat_floppy_drive_params));
mutex_lock(&floppy_mutex);
v.cmos = drive_params[drive].cmos;
v.max_dtr = drive_params[drive].max_dtr;
v.hlt = drive_params[drive].hlt;
v.hut = drive_params[drive].hut;
v.srt = drive_params[drive].srt;
v.spinup = drive_params[drive].spinup;
v.spindown = drive_params[drive].spindown;
v.spindown_offset = drive_params[drive].spindown_offset;
v.select_delay = drive_params[drive].select_delay;
v.rps = drive_params[drive].rps;
v.tracks = drive_params[drive].tracks;
v.timeout = drive_params[drive].timeout;
v.interleave_sect = drive_params[drive].interleave_sect;
v.max_errors = drive_params[drive].max_errors;
v.flags = drive_params[drive].flags;
v.read_track = drive_params[drive].read_track;
memcpy(v.autodetect, drive_params[drive].autodetect,
sizeof(v.autodetect));
v.checkfreq = drive_params[drive].checkfreq;
v.native_format = drive_params[drive].native_format;
mutex_unlock(&floppy_mutex);
if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
return -EFAULT;
return 0;
}
static int compat_getdrvstat(int drive, bool poll,
struct compat_floppy_drive_struct __user *arg)
{
struct compat_floppy_drive_struct v;
memset(&v, 0, sizeof(struct compat_floppy_drive_struct));
mutex_lock(&floppy_mutex);
if (poll) {
if (lock_fdc(drive))
goto Eintr;
if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
goto Eintr;
process_fd_request();
}
v.spinup_date = drive_state[drive].spinup_date;
v.select_date = drive_state[drive].select_date;
v.first_read_date = drive_state[drive].first_read_date;
v.probed_format = drive_state[drive].probed_format;
v.track = drive_state[drive].track;
v.maxblock = drive_state[drive].maxblock;
v.maxtrack = drive_state[drive].maxtrack;
v.generation = drive_state[drive].generation;
v.keep_data = drive_state[drive].keep_data;
v.fd_ref = drive_state[drive].fd_ref;
v.fd_device = drive_state[drive].fd_device;
v.last_checked = drive_state[drive].last_checked;
v.dmabuf = (uintptr_t) drive_state[drive].dmabuf;
v.bufblocks = drive_state[drive].bufblocks;
mutex_unlock(&floppy_mutex);
if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
return -EFAULT;
return 0;
Eintr:
mutex_unlock(&floppy_mutex);
return -EINTR;
}
static int compat_getfdcstat(int drive,
struct compat_floppy_fdc_state __user *arg)
{
struct compat_floppy_fdc_state v32;
struct floppy_fdc_state v;
mutex_lock(&floppy_mutex);
v = fdc_state[FDC(drive)];
mutex_unlock(&floppy_mutex);
memset(&v32, 0, sizeof(struct compat_floppy_fdc_state));
v32.spec1 = v.spec1;
v32.spec2 = v.spec2;
v32.dtr = v.dtr;
v32.version = v.version;
v32.dor = v.dor;
v32.address = v.address;
v32.rawcmd = v.rawcmd;
v32.reset = v.reset;
v32.need_configure = v.need_configure;
v32.perp_mode = v.perp_mode;
v32.has_fifo = v.has_fifo;
v32.driver_version = v.driver_version;
memcpy(v32.track, v.track, 4);
if (copy_to_user(arg, &v32, sizeof(struct compat_floppy_fdc_state)))
return -EFAULT;
return 0;
}
static int compat_werrorget(int drive,
struct compat_floppy_write_errors __user *arg)
{
struct compat_floppy_write_errors v32;
struct floppy_write_errors v;
memset(&v32, 0, sizeof(struct compat_floppy_write_errors));
mutex_lock(&floppy_mutex);
v = write_errors[drive];
mutex_unlock(&floppy_mutex);
v32.write_errors = v.write_errors;
v32.first_error_sector = v.first_error_sector;
v32.first_error_generation = v.first_error_generation;
v32.last_error_sector = v.last_error_sector;
v32.last_error_generation = v.last_error_generation;
v32.badness = v.badness;
if (copy_to_user(arg, &v32, sizeof(struct compat_floppy_write_errors)))
return -EFAULT;
return 0;
}
static int fd_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long param)
{
int drive = (long)bdev->bd_disk->private_data;
switch (cmd) {
case CDROMEJECT: /* CD-ROM eject */
case 0x6470: /* SunOS floppy eject */
case FDMSGON:
case FDMSGOFF:
case FDSETEMSGTRESH:
case FDFLUSH:
case FDWERRORCLR:
case FDEJECT:
case FDCLRPRM:
case FDFMTBEG:
case FDRESET:
case FDTWADDLE:
return fd_ioctl(bdev, mode, cmd, param);
case FDSETMAXERRS:
case FDGETMAXERRS:
case FDGETDRVTYP:
case FDFMTEND:
case FDFMTTRK:
case FDRAWCMD:
return fd_ioctl(bdev, mode, cmd,
(unsigned long)compat_ptr(param));
case FDSETPRM32:
case FDDEFPRM32:
return compat_set_geometry(bdev, mode, cmd, compat_ptr(param));
case FDGETPRM32:
return compat_get_prm(drive, compat_ptr(param));
case FDSETDRVPRM32:
return compat_setdrvprm(drive, compat_ptr(param));
case FDGETDRVPRM32:
return compat_getdrvprm(drive, compat_ptr(param));
case FDPOLLDRVSTAT32:
return compat_getdrvstat(drive, true, compat_ptr(param));
case FDGETDRVSTAT32:
return compat_getdrvstat(drive, false, compat_ptr(param));
case FDGETFDCSTAT32:
return compat_getfdcstat(drive, compat_ptr(param));
case FDWERRORGET32:
return compat_werrorget(drive, compat_ptr(param));
}
return -EINVAL;
}
#endif
static void __init config_types(void)
{
bool has_drive = false;
int drive;
/* read drive info out of physical CMOS */
drive = 0;
if (!drive_params[drive].cmos)
drive_params[drive].cmos = FLOPPY0_TYPE;
drive = 1;
if (!drive_params[drive].cmos)
drive_params[drive].cmos = FLOPPY1_TYPE;
/* FIXME: additional physical CMOS drive detection should go here */
for (drive = 0; drive < N_DRIVE; drive++) {
unsigned int type = drive_params[drive].cmos;
struct floppy_drive_params *params;
const char *name = NULL;
char temparea[32];
if (type < ARRAY_SIZE(default_drive_params)) {
params = &default_drive_params[type].params;
if (type) {
name = default_drive_params[type].name;
allowed_drive_mask |= 1 << drive;
} else
allowed_drive_mask &= ~(1 << drive);
} else {
params = &default_drive_params[0].params;
snprintf(temparea, sizeof(temparea),
"unknown type %d (usb?)", type);
name = temparea;
}
if (name) {
const char *prepend;
if (!has_drive) {
prepend = "";
has_drive = true;
pr_info("Floppy drive(s):");
} else {
prepend = ",";
}
pr_cont("%s fd%d is %s", prepend, drive, name);
}
drive_params[drive] = *params;
}
if (has_drive)
pr_cont("\n");
}
static void floppy_release(struct gendisk *disk)
{
int drive = (long)disk->private_data;
mutex_lock(&floppy_mutex);
mutex_lock(&open_lock);
if (!drive_state[drive].fd_ref--) {
DPRINT("floppy_release with fd_ref == 0");
drive_state[drive].fd_ref = 0;
}
if (!drive_state[drive].fd_ref)
opened_disk[drive] = NULL;
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
}
/*
* floppy_open checks for aliasing (/dev/fd0 can be the same as
* /dev/PS0 etc.) and disallows simultaneous access to the same
* drive with different device numbers.
*/
static int floppy_open(struct gendisk *disk, blk_mode_t mode)
{
int drive = (long)disk->private_data;
int old_dev, new_dev;
int try;
int res = -EBUSY;
char *tmp;
mutex_lock(&floppy_mutex);
mutex_lock(&open_lock);
old_dev = drive_state[drive].fd_device;
if (opened_disk[drive] && opened_disk[drive] != disk)
goto out2;
if (!drive_state[drive].fd_ref && (drive_params[drive].flags & FD_BROKEN_DCL)) {
set_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags);
set_bit(FD_VERIFY_BIT, &drive_state[drive].flags);
}
drive_state[drive].fd_ref++;
opened_disk[drive] = disk;
res = -ENXIO;
if (!floppy_track_buffer) {
/* if opening an ED drive, reserve a big buffer,
* else reserve a small one */
if ((drive_params[drive].cmos == 6) || (drive_params[drive].cmos == 5))
try = 64; /* Only 48 actually useful */
else
try = 32; /* Only 24 actually useful */
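/*
 * Fall back progressively below: first halve the buffer so it covers a
 * single side only, then try non-DMA memory before giving up.
 */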
tmp = (char *)fd_dma_mem_alloc(1024 * try);
if (!tmp && !floppy_track_buffer) {
try >>= 1; /* buffer only one side */
INFBOUND(try, 16);
tmp = (char *)fd_dma_mem_alloc(1024 * try);
}
if (!tmp && !floppy_track_buffer)
fallback_on_nodma_alloc(&tmp, 2048 * try);
if (!tmp && !floppy_track_buffer) {
DPRINT("Unable to allocate DMA memory\n");
goto out;
}
if (floppy_track_buffer) {
if (tmp)
fd_dma_mem_free((unsigned long)tmp, try * 1024);
} else {
buffer_min = buffer_max = -1;
floppy_track_buffer = tmp;
max_buffer_sectors = try;
}
}
new_dev = disk->first_minor;
drive_state[drive].fd_device = new_dev;
set_capacity(disks[drive][ITYPE(new_dev)], floppy_sizes[new_dev]);
if (old_dev != -1 && old_dev != new_dev) {
if (buffer_drive == drive)
buffer_track = -1;
}
if (fdc_state[FDC(drive)].rawcmd == 1)
fdc_state[FDC(drive)].rawcmd = 2;
if (!(mode & BLK_OPEN_NDELAY)) {
if (mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) {
drive_state[drive].last_checked = 0;
clear_bit(FD_OPEN_SHOULD_FAIL_BIT,
&drive_state[drive].flags);
if (disk_check_media_change(disk))
floppy_revalidate(disk);
if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
goto out;
if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
goto out;
}
res = -EROFS;
if ((mode & BLK_OPEN_WRITE) &&
!test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
goto out;
}
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
return 0;
out:
drive_state[drive].fd_ref--;
if (!drive_state[drive].fd_ref)
opened_disk[drive] = NULL;
out2:
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
return res;
}
/*
* Check if the disk has been changed or if a change has been faked.
*/
static unsigned int floppy_check_events(struct gendisk *disk,
unsigned int clearing)
{
int drive = (long)disk->private_data;
if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags) ||
test_bit(FD_VERIFY_BIT, &drive_state[drive].flags))
return DISK_EVENT_MEDIA_CHANGE;
if (time_after(jiffies, drive_state[drive].last_checked + drive_params[drive].checkfreq)) {
if (lock_fdc(drive))
return 0;
poll_drive(false, 0);
process_fd_request();
}
if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags) ||
test_bit(FD_VERIFY_BIT, &drive_state[drive].flags) ||
test_bit(drive, &fake_change) ||
drive_no_geom(drive))
return DISK_EVENT_MEDIA_CHANGE;
return 0;
}
/*
* This implements "read block 0" for floppy_revalidate().
* Needed for format autodetection, checking whether there is
* a disk in the drive, and whether that disk is writable.
*/
struct rb0_cbdata {
int drive;
struct completion complete;
};
static void floppy_rb0_cb(struct bio *bio)
{
struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
int drive = cbdata->drive;
if (bio->bi_status) {
pr_info("floppy: error %d while reading block 0\n",
bio->bi_status);
set_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags);
}
complete(&cbdata->complete);
}
static int __floppy_read_block_0(struct block_device *bdev, int drive)
{
struct bio bio;
struct bio_vec bio_vec;
struct page *page;
struct rb0_cbdata cbdata;
page = alloc_page(GFP_NOIO);
if (!page) {
process_fd_request();
return -ENOMEM;
}
cbdata.drive = drive;
bio_init(&bio, bdev, &bio_vec, 1, REQ_OP_READ);
__bio_add_page(&bio, page, block_size(bdev), 0);
bio.bi_iter.bi_sector = 0;
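/*
 * BIO_QUIET suppresses I/O error logging; a failure here is expected
 * when probing a drive with no disk or an unformatted one.
 */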
bio.bi_flags |= (1 << BIO_QUIET);
bio.bi_private = &cbdata;
bio.bi_end_io = floppy_rb0_cb;
init_completion(&cbdata.complete);
submit_bio(&bio);
process_fd_request();
wait_for_completion(&cbdata.complete);
__free_page(page);
return 0;
}
/* revalidate the floppy disk, i.e. trigger format autodetection by reading
* the bootblock (block 0). "Autodetection" is also needed to check whether
* there is a disk in the drive at all... Thus we also do it for fixed
* geometry formats */
static int floppy_revalidate(struct gendisk *disk)
{
int drive = (long)disk->private_data;
int cf;
int res = 0;
if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags) ||
test_bit(FD_VERIFY_BIT, &drive_state[drive].flags) ||
test_bit(drive, &fake_change) ||
drive_no_geom(drive)) {
if (WARN(atomic_read(&usage_count) == 0,
"VFS: revalidate called on non-open device.\n"))
return -EFAULT;
res = lock_fdc(drive);
if (res)
return res;
cf = (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags) ||
test_bit(FD_VERIFY_BIT, &drive_state[drive].flags));
if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) {
process_fd_request(); /* already done by another thread */
return 0;
}
drive_state[drive].maxblock = 0;
drive_state[drive].maxtrack = 0;
if (buffer_drive == drive)
buffer_track = -1;
clear_bit(drive, &fake_change);
clear_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags);
if (cf)
drive_state[drive].generation++;
if (drive_no_geom(drive)) {
/* auto-sensing */
res = __floppy_read_block_0(opened_disk[drive]->part0,
drive);
} else {
if (cf)
poll_drive(false, FD_RAW_NEED_DISK);
process_fd_request();
}
}
set_capacity(disk, floppy_sizes[drive_state[drive].fd_device]);
return res;
}
static const struct block_device_operations floppy_fops = {
.owner = THIS_MODULE,
.open = floppy_open,
.release = floppy_release,
.ioctl = fd_ioctl,
.getgeo = fd_getgeo,
.check_events = floppy_check_events,
#ifdef CONFIG_COMPAT
.compat_ioctl = fd_compat_ioctl,
#endif
};
/*
* Floppy Driver initialization
* =============================
*/
/* Determine the floppy disk controller type */
/* This routine was written by David C. Niemi */
static char __init get_fdc_version(int fdc)
{
int r;
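/*
 * Probe order: DUMPREGS singles out the 8272A-class parts, CONFIGURE
 * identifies the 82072, PERPENDICULAR the 82072A, UNLOCK the pre-1991
 * 82077, and PARTID finally tells the 82077 revisions and 82078
 * variants apart.
 */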
output_byte(fdc, FD_DUMPREGS); /* 82072 and better know DUMPREGS */
if (fdc_state[fdc].reset)
return FDC_NONE;
r = result(fdc);
if (r <= 0x00)
return FDC_NONE; /* No FDC present ??? */
if ((r == 1) && (reply_buffer[ST0] == 0x80)) {
pr_info("FDC %d is an 8272A\n", fdc);
return FDC_8272A; /* 8272a/765 don't know DUMPREGS */
}
if (r != 10) {
pr_info("FDC %d init: DUMPREGS: unexpected return of %d bytes.\n",
fdc, r);
return FDC_UNKNOWN;
}
if (!fdc_configure(fdc)) {
pr_info("FDC %d is an 82072\n", fdc);
return FDC_82072; /* 82072 doesn't know CONFIGURE */
}
output_byte(fdc, FD_PERPENDICULAR);
if (need_more_output(fdc) == MORE_OUTPUT) {
output_byte(fdc, 0);
} else {
pr_info("FDC %d is an 82072A\n", fdc);
return FDC_82072A; /* 82072A as found on Sparcs. */
}
output_byte(fdc, FD_UNLOCK);
r = result(fdc);
if ((r == 1) && (reply_buffer[ST0] == 0x80)) {
pr_info("FDC %d is a pre-1991 82077\n", fdc);
return FDC_82077_ORIG; /* Pre-1991 82077, doesn't know
* LOCK/UNLOCK */
}
if ((r != 1) || (reply_buffer[ST0] != 0x00)) {
pr_info("FDC %d init: UNLOCK: unexpected return of %d bytes.\n",
fdc, r);
return FDC_UNKNOWN;
}
output_byte(fdc, FD_PARTID);
r = result(fdc);
if (r != 1) {
pr_info("FDC %d init: PARTID: unexpected return of %d bytes.\n",
fdc, r);
return FDC_UNKNOWN;
}
if (reply_buffer[ST0] == 0x80) {
pr_info("FDC %d is a post-1991 82077\n", fdc);
return FDC_82077; /* Revised 82077AA passes all the tests */
}
switch (reply_buffer[ST0] >> 5) {
case 0x0:
/* Either a 82078-1 or a 82078SL running at 5Volt */
pr_info("FDC %d is an 82078.\n", fdc);
return FDC_82078;
case 0x1:
pr_info("FDC %d is a 44pin 82078\n", fdc);
return FDC_82078;
case 0x2:
pr_info("FDC %d is a S82078B\n", fdc);
return FDC_S82078B;
case 0x3:
pr_info("FDC %d is a National Semiconductor PC87306\n", fdc);
return FDC_87306;
default:
pr_info("FDC %d init: 82078 variant with unknown PARTID=%d.\n",
fdc, reply_buffer[ST0] >> 5);
return FDC_82078_UNKN;
}
} /* get_fdc_version */
/* lilo configuration */
static void __init floppy_set_flags(int *ints, int param, int param2)
{
int i;
for (i = 0; i < ARRAY_SIZE(default_drive_params); i++) {
if (param)
default_drive_params[i].params.flags |= param2;
else
default_drive_params[i].params.flags &= ~param2;
}
DPRINT("%s flag 0x%x\n", param2 ? "Setting" : "Clearing", param);
}
static void __init daring(int *ints, int param, int param2)
{
int i;
for (i = 0; i < ARRAY_SIZE(default_drive_params); i++) {
if (param) {
default_drive_params[i].params.select_delay = 0;
default_drive_params[i].params.flags |=
FD_SILENT_DCL_CLEAR;
} else {
default_drive_params[i].params.select_delay =
2 * HZ / 100;
default_drive_params[i].params.flags &=
~FD_SILENT_DCL_CLEAR;
}
}
DPRINT("Assuming %s floppy hardware\n", param ? "standard" : "broken");
}
static void __init set_cmos(int *ints, int dummy, int dummy2)
{
int current_drive = 0;
if (ints[0] != 2) {
DPRINT("wrong number of parameters for CMOS\n");
return;
}
current_drive = ints[1];
if (current_drive < 0 || current_drive >= 8) {
DPRINT("bad drive for set_cmos\n");
return;
}
#if N_FDC > 1
if (current_drive >= 4 && !FDC2)
FDC2 = 0x370;
#endif
drive_params[current_drive].cmos = ints[2];
DPRINT("setting CMOS code to %d\n", ints[2]);
}
static struct param_table {
const char *name;
void (*fn) (int *ints, int param, int param2);
int *var;
int def_param;
int param2;
} config_params[] __initdata = {
{"allowed_drive_mask", NULL, &allowed_drive_mask, 0xff, 0}, /* obsolete */
{"all_drives", NULL, &allowed_drive_mask, 0xff, 0}, /* obsolete */
{"asus_pci", NULL, &allowed_drive_mask, 0x33, 0},
{"irq", NULL, &FLOPPY_IRQ, 6, 0},
{"dma", NULL, &FLOPPY_DMA, 2, 0},
{"daring", daring, NULL, 1, 0},
#if N_FDC > 1
{"two_fdc", NULL, &FDC2, 0x370, 0},
{"one_fdc", NULL, &FDC2, 0, 0},
#endif
{"thinkpad", floppy_set_flags, NULL, 1, FD_INVERTED_DCL},
{"broken_dcl", floppy_set_flags, NULL, 1, FD_BROKEN_DCL},
{"messages", floppy_set_flags, NULL, 1, FTD_MSG},
{"silent_dcl_clear", floppy_set_flags, NULL, 1, FD_SILENT_DCL_CLEAR},
{"debug", floppy_set_flags, NULL, 1, FD_DEBUG},
{"nodma", NULL, &can_use_virtual_dma, 1, 0},
{"omnibook", NULL, &can_use_virtual_dma, 1, 0},
{"yesdma", NULL, &can_use_virtual_dma, 0, 0},
{"fifo_depth", NULL, &fifo_depth, 0xa, 0},
{"nofifo", NULL, &no_fifo, 0x20, 0},
{"usefifo", NULL, &no_fifo, 0, 0},
{"cmos", set_cmos, NULL, 0, 0},
{"slow", NULL, &slow_floppy, 1, 0},
{"unexpected_interrupts", NULL, &print_unex, 1, 0},
{"no_unexpected_interrupts", NULL, &print_unex, 0, 0},
{"L40SX", NULL, &print_unex, 0, 0}
EXTRA_FLOPPY_PARAMS
};
static int __init floppy_setup(char *str)
{
int i;
int param;
int ints[11];
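/*
 * get_options() stores the number of integers parsed in ints[0] and the
 * values themselves in ints[1..], returning the remaining (non-numeric)
 * option name.
 */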
str = get_options(str, ARRAY_SIZE(ints), ints);
if (str) {
for (i = 0; i < ARRAY_SIZE(config_params); i++) {
if (strcmp(str, config_params[i].name) == 0) {
if (ints[0])
param = ints[1];
else
param = config_params[i].def_param;
if (config_params[i].fn)
config_params[i].fn(ints, param,
config_params[i].
param2);
if (config_params[i].var) {
DPRINT("%s=%d\n", str, param);
*config_params[i].var = param;
}
return 1;
}
}
}
if (str) {
DPRINT("unknown floppy option [%s]\n", str);
DPRINT("allowed options are:");
for (i = 0; i < ARRAY_SIZE(config_params); i++)
pr_cont(" %s", config_params[i].name);
pr_cont("\n");
} else
DPRINT("botched floppy option\n");
DPRINT("Read Documentation/admin-guide/blockdev/floppy.rst\n");
return 0;
}
static int have_no_fdc = -ENODEV;
static ssize_t floppy_cmos_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *p = to_platform_device(dev);
int drive;
drive = p->id;
return sprintf(buf, "%X\n", drive_params[drive].cmos);
}
static DEVICE_ATTR(cmos, 0444, floppy_cmos_show, NULL);
static struct attribute *floppy_dev_attrs[] = {
&dev_attr_cmos.attr,
NULL
};
ATTRIBUTE_GROUPS(floppy_dev);
static void floppy_device_release(struct device *dev)
{
}
static int floppy_resume(struct device *dev)
{
int fdc;
int saved_drive;
saved_drive = current_drive;
for (fdc = 0; fdc < N_FDC; fdc++)
if (fdc_state[fdc].address != -1)
user_reset_fdc(REVDRIVE(fdc, 0), FD_RESET_ALWAYS, false);
set_fdc(saved_drive);
return 0;
}
static const struct dev_pm_ops floppy_pm_ops = {
.resume = floppy_resume,
.restore = floppy_resume,
};
static struct platform_driver floppy_driver = {
.driver = {
.name = "floppy",
.pm = &floppy_pm_ops,
},
};
static const struct blk_mq_ops floppy_mq_ops = {
.queue_rq = floppy_queue_rq,
};
static struct platform_device floppy_device[N_DRIVE];
static bool registered[N_DRIVE];
static bool floppy_available(int drive)
{
if (!(allowed_drive_mask & (1 << drive)))
return false;
if (fdc_state[FDC(drive)].version == FDC_NONE)
return false;
return true;
}
static int floppy_alloc_disk(unsigned int drive, unsigned int type)
{
struct gendisk *disk;
disk = blk_mq_alloc_disk(&tag_sets[drive], NULL);
if (IS_ERR(disk))
return PTR_ERR(disk);
blk_queue_max_hw_sectors(disk->queue, 64);
disk->major = FLOPPY_MAJOR;
disk->first_minor = TOMINOR(drive) | (type << 2);
disk->minors = 1;
disk->fops = &floppy_fops;
disk->flags |= GENHD_FL_NO_PART;
disk->events = DISK_EVENT_MEDIA_CHANGE;
if (type)
sprintf(disk->disk_name, "fd%d_type%d", drive, type);
else
sprintf(disk->disk_name, "fd%d", drive);
/* to be cleaned up... */
disk->private_data = (void *)(long)drive;
disk->flags |= GENHD_FL_REMOVABLE;
disks[drive][type] = disk;
return 0;
}
static DEFINE_MUTEX(floppy_probe_lock);
static void floppy_probe(dev_t dev)
{
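/*
 * Minor number layout: bits 0-1 together with bit 7 select one of up to
 * eight drives (two FDCs with four drives each), while bits 2-6 select
 * the format type.
 */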
unsigned int drive = (MINOR(dev) & 3) | ((MINOR(dev) & 0x80) >> 5);
unsigned int type = (MINOR(dev) >> 2) & 0x1f;
if (drive >= N_DRIVE || !floppy_available(drive) ||
type >= ARRAY_SIZE(floppy_type))
return;
mutex_lock(&floppy_probe_lock);
if (disks[drive][type])
goto out;
if (floppy_alloc_disk(drive, type))
goto out;
if (add_disk(disks[drive][type]))
goto cleanup_disk;
out:
mutex_unlock(&floppy_probe_lock);
return;
cleanup_disk:
put_disk(disks[drive][type]);
disks[drive][type] = NULL;
mutex_unlock(&floppy_probe_lock);
}
static int __init do_floppy_init(void)
{
int i, unit, drive, err;
set_debugt();
interruptjiffies = resultjiffies = jiffies;
#if defined(CONFIG_PPC)
if (check_legacy_ioport(FDC1))
return -ENODEV;
#endif
raw_cmd = NULL;
floppy_wq = alloc_ordered_workqueue("floppy", 0);
if (!floppy_wq)
return -ENOMEM;
for (drive = 0; drive < N_DRIVE; drive++) {
memset(&tag_sets[drive], 0, sizeof(tag_sets[drive]));
tag_sets[drive].ops = &floppy_mq_ops;
tag_sets[drive].nr_hw_queues = 1;
tag_sets[drive].nr_maps = 1;
tag_sets[drive].queue_depth = 2;
tag_sets[drive].numa_node = NUMA_NO_NODE;
tag_sets[drive].flags = BLK_MQ_F_SHOULD_MERGE;
err = blk_mq_alloc_tag_set(&tag_sets[drive]);
if (err)
goto out_put_disk;
err = floppy_alloc_disk(drive, 0);
if (err) {
blk_mq_free_tag_set(&tag_sets[drive]);
goto out_put_disk;
}
timer_setup(&motor_off_timer[drive], motor_off_callback, 0);
}
err = __register_blkdev(FLOPPY_MAJOR, "fd", floppy_probe);
if (err)
goto out_put_disk;
err = platform_driver_register(&floppy_driver);
if (err)
goto out_unreg_blkdev;
for (i = 0; i < 256; i++)
if (ITYPE(i))
floppy_sizes[i] = floppy_type[ITYPE(i)].size;
else
floppy_sizes[i] = MAX_DISK_SIZE << 1;
reschedule_timeout(MAXTIMEOUT, "floppy init");
config_types();
for (i = 0; i < N_FDC; i++) {
memset(&fdc_state[i], 0, sizeof(*fdc_state));
fdc_state[i].dtr = -1;
fdc_state[i].dor = 0x4;
#if defined(__sparc__) || defined(__mc68000__)
/* sparcs/sun3x don't have a DOR reset which we can fall back on */
#ifdef __mc68000__
if (MACH_IS_SUN3X)
#endif
fdc_state[i].version = FDC_82072A;
#endif
}
use_virtual_dma = can_use_virtual_dma & 1;
fdc_state[0].address = FDC1;
if (fdc_state[0].address == -1) {
cancel_delayed_work(&fd_timeout);
err = -ENODEV;
goto out_unreg_driver;
}
#if N_FDC > 1
fdc_state[1].address = FDC2;
#endif
current_fdc = 0; /* reset fdc in case of unexpected interrupt */
err = floppy_grab_irq_and_dma();
if (err) {
cancel_delayed_work(&fd_timeout);
err = -EBUSY;
goto out_unreg_driver;
}
/* initialise drive state */
for (drive = 0; drive < N_DRIVE; drive++) {
memset(&drive_state[drive], 0, sizeof(drive_state[drive]));
memset(&write_errors[drive], 0, sizeof(write_errors[drive]));
set_bit(FD_DISK_NEWCHANGE_BIT, &drive_state[drive].flags);
set_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags);
set_bit(FD_VERIFY_BIT, &drive_state[drive].flags);
drive_state[drive].fd_device = -1;
floppy_track_buffer = NULL;
max_buffer_sectors = 0;
}
/*
* Small 10 msec delay to let through any interrupt that
* initialization might have triggered, so that it does not
* confuse detection:
*/
msleep(10);
for (i = 0; i < N_FDC; i++) {
fdc_state[i].driver_version = FD_DRIVER_VERSION;
for (unit = 0; unit < 4; unit++)
fdc_state[i].track[unit] = 0;
if (fdc_state[i].address == -1)
continue;
fdc_state[i].rawcmd = 2;
if (user_reset_fdc(REVDRIVE(i, 0), FD_RESET_ALWAYS, false)) {
/* free ioports reserved by floppy_grab_irq_and_dma() */
floppy_release_regions(i);
fdc_state[i].address = -1;
fdc_state[i].version = FDC_NONE;
continue;
}
/* Try to determine the floppy controller type */
fdc_state[i].version = get_fdc_version(i);
if (fdc_state[i].version == FDC_NONE) {
/* free ioports reserved by floppy_grab_irq_and_dma() */
floppy_release_regions(i);
fdc_state[i].address = -1;
continue;
}
if (can_use_virtual_dma == 2 &&
fdc_state[i].version < FDC_82072A)
can_use_virtual_dma = 0;
have_no_fdc = 0;
/* Not all FDCs seem to be able to handle the version command
* properly, so force a reset for the standard FDC clones,
* to avoid interrupt garbage.
*/
user_reset_fdc(REVDRIVE(i, 0), FD_RESET_ALWAYS, false);
}
current_fdc = 0;
cancel_delayed_work(&fd_timeout);
current_drive = 0;
initialized = true;
if (have_no_fdc) {
DPRINT("no floppy controllers found\n");
err = have_no_fdc;
goto out_release_dma;
}
for (drive = 0; drive < N_DRIVE; drive++) {
if (!floppy_available(drive))
continue;
floppy_device[drive].name = floppy_device_name;
floppy_device[drive].id = drive;
floppy_device[drive].dev.release = floppy_device_release;
floppy_device[drive].dev.groups = floppy_dev_groups;
err = platform_device_register(&floppy_device[drive]);
if (err)
goto out_remove_drives;
registered[drive] = true;
err = device_add_disk(&floppy_device[drive].dev,
disks[drive][0], NULL);
if (err)
goto out_remove_drives;
}
return 0;
out_remove_drives:
while (drive--) {
if (floppy_available(drive)) {
del_gendisk(disks[drive][0]);
if (registered[drive])
platform_device_unregister(&floppy_device[drive]);
}
}
out_release_dma:
if (atomic_read(&usage_count))
floppy_release_irq_and_dma();
out_unreg_driver:
platform_driver_unregister(&floppy_driver);
out_unreg_blkdev:
unregister_blkdev(FLOPPY_MAJOR, "fd");
out_put_disk:
destroy_workqueue(floppy_wq);
for (drive = 0; drive < N_DRIVE; drive++) {
if (!disks[drive][0])
break;
del_timer_sync(&motor_off_timer[drive]);
put_disk(disks[drive][0]);
blk_mq_free_tag_set(&tag_sets[drive]);
}
return err;
}
#ifndef MODULE
static __init void floppy_async_init(void *data, async_cookie_t cookie)
{
do_floppy_init();
}
#endif
static int __init floppy_init(void)
{
#ifdef MODULE
return do_floppy_init();
#else
/* Don't hold up the bootup by the floppy initialization */
async_schedule(floppy_async_init, NULL);
return 0;
#endif
}
static const struct io_region {
int offset;
int size;
} io_regions[] = {
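/*
 * Offsets are relative to the FDC base address: +2 is the digital
 * output register (DOR), +4 and +5 are the main status register and
 * the data FIFO, and +7 is the digital input / config control
 * register. The skipped offsets are explained below.
 */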
{ 2, 1 },
/* address + 3 is sometimes reserved by pnp bios for motherboard */
{ 4, 2 },
/* address + 6 is reserved, and may be taken by IDE.
* Unfortunately, Adaptec doesn't know this. :-( */
{ 7, 1 },
};
static void floppy_release_allocated_regions(int fdc, const struct io_region *p)
{
while (p != io_regions) {
p--;
release_region(fdc_state[fdc].address + p->offset, p->size);
}
}
#define ARRAY_END(X) (&((X)[ARRAY_SIZE(X)]))
static int floppy_request_regions(int fdc)
{
const struct io_region *p;
for (p = io_regions; p < ARRAY_END(io_regions); p++) {
if (!request_region(fdc_state[fdc].address + p->offset,
p->size, "floppy")) {
DPRINT("Floppy io-port 0x%04lx in use\n",
fdc_state[fdc].address + p->offset);
floppy_release_allocated_regions(fdc, p);
return -EBUSY;
}
}
return 0;
}
static void floppy_release_regions(int fdc)
{
floppy_release_allocated_regions(fdc, ARRAY_END(io_regions));
}
static int floppy_grab_irq_and_dma(void)
{
int fdc;
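/* Only the first user actually grabs the IRQ and DMA resources. */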
if (atomic_inc_return(&usage_count) > 1)
return 0;
/*
* We might have scheduled a free_irq(); wait for it to
* drain first:
*/
flush_workqueue(floppy_wq);
if (fd_request_irq()) {
DPRINT("Unable to grab IRQ%d for the floppy driver\n",
FLOPPY_IRQ);
atomic_dec(&usage_count);
return -1;
}
if (fd_request_dma()) {
DPRINT("Unable to grab DMA%d for the floppy driver\n",
FLOPPY_DMA);
if (can_use_virtual_dma & 2)
use_virtual_dma = can_use_virtual_dma = 1;
if (!(can_use_virtual_dma & 1)) {
fd_free_irq();
atomic_dec(&usage_count);
return -1;
}
}
for (fdc = 0; fdc < N_FDC; fdc++) {
if (fdc_state[fdc].address != -1) {
if (floppy_request_regions(fdc))
goto cleanup;
}
}
for (fdc = 0; fdc < N_FDC; fdc++) {
if (fdc_state[fdc].address != -1) {
reset_fdc_info(fdc, 1);
fdc_outb(fdc_state[fdc].dor, fdc, FD_DOR);
}
}
set_dor(0, ~0, 8); /* avoid immediate interrupt */
for (fdc = 0; fdc < N_FDC; fdc++)
if (fdc_state[fdc].address != -1)
fdc_outb(fdc_state[fdc].dor, fdc, FD_DOR);
/*
* The driver will try to free resources and relies on us
* to know whether they were allocated or not.
*/
current_fdc = 0;
irqdma_allocated = 1;
return 0;
cleanup:
fd_free_irq();
fd_free_dma();
while (--fdc >= 0)
floppy_release_regions(fdc);
current_fdc = 0;
atomic_dec(&usage_count);
return -1;
}
static void floppy_release_irq_and_dma(void)
{
int fdc;
#ifndef __sparc__
int drive;
#endif
long tmpsize;
unsigned long tmpaddr;
if (!atomic_dec_and_test(&usage_count))
return;
if (irqdma_allocated) {
fd_disable_dma();
fd_free_dma();
fd_free_irq();
irqdma_allocated = 0;
}
set_dor(0, ~0, 8);
#if N_FDC > 1
set_dor(1, ~8, 0);
#endif
if (floppy_track_buffer && max_buffer_sectors) {
tmpsize = max_buffer_sectors * 1024;
tmpaddr = (unsigned long)floppy_track_buffer;
floppy_track_buffer = NULL;
max_buffer_sectors = 0;
buffer_min = buffer_max = -1;
fd_dma_mem_free(tmpaddr, tmpsize);
}
#ifndef __sparc__
for (drive = 0; drive < N_FDC * 4; drive++)
if (timer_pending(motor_off_timer + drive))
pr_info("motor off timer %d still active\n", drive);
#endif
if (delayed_work_pending(&fd_timeout))
pr_info("floppy timer still active:%s\n", timeout_message);
if (delayed_work_pending(&fd_timer))
pr_info("auxiliary floppy timer still active\n");
if (work_pending(&floppy_work))
pr_info("work still pending\n");
for (fdc = 0; fdc < N_FDC; fdc++)
if (fdc_state[fdc].address != -1)
floppy_release_regions(fdc);
}
#ifdef MODULE
static char *floppy;
static void __init parse_floppy_cfg_string(char *cfg)
{
char *ptr;
while (*cfg) {
ptr = cfg;
while (*cfg && *cfg != ' ' && *cfg != '\t')
cfg++;
if (*cfg) {
*cfg = '\0';
cfg++;
}
if (*ptr)
floppy_setup(ptr);
}
}
static int __init floppy_module_init(void)
{
if (floppy)
parse_floppy_cfg_string(floppy);
return floppy_init();
}
module_init(floppy_module_init);
static void __exit floppy_module_exit(void)
{
int drive, i;
unregister_blkdev(FLOPPY_MAJOR, "fd");
platform_driver_unregister(&floppy_driver);
destroy_workqueue(floppy_wq);
for (drive = 0; drive < N_DRIVE; drive++) {
del_timer_sync(&motor_off_timer[drive]);
if (floppy_available(drive)) {
for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
if (disks[drive][i])
del_gendisk(disks[drive][i]);
}
if (registered[drive])
platform_device_unregister(&floppy_device[drive]);
}
for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
if (disks[drive][i])
put_disk(disks[drive][i]);
}
blk_mq_free_tag_set(&tag_sets[drive]);
}
cancel_delayed_work_sync(&fd_timeout);
cancel_delayed_work_sync(&fd_timer);
if (atomic_read(&usage_count))
floppy_release_irq_and_dma();
/* eject disk, if any */
fd_eject(0);
}
module_exit(floppy_module_exit);
module_param(floppy, charp, 0);
module_param(FLOPPY_IRQ, int, 0);
module_param(FLOPPY_DMA, int, 0);
MODULE_AUTHOR("Alain L. Knaff");
MODULE_LICENSE("GPL");
/* This doesn't actually get used other than for module information */
static const struct pnp_device_id floppy_pnpids[] = {
{"PNP0700", 0},
{}
};
MODULE_DEVICE_TABLE(pnp, floppy_pnpids);
#else
__setup("floppy=", floppy_setup);
module_init(floppy_init)
#endif
MODULE_ALIAS_BLOCKDEV_MAJOR(FLOPPY_MAJOR);
| linux-master | drivers/block/floppy.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for SWIM (Sander-Wozniak Integrated Machine) floppy controller
*
* Copyright (C) 2004,2008 Laurent Vivier <[email protected]>
*
* based on Alastair Bridgewater SWIM analysis, 2001
* based on SWIM3 driver (c) Paul Mackerras, 1996
* based on netBSD IWM driver (c) 1997, 1998 Hauke Fath.
*
* 2004-08-21 (lv) - Initial implementation
* 2008-10-30 (lv) - Port to 2.6
*/
#include <linux/module.h>
#include <linux/fd.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <asm/mac_via.h>
#define CARDNAME "swim"
struct sector_header {
unsigned char side;
unsigned char track;
unsigned char sector;
unsigned char size;
unsigned char crc0;
unsigned char crc1;
} __attribute__((packed));
#define DRIVER_VERSION "Version 0.2 (2008-10-30)"
#define REG(x) unsigned char x, x ## _pad[0x200 - 1];
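/*
 * Each SWIM register is one byte wide but the registers are spaced
 * 0x200 bytes apart in the chip's address window, hence the padding.
 */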
struct swim {
REG(write_data)
REG(write_mark)
REG(write_CRC)
REG(write_parameter)
REG(write_phase)
REG(write_setup)
REG(write_mode0)
REG(write_mode1)
REG(read_data)
REG(read_mark)
REG(read_error)
REG(read_parameter)
REG(read_phase)
REG(read_setup)
REG(read_status)
REG(read_handshake)
} __attribute__((packed));
#define swim_write(base, reg, v) out_8(&(base)->write_##reg, (v))
#define swim_read(base, reg) in_8(&(base)->read_##reg)
/* IWM registers */
struct iwm {
REG(ph0L)
REG(ph0H)
REG(ph1L)
REG(ph1H)
REG(ph2L)
REG(ph2H)
REG(ph3L)
REG(ph3H)
REG(mtrOff)
REG(mtrOn)
REG(intDrive)
REG(extDrive)
REG(q6L)
REG(q6H)
REG(q7L)
REG(q7H)
} __attribute__((packed));
#define iwm_write(base, reg, v) out_8(&(base)->reg, (v))
#define iwm_read(base, reg) in_8(&(base)->reg)
/* bits in phase register */
#define SEEK_POSITIVE 0x070
#define SEEK_NEGATIVE 0x074
#define STEP 0x071
#define MOTOR_ON 0x072
#define MOTOR_OFF 0x076
#define INDEX 0x073
#define EJECT 0x077
#define SETMFM 0x171
#define SETGCR 0x175
#define RELAX 0x033
#define LSTRB 0x008
#define CA_MASK 0x077
/* Select values for swim_select and swim_readbit */
#define READ_DATA_0 0x074
#define ONEMEG_DRIVE 0x075
#define SINGLE_SIDED 0x076
#define DRIVE_PRESENT 0x077
#define DISK_IN 0x170
#define WRITE_PROT 0x171
#define TRACK_ZERO 0x172
#define TACHO 0x173
#define READ_DATA_1 0x174
#define GCR_MODE 0x175
#define SEEK_COMPLETE 0x176
#define TWOMEG_MEDIA 0x177
/* Bits in handshake register */
#define MARK_BYTE 0x01
#define CRC_ZERO 0x02
#define RDDATA 0x04
#define SENSE 0x08
#define MOTEN 0x10
#define ERROR 0x20
#define DAT2BYTE 0x40
#define DAT1BYTE 0x80
/* bits in setup register */
#define S_INV_WDATA 0x01
#define S_3_5_SELECT 0x02
#define S_GCR 0x04
#define S_FCLK_DIV2 0x08
#define S_ERROR_CORR 0x10
#define S_IBM_DRIVE 0x20
#define S_GCR_WRITE 0x40
#define S_TIMEOUT 0x80
/* bits in mode register */
#define CLFIFO 0x01
#define ENBL1 0x02
#define ENBL2 0x04
#define ACTION 0x08
#define WRITE_MODE 0x10
#define HEDSEL 0x20
#define MOTON 0x80
/*----------------------------------------------------------------------------*/
enum drive_location {
INTERNAL_DRIVE = 0x02,
EXTERNAL_DRIVE = 0x04,
};
enum media_type {
DD_MEDIA,
HD_MEDIA,
};
struct floppy_state {
/* physical properties */
enum drive_location location; /* internal or external drive */
int head_number; /* single- or double-sided drive */
/* media */
int disk_in;
int ejected;
enum media_type type;
int write_protected;
int total_secs;
int secpercyl;
int secpertrack;
/* in-use information */
int track;
int ref_count;
bool registered;
struct gendisk *disk;
struct blk_mq_tag_set tag_set;
/* parent controller */
struct swim_priv *swd;
};
enum motor_action {
OFF,
ON,
};
enum head {
LOWER_HEAD = 0,
UPPER_HEAD = 1,
};
#define FD_MAX_UNIT 2
struct swim_priv {
struct swim __iomem *base;
spinlock_t lock;
int floppy_count;
struct floppy_state unit[FD_MAX_UNIT];
};
extern int swim_read_sector_header(struct swim __iomem *base,
struct sector_header *header);
extern int swim_read_sector_data(struct swim __iomem *base,
unsigned char *data);
static DEFINE_MUTEX(swim_mutex);
static inline void set_swim_mode(struct swim __iomem *base, int enable)
{
struct iwm __iomem *iwm_base;
unsigned long flags;
if (!enable) {
swim_write(base, mode0, 0xf8);
return;
}
iwm_base = (struct iwm __iomem *)base;
local_irq_save(flags);
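/*
 * This read/write sequence is the mode-switch handshake that takes the
 * chip out of IWM compatibility and into SWIM mode (per the SWIM
 * analysis credited in the header); the exact magic values written to
 * q7H are otherwise undocumented.
 */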
iwm_read(iwm_base, q7L);
iwm_read(iwm_base, mtrOff);
iwm_read(iwm_base, q6H);
iwm_write(iwm_base, q7H, 0x57);
iwm_write(iwm_base, q7H, 0x17);
iwm_write(iwm_base, q7H, 0x57);
iwm_write(iwm_base, q7H, 0x57);
local_irq_restore(flags);
}
static inline int get_swim_mode(struct swim __iomem *base)
{
unsigned long flags;
local_irq_save(flags);
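/*
 * The phase register reads back what was written only on a SWIM; on an
 * IWM the readback does not match, which is how the two chips are told
 * apart here.
 */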
swim_write(base, phase, 0xf5);
if (swim_read(base, phase) != 0xf5)
goto is_iwm;
swim_write(base, phase, 0xf6);
if (swim_read(base, phase) != 0xf6)
goto is_iwm;
swim_write(base, phase, 0xf7);
if (swim_read(base, phase) != 0xf7)
goto is_iwm;
local_irq_restore(flags);
return 1;
is_iwm:
local_irq_restore(flags);
return 0;
}
static inline void swim_select(struct swim __iomem *base, int sel)
{
swim_write(base, phase, RELAX);
via1_set_head(sel & 0x100);
swim_write(base, phase, sel & CA_MASK);
}
static inline void swim_action(struct swim __iomem *base, int action)
{
unsigned long flags;
local_irq_save(flags);
swim_select(base, action);
udelay(1);
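/*
 * Strobe LSTRB high and then low, which latches the command selected
 * above into the drive.
 */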
swim_write(base, phase, (LSTRB<<4) | LSTRB);
udelay(1);
swim_write(base, phase, (LSTRB<<4) | ((~LSTRB) & 0x0F));
udelay(1);
local_irq_restore(flags);
}
static inline int swim_readbit(struct swim __iomem *base, int bit)
{
int stat;
swim_select(base, bit);
udelay(10);
stat = swim_read(base, handshake);
return (stat & SENSE) == 0;
}
static inline void swim_drive(struct swim __iomem *base,
enum drive_location location)
{
if (location == INTERNAL_DRIVE) {
swim_write(base, mode0, EXTERNAL_DRIVE); /* clear drive 1 bit */
swim_write(base, mode1, INTERNAL_DRIVE); /* set drive 0 bit */
} else if (location == EXTERNAL_DRIVE) {
swim_write(base, mode0, INTERNAL_DRIVE); /* clear drive 0 bit */
swim_write(base, mode1, EXTERNAL_DRIVE); /* set drive 1 bit */
}
}
static inline void swim_motor(struct swim __iomem *base,
enum motor_action action)
{
if (action == ON) {
int i;
swim_action(base, MOTOR_ON);
for (i = 0; i < 2*HZ; i++) {
swim_select(base, RELAX);
if (swim_readbit(base, MOTOR_ON))
break;
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
}
} else if (action == OFF) {
swim_action(base, MOTOR_OFF);
swim_select(base, RELAX);
}
}
static inline void swim_eject(struct swim __iomem *base)
{
int i;
swim_action(base, EJECT);
for (i = 0; i < 2*HZ; i++) {
swim_select(base, RELAX);
if (!swim_readbit(base, DISK_IN))
break;
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
}
swim_select(base, RELAX);
}
static inline void swim_head(struct swim __iomem *base, enum head head)
{
/* wait until the drive is ready */
if (head == UPPER_HEAD)
swim_select(base, READ_DATA_1);
else if (head == LOWER_HEAD)
swim_select(base, READ_DATA_0);
}
static inline int swim_step(struct swim __iomem *base)
{
int wait;
swim_action(base, STEP);
for (wait = 0; wait < HZ; wait++) {
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
swim_select(base, RELAX);
if (!swim_readbit(base, STEP))
return 0;
}
return -1;
}
static inline int swim_track00(struct swim __iomem *base)
{
int try;
swim_action(base, SEEK_NEGATIVE);
for (try = 0; try < 100; try++) {
swim_select(base, RELAX);
if (swim_readbit(base, TRACK_ZERO))
break;
if (swim_step(base))
return -1;
}
if (swim_readbit(base, TRACK_ZERO))
return 0;
return -1;
}
static inline int swim_seek(struct swim __iomem *base, int step)
{
if (step == 0)
return 0;
if (step < 0) {
swim_action(base, SEEK_NEGATIVE);
step = -step;
} else
swim_action(base, SEEK_POSITIVE);
for ( ; step > 0; step--) {
if (swim_step(base))
return -1;
}
return 0;
}
static inline int swim_track(struct floppy_state *fs, int track)
{
struct swim __iomem *base = fs->swd->base;
int ret;
ret = swim_seek(base, track - fs->track);
if (ret == 0)
fs->track = track;
else {
swim_track00(base);
fs->track = 0;
}
return ret;
}
static int floppy_eject(struct floppy_state *fs)
{
struct swim __iomem *base = fs->swd->base;
swim_drive(base, fs->location);
swim_motor(base, OFF);
swim_eject(base);
fs->disk_in = 0;
fs->ejected = 1;
return 0;
}
static inline int swim_read_sector(struct floppy_state *fs,
int side, int track,
int sector, unsigned char *buffer)
{
struct swim __iomem *base = fs->swd->base;
unsigned long flags;
struct sector_header header;
int ret = -1;
short i;
swim_track(fs, track);
swim_write(base, mode1, MOTON);
swim_head(base, side);
swim_write(base, mode0, side);
local_irq_save(flags);
for (i = 0; i < 36; i++) {
ret = swim_read_sector_header(base, &header);
if (!ret && (header.sector == sector)) {
/* found */
ret = swim_read_sector_data(base, buffer);
break;
}
}
local_irq_restore(flags);
swim_write(base, mode0, MOTON);
if ((header.side != side) || (header.track != track) ||
(header.sector != sector))
return 0;
return ret;
}
static blk_status_t floppy_read_sectors(struct floppy_state *fs,
int req_sector, int sectors_nb,
unsigned char *buffer)
{
struct swim __iomem *base = fs->swd->base;
int ret;
int side, track, sector;
int i, try;
swim_drive(base, fs->location);
for (i = req_sector; i < req_sector + sectors_nb; i++) {
int x;
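/* Convert the linear sector index to cylinder/head/sector. */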
track = i / fs->secpercyl;
x = i % fs->secpercyl;
side = x / fs->secpertrack;
sector = x % fs->secpertrack + 1;
try = 5;
do {
ret = swim_read_sector(fs, side, track, sector,
buffer);
if (try-- == 0)
return BLK_STS_IOERR;
} while (ret != 512);
buffer += ret;
}
return 0;
}
static blk_status_t swim_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct floppy_state *fs = hctx->queue->queuedata;
struct swim_priv *swd = fs->swd;
struct request *req = bd->rq;
blk_status_t err;
if (!spin_trylock_irq(&swd->lock))
return BLK_STS_DEV_RESOURCE;
blk_mq_start_request(req);
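/* Only reads are supported: fail writes and any access without a medium. */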
if (!fs->disk_in || rq_data_dir(req) == WRITE) {
err = BLK_STS_IOERR;
goto out;
}
do {
err = floppy_read_sectors(fs, blk_rq_pos(req),
blk_rq_cur_sectors(req),
bio_data(req->bio));
} while (blk_update_request(req, err, blk_rq_cur_bytes(req)));
__blk_mq_end_request(req, err);
err = BLK_STS_OK;
out:
spin_unlock_irq(&swd->lock);
return err;
}
static struct floppy_struct floppy_type[4] = {
{ 0, 0, 0, 0, 0, 0x00, 0x00, 0x00, 0x00, NULL }, /* no testing */
{ 720, 9, 1, 80, 0, 0x2A, 0x02, 0xDF, 0x50, NULL }, /* 360KB SS 3.5" */
{ 1440, 9, 2, 80, 0, 0x2A, 0x02, 0xDF, 0x50, NULL }, /* 720KB 3.5" */
{ 2880, 18, 2, 80, 0, 0x1B, 0x00, 0xCF, 0x6C, NULL }, /* 1.44MB 3.5" */
};
static int get_floppy_geometry(struct floppy_state *fs, int type,
struct floppy_struct **g)
{
if (type >= ARRAY_SIZE(floppy_type))
return -EINVAL;
if (type)
*g = &floppy_type[type];
else if (fs->type == HD_MEDIA) /* High-Density media */
*g = &floppy_type[3];
else if (fs->head_number == 2) /* double-sided */
*g = &floppy_type[2];
else
*g = &floppy_type[1];
return 0;
}
static void setup_medium(struct floppy_state *fs)
{
struct swim __iomem *base = fs->swd->base;
if (swim_readbit(base, DISK_IN)) {
struct floppy_struct *g;
fs->disk_in = 1;
fs->write_protected = swim_readbit(base, WRITE_PROT);
if (swim_track00(base))
printk(KERN_ERR
"SWIM: cannot move floppy head to track 0\n");
swim_track00(base);
fs->type = swim_readbit(base, TWOMEG_MEDIA) ?
HD_MEDIA : DD_MEDIA;
fs->head_number = swim_readbit(base, SINGLE_SIDED) ? 1 : 2;
get_floppy_geometry(fs, 0, &g);
fs->total_secs = g->size;
fs->secpercyl = g->head * g->sect;
fs->secpertrack = g->sect;
fs->track = 0;
} else {
fs->disk_in = 0;
}
}
static int floppy_open(struct gendisk *disk, blk_mode_t mode)
{
struct floppy_state *fs = disk->private_data;
struct swim __iomem *base = fs->swd->base;
int err;
if (fs->ref_count == -1 || (fs->ref_count && mode & BLK_OPEN_EXCL))
return -EBUSY;
if (mode & BLK_OPEN_EXCL)
fs->ref_count = -1;
else
fs->ref_count++;
swim_write(base, setup, S_IBM_DRIVE | S_FCLK_DIV2);
udelay(10);
swim_drive(base, fs->location);
swim_motor(base, ON);
swim_action(base, SETMFM);
if (fs->ejected)
setup_medium(fs);
if (!fs->disk_in) {
err = -ENXIO;
goto out;
}
set_capacity(fs->disk, fs->total_secs);
if (mode & BLK_OPEN_NDELAY)
return 0;
if (mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) {
if (disk_check_media_change(disk) && fs->disk_in)
fs->ejected = 0;
if ((mode & BLK_OPEN_WRITE) && fs->write_protected) {
err = -EROFS;
goto out;
}
}
return 0;
out:
if (fs->ref_count < 0)
fs->ref_count = 0;
else if (fs->ref_count > 0)
--fs->ref_count;
if (fs->ref_count == 0)
swim_motor(base, OFF);
return err;
}
static int floppy_unlocked_open(struct gendisk *disk, blk_mode_t mode)
{
int ret;
mutex_lock(&swim_mutex);
ret = floppy_open(disk, mode);
mutex_unlock(&swim_mutex);
return ret;
}
static void floppy_release(struct gendisk *disk)
{
struct floppy_state *fs = disk->private_data;
struct swim __iomem *base = fs->swd->base;
mutex_lock(&swim_mutex);
if (fs->ref_count < 0)
fs->ref_count = 0;
else if (fs->ref_count > 0)
--fs->ref_count;
if (fs->ref_count == 0)
swim_motor(base, OFF);
mutex_unlock(&swim_mutex);
}
static int floppy_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long param)
{
struct floppy_state *fs = bdev->bd_disk->private_data;
int err;
if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))
return -EPERM;
switch (cmd) {
case FDEJECT:
if (fs->ref_count != 1)
return -EBUSY;
mutex_lock(&swim_mutex);
err = floppy_eject(fs);
mutex_unlock(&swim_mutex);
return err;
case FDGETPRM:
if (copy_to_user((void __user *) param, (void *) &floppy_type,
sizeof(struct floppy_struct)))
return -EFAULT;
return 0;
}
return -ENOTTY;
}
static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct floppy_state *fs = bdev->bd_disk->private_data;
struct floppy_struct *g;
int ret;
ret = get_floppy_geometry(fs, 0, &g);
if (ret)
return ret;
geo->heads = g->head;
geo->sectors = g->sect;
geo->cylinders = g->track;
return 0;
}
static unsigned int floppy_check_events(struct gendisk *disk,
unsigned int clearing)
{
struct floppy_state *fs = disk->private_data;
return fs->ejected ? DISK_EVENT_MEDIA_CHANGE : 0;
}
static const struct block_device_operations floppy_fops = {
.owner = THIS_MODULE,
.open = floppy_unlocked_open,
.release = floppy_release,
.ioctl = floppy_ioctl,
.getgeo = floppy_getgeo,
.check_events = floppy_check_events,
};
static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
{
struct floppy_state *fs = &swd->unit[swd->floppy_count];
struct swim __iomem *base = swd->base;
fs->location = location;
swim_drive(base, location);
swim_motor(base, OFF);
fs->type = HD_MEDIA;
fs->head_number = 2;
fs->ref_count = 0;
fs->ejected = 1;
swd->floppy_count++;
return 0;
}
static const struct blk_mq_ops swim_mq_ops = {
.queue_rq = swim_queue_rq,
};
static void swim_cleanup_floppy_disk(struct floppy_state *fs)
{
struct gendisk *disk = fs->disk;
if (!disk)
return;
if (fs->registered)
del_gendisk(fs->disk);
put_disk(disk);
blk_mq_free_tag_set(&fs->tag_set);
}
static int swim_floppy_init(struct swim_priv *swd)
{
int err;
int drive;
struct swim __iomem *base = swd->base;
/* scan floppy drives */
swim_drive(base, INTERNAL_DRIVE);
if (swim_readbit(base, DRIVE_PRESENT) &&
!swim_readbit(base, ONEMEG_DRIVE))
swim_add_floppy(swd, INTERNAL_DRIVE);
swim_drive(base, EXTERNAL_DRIVE);
if (swim_readbit(base, DRIVE_PRESENT) &&
!swim_readbit(base, ONEMEG_DRIVE))
swim_add_floppy(swd, EXTERNAL_DRIVE);
/* register floppy drives */
err = register_blkdev(FLOPPY_MAJOR, "fd");
if (err) {
printk(KERN_ERR "Unable to get major %d for SWIM floppy\n",
FLOPPY_MAJOR);
return -EBUSY;
}
spin_lock_init(&swd->lock);
for (drive = 0; drive < swd->floppy_count; drive++) {
err = blk_mq_alloc_sq_tag_set(&swd->unit[drive].tag_set,
&swim_mq_ops, 2, BLK_MQ_F_SHOULD_MERGE);
if (err)
goto exit_put_disks;
swd->unit[drive].disk =
blk_mq_alloc_disk(&swd->unit[drive].tag_set,
&swd->unit[drive]);
if (IS_ERR(swd->unit[drive].disk)) {
blk_mq_free_tag_set(&swd->unit[drive].tag_set);
err = PTR_ERR(swd->unit[drive].disk);
goto exit_put_disks;
}
swd->unit[drive].swd = swd;
}
for (drive = 0; drive < swd->floppy_count; drive++) {
swd->unit[drive].disk->flags = GENHD_FL_REMOVABLE;
swd->unit[drive].disk->major = FLOPPY_MAJOR;
swd->unit[drive].disk->first_minor = drive;
swd->unit[drive].disk->minors = 1;
sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive);
swd->unit[drive].disk->fops = &floppy_fops;
swd->unit[drive].disk->flags |= GENHD_FL_NO_PART;
swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE;
swd->unit[drive].disk->private_data = &swd->unit[drive];
set_capacity(swd->unit[drive].disk, 2880);
err = add_disk(swd->unit[drive].disk);
if (err)
goto exit_put_disks;
swd->unit[drive].registered = true;
}
return 0;
exit_put_disks:
unregister_blkdev(FLOPPY_MAJOR, "fd");
do {
swim_cleanup_floppy_disk(&swd->unit[drive]);
} while (drive--);
return err;
}
static int swim_probe(struct platform_device *dev)
{
struct resource *res;
struct swim __iomem *swim_base;
struct swim_priv *swd;
int ret;
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res) {
ret = -ENODEV;
goto out;
}
if (!request_mem_region(res->start, resource_size(res), CARDNAME)) {
ret = -EBUSY;
goto out;
}
swim_base = (struct swim __iomem *)res->start;
if (!swim_base) {
ret = -ENOMEM;
goto out_release_io;
}
/* probe device */
set_swim_mode(swim_base, 1);
if (!get_swim_mode(swim_base)) {
printk(KERN_INFO "SWIM device not found !\n");
ret = -ENODEV;
goto out_release_io;
}
/* set platform driver data */
swd = kzalloc(sizeof(struct swim_priv), GFP_KERNEL);
if (!swd) {
ret = -ENOMEM;
goto out_release_io;
}
platform_set_drvdata(dev, swd);
swd->base = swim_base;
ret = swim_floppy_init(swd);
if (ret)
goto out_kfree;
return 0;
out_kfree:
kfree(swd);
out_release_io:
release_mem_region(res->start, resource_size(res));
out:
return ret;
}
static int swim_remove(struct platform_device *dev)
{
struct swim_priv *swd = platform_get_drvdata(dev);
int drive;
struct resource *res;
for (drive = 0; drive < swd->floppy_count; drive++)
swim_cleanup_floppy_disk(&swd->unit[drive]);
unregister_blkdev(FLOPPY_MAJOR, "fd");
/* eject floppies */
for (drive = 0; drive < swd->floppy_count; drive++)
floppy_eject(&swd->unit[drive]);
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (res)
release_mem_region(res->start, resource_size(res));
kfree(swd);
return 0;
}
static struct platform_driver swim_driver = {
.probe = swim_probe,
.remove = swim_remove,
.driver = {
.name = CARDNAME,
},
};
static int __init swim_init(void)
{
printk(KERN_INFO "SWIM floppy driver %s\n", DRIVER_VERSION);
return platform_driver_register(&swim_driver);
}
module_init(swim_init);
static void __exit swim_exit(void)
{
platform_driver_unregister(&swim_driver);
}
module_exit(swim_exit);
MODULE_DESCRIPTION("Driver for SWIM floppy controller");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Laurent Vivier <[email protected]>");
MODULE_ALIAS_BLOCKDEV_MAJOR(FLOPPY_MAJOR);
| linux-master | drivers/block/swim.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Userspace block device - a block device whose IO is handled from
* userspace
*
* Make full use of the io_uring passthrough command to communicate with
* the ublk userspace daemon (ublksrvd) for handling basic IO requests.
*
* Copyright 2022 Ming Lei <[email protected]>
*
* (part of code stolen from loop.c)
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/mutex.h>
#include <linux/writeback.h>
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/ioprio.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/cdev.h>
#include <linux/io_uring.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <linux/task_work.h>
#include <linux/namei.h>
#include <linux/kref.h>
#include <uapi/linux/ublk_cmd.h>
#define UBLK_MINORS (1U << MINORBITS)
/* All UBLK_F_* have to be included into UBLK_F_ALL */
#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
| UBLK_F_URING_CMD_COMP_IN_TASK \
| UBLK_F_NEED_GET_DATA \
| UBLK_F_USER_RECOVERY \
| UBLK_F_USER_RECOVERY_REISSUE \
| UBLK_F_UNPRIVILEGED_DEV \
| UBLK_F_CMD_IOCTL_ENCODE \
| UBLK_F_USER_COPY \
| UBLK_F_ZONED)
/* All UBLK_PARAM_TYPE_* should be included here */
#define UBLK_PARAM_TYPE_ALL \
(UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD | \
UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED)
struct ublk_rq_data {
struct llist_node node;
struct kref ref;
__u64 sector;
__u32 operation;
__u32 nr_zones;
};
struct ublk_uring_cmd_pdu {
struct ublk_queue *ubq;
};
/*
* io command is active: the sqe cmd has been received and its cqe isn't
* done yet
*
* If the flag is set, the io command is owned by the ublk driver and is
* waiting for an incoming blk-mq request from the ublk block device.
*
* If the flag is cleared, the io command has been completed and is owned
* by the ublk server.
*/
#define UBLK_IO_FLAG_ACTIVE 0x01
/*
* IO command has been completed via cqe, is being handled by ublksrv, and
* has not been committed back yet.
*
* Basically mutually exclusive with UBLK_IO_FLAG_ACTIVE, so it can be used
* for cross verification.
*/
#define UBLK_IO_FLAG_OWNED_BY_SRV 0x02
/*
* The IO command has been aborted; this flag is set only in the
* !UBLK_IO_FLAG_ACTIVE case.
*
* After this flag is observed, any pending or new incoming request
* associated with this io command will be failed immediately.
*/
#define UBLK_IO_FLAG_ABORTED 0x04
/*
* UBLK_IO_FLAG_NEED_GET_DATA is set when the IO command needs to fetch
* the data buffer address from ublksrv.
*
* Bio data can then be copied into this data buffer for a WRITE request
* after the IO command is issued again and UBLK_IO_FLAG_NEED_GET_DATA is unset.
*/
#define UBLK_IO_FLAG_NEED_GET_DATA 0x08
struct ublk_io {
/* userspace buffer address from io cmd */
__u64 addr;
unsigned int flags;
int res;
struct io_uring_cmd *cmd;
};
struct ublk_queue {
int q_id;
int q_depth;
unsigned long flags;
struct task_struct *ubq_daemon;
char *io_cmd_buf;
struct llist_head io_cmds;
unsigned long io_addr; /* mapped vm address */
unsigned int max_io_sz;
bool force_abort;
bool timeout;
unsigned short nr_io_ready; /* how many ios setup */
struct ublk_device *dev;
struct ublk_io ios[];
};
#define UBLK_DAEMON_MONITOR_PERIOD (5 * HZ)
struct ublk_device {
struct gendisk *ub_disk;
char *__queues;
unsigned int queue_size;
struct ublksrv_ctrl_dev_info dev_info;
struct blk_mq_tag_set tag_set;
struct cdev cdev;
struct device cdev_dev;
#define UB_STATE_OPEN 0
#define UB_STATE_USED 1
#define UB_STATE_DELETED 2
unsigned long state;
int ub_number;
struct mutex mutex;
spinlock_t mm_lock;
struct mm_struct *mm;
struct ublk_params params;
struct completion completion;
unsigned int nr_queues_ready;
unsigned int nr_privileged_daemon;
/*
* Our ubq->daemon may be killed without any notification, so
* monitor each queue's daemon periodically
*/
struct delayed_work monitor_work;
struct work_struct quiesce_work;
struct work_struct stop_work;
};
/* header of ublk_params */
struct ublk_params_header {
__u32 len;
__u32 types;
};
static inline unsigned int ublk_req_build_flags(struct request *req);
static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
int tag);
static inline bool ublk_dev_is_user_copy(const struct ublk_device *ub)
{
return ub->dev_info.flags & UBLK_F_USER_COPY;
}
static inline bool ublk_dev_is_zoned(const struct ublk_device *ub)
{
return ub->dev_info.flags & UBLK_F_ZONED;
}
static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq)
{
return ubq->flags & UBLK_F_ZONED;
}
#ifdef CONFIG_BLK_DEV_ZONED
static int ublk_get_nr_zones(const struct ublk_device *ub)
{
const struct ublk_param_basic *p = &ub->params.basic;
/* Zone size is a power of 2 */
return p->dev_sectors >> ilog2(p->chunk_sectors);
}
static int ublk_revalidate_disk_zones(struct ublk_device *ub)
{
return blk_revalidate_disk_zones(ub->ub_disk, NULL);
}
static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
{
const struct ublk_param_zoned *p = &ub->params.zoned;
int nr_zones;
if (!ublk_dev_is_zoned(ub))
return -EINVAL;
if (!p->max_zone_append_sectors)
return -EINVAL;
nr_zones = ublk_get_nr_zones(ub);
if (p->max_active_zones > nr_zones)
return -EINVAL;
if (p->max_open_zones > nr_zones)
return -EINVAL;
return 0;
}
static int ublk_dev_param_zoned_apply(struct ublk_device *ub)
{
const struct ublk_param_zoned *p = &ub->params.zoned;
disk_set_zoned(ub->ub_disk, BLK_ZONED_HM);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue);
blk_queue_required_elevator_features(ub->ub_disk->queue,
ELEVATOR_F_ZBD_SEQ_WRITE);
disk_set_max_active_zones(ub->ub_disk, p->max_active_zones);
disk_set_max_open_zones(ub->ub_disk, p->max_open_zones);
blk_queue_max_zone_append_sectors(ub->ub_disk->queue, p->max_zone_append_sectors);
ub->ub_disk->nr_zones = ublk_get_nr_zones(ub);
return 0;
}
/* Based on virtblk_alloc_report_buffer */
static void *ublk_alloc_report_buffer(struct ublk_device *ublk,
unsigned int nr_zones, size_t *buflen)
{
struct request_queue *q = ublk->ub_disk->queue;
size_t bufsize;
void *buf;
nr_zones = min_t(unsigned int, nr_zones,
ublk->ub_disk->nr_zones);
bufsize = nr_zones * sizeof(struct blk_zone);
bufsize =
min_t(size_t, bufsize, queue_max_hw_sectors(q) << SECTOR_SHIFT);
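/*
 * Try progressively smaller buffers: __GFP_NORETRY keeps large
 * allocations from stalling, and halving on failure means we settle on
 * a size the allocator can satisfy, down to a single zone.
 */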
while (bufsize >= sizeof(struct blk_zone)) {
buf = kvmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
if (buf) {
*buflen = bufsize;
return buf;
}
bufsize >>= 1;
}
*buflen = 0;
return NULL;
}
static int ublk_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data)
{
struct ublk_device *ub = disk->private_data;
unsigned int zone_size_sectors = disk->queue->limits.chunk_sectors;
unsigned int first_zone = sector >> ilog2(zone_size_sectors);
unsigned int done_zones = 0;
unsigned int max_zones_per_request;
int ret;
struct blk_zone *buffer;
size_t buffer_length;
nr_zones = min_t(unsigned int, ub->ub_disk->nr_zones - first_zone,
nr_zones);
buffer = ublk_alloc_report_buffer(ub, nr_zones, &buffer_length);
if (!buffer)
return -ENOMEM;
max_zones_per_request = buffer_length / sizeof(struct blk_zone);
while (done_zones < nr_zones) {
unsigned int remaining_zones = nr_zones - done_zones;
unsigned int zones_in_request =
min_t(unsigned int, remaining_zones, max_zones_per_request);
struct request *req;
struct ublk_rq_data *pdu;
blk_status_t status;
memset(buffer, 0, buffer_length);
req = blk_mq_alloc_request(disk->queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
goto out;
}
pdu = blk_mq_rq_to_pdu(req);
pdu->operation = UBLK_IO_OP_REPORT_ZONES;
pdu->sector = sector;
pdu->nr_zones = zones_in_request;
ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length,
GFP_KERNEL);
if (ret) {
blk_mq_free_request(req);
goto out;
}
status = blk_execute_rq(req, 0);
ret = blk_status_to_errno(status);
blk_mq_free_request(req);
if (ret)
goto out;
for (unsigned int i = 0; i < zones_in_request; i++) {
struct blk_zone *zone = buffer + i;
/* A zero length zone means no more zones in this response */
if (!zone->len)
break;
ret = cb(zone, i, data);
if (ret)
goto out;
done_zones++;
sector += zone_size_sectors;
}
}
ret = done_zones;
out:
kvfree(buffer);
return ret;
}
static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
struct request *req)
{
struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
struct ublk_io *io = &ubq->ios[req->tag];
struct ublk_rq_data *pdu = blk_mq_rq_to_pdu(req);
u32 ublk_op;
switch (req_op(req)) {
case REQ_OP_ZONE_OPEN:
ublk_op = UBLK_IO_OP_ZONE_OPEN;
break;
case REQ_OP_ZONE_CLOSE:
ublk_op = UBLK_IO_OP_ZONE_CLOSE;
break;
case REQ_OP_ZONE_FINISH:
ublk_op = UBLK_IO_OP_ZONE_FINISH;
break;
case REQ_OP_ZONE_RESET:
ublk_op = UBLK_IO_OP_ZONE_RESET;
break;
case REQ_OP_ZONE_APPEND:
ublk_op = UBLK_IO_OP_ZONE_APPEND;
break;
case REQ_OP_ZONE_RESET_ALL:
ublk_op = UBLK_IO_OP_ZONE_RESET_ALL;
break;
case REQ_OP_DRV_IN:
ublk_op = pdu->operation;
switch (ublk_op) {
case UBLK_IO_OP_REPORT_ZONES:
iod->op_flags = ublk_op | ublk_req_build_flags(req);
iod->nr_zones = pdu->nr_zones;
iod->start_sector = pdu->sector;
return BLK_STS_OK;
default:
return BLK_STS_IOERR;
}
case REQ_OP_DRV_OUT:
/* We do not support drv_out */
return BLK_STS_NOTSUPP;
default:
return BLK_STS_IOERR;
}
iod->op_flags = ublk_op | ublk_req_build_flags(req);
iod->nr_sectors = blk_rq_sectors(req);
iod->start_sector = blk_rq_pos(req);
iod->addr = io->addr;
return BLK_STS_OK;
}
#else
#define ublk_report_zones (NULL)
static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
{
return -EOPNOTSUPP;
}
static int ublk_dev_param_zoned_apply(struct ublk_device *ub)
{
return -EOPNOTSUPP;
}
static int ublk_revalidate_disk_zones(struct ublk_device *ub)
{
return 0;
}
static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
struct request *req)
{
return BLK_STS_NOTSUPP;
}
#endif
static inline void __ublk_complete_rq(struct request *req);
static void ublk_complete_rq(struct kref *ref);
static dev_t ublk_chr_devt;
static const struct class ublk_chr_class = {
.name = "ublk-char",
};
static DEFINE_IDR(ublk_index_idr);
static DEFINE_SPINLOCK(ublk_idr_lock);
static wait_queue_head_t ublk_idr_wq; /* wait until one idr is freed */
static DEFINE_MUTEX(ublk_ctl_mutex);
/*
* Max ublk devices allowed to add
*
* It can be extended to one per-user limit in future or even controlled
* by cgroup.
*/
static unsigned int ublks_max = 64;
static unsigned int ublks_added; /* protected by ublk_ctl_mutex */
static struct miscdevice ublk_misc;
static inline unsigned ublk_pos_to_hwq(loff_t pos)
{
return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_QID_OFF) &
UBLK_QID_BITS_MASK;
}
static inline unsigned ublk_pos_to_buf_off(loff_t pos)
{
return (pos - UBLKSRV_IO_BUF_OFFSET) & UBLK_IO_BUF_BITS_MASK;
}
static inline unsigned ublk_pos_to_tag(loff_t pos)
{
return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_TAG_OFF) &
UBLK_TAG_BITS_MASK;
}
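/*
* Illustrative sketch only (hypothetical helper, not used by the driver):
* composing the read()/write() position for a given queue/tag, i.e. the
* inverse of the decoding helpers above. It assumes the qid/tag bit fields
* don't overlap and that the caller adds the byte offset within the io
* buffer itself.
*/
static inline loff_t ublk_hwq_tag_to_pos(unsigned int q_id, unsigned int tag)
{
return UBLKSRV_IO_BUF_OFFSET +
(((loff_t)q_id << UBLK_QID_OFF) | ((loff_t)tag << UBLK_TAG_OFF));
}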
static void ublk_dev_param_basic_apply(struct ublk_device *ub)
{
struct request_queue *q = ub->ub_disk->queue;
const struct ublk_param_basic *p = &ub->params.basic;
blk_queue_logical_block_size(q, 1 << p->logical_bs_shift);
blk_queue_physical_block_size(q, 1 << p->physical_bs_shift);
blk_queue_io_min(q, 1 << p->io_min_shift);
blk_queue_io_opt(q, 1 << p->io_opt_shift);
blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE,
p->attrs & UBLK_ATTR_FUA);
if (p->attrs & UBLK_ATTR_ROTATIONAL)
blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
else
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
blk_queue_max_hw_sectors(q, p->max_sectors);
blk_queue_chunk_sectors(q, p->chunk_sectors);
blk_queue_virt_boundary(q, p->virt_boundary_mask);
if (p->attrs & UBLK_ATTR_READ_ONLY)
set_disk_ro(ub->ub_disk, true);
set_capacity(ub->ub_disk, p->dev_sectors);
}
static void ublk_dev_param_discard_apply(struct ublk_device *ub)
{
struct request_queue *q = ub->ub_disk->queue;
const struct ublk_param_discard *p = &ub->params.discard;
q->limits.discard_alignment = p->discard_alignment;
q->limits.discard_granularity = p->discard_granularity;
blk_queue_max_discard_sectors(q, p->max_discard_sectors);
blk_queue_max_write_zeroes_sectors(q,
p->max_write_zeroes_sectors);
blk_queue_max_discard_segments(q, p->max_discard_segments);
}
static int ublk_validate_params(const struct ublk_device *ub)
{
/* basic param is the only one which must be set */
if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
const struct ublk_param_basic *p = &ub->params.basic;
if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
return -EINVAL;
if (p->logical_bs_shift > p->physical_bs_shift)
return -EINVAL;
if (p->max_sectors > (ub->dev_info.max_io_buf_bytes >> 9))
return -EINVAL;
if (ublk_dev_is_zoned(ub) && !p->chunk_sectors)
return -EINVAL;
} else
return -EINVAL;
if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
const struct ublk_param_discard *p = &ub->params.discard;
/* So far, only single-segment discard is supported */
if (p->max_discard_sectors && p->max_discard_segments != 1)
return -EINVAL;
if (!p->discard_granularity)
return -EINVAL;
}
/* dev_t is read-only */
if (ub->params.types & UBLK_PARAM_TYPE_DEVT)
return -EINVAL;
if (ub->params.types & UBLK_PARAM_TYPE_ZONED)
return ublk_dev_param_zoned_validate(ub);
else if (ublk_dev_is_zoned(ub))
return -EINVAL;
return 0;
}
static int ublk_apply_params(struct ublk_device *ub)
{
if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
return -EINVAL;
ublk_dev_param_basic_apply(ub);
if (ub->params.types & UBLK_PARAM_TYPE_DISCARD)
ublk_dev_param_discard_apply(ub);
if (ub->params.types & UBLK_PARAM_TYPE_ZONED)
return ublk_dev_param_zoned_apply(ub);
return 0;
}
static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
{
return ubq->flags & UBLK_F_USER_COPY;
}
static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
{
/*
* user copy goes through read()/write(), so a request reference
* has to be grabbed
*/
return ublk_support_user_copy(ubq);
}
static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
struct request *req)
{
if (ublk_need_req_ref(ubq)) {
struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
kref_init(&data->ref);
}
}
static inline bool ublk_get_req_ref(const struct ublk_queue *ubq,
struct request *req)
{
if (ublk_need_req_ref(ubq)) {
struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
return kref_get_unless_zero(&data->ref);
}
return true;
}
static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
struct request *req)
{
if (ublk_need_req_ref(ubq)) {
struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
kref_put(&data->ref, ublk_complete_rq);
} else {
__ublk_complete_rq(req);
}
}
static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
{
return ubq->flags & UBLK_F_NEED_GET_DATA;
}
static struct ublk_device *ublk_get_device(struct ublk_device *ub)
{
if (kobject_get_unless_zero(&ub->cdev_dev.kobj))
return ub;
return NULL;
}
static void ublk_put_device(struct ublk_device *ub)
{
put_device(&ub->cdev_dev);
}
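/*
* Queues live in one flat allocation (see ublk_init_queues()): each slot is
* ub->queue_size bytes, a struct ublk_queue followed by its q_depth
* struct ublk_io entries.
*/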
static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
int qid)
{
return (struct ublk_queue *)&(dev->__queues[qid * dev->queue_size]);
}
static inline bool ublk_rq_has_data(const struct request *rq)
{
return bio_has_data(rq->bio);
}
static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
int tag)
{
return (struct ublksrv_io_desc *)
&(ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]);
}
static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
{
return ublk_get_queue(ub, q_id)->io_cmd_buf;
}
static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
{
struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc),
PAGE_SIZE);
}
static inline bool ublk_queue_can_use_recovery_reissue(
struct ublk_queue *ubq)
{
return (ubq->flags & UBLK_F_USER_RECOVERY) &&
(ubq->flags & UBLK_F_USER_RECOVERY_REISSUE);
}
static inline bool ublk_queue_can_use_recovery(
struct ublk_queue *ubq)
{
return ubq->flags & UBLK_F_USER_RECOVERY;
}
static inline bool ublk_can_use_recovery(struct ublk_device *ub)
{
return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
}
static void ublk_free_disk(struct gendisk *disk)
{
struct ublk_device *ub = disk->private_data;
clear_bit(UB_STATE_USED, &ub->state);
put_device(&ub->cdev_dev);
}
static void ublk_store_owner_uid_gid(unsigned int *owner_uid,
unsigned int *owner_gid)
{
kuid_t uid;
kgid_t gid;
current_uid_gid(&uid, &gid);
*owner_uid = from_kuid(&init_user_ns, uid);
*owner_gid = from_kgid(&init_user_ns, gid);
}
static int ublk_open(struct gendisk *disk, blk_mode_t mode)
{
struct ublk_device *ub = disk->private_data;
if (capable(CAP_SYS_ADMIN))
return 0;
/*
* If it is an unprivileged device, only the owner may open the
* disk. Otherwise it could be a trap set by a malicious user who
* deliberately grants this disk's privileges to other users.
*
* This policy is also reasonable given that anyone can create an
* unprivileged device without needing anyone else's grant.
*/
if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV) {
unsigned int curr_uid, curr_gid;
ublk_store_owner_uid_gid(&curr_uid, &curr_gid);
if (curr_uid != ub->dev_info.owner_uid || curr_gid !=
ub->dev_info.owner_gid)
return -EPERM;
}
return 0;
}
static const struct block_device_operations ub_fops = {
.owner = THIS_MODULE,
.open = ublk_open,
.free_disk = ublk_free_disk,
.report_zones = ublk_report_zones,
};
#define UBLK_MAX_PIN_PAGES 32
struct ublk_io_iter {
struct page *pages[UBLK_MAX_PIN_PAGES];
struct bio *bio;
struct bvec_iter iter;
};
/* copy 'total' bytes between data->pages and the request's bio vecs */
static void ublk_copy_io_pages(struct ublk_io_iter *data,
size_t total, size_t pg_off, int dir)
{
unsigned done = 0;
unsigned pg_idx = 0;
while (done < total) {
struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
unsigned int bytes = min3(bv.bv_len, (unsigned)total - done,
(unsigned)(PAGE_SIZE - pg_off));
void *bv_buf = bvec_kmap_local(&bv);
void *pg_buf = kmap_local_page(data->pages[pg_idx]);
if (dir == ITER_DEST)
memcpy(pg_buf + pg_off, bv_buf, bytes);
else
memcpy(bv_buf, pg_buf + pg_off, bytes);
kunmap_local(pg_buf);
kunmap_local(bv_buf);
/* advance page array */
pg_off += bytes;
if (pg_off == PAGE_SIZE) {
pg_idx += 1;
pg_off = 0;
}
done += bytes;
/* advance bio */
bio_advance_iter_single(data->bio, &data->iter, bytes);
if (!data->iter.bi_size) {
data->bio = data->bio->bi_next;
if (data->bio == NULL)
break;
data->iter = data->bio->bi_iter;
}
}
}
static bool ublk_advance_io_iter(const struct request *req,
struct ublk_io_iter *iter, unsigned int offset)
{
struct bio *bio = req->bio;
for_each_bio(bio) {
if (bio->bi_iter.bi_size > offset) {
iter->bio = bio;
iter->iter = bio->bi_iter;
bio_advance_iter(iter->bio, &iter->iter, offset);
return true;
}
offset -= bio->bi_iter.bi_size;
}
return false;
}
/*
* Copy data between request pages and the io_iter; 'offset' is the
* linear offset into the request at which copying starts.
*/
static size_t ublk_copy_user_pages(const struct request *req,
unsigned offset, struct iov_iter *uiter, int dir)
{
struct ublk_io_iter iter;
size_t done = 0;
if (!ublk_advance_io_iter(req, &iter, offset))
return 0;
while (iov_iter_count(uiter) && iter.bio) {
unsigned nr_pages;
ssize_t len;
size_t off;
int i;
len = iov_iter_get_pages2(uiter, iter.pages,
iov_iter_count(uiter),
UBLK_MAX_PIN_PAGES, &off);
if (len <= 0)
return done;
ublk_copy_io_pages(&iter, len, off, dir);
nr_pages = DIV_ROUND_UP(len + off, PAGE_SIZE);
for (i = 0; i < nr_pages; i++) {
if (dir == ITER_DEST)
set_page_dirty(iter.pages[i]);
put_page(iter.pages[i]);
}
done += len;
}
return done;
}
static inline bool ublk_need_map_req(const struct request *req)
{
return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
}
static inline bool ublk_need_unmap_req(const struct request *req)
{
return ublk_rq_has_data(req) &&
(req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN);
}
static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
struct ublk_io *io)
{
const unsigned int rq_bytes = blk_rq_bytes(req);
if (ublk_support_user_copy(ubq))
return rq_bytes;
/*
* No zero copy: copying WRITE request data is deferred into the
* ublksrv daemon context, and the big benefit is that pinning
* pages in the current context is pretty fast, see
* ublk_copy_user_pages()
*/
if (ublk_need_map_req(req)) {
struct iov_iter iter;
struct iovec iov;
const int dir = ITER_DEST;
import_single_range(dir, u64_to_user_ptr(io->addr), rq_bytes,
&iov, &iter);
return ublk_copy_user_pages(req, 0, &iter, dir);
}
return rq_bytes;
}
static int ublk_unmap_io(const struct ublk_queue *ubq,
const struct request *req,
struct ublk_io *io)
{
const unsigned int rq_bytes = blk_rq_bytes(req);
if (ublk_support_user_copy(ubq))
return rq_bytes;
if (ublk_need_unmap_req(req)) {
struct iov_iter iter;
struct iovec iov;
const int dir = ITER_SOURCE;
WARN_ON_ONCE(io->res > rq_bytes);
import_single_range(dir, u64_to_user_ptr(io->addr), io->res,
&iov, &iter);
return ublk_copy_user_pages(req, 0, &iter, dir);
}
return rq_bytes;
}
static inline unsigned int ublk_req_build_flags(struct request *req)
{
unsigned flags = 0;
if (req->cmd_flags & REQ_FAILFAST_DEV)
flags |= UBLK_IO_F_FAILFAST_DEV;
if (req->cmd_flags & REQ_FAILFAST_TRANSPORT)
flags |= UBLK_IO_F_FAILFAST_TRANSPORT;
if (req->cmd_flags & REQ_FAILFAST_DRIVER)
flags |= UBLK_IO_F_FAILFAST_DRIVER;
if (req->cmd_flags & REQ_META)
flags |= UBLK_IO_F_META;
if (req->cmd_flags & REQ_FUA)
flags |= UBLK_IO_F_FUA;
if (req->cmd_flags & REQ_NOUNMAP)
flags |= UBLK_IO_F_NOUNMAP;
if (req->cmd_flags & REQ_SWAP)
flags |= UBLK_IO_F_SWAP;
return flags;
}
static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
{
struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
struct ublk_io *io = &ubq->ios[req->tag];
enum req_op op = req_op(req);
u32 ublk_op;
if (!ublk_queue_is_zoned(ubq) &&
(op_is_zone_mgmt(op) || op == REQ_OP_ZONE_APPEND))
return BLK_STS_IOERR;
switch (req_op(req)) {
case REQ_OP_READ:
ublk_op = UBLK_IO_OP_READ;
break;
case REQ_OP_WRITE:
ublk_op = UBLK_IO_OP_WRITE;
break;
case REQ_OP_FLUSH:
ublk_op = UBLK_IO_OP_FLUSH;
break;
case REQ_OP_DISCARD:
ublk_op = UBLK_IO_OP_DISCARD;
break;
case REQ_OP_WRITE_ZEROES:
ublk_op = UBLK_IO_OP_WRITE_ZEROES;
break;
default:
if (ublk_queue_is_zoned(ubq))
return ublk_setup_iod_zoned(ubq, req);
return BLK_STS_IOERR;
}
/* translate the opcode: REQ_OP_* values are kernel-internal and may change */
iod->op_flags = ublk_op | ublk_req_build_flags(req);
iod->nr_sectors = blk_rq_sectors(req);
iod->start_sector = blk_rq_pos(req);
iod->addr = io->addr;
return BLK_STS_OK;
}
static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
struct io_uring_cmd *ioucmd)
{
return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
}
static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
{
return ubq->ubq_daemon->flags & PF_EXITING;
}
/* todo: handle partial completion */
static inline void __ublk_complete_rq(struct request *req)
{
struct ublk_queue *ubq = req->mq_hctx->driver_data;
struct ublk_io *io = &ubq->ios[req->tag];
unsigned int unmapped_bytes;
blk_status_t res = BLK_STS_OK;
/* called from ublk_abort_queue() code path */
if (io->flags & UBLK_IO_FLAG_ABORTED) {
res = BLK_STS_IOERR;
goto exit;
}
/* fail the read IO if nothing was read */
if (!io->res && req_op(req) == REQ_OP_READ)
io->res = -EIO;
if (io->res < 0) {
res = errno_to_blk_status(io->res);
goto exit;
}
/*
* FLUSH, DISCARD and WRITE_ZEROES transfer no data, so end them
* directly; none of them needs an unmap.
*/
if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE &&
req_op(req) != REQ_OP_DRV_IN)
goto exit;
/* for a READ request, copy the data at iod->addr back into the rq buffers */
unmapped_bytes = ublk_unmap_io(ubq, req, io);
/*
* Extremely unlikely since the data was filled in just before;
* simply clamp io->res for this case.
*/
if (unlikely(unmapped_bytes < io->res))
io->res = unmapped_bytes;
if (blk_update_request(req, BLK_STS_OK, io->res))
blk_mq_requeue_request(req, true);
else
__blk_mq_end_request(req, BLK_STS_OK);
return;
exit:
blk_mq_end_request(req, res);
}
static void ublk_complete_rq(struct kref *ref)
{
struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
ref);
struct request *req = blk_mq_rq_from_pdu(data);
__ublk_complete_rq(req);
}
/*
* Since __ublk_rq_task_work always fails requests immediately during
* exiting, __ublk_fail_req() is only called from the abort context
* during exiting, so no lock is necessary.
*
* Also, aborting may not have started yet; keep in mind that a failed
* request may be issued by the block layer again.
*/
static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
struct request *req)
{
WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
if (!(io->flags & UBLK_IO_FLAG_ABORTED)) {
io->flags |= UBLK_IO_FLAG_ABORTED;
if (ublk_queue_can_use_recovery_reissue(ubq))
blk_mq_requeue_request(req, false);
else
ublk_put_req_ref(ubq, req);
}
}
static void ubq_complete_io_cmd(struct ublk_io *io, int res,
unsigned issue_flags)
{
/* mark this cmd owned by ublksrv */
io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
/*
* Clear ACTIVE since we are done with this sqe/cmd slot; a new io
* cmd is only accepted while the slot is not active.
*/
io->flags &= ~UBLK_IO_FLAG_ACTIVE;
/* tell ublksrv one io request is coming */
io_uring_cmd_done(io->cmd, res, 0, issue_flags);
}
#define UBLK_REQUEUE_DELAY_MS 3
static inline void __ublk_abort_rq(struct ublk_queue *ubq,
struct request *rq)
{
/* We cannot process this rq so just requeue it. */
if (ublk_queue_can_use_recovery(ubq))
blk_mq_requeue_request(rq, false);
else
blk_mq_end_request(rq, BLK_STS_IOERR);
mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
}
static inline void __ublk_rq_task_work(struct request *req,
unsigned issue_flags)
{
struct ublk_queue *ubq = req->mq_hctx->driver_data;
int tag = req->tag;
struct ublk_io *io = &ubq->ios[tag];
unsigned int mapped_bytes;
pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
__func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
ublk_get_iod(ubq, req->tag)->addr);
/*
* Task is exiting if either:
*
* (1) current != ubq_daemon.
* io_uring_cmd_complete_in_task() tries to run task_work
* in a workqueue if ubq_daemon(cmd's task) is PF_EXITING.
*
* (2) current->flags & PF_EXITING.
*/
if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) {
__ublk_abort_rq(ubq, req);
return;
}
if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
/*
* We have not handled the UBLK_IO_NEED_GET_DATA command yet,
* so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
* and notify it.
*/
if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) {
io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
__func__, io->cmd->cmd_op, ubq->q_id,
req->tag, io->flags);
ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
return;
}
/*
* We have handled UBLK_IO_NEED_GET_DATA command,
* so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
* do the copy work.
*/
io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
/* update iod->addr because ublksrv may have passed a new io buffer */
ublk_get_iod(ubq, req->tag)->addr = io->addr;
pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
__func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
ublk_get_iod(ubq, req->tag)->addr);
}
mapped_bytes = ublk_map_io(ubq, req, io);
/* partially mapped, update io descriptor */
if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
/*
* Nothing mapped, retry until we succeed.
*
* We may never succeed in mapping any bytes here because
* of OOM. TODO: reserve one buffer with single page pinned
* for providing forward progress guarantee.
*/
if (unlikely(!mapped_bytes)) {
blk_mq_requeue_request(req, false);
blk_mq_delay_kick_requeue_list(req->q,
UBLK_REQUEUE_DELAY_MS);
return;
}
ublk_get_iod(ubq, req->tag)->nr_sectors =
mapped_bytes >> 9;
}
ublk_init_req_ref(ubq, req);
ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
}
static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
unsigned issue_flags)
{
struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
struct ublk_rq_data *data, *tmp;
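/* llist_del_all() returns entries in LIFO order; restore submission order */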
io_cmds = llist_reverse_order(io_cmds);
llist_for_each_entry_safe(data, tmp, io_cmds, node)
__ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
}
static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
{
struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
struct ublk_rq_data *data, *tmp;
llist_for_each_entry_safe(data, tmp, io_cmds, node)
__ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
}
static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
{
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
struct ublk_queue *ubq = pdu->ubq;
ublk_forward_io_cmds(ubq, issue_flags);
}
static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
{
struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
struct ublk_io *io;
if (!llist_add(&data->node, &ubq->io_cmds))
return;
io = &ubq->ios[rq->tag];
/*
* If the check passes, we know this is a re-issued request aborted
* previously in monitor_work because the ubq_daemon (the cmd's task) is
* PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
* because this ioucmd's io_uring context may already be freed if no
* inflight ioucmd exists; otherwise we could cause a null-deref in
* ctx->fallback_work.
*
* Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request
* (releasing the tag). Then the request is re-started (allocating the
* tag) and we get here. Since releasing/allocating a tag implies
* smp_mb(), finding UBLK_IO_FLAG_ABORTED guarantees that this is a
* re-issued request aborted previously.
*/
if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) {
ublk_abort_io_cmds(ubq);
} else {
struct io_uring_cmd *cmd = io->cmd;
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
pdu->ubq = ubq;
io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
}
}
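/*
* An unprivileged daemon can't be trusted to make forward progress, so
* the first request timeout kills it with SIGKILL; monitor_work then
* observes the dying daemon and schedules quiesce/stop. Privileged
* devices simply get their timer reset.
*/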
static enum blk_eh_timer_return ublk_timeout(struct request *rq)
{
struct ublk_queue *ubq = rq->mq_hctx->driver_data;
if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
if (!ubq->timeout) {
send_sig(SIGKILL, ubq->ubq_daemon, 0);
ubq->timeout = true;
}
return BLK_EH_DONE;
}
return BLK_EH_RESET_TIMER;
}
static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct ublk_queue *ubq = hctx->driver_data;
struct request *rq = bd->rq;
blk_status_t res;
/* fill iod to slot in io cmd buffer */
res = ublk_setup_iod(ubq, rq);
if (unlikely(res != BLK_STS_OK))
return BLK_STS_IOERR;
/*
* With the recovery feature enabled, force_abort is set in
* ublk_stop_dev() before calling del_gendisk(). We have to
* abort all requeued and new rqs here to let del_gendisk()
* move on. Besides, we cannot call io_uring_cmd_complete_in_task(),
* to avoid a UAF on the io_uring ctx.
*
* Note: force_abort is guaranteed to be seen because it is set
* before the request queue is unquiesced.
*/
if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
return BLK_STS_IOERR;
blk_mq_start_request(bd->rq);
if (unlikely(ubq_daemon_is_dying(ubq))) {
__ublk_abort_rq(ubq, rq);
return BLK_STS_OK;
}
ublk_queue_cmd(ubq, rq);
return BLK_STS_OK;
}
static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
unsigned int hctx_idx)
{
struct ublk_device *ub = driver_data;
struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num);
hctx->driver_data = ubq;
return 0;
}
static const struct blk_mq_ops ublk_mq_ops = {
.queue_rq = ublk_queue_rq,
.init_hctx = ublk_init_hctx,
.timeout = ublk_timeout,
};
static int ublk_ch_open(struct inode *inode, struct file *filp)
{
struct ublk_device *ub = container_of(inode->i_cdev,
struct ublk_device, cdev);
if (test_and_set_bit(UB_STATE_OPEN, &ub->state))
return -EBUSY;
filp->private_data = ub;
return 0;
}
static int ublk_ch_release(struct inode *inode, struct file *filp)
{
struct ublk_device *ub = filp->private_data;
clear_bit(UB_STATE_OPEN, &ub->state);
return 0;
}
/* map pre-allocated per-queue cmd buffer to ublksrv daemon */
static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct ublk_device *ub = filp->private_data;
size_t sz = vma->vm_end - vma->vm_start;
unsigned max_sz = UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
int q_id, ret = 0;
spin_lock(&ub->mm_lock);
if (!ub->mm)
ub->mm = current->mm;
if (current->mm != ub->mm)
ret = -EINVAL;
spin_unlock(&ub->mm_lock);
if (ret)
return ret;
if (vma->vm_flags & VM_WRITE)
return -EPERM;
end = UBLKSRV_CMD_BUF_OFFSET + ub->dev_info.nr_hw_queues * max_sz;
if (phys_off < UBLKSRV_CMD_BUF_OFFSET || phys_off >= end)
return -EINVAL;
q_id = (phys_off - UBLKSRV_CMD_BUF_OFFSET) / max_sz;
pr_devel("%s: qid %d, pid %d, addr %lx pg_off %lx sz %lu\n",
__func__, q_id, current->pid, vma->vm_start,
phys_off, (unsigned long)sz);
if (sz != ublk_queue_cmd_buf_size(ub, q_id))
return -EINVAL;
pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}
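/*
* Illustrative sketch only (hypothetical helper, not part of the driver's
* uAPI surface): the byte offset a ublk server would pass to mmap() for
* queue @q_id's descriptor buffer, derived from the bounds checks above.
*/
static inline unsigned long ublk_cmd_buf_mmap_off(int q_id)
{
return UBLKSRV_CMD_BUF_OFFSET +
q_id * (UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc));
}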
static void ublk_commit_completion(struct ublk_device *ub,
const struct ublksrv_io_cmd *ub_cmd)
{
u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
struct ublk_queue *ubq = ublk_get_queue(ub, qid);
struct ublk_io *io = &ubq->ios[tag];
struct request *req;
/* now this cmd slot is owned by the ublk driver */
io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
io->res = ub_cmd->result;
/* find the io request and complete */
req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
if (WARN_ON_ONCE(unlikely(!req)))
return;
if (req_op(req) == REQ_OP_ZONE_APPEND)
req->__sector = ub_cmd->zone_append_lba;
if (likely(!blk_should_fake_timeout(req->q)))
ublk_put_req_ref(ubq, req);
}
/*
* When ->ubq_daemon is exiting, either a new request is ended
* immediately, or any queued io command is drained, so it is safe to
* abort the queue locklessly
*/
static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
{
int i;
if (!ublk_get_device(ub))
return;
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) {
struct request *rq;
/*
* Either we fail the request or __ublk_rq_task_work()
* will do it
*/
rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
if (rq)
__ublk_fail_req(ubq, io, rq);
}
}
ublk_put_device(ub);
}
static void ublk_daemon_monitor_work(struct work_struct *work)
{
struct ublk_device *ub =
container_of(work, struct ublk_device, monitor_work.work);
int i;
for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
struct ublk_queue *ubq = ublk_get_queue(ub, i);
if (ubq_daemon_is_dying(ubq)) {
if (ublk_queue_can_use_recovery(ubq))
schedule_work(&ub->quiesce_work);
else
schedule_work(&ub->stop_work);
/* abort queue is for making forward progress */
ublk_abort_queue(ub, ubq);
}
}
/*
* We can't schedule monitor work once ub's state is no longer
* UBLK_S_DEV_LIVE, i.e. after ublk_remove() or __ublk_quiesce_dev()
* is started.
*
* No need for ub->mutex: monitor work is canceled after the state is
* marked as not LIVE, so the new state is observed reliably.
*/
if (ub->dev_info.state == UBLK_S_DEV_LIVE)
schedule_delayed_work(&ub->monitor_work,
UBLK_DAEMON_MONITOR_PERIOD);
}
static inline bool ublk_queue_ready(struct ublk_queue *ubq)
{
return ubq->nr_io_ready == ubq->q_depth;
}
static void ublk_cmd_cancel_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
{
io_uring_cmd_done(cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
}
static void ublk_cancel_queue(struct ublk_queue *ubq)
{
int i;
if (!ublk_queue_ready(ubq))
return;
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
if (io->flags & UBLK_IO_FLAG_ACTIVE)
io_uring_cmd_complete_in_task(io->cmd,
ublk_cmd_cancel_cb);
}
/* all io commands are canceled */
ubq->nr_io_ready = 0;
}
/* Cancel all pending commands, must be called after del_gendisk() returns */
static void ublk_cancel_dev(struct ublk_device *ub)
{
int i;
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
ublk_cancel_queue(ublk_get_queue(ub, i));
}
static bool ublk_check_inflight_rq(struct request *rq, void *data)
{
bool *idle = data;
if (blk_mq_request_started(rq)) {
*idle = false;
return false;
}
return true;
}
static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
{
bool idle;
WARN_ON_ONCE(!blk_queue_quiesced(ub->ub_disk->queue));
while (true) {
idle = true;
blk_mq_tagset_busy_iter(&ub->tag_set,
ublk_check_inflight_rq, &idle);
if (idle)
break;
msleep(UBLK_REQUEUE_DELAY_MS);
}
}
static void __ublk_quiesce_dev(struct ublk_device *ub)
{
pr_devel("%s: quiesce ub: dev_id %d state %s\n",
__func__, ub->dev_info.dev_id,
ub->dev_info.state == UBLK_S_DEV_LIVE ?
"LIVE" : "QUIESCED");
blk_mq_quiesce_queue(ub->ub_disk->queue);
ublk_wait_tagset_rqs_idle(ub);
ub->dev_info.state = UBLK_S_DEV_QUIESCED;
ublk_cancel_dev(ub);
/*
* We are going to release the task_struct of ubq_daemon and reset
* ->ubq_daemon to NULL, so checking ubq_daemon in monitor_work would
* cause a UAF. Besides, monitor_work is unnecessary in the QUIESCED
* state since we have already scheduled quiesce_work and quiesced all
* ubqs.
*
* Do not let monitor_work schedule itself if the state is QUIESCED:
* cancel it here and re-schedule it in END_USER_RECOVERY to avoid the
* UAF.
*/
cancel_delayed_work_sync(&ub->monitor_work);
}
static void ublk_quiesce_work_fn(struct work_struct *work)
{
struct ublk_device *ub =
container_of(work, struct ublk_device, quiesce_work);
mutex_lock(&ub->mutex);
if (ub->dev_info.state != UBLK_S_DEV_LIVE)
goto unlock;
__ublk_quiesce_dev(ub);
unlock:
mutex_unlock(&ub->mutex);
}
static void ublk_unquiesce_dev(struct ublk_device *ub)
{
int i;
pr_devel("%s: unquiesce ub: dev_id %d state %s\n",
__func__, ub->dev_info.dev_id,
ub->dev_info.state == UBLK_S_DEV_LIVE ?
"LIVE" : "QUIESCED");
/*
* quiesce_work has run. We let requeued rqs be aborted
* before running fallback_wq. "force_abort" must be seen
* after the request queue is unquiesced. Then del_gendisk()
* can move on.
*/
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
ublk_get_queue(ub, i)->force_abort = true;
blk_mq_unquiesce_queue(ub->ub_disk->queue);
/* We may have requeued some rqs in ublk_quiesce_queue() */
blk_mq_kick_requeue_list(ub->ub_disk->queue);
}
static void ublk_stop_dev(struct ublk_device *ub)
{
mutex_lock(&ub->mutex);
if (ub->dev_info.state == UBLK_S_DEV_DEAD)
goto unlock;
if (ublk_can_use_recovery(ub)) {
if (ub->dev_info.state == UBLK_S_DEV_LIVE)
__ublk_quiesce_dev(ub);
ublk_unquiesce_dev(ub);
}
del_gendisk(ub->ub_disk);
ub->dev_info.state = UBLK_S_DEV_DEAD;
ub->dev_info.ublksrv_pid = -1;
put_disk(ub->ub_disk);
ub->ub_disk = NULL;
unlock:
ublk_cancel_dev(ub);
mutex_unlock(&ub->mutex);
cancel_delayed_work_sync(&ub->monitor_work);
}
/* device can only be started after all IOs are ready */
static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
{
mutex_lock(&ub->mutex);
ubq->nr_io_ready++;
if (ublk_queue_ready(ubq)) {
ubq->ubq_daemon = current;
get_task_struct(ubq->ubq_daemon);
ub->nr_queues_ready++;
if (capable(CAP_SYS_ADMIN))
ub->nr_privileged_daemon++;
}
if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
complete_all(&ub->completion);
mutex_unlock(&ub->mutex);
}
static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
int tag)
{
struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
ublk_queue_cmd(ubq, req);
}
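/*
* New-style ublk opcodes are ioctl-encoded with type 'u' (see
* UBLK_F_CMD_IOCTL_ENCODE); an _IOC_TYPE of 0 corresponds to the legacy
* plain opcodes, which are only accepted when
* CONFIG_BLKDEV_UBLK_LEGACY_OPCODES is enabled.
*/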
static inline int ublk_check_cmd_op(u32 cmd_op)
{
u32 ioc_type = _IOC_TYPE(cmd_op);
if (!IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u')
return -EOPNOTSUPP;
if (ioc_type != 'u' && ioc_type != 0)
return -EOPNOTSUPP;
return 0;
}
static inline void ublk_fill_io_cmd(struct ublk_io *io,
struct io_uring_cmd *cmd, unsigned long buf_addr)
{
io->cmd = cmd;
io->flags |= UBLK_IO_FLAG_ACTIVE;
io->addr = buf_addr;
}
static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags,
const struct ublksrv_io_cmd *ub_cmd)
{
struct ublk_device *ub = cmd->file->private_data;
struct ublk_queue *ubq;
struct ublk_io *io;
u32 cmd_op = cmd->cmd_op;
unsigned tag = ub_cmd->tag;
int ret = -EINVAL;
struct request *req;
pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
__func__, cmd->cmd_op, ub_cmd->q_id, tag,
ub_cmd->result);
if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
goto out;
ubq = ublk_get_queue(ub, ub_cmd->q_id);
if (!ubq || ub_cmd->q_id != ubq->q_id)
goto out;
if (ubq->ubq_daemon && ubq->ubq_daemon != current)
goto out;
if (tag >= ubq->q_depth)
goto out;
io = &ubq->ios[tag];
/* there is pending io cmd, something must be wrong */
if (io->flags & UBLK_IO_FLAG_ACTIVE) {
ret = -EBUSY;
goto out;
}
/*
* ensure that the user issues UBLK_IO_NEED_GET_DATA
* iff the driver has set UBLK_IO_FLAG_NEED_GET_DATA.
*/
if ((!!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA))
^ (_IOC_NR(cmd_op) == UBLK_IO_NEED_GET_DATA))
goto out;
ret = ublk_check_cmd_op(cmd_op);
if (ret)
goto out;
ret = -EINVAL;
switch (_IOC_NR(cmd_op)) {
case UBLK_IO_FETCH_REQ:
/* UBLK_IO_FETCH_REQ is only allowed before the queue is set up */
if (ublk_queue_ready(ubq)) {
ret = -EBUSY;
goto out;
}
/*
* The io is being handled by the server, so COMMIT_RQ is
* expected instead of FETCH_REQ
*/
if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
goto out;
if (!ublk_support_user_copy(ubq)) {
/*
* FETCH_REQ has to provide an IO buffer if
* NEED_GET_DATA is not enabled
*/
if (!ub_cmd->addr && !ublk_need_get_data(ubq))
goto out;
} else if (ub_cmd->addr) {
/* User copy requires addr to be unset */
ret = -EINVAL;
goto out;
}
ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
ublk_mark_io_ready(ub, ubq);
break;
case UBLK_IO_COMMIT_AND_FETCH_REQ:
req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
goto out;
if (!ublk_support_user_copy(ubq)) {
/*
* COMMIT_AND_FETCH_REQ has to provide an IO buffer if
* NEED_GET_DATA is not enabled or it is a READ IO.
*/
if (!ub_cmd->addr && (!ublk_need_get_data(ubq) ||
req_op(req) == REQ_OP_READ))
goto out;
} else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) {
/*
* User copy requires addr to be unset when the command
* is not zone append
*/
ret = -EINVAL;
goto out;
}
ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
ublk_commit_completion(ub, ub_cmd);
break;
case UBLK_IO_NEED_GET_DATA:
if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
goto out;
ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
break;
default:
goto out;
}
return -EIOCBQUEUED;
out:
io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
__func__, cmd_op, tag, ret, io->flags);
return -EIOCBQUEUED;
}
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
struct ublk_queue *ubq, int tag, size_t offset)
{
struct request *req;
if (!ublk_need_req_ref(ubq))
return NULL;
req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
if (!req)
return NULL;
if (!ublk_get_req_ref(ubq, req))
return NULL;
if (unlikely(!blk_mq_request_started(req) || req->tag != tag))
goto fail_put;
if (!ublk_rq_has_data(req))
goto fail_put;
if (offset > blk_rq_bytes(req))
goto fail_put;
return req;
fail_put:
ublk_put_req_ref(ubq, req);
return NULL;
}
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
/*
* Not necessary for async retry, but let's keep it simple and always
* copy the values to avoid any potential reuse.
*/
const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
const struct ublksrv_io_cmd ub_cmd = {
.q_id = READ_ONCE(ub_src->q_id),
.tag = READ_ONCE(ub_src->tag),
.result = READ_ONCE(ub_src->result),
.addr = READ_ONCE(ub_src->addr)
};
return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
}
static inline bool ublk_check_ubuf_dir(const struct request *req,
int ubuf_dir)
{
/* copy ubuf to request pages */
if ((req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN) &&
ubuf_dir == ITER_SOURCE)
return true;
/* copy request pages to ubuf */
if ((req_op(req) == REQ_OP_WRITE ||
req_op(req) == REQ_OP_ZONE_APPEND) &&
ubuf_dir == ITER_DEST)
return true;
return false;
}
static struct request *ublk_check_and_get_req(struct kiocb *iocb,
struct iov_iter *iter, size_t *off, int dir)
{
struct ublk_device *ub = iocb->ki_filp->private_data;
struct ublk_queue *ubq;
struct request *req;
size_t buf_off;
u16 tag, q_id;
if (!ub)
return ERR_PTR(-EACCES);
if (!user_backed_iter(iter))
return ERR_PTR(-EACCES);
if (ub->dev_info.state == UBLK_S_DEV_DEAD)
return ERR_PTR(-EACCES);
tag = ublk_pos_to_tag(iocb->ki_pos);
q_id = ublk_pos_to_hwq(iocb->ki_pos);
buf_off = ublk_pos_to_buf_off(iocb->ki_pos);
if (q_id >= ub->dev_info.nr_hw_queues)
return ERR_PTR(-EINVAL);
ubq = ublk_get_queue(ub, q_id);
if (!ubq)
return ERR_PTR(-EINVAL);
if (tag >= ubq->q_depth)
return ERR_PTR(-EINVAL);
req = __ublk_check_and_get_req(ub, ubq, tag, buf_off);
if (!req)
return ERR_PTR(-EINVAL);
if (!req->mq_hctx || !req->mq_hctx->driver_data)
goto fail;
if (!ublk_check_ubuf_dir(req, dir))
goto fail;
*off = buf_off;
return req;
fail:
ublk_put_req_ref(ubq, req);
return ERR_PTR(-EACCES);
}
static ssize_t ublk_ch_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct ublk_queue *ubq;
struct request *req;
size_t buf_off;
size_t ret;
req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST);
if (IS_ERR(req))
return PTR_ERR(req);
ret = ublk_copy_user_pages(req, buf_off, to, ITER_DEST);
ubq = req->mq_hctx->driver_data;
ublk_put_req_ref(ubq, req);
return ret;
}
static ssize_t ublk_ch_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct ublk_queue *ubq;
struct request *req;
size_t buf_off;
size_t ret;
req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE);
if (IS_ERR(req))
return PTR_ERR(req);
ret = ublk_copy_user_pages(req, buf_off, from, ITER_SOURCE);
ubq = req->mq_hctx->driver_data;
ublk_put_req_ref(ubq, req);
return ret;
}
static const struct file_operations ublk_ch_fops = {
.owner = THIS_MODULE,
.open = ublk_ch_open,
.release = ublk_ch_release,
.llseek = no_llseek,
.read_iter = ublk_ch_read_iter,
.write_iter = ublk_ch_write_iter,
.uring_cmd = ublk_ch_uring_cmd,
.mmap = ublk_ch_mmap,
};
static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
{
int size = ublk_queue_cmd_buf_size(ub, q_id);
struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
if (ubq->ubq_daemon)
put_task_struct(ubq->ubq_daemon);
if (ubq->io_cmd_buf)
free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
}
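/*
* The per-queue descriptor buffer must be physically contiguous and
* page-aligned because ublk_ch_mmap() hands it to userspace via
* remap_pfn_range(); hence __get_free_pages() rather than kvmalloc().
*/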
static int ublk_init_queue(struct ublk_device *ub, int q_id)
{
struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
void *ptr;
int size;
ubq->flags = ub->dev_info.flags;
ubq->q_id = q_id;
ubq->q_depth = ub->dev_info.queue_depth;
size = ublk_queue_cmd_buf_size(ub, q_id);
ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
if (!ptr)
return -ENOMEM;
ubq->io_cmd_buf = ptr;
ubq->dev = ub;
return 0;
}
static void ublk_deinit_queues(struct ublk_device *ub)
{
int nr_queues = ub->dev_info.nr_hw_queues;
int i;
if (!ub->__queues)
return;
for (i = 0; i < nr_queues; i++)
ublk_deinit_queue(ub, i);
kfree(ub->__queues);
}
static int ublk_init_queues(struct ublk_device *ub)
{
int nr_queues = ub->dev_info.nr_hw_queues;
int depth = ub->dev_info.queue_depth;
int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io);
int i, ret = -ENOMEM;
ub->queue_size = ubq_size;
ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL);
if (!ub->__queues)
return ret;
for (i = 0; i < nr_queues; i++) {
if (ublk_init_queue(ub, i))
goto fail;
}
init_completion(&ub->completion);
return 0;
fail:
ublk_deinit_queues(ub);
return ret;
}
static int ublk_alloc_dev_number(struct ublk_device *ub, int idx)
{
int i = idx;
int err;
spin_lock(&ublk_idr_lock);
/* allocate the id; if @idx >= 0, we're requesting that specific id */
if (i >= 0) {
err = idr_alloc(&ublk_index_idr, ub, i, i + 1, GFP_NOWAIT);
if (err == -ENOSPC)
err = -EEXIST;
} else {
err = idr_alloc(&ublk_index_idr, ub, 0, 0, GFP_NOWAIT);
}
spin_unlock(&ublk_idr_lock);
if (err >= 0)
ub->ub_number = err;
return err;
}
static void ublk_free_dev_number(struct ublk_device *ub)
{
spin_lock(&ublk_idr_lock);
idr_remove(&ublk_index_idr, ub->ub_number);
wake_up_all(&ublk_idr_wq);
spin_unlock(&ublk_idr_lock);
}
static void ublk_cdev_rel(struct device *dev)
{
struct ublk_device *ub = container_of(dev, struct ublk_device, cdev_dev);
blk_mq_free_tag_set(&ub->tag_set);
ublk_deinit_queues(ub);
ublk_free_dev_number(ub);
mutex_destroy(&ub->mutex);
kfree(ub);
}
static int ublk_add_chdev(struct ublk_device *ub)
{
struct device *dev = &ub->cdev_dev;
int minor = ub->ub_number;
int ret;
dev->parent = ublk_misc.this_device;
dev->devt = MKDEV(MAJOR(ublk_chr_devt), minor);
dev->class = &ublk_chr_class;
dev->release = ublk_cdev_rel;
device_initialize(dev);
ret = dev_set_name(dev, "ublkc%d", minor);
if (ret)
goto fail;
cdev_init(&ub->cdev, &ublk_ch_fops);
ret = cdev_device_add(&ub->cdev, dev);
if (ret)
goto fail;
ublks_added++;
return 0;
fail:
put_device(dev);
return ret;
}
static void ublk_stop_work_fn(struct work_struct *work)
{
struct ublk_device *ub =
container_of(work, struct ublk_device, stop_work);
ublk_stop_dev(ub);
}
/* align max io buffer size with PAGE_SIZE */
static void ublk_align_max_io_size(struct ublk_device *ub)
{
unsigned int max_io_bytes = ub->dev_info.max_io_buf_bytes;
ub->dev_info.max_io_buf_bytes =
round_down(max_io_bytes, PAGE_SIZE);
}
static int ublk_add_tag_set(struct ublk_device *ub)
{
ub->tag_set.ops = &ublk_mq_ops;
ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues;
ub->tag_set.queue_depth = ub->dev_info.queue_depth;
ub->tag_set.numa_node = NUMA_NO_NODE;
ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ub->tag_set.driver_data = ub;
return blk_mq_alloc_tag_set(&ub->tag_set);
}
static void ublk_remove(struct ublk_device *ub)
{
ublk_stop_dev(ub);
cancel_work_sync(&ub->stop_work);
cancel_work_sync(&ub->quiesce_work);
cdev_device_del(&ub->cdev, &ub->cdev_dev);
put_device(&ub->cdev_dev);
ublks_added--;
}
static struct ublk_device *ublk_get_device_from_id(int idx)
{
struct ublk_device *ub = NULL;
if (idx < 0)
return NULL;
spin_lock(&ublk_idr_lock);
ub = idr_find(&ublk_index_idr, idx);
if (ub)
ub = ublk_get_device(ub);
spin_unlock(&ublk_idr_lock);
return ub;
}
static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
{
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ublksrv_pid = (int)header->data[0];
struct gendisk *disk;
int ret = -EINVAL;
if (ublksrv_pid <= 0)
return -EINVAL;
if (wait_for_completion_interruptible(&ub->completion) != 0)
return -EINTR;
schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
mutex_lock(&ub->mutex);
if (ub->dev_info.state == UBLK_S_DEV_LIVE ||
test_bit(UB_STATE_USED, &ub->state)) {
ret = -EEXIST;
goto out_unlock;
}
disk = blk_mq_alloc_disk(&ub->tag_set, NULL);
if (IS_ERR(disk)) {
ret = PTR_ERR(disk);
goto out_unlock;
}
sprintf(disk->disk_name, "ublkb%d", ub->ub_number);
disk->fops = &ub_fops;
disk->private_data = ub;
ub->dev_info.ublksrv_pid = ublksrv_pid;
ub->ub_disk = disk;
ret = ublk_apply_params(ub);
if (ret)
goto out_put_disk;
/* don't probe partitions if any ubq daemon is untrusted */
if (ub->nr_privileged_daemon != ub->nr_queues_ready)
set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
get_device(&ub->cdev_dev);
ub->dev_info.state = UBLK_S_DEV_LIVE;
if (ublk_dev_is_zoned(ub)) {
ret = ublk_revalidate_disk_zones(ub);
if (ret)
goto out_put_cdev;
}
ret = add_disk(disk);
if (ret)
goto out_put_cdev;
set_bit(UB_STATE_USED, &ub->state);
out_put_cdev:
if (ret) {
ub->dev_info.state = UBLK_S_DEV_DEAD;
ublk_put_device(ub);
}
out_put_disk:
if (ret)
put_disk(disk);
out_unlock:
mutex_unlock(&ub->mutex);
return ret;
}
static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
cpumask_var_t cpumask;
unsigned long queue;
unsigned int retlen;
unsigned int i;
int ret;
if (header->len * BITS_PER_BYTE < nr_cpu_ids)
return -EINVAL;
if (header->len & (sizeof(unsigned long)-1))
return -EINVAL;
if (!header->addr)
return -EINVAL;
queue = header->data[0];
if (queue >= ub->dev_info.nr_hw_queues)
return -EINVAL;
if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
return -ENOMEM;
for_each_possible_cpu(i) {
if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
cpumask_set_cpu(i, cpumask);
}
ret = -EFAULT;
retlen = min_t(unsigned short, header->len, cpumask_size());
if (copy_to_user(argp, cpumask, retlen))
goto out_free_cpumask;
if (retlen != header->len &&
clear_user(argp + retlen, header->len - retlen))
goto out_free_cpumask;
ret = 0;
out_free_cpumask:
free_cpumask_var(cpumask);
return ret;
}
static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
{
pr_devel("%s: dev id %d flags %llx\n", __func__,
info->dev_id, info->flags);
pr_devel("\t nr_hw_queues %d queue_depth %d\n",
info->nr_hw_queues, info->queue_depth);
}
static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
{
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublksrv_ctrl_dev_info info;
struct ublk_device *ub;
int ret = -EINVAL;
if (header->len < sizeof(info) || !header->addr)
return -EINVAL;
if (header->queue_id != (u16)-1) {
pr_warn("%s: queue_id is wrong %x\n",
__func__, header->queue_id);
return -EINVAL;
}
if (copy_from_user(&info, argp, sizeof(info)))
return -EFAULT;
if (capable(CAP_SYS_ADMIN))
info.flags &= ~UBLK_F_UNPRIVILEGED_DEV;
else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
return -EPERM;
/*
* An unprivileged device can't be trusted, and RECOVERY and
* RECOVERY_REISSUE may still hang error handling, so recovery
* features can't be supported for unprivileged ublk for now
*
* TODO: provide forward progress for the RECOVERY handler, so that
* unprivileged devices can benefit from it
*/
if (info.flags & UBLK_F_UNPRIVILEGED_DEV)
info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE |
UBLK_F_USER_RECOVERY);
/* the created device is always owned by current user */
ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);
if (header->dev_id != info.dev_id) {
pr_warn("%s: dev id not match %u %u\n",
__func__, header->dev_id, info.dev_id);
return -EINVAL;
}
ublk_dump_dev_info(&info);
ret = mutex_lock_killable(&ublk_ctl_mutex);
if (ret)
return ret;
ret = -EACCES;
if (ublks_added >= ublks_max)
goto out_unlock;
ret = -ENOMEM;
ub = kzalloc(sizeof(*ub), GFP_KERNEL);
if (!ub)
goto out_unlock;
mutex_init(&ub->mutex);
spin_lock_init(&ub->mm_lock);
INIT_WORK(&ub->quiesce_work, ublk_quiesce_work_fn);
INIT_WORK(&ub->stop_work, ublk_stop_work_fn);
INIT_DELAYED_WORK(&ub->monitor_work, ublk_daemon_monitor_work);
ret = ublk_alloc_dev_number(ub, header->dev_id);
if (ret < 0)
goto out_free_ub;
memcpy(&ub->dev_info, &info, sizeof(info));
/* update device id */
ub->dev_info.dev_id = ub->ub_number;
/*
* The 64-bit flags are copied back to userspace as the feature
* negotiation result, so clear any flags the driver doesn't
* support yet; userspace then gets the correct set of flags
* (features) to handle.
*/
ub->dev_info.flags &= UBLK_F_ALL;
ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
UBLK_F_URING_CMD_COMP_IN_TASK;
/* GET_DATA isn't needed any more with USER_COPY */
if (ublk_dev_is_user_copy(ub))
ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;
/* Zoned storage support requires user copy feature */
if (ublk_dev_is_zoned(ub) &&
(!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !ublk_dev_is_user_copy(ub))) {
ret = -EINVAL;
goto out_free_dev_number;
}
/* We are not ready to support zero copy */
ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
ub->dev_info.nr_hw_queues = min_t(unsigned int,
ub->dev_info.nr_hw_queues, nr_cpu_ids);
ublk_align_max_io_size(ub);
ret = ublk_init_queues(ub);
if (ret)
goto out_free_dev_number;
ret = ublk_add_tag_set(ub);
if (ret)
goto out_deinit_queues;
ret = -EFAULT;
if (copy_to_user(argp, &ub->dev_info, sizeof(info)))
goto out_free_tag_set;
/*
* Add the char dev so that the ublksrv daemon can be set up.
* ublk_add_chdev() will clean up everything if it fails.
*/
ret = ublk_add_chdev(ub);
goto out_unlock;
out_free_tag_set:
blk_mq_free_tag_set(&ub->tag_set);
out_deinit_queues:
ublk_deinit_queues(ub);
out_free_dev_number:
ublk_free_dev_number(ub);
out_free_ub:
mutex_destroy(&ub->mutex);
kfree(ub);
out_unlock:
mutex_unlock(&ublk_ctl_mutex);
return ret;
}
static inline bool ublk_idr_freed(int id)
{
void *ptr;
spin_lock(&ublk_idr_lock);
ptr = idr_find(&ublk_index_idr, id);
spin_unlock(&ublk_idr_lock);
return ptr == NULL;
}
static int ublk_ctrl_del_dev(struct ublk_device **p_ub)
{
struct ublk_device *ub = *p_ub;
int idx = ub->ub_number;
int ret;
ret = mutex_lock_killable(&ublk_ctl_mutex);
if (ret)
return ret;
if (!test_bit(UB_STATE_DELETED, &ub->state)) {
ublk_remove(ub);
set_bit(UB_STATE_DELETED, &ub->state);
}
/* Mark the reference as consumed */
*p_ub = NULL;
ublk_put_device(ub);
mutex_unlock(&ublk_ctl_mutex);
/*
* Wait until the idr entry is removed, then the id can be reused
* after the DEL_DEV command returns.
*
* If we return early because of a user interrupt, a future delete
* command may still arrive:
*
* - if the device number isn't freed yet, this device won't and
* needn't be deleted again, since UB_STATE_DELETED is set and the
* device will be released after the last reference is dropped
*
* - if the device number is freed already, we will not find this
* device via ublk_get_device_from_id()
*/
if (wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx)))
return -EINTR;
return 0;
}
static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
{
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
pr_devel("%s: cmd_op %x, dev id %d qid %d data %llx buf %llx len %u\n",
__func__, cmd->cmd_op, header->dev_id, header->queue_id,
header->data[0], header->addr, header->len);
}
static int ublk_ctrl_stop_dev(struct ublk_device *ub)
{
ublk_stop_dev(ub);
cancel_work_sync(&ub->stop_work);
cancel_work_sync(&ub->quiesce_work);
return 0;
}
static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
return -EINVAL;
if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info)))
return -EFAULT;
return 0;
}
/* TYPE_DEVT is readonly, so fill it up before returning to userspace */
static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
{
ub->params.devt.char_major = MAJOR(ub->cdev_dev.devt);
ub->params.devt.char_minor = MINOR(ub->cdev_dev.devt);
if (ub->ub_disk) {
ub->params.devt.disk_major = MAJOR(disk_devt(ub->ub_disk));
ub->params.devt.disk_minor = MINOR(disk_devt(ub->ub_disk));
} else {
ub->params.devt.disk_major = 0;
ub->params.devt.disk_minor = 0;
}
ub->params.types |= UBLK_PARAM_TYPE_DEVT;
}
static int ublk_ctrl_get_params(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublk_params_header ph;
int ret;
if (header->len <= sizeof(ph) || !header->addr)
return -EINVAL;
if (copy_from_user(&ph, argp, sizeof(ph)))
return -EFAULT;
if (ph.len > header->len || !ph.len)
return -EINVAL;
if (ph.len > sizeof(struct ublk_params))
ph.len = sizeof(struct ublk_params);
mutex_lock(&ub->mutex);
ublk_ctrl_fill_params_devt(ub);
if (copy_to_user(argp, &ub->params, ph.len))
ret = -EFAULT;
else
ret = 0;
mutex_unlock(&ub->mutex);
return ret;
}
static int ublk_ctrl_set_params(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublk_params_header ph;
int ret = -EFAULT;
if (header->len <= sizeof(ph) || !header->addr)
return -EINVAL;
if (copy_from_user(&ph, argp, sizeof(ph)))
return -EFAULT;
if (ph.len > header->len || !ph.len || !ph.types)
return -EINVAL;
if (ph.len > sizeof(struct ublk_params))
ph.len = sizeof(struct ublk_params);
/* parameters can only be changed when device isn't live */
mutex_lock(&ub->mutex);
if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
ret = -EACCES;
} else if (copy_from_user(&ub->params, argp, ph.len)) {
ret = -EFAULT;
} else {
/* clear all we don't support yet */
ub->params.types &= UBLK_PARAM_TYPE_ALL;
ret = ublk_validate_params(ub);
if (ret)
ub->params.types = 0;
}
mutex_unlock(&ub->mutex);
return ret;
}
static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
{
int i;
WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
/* All old ioucmds have to be completed */
WARN_ON_ONCE(ubq->nr_io_ready);
/* old daemon is PF_EXITING, put it now */
put_task_struct(ubq->ubq_daemon);
/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
ubq->ubq_daemon = NULL;
ubq->timeout = false;
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
/* forget everything now and be ready for new FETCH_REQ */
io->flags = 0;
io->cmd = NULL;
io->addr = 0;
}
}
static int ublk_ctrl_start_recovery(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ret = -EINVAL;
int i;
mutex_lock(&ub->mutex);
if (!ublk_can_use_recovery(ub))
goto out_unlock;
/*
* START_RECOVERY is only allowed after:
*
* (1) UB_STATE_OPEN is not set, which means the dying process has
* exited and the related io_uring ctx is freed, so the file struct
* of /dev/ublkcX is released.
*
* (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work:
* (a) has quiesced the request queue
* (b) has requeued every inflight rq whose io_flags is ACTIVE
* (c) has requeued/aborted every inflight rq whose io_flags is NOT
* ACTIVE
* (d) has completed/canceled all ioucmds owned by the dying process
*/
if (test_bit(UB_STATE_OPEN, &ub->state) ||
ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
ret = -EBUSY;
goto out_unlock;
}
pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
ublk_queue_reinit(ub, ublk_get_queue(ub, i));
/* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
ub->mm = NULL;
ub->nr_queues_ready = 0;
ub->nr_privileged_daemon = 0;
init_completion(&ub->completion);
ret = 0;
out_unlock:
mutex_unlock(&ub->mutex);
return ret;
}
static int ublk_ctrl_end_recovery(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ublksrv_pid = (int)header->data[0];
int ret = -EINVAL;
pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
__func__, ub->dev_info.nr_hw_queues, header->dev_id);
/* wait until the new ubq_daemon has sent all of its FETCH_REQs */
if (wait_for_completion_interruptible(&ub->completion))
return -EINTR;
pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
__func__, ub->dev_info.nr_hw_queues, header->dev_id);
mutex_lock(&ub->mutex);
if (!ublk_can_use_recovery(ub))
goto out_unlock;
if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
ret = -EBUSY;
goto out_unlock;
}
ub->dev_info.ublksrv_pid = ublksrv_pid;
pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
__func__, ublksrv_pid, header->dev_id);
blk_mq_unquiesce_queue(ub->ub_disk->queue);
pr_devel("%s: queue unquiesced, dev id %d.\n",
__func__, header->dev_id);
blk_mq_kick_requeue_list(ub->ub_disk->queue);
ub->dev_info.state = UBLK_S_DEV_LIVE;
schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
ret = 0;
out_unlock:
mutex_unlock(&ub->mutex);
return ret;
}
static int ublk_ctrl_get_features(struct io_uring_cmd *cmd)
{
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
u64 features = UBLK_F_ALL & ~UBLK_F_SUPPORT_ZERO_COPY;
if (header->len != UBLK_FEATURES_LEN || !header->addr)
return -EINVAL;
if (copy_to_user(argp, &features, UBLK_FEATURES_LEN))
return -EFAULT;
return 0;
}
/*
* All control commands are sent via /dev/ublk-control, so we have to check
* the destination device's permission
*/
static int ublk_char_dev_permission(struct ublk_device *ub,
const char *dev_path, int mask)
{
int err;
struct path path;
struct kstat stat;
err = kern_path(dev_path, LOOKUP_FOLLOW, &path);
if (err)
return err;
err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
if (err)
goto exit;
err = -EPERM;
if (stat.rdev != ub->cdev_dev.devt || !S_ISCHR(stat.mode))
goto exit;
err = inode_permission(&nop_mnt_idmap,
d_backing_inode(path.dentry), mask);
exit:
path_put(&path);
return err;
}
static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)io_uring_sqe_cmd(cmd->sqe);
bool unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
void __user *argp = (void __user *)(unsigned long)header->addr;
char *dev_path = NULL;
int ret = 0;
int mask;
if (!unprivileged) {
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
/*
		 * The newly added UBLK_CMD_GET_DEV_INFO2 command includes
		 * char_dev_path in its payload too, since userspace may not
		 * know whether the specified device was created in
		 * unprivileged mode.
*/
if (_IOC_NR(cmd->cmd_op) != UBLK_CMD_GET_DEV_INFO2)
return 0;
}
/*
* User has to provide the char device path for unprivileged ublk
*
* header->addr always points to the dev path buffer, and
	 * header->dev_path_len records the length of the dev path buffer.
*/
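	/*
	 * For example, for a GET_DEV_INFO2 call against /dev/ublkc0, the
	 * buffer at header->addr would start with "/dev/ublkc0\0"
	 * (dev_path_len == 12), followed by the command's own payload;
	 * once the permission check passes, addr/len are advanced past
	 * the path below.
	 */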
if (!header->dev_path_len || header->dev_path_len > PATH_MAX)
return -EINVAL;
if (header->len < header->dev_path_len)
return -EINVAL;
dev_path = memdup_user_nul(argp, header->dev_path_len);
if (IS_ERR(dev_path))
return PTR_ERR(dev_path);
ret = -EINVAL;
switch (_IOC_NR(cmd->cmd_op)) {
case UBLK_CMD_GET_DEV_INFO:
case UBLK_CMD_GET_DEV_INFO2:
case UBLK_CMD_GET_QUEUE_AFFINITY:
case UBLK_CMD_GET_PARAMS:
case (_IOC_NR(UBLK_U_CMD_GET_FEATURES)):
mask = MAY_READ;
break;
case UBLK_CMD_START_DEV:
case UBLK_CMD_STOP_DEV:
case UBLK_CMD_ADD_DEV:
case UBLK_CMD_DEL_DEV:
case UBLK_CMD_SET_PARAMS:
case UBLK_CMD_START_USER_RECOVERY:
case UBLK_CMD_END_USER_RECOVERY:
mask = MAY_READ | MAY_WRITE;
break;
default:
goto exit;
}
ret = ublk_char_dev_permission(ub, dev_path, mask);
if (!ret) {
header->len -= header->dev_path_len;
header->addr += header->dev_path_len;
}
pr_devel("%s: dev id %d cmd_op %x uid %d gid %d path %s ret %d\n",
__func__, ub->ub_number, cmd->cmd_op,
ub->dev_info.owner_uid, ub->dev_info.owner_gid,
dev_path, ret);
exit:
kfree(dev_path);
return ret;
}
static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
struct ublk_device *ub = NULL;
u32 cmd_op = cmd->cmd_op;
int ret = -EINVAL;
if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
ublk_ctrl_cmd_dump(cmd);
if (!(issue_flags & IO_URING_F_SQE128))
goto out;
ret = ublk_check_cmd_op(cmd_op);
if (ret)
goto out;
if (cmd_op == UBLK_U_CMD_GET_FEATURES) {
ret = ublk_ctrl_get_features(cmd);
goto out;
}
if (_IOC_NR(cmd_op) != UBLK_CMD_ADD_DEV) {
ret = -ENODEV;
ub = ublk_get_device_from_id(header->dev_id);
if (!ub)
goto out;
ret = ublk_ctrl_uring_cmd_permission(ub, cmd);
if (ret)
goto put_dev;
}
switch (_IOC_NR(cmd_op)) {
case UBLK_CMD_START_DEV:
ret = ublk_ctrl_start_dev(ub, cmd);
break;
case UBLK_CMD_STOP_DEV:
ret = ublk_ctrl_stop_dev(ub);
break;
case UBLK_CMD_GET_DEV_INFO:
case UBLK_CMD_GET_DEV_INFO2:
ret = ublk_ctrl_get_dev_info(ub, cmd);
break;
case UBLK_CMD_ADD_DEV:
ret = ublk_ctrl_add_dev(cmd);
break;
case UBLK_CMD_DEL_DEV:
ret = ublk_ctrl_del_dev(&ub);
break;
case UBLK_CMD_GET_QUEUE_AFFINITY:
ret = ublk_ctrl_get_queue_affinity(ub, cmd);
break;
case UBLK_CMD_GET_PARAMS:
ret = ublk_ctrl_get_params(ub, cmd);
break;
case UBLK_CMD_SET_PARAMS:
ret = ublk_ctrl_set_params(ub, cmd);
break;
case UBLK_CMD_START_USER_RECOVERY:
ret = ublk_ctrl_start_recovery(ub, cmd);
break;
case UBLK_CMD_END_USER_RECOVERY:
ret = ublk_ctrl_end_recovery(ub, cmd);
break;
default:
ret = -ENOTSUPP;
break;
}
put_dev:
if (ub)
ublk_put_device(ub);
out:
io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
__func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
return -EIOCBQUEUED;
}
static const struct file_operations ublk_ctl_fops = {
.open = nonseekable_open,
.uring_cmd = ublk_ctrl_uring_cmd,
.owner = THIS_MODULE,
.llseek = noop_llseek,
};
static struct miscdevice ublk_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "ublk-control",
.fops = &ublk_ctl_fops,
};
static int __init ublk_init(void)
{
int ret;
BUILD_BUG_ON((u64)UBLKSRV_IO_BUF_OFFSET +
UBLKSRV_IO_BUF_TOTAL_SIZE < UBLKSRV_IO_BUF_OFFSET);
init_waitqueue_head(&ublk_idr_wq);
ret = misc_register(&ublk_misc);
if (ret)
return ret;
ret = alloc_chrdev_region(&ublk_chr_devt, 0, UBLK_MINORS, "ublk-char");
if (ret)
goto unregister_mis;
ret = class_register(&ublk_chr_class);
if (ret)
goto free_chrdev_region;
return 0;
free_chrdev_region:
unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
unregister_mis:
misc_deregister(&ublk_misc);
return ret;
}
static void __exit ublk_exit(void)
{
struct ublk_device *ub;
int id;
idr_for_each_entry(&ublk_index_idr, ub, id)
ublk_remove(ub);
class_unregister(&ublk_chr_class);
misc_deregister(&ublk_misc);
idr_destroy(&ublk_index_idr);
unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
}
module_init(ublk_init);
module_exit(ublk_exit);
module_param(ublks_max, int, 0444);
MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to be added (default: 64)");
MODULE_AUTHOR("Ming Lei <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/block/ublk_drv.c |
// SPDX-License-Identifier: GPL-2.0-only
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#include <linux/vmalloc.h>
#include <uapi/linux/virtio_ring.h>
#define PART_BITS 4
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u
/* The maximum number of sg elements that fit into a virtqueue */
#define VIRTIO_BLK_MAX_SG_ELEMS 32768
#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define VIRTIO_BLK_INLINE_SG_CNT 0
#else
#define VIRTIO_BLK_INLINE_SG_CNT 2
#endif
static unsigned int num_request_queues;
module_param(num_request_queues, uint, 0644);
MODULE_PARM_DESC(num_request_queues,
"Limit the number of request queues to use for blk device. "
"0 for no limit. "
"Values > nr_cpu_ids truncated to nr_cpu_ids.");
static unsigned int poll_queues;
module_param(poll_queues, uint, 0644);
MODULE_PARM_DESC(poll_queues, "The number of dedicated virtqueues for polling I/O");
static int major;
static DEFINE_IDA(vd_index_ida);
static struct workqueue_struct *virtblk_wq;
struct virtio_blk_vq {
struct virtqueue *vq;
spinlock_t lock;
char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;
struct virtio_blk {
/*
* This mutex must be held by anything that may run after
* virtblk_remove() sets vblk->vdev to NULL.
*
* blk-mq, virtqueue processing, and sysfs attribute code paths are
* shut down before vblk->vdev is set to NULL and therefore do not need
* to hold this mutex.
*/
struct mutex vdev_mutex;
struct virtio_device *vdev;
/* The disk structure for the kernel. */
struct gendisk *disk;
/* Block layer tags. */
struct blk_mq_tag_set tag_set;
/* Process context for config space updates */
struct work_struct config_work;
/* Ida index - used to track minor number allocations. */
int index;
/* num of vqs */
int num_vqs;
int io_queues[HCTX_MAX_TYPES];
struct virtio_blk_vq *vqs;
/* For zoned device */
unsigned int zone_sectors;
};
struct virtblk_req {
/* Out header */
struct virtio_blk_outhdr out_hdr;
/* In header */
union {
u8 status;
/*
* The zone append command has an extended in header.
* The status field in zone_append_in_hdr must always
* be the last byte.
*/
struct {
__virtio64 sector;
u8 status;
} zone_append;
} in_hdr;
size_t in_hdr_len;
struct sg_table sg_table;
struct scatterlist sg[];
};
static inline blk_status_t virtblk_result(u8 status)
{
switch (status) {
case VIRTIO_BLK_S_OK:
return BLK_STS_OK;
case VIRTIO_BLK_S_UNSUPP:
return BLK_STS_NOTSUPP;
case VIRTIO_BLK_S_ZONE_OPEN_RESOURCE:
return BLK_STS_ZONE_OPEN_RESOURCE;
case VIRTIO_BLK_S_ZONE_ACTIVE_RESOURCE:
return BLK_STS_ZONE_ACTIVE_RESOURCE;
case VIRTIO_BLK_S_IOERR:
case VIRTIO_BLK_S_ZONE_UNALIGNED_WP:
default:
return BLK_STS_IOERR;
}
}
static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
{
struct virtio_blk *vblk = hctx->queue->queuedata;
struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
return vq;
}
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
{
struct scatterlist out_hdr, in_hdr, *sgs[3];
unsigned int num_out = 0, num_in = 0;
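	/*
	 * Descriptor layout: the out-header first, then the data buffer
	 * (device-readable for writes, device-writable for reads), and
	 * finally the in-header that carries the status byte.
	 */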
sg_init_one(&out_hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
sgs[num_out++] = &out_hdr;
if (vbr->sg_table.nents) {
if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
sgs[num_out++] = vbr->sg_table.sgl;
else
sgs[num_out + num_in++] = vbr->sg_table.sgl;
}
sg_init_one(&in_hdr, &vbr->in_hdr.status, vbr->in_hdr_len);
sgs[num_out + num_in++] = &in_hdr;
return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
static int virtblk_setup_discard_write_zeroes_erase(struct request *req, bool unmap)
{
unsigned short segments = blk_rq_nr_discard_segments(req);
unsigned short n = 0;
struct virtio_blk_discard_write_zeroes *range;
struct bio *bio;
u32 flags = 0;
if (unmap)
flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;
range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
if (!range)
return -ENOMEM;
/*
	 * A single max discard segment means multi-range discard isn't
	 * supported, and the block layer only runs contiguity merging as
	 * for normal RW requests. So we can't rely on the bios for
	 * retrieving each range's info.
*/
if (queue_max_discard_segments(req->q) == 1) {
range[0].flags = cpu_to_le32(flags);
range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
range[0].sector = cpu_to_le64(blk_rq_pos(req));
n = 1;
} else {
__rq_for_each_bio(bio, req) {
u64 sector = bio->bi_iter.bi_sector;
u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
range[n].flags = cpu_to_le32(flags);
range[n].num_sectors = cpu_to_le32(num_sectors);
range[n].sector = cpu_to_le64(sector);
n++;
}
}
WARN_ON_ONCE(n != segments);
bvec_set_virt(&req->special_vec, range, sizeof(*range) * segments);
req->rq_flags |= RQF_SPECIAL_PAYLOAD;
return 0;
}
static void virtblk_unmap_data(struct request *req, struct virtblk_req *vbr)
{
if (blk_rq_nr_phys_segments(req))
sg_free_table_chained(&vbr->sg_table,
VIRTIO_BLK_INLINE_SG_CNT);
}
static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,
struct virtblk_req *vbr)
{
int err;
if (!blk_rq_nr_phys_segments(req))
return 0;
vbr->sg_table.sgl = vbr->sg;
err = sg_alloc_table_chained(&vbr->sg_table,
blk_rq_nr_phys_segments(req),
vbr->sg_table.sgl,
VIRTIO_BLK_INLINE_SG_CNT);
if (unlikely(err))
return -ENOMEM;
return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);
}
static void virtblk_cleanup_cmd(struct request *req)
{
if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
kfree(bvec_virt(&req->special_vec));
}
static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
struct request *req,
struct virtblk_req *vbr)
{
size_t in_hdr_len = sizeof(vbr->in_hdr.status);
bool unmap = false;
u32 type;
u64 sector = 0;
if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && op_is_zone_mgmt(req_op(req)))
return BLK_STS_NOTSUPP;
/* Set fields for all request types */
vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));
switch (req_op(req)) {
case REQ_OP_READ:
type = VIRTIO_BLK_T_IN;
sector = blk_rq_pos(req);
break;
case REQ_OP_WRITE:
type = VIRTIO_BLK_T_OUT;
sector = blk_rq_pos(req);
break;
case REQ_OP_FLUSH:
type = VIRTIO_BLK_T_FLUSH;
break;
case REQ_OP_DISCARD:
type = VIRTIO_BLK_T_DISCARD;
break;
case REQ_OP_WRITE_ZEROES:
type = VIRTIO_BLK_T_WRITE_ZEROES;
unmap = !(req->cmd_flags & REQ_NOUNMAP);
break;
case REQ_OP_SECURE_ERASE:
type = VIRTIO_BLK_T_SECURE_ERASE;
break;
case REQ_OP_ZONE_OPEN:
type = VIRTIO_BLK_T_ZONE_OPEN;
sector = blk_rq_pos(req);
break;
case REQ_OP_ZONE_CLOSE:
type = VIRTIO_BLK_T_ZONE_CLOSE;
sector = blk_rq_pos(req);
break;
case REQ_OP_ZONE_FINISH:
type = VIRTIO_BLK_T_ZONE_FINISH;
sector = blk_rq_pos(req);
break;
case REQ_OP_ZONE_APPEND:
type = VIRTIO_BLK_T_ZONE_APPEND;
sector = blk_rq_pos(req);
in_hdr_len = sizeof(vbr->in_hdr.zone_append);
break;
case REQ_OP_ZONE_RESET:
type = VIRTIO_BLK_T_ZONE_RESET;
sector = blk_rq_pos(req);
break;
case REQ_OP_ZONE_RESET_ALL:
type = VIRTIO_BLK_T_ZONE_RESET_ALL;
break;
case REQ_OP_DRV_IN:
/*
* Out header has already been prepared by the caller (virtblk_get_id()
* or virtblk_submit_zone_report()), nothing to do here.
*/
return 0;
default:
WARN_ON_ONCE(1);
return BLK_STS_IOERR;
}
/* Set fields for non-REQ_OP_DRV_IN request types */
vbr->in_hdr_len = in_hdr_len;
vbr->out_hdr.type = cpu_to_virtio32(vdev, type);
vbr->out_hdr.sector = cpu_to_virtio64(vdev, sector);
if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES ||
type == VIRTIO_BLK_T_SECURE_ERASE) {
if (virtblk_setup_discard_write_zeroes_erase(req, unmap))
return BLK_STS_RESOURCE;
}
return 0;
}
/*
* The status byte is always the last byte of the virtblk request
* in-header. This helper fetches its value for all in-header formats
* that are currently defined.
*/
static inline u8 virtblk_vbr_status(struct virtblk_req *vbr)
{
return *((u8 *)&vbr->in_hdr + vbr->in_hdr_len - 1);
}
static inline void virtblk_request_done(struct request *req)
{
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
blk_status_t status = virtblk_result(virtblk_vbr_status(vbr));
struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
virtblk_unmap_data(req, vbr);
virtblk_cleanup_cmd(req);
if (req_op(req) == REQ_OP_ZONE_APPEND)
req->__sector = virtio64_to_cpu(vblk->vdev,
vbr->in_hdr.zone_append.sector);
blk_mq_end_request(req, status);
}
static void virtblk_done(struct virtqueue *vq)
{
struct virtio_blk *vblk = vq->vdev->priv;
bool req_done = false;
int qid = vq->index;
struct virtblk_req *vbr;
unsigned long flags;
unsigned int len;
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
do {
virtqueue_disable_cb(vq);
while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
struct request *req = blk_mq_rq_from_pdu(vbr);
if (likely(!blk_should_fake_timeout(req->q)))
blk_mq_complete_request(req);
req_done = true;
}
if (unlikely(virtqueue_is_broken(vq)))
break;
} while (!virtqueue_enable_cb(vq));
	/* In case the queue is stopped waiting for more buffers. */
if (req_done)
blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}
static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
struct virtio_blk *vblk = hctx->queue->queuedata;
struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
bool kick;
spin_lock_irq(&vq->lock);
kick = virtqueue_kick_prepare(vq->vq);
spin_unlock_irq(&vq->lock);
if (kick)
virtqueue_notify(vq->vq);
}
static blk_status_t virtblk_fail_to_queue(struct request *req, int rc)
{
virtblk_cleanup_cmd(req);
switch (rc) {
case -ENOSPC:
return BLK_STS_DEV_RESOURCE;
case -ENOMEM:
return BLK_STS_RESOURCE;
default:
return BLK_STS_IOERR;
}
}
static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
struct virtio_blk *vblk,
struct request *req,
struct virtblk_req *vbr)
{
blk_status_t status;
int num;
status = virtblk_setup_cmd(vblk->vdev, req, vbr);
if (unlikely(status))
return status;
num = virtblk_map_data(hctx, req, vbr);
if (unlikely(num < 0))
return virtblk_fail_to_queue(req, -ENOMEM);
vbr->sg_table.nents = num;
blk_mq_start_request(req);
return BLK_STS_OK;
}
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct virtio_blk *vblk = hctx->queue->queuedata;
struct request *req = bd->rq;
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
unsigned long flags;
int qid = hctx->queue_num;
bool notify = false;
blk_status_t status;
int err;
status = virtblk_prep_rq(hctx, vblk, req, vbr);
if (unlikely(status))
return status;
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
if (err) {
virtqueue_kick(vblk->vqs[qid].vq);
/* Don't stop the queue if -ENOMEM: we may have failed to
* bounce the buffer due to global resource outage.
*/
if (err == -ENOSPC)
blk_mq_stop_hw_queue(hctx);
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
virtblk_unmap_data(req, vbr);
return virtblk_fail_to_queue(req, err);
}
if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
notify = true;
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
if (notify)
virtqueue_notify(vblk->vqs[qid].vq);
return BLK_STS_OK;
}
static bool virtblk_prep_rq_batch(struct request *req)
{
struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
req->mq_hctx->tags->rqs[req->tag] = req;
return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
}
static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
struct request **rqlist)
{
unsigned long flags;
int err;
bool kick;
spin_lock_irqsave(&vq->lock, flags);
while (!rq_list_empty(*rqlist)) {
struct request *req = rq_list_pop(rqlist);
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
err = virtblk_add_req(vq->vq, vbr);
if (err) {
virtblk_unmap_data(req, vbr);
virtblk_cleanup_cmd(req);
blk_mq_requeue_request(req, true);
}
}
kick = virtqueue_kick_prepare(vq->vq);
spin_unlock_irqrestore(&vq->lock, flags);
return kick;
}
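/*
 * Submit a plugged list of requests: contiguous runs that share a hw queue
 * are added under one vq lock and kicked at most once, while requests that
 * fail preparation are collected on a local list and handed back to the
 * block layer for requeue.
 */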
static void virtio_queue_rqs(struct request **rqlist)
{
struct request *req, *next, *prev = NULL;
struct request *requeue_list = NULL;
rq_list_for_each_safe(rqlist, req, next) {
struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
bool kick;
if (!virtblk_prep_rq_batch(req)) {
rq_list_move(rqlist, &requeue_list, req, prev);
req = prev;
if (!req)
continue;
}
if (!next || req->mq_hctx != next->mq_hctx) {
req->rq_next = NULL;
kick = virtblk_add_req_batch(vq, rqlist);
if (kick)
virtqueue_notify(vq->vq);
*rqlist = next;
prev = NULL;
} else
prev = req;
}
*rqlist = requeue_list;
}
#ifdef CONFIG_BLK_DEV_ZONED
static void *virtblk_alloc_report_buffer(struct virtio_blk *vblk,
unsigned int nr_zones,
size_t *buflen)
{
struct request_queue *q = vblk->disk->queue;
size_t bufsize;
void *buf;
nr_zones = min_t(unsigned int, nr_zones,
get_capacity(vblk->disk) >> ilog2(vblk->zone_sectors));
bufsize = sizeof(struct virtio_blk_zone_report) +
nr_zones * sizeof(struct virtio_blk_zone_descriptor);
bufsize = min_t(size_t, bufsize,
queue_max_hw_sectors(q) << SECTOR_SHIFT);
bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
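	/* Halve the buffer size until the no-retry vmalloc succeeds. */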
while (bufsize >= sizeof(struct virtio_blk_zone_report)) {
buf = __vmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
if (buf) {
*buflen = bufsize;
return buf;
}
bufsize >>= 1;
}
return NULL;
}
static int virtblk_submit_zone_report(struct virtio_blk *vblk,
char *report_buf, size_t report_len,
sector_t sector)
{
struct request_queue *q = vblk->disk->queue;
struct request *req;
struct virtblk_req *vbr;
int err;
req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
return PTR_ERR(req);
vbr = blk_mq_rq_to_pdu(req);
vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT);
vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector);
err = blk_rq_map_kern(q, req, report_buf, report_len, GFP_KERNEL);
if (err)
goto out;
blk_execute_rq(req, false);
err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status));
out:
blk_mq_free_request(req);
return err;
}
static int virtblk_parse_zone(struct virtio_blk *vblk,
struct virtio_blk_zone_descriptor *entry,
unsigned int idx, report_zones_cb cb, void *data)
{
struct blk_zone zone = { };
zone.start = virtio64_to_cpu(vblk->vdev, entry->z_start);
if (zone.start + vblk->zone_sectors <= get_capacity(vblk->disk))
zone.len = vblk->zone_sectors;
else
zone.len = get_capacity(vblk->disk) - zone.start;
zone.capacity = virtio64_to_cpu(vblk->vdev, entry->z_cap);
zone.wp = virtio64_to_cpu(vblk->vdev, entry->z_wp);
switch (entry->z_type) {
case VIRTIO_BLK_ZT_SWR:
zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
break;
case VIRTIO_BLK_ZT_SWP:
zone.type = BLK_ZONE_TYPE_SEQWRITE_PREF;
break;
case VIRTIO_BLK_ZT_CONV:
zone.type = BLK_ZONE_TYPE_CONVENTIONAL;
break;
default:
dev_err(&vblk->vdev->dev, "zone %llu: invalid type %#x\n",
zone.start, entry->z_type);
return -EIO;
}
switch (entry->z_state) {
case VIRTIO_BLK_ZS_EMPTY:
zone.cond = BLK_ZONE_COND_EMPTY;
break;
case VIRTIO_BLK_ZS_CLOSED:
zone.cond = BLK_ZONE_COND_CLOSED;
break;
case VIRTIO_BLK_ZS_FULL:
zone.cond = BLK_ZONE_COND_FULL;
zone.wp = zone.start + zone.len;
break;
case VIRTIO_BLK_ZS_EOPEN:
zone.cond = BLK_ZONE_COND_EXP_OPEN;
break;
case VIRTIO_BLK_ZS_IOPEN:
zone.cond = BLK_ZONE_COND_IMP_OPEN;
break;
case VIRTIO_BLK_ZS_NOT_WP:
zone.cond = BLK_ZONE_COND_NOT_WP;
break;
case VIRTIO_BLK_ZS_RDONLY:
zone.cond = BLK_ZONE_COND_READONLY;
zone.wp = ULONG_MAX;
break;
case VIRTIO_BLK_ZS_OFFLINE:
zone.cond = BLK_ZONE_COND_OFFLINE;
zone.wp = ULONG_MAX;
break;
default:
dev_err(&vblk->vdev->dev, "zone %llu: invalid condition %#x\n",
zone.start, entry->z_state);
return -EIO;
}
/*
* The callback below checks the validity of the reported
* entry data, no need to further validate it here.
*/
return cb(&zone, idx, data);
}
static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb,
void *data)
{
struct virtio_blk *vblk = disk->private_data;
struct virtio_blk_zone_report *report;
unsigned long long nz, i;
size_t buflen;
unsigned int zone_idx = 0;
int ret;
if (WARN_ON_ONCE(!vblk->zone_sectors))
return -EOPNOTSUPP;
report = virtblk_alloc_report_buffer(vblk, nr_zones, &buflen);
if (!report)
return -ENOMEM;
mutex_lock(&vblk->vdev_mutex);
if (!vblk->vdev) {
ret = -ENXIO;
goto fail_report;
}
while (zone_idx < nr_zones && sector < get_capacity(vblk->disk)) {
memset(report, 0, buflen);
ret = virtblk_submit_zone_report(vblk, (char *)report,
buflen, sector);
if (ret)
goto fail_report;
nz = min_t(u64, virtio64_to_cpu(vblk->vdev, report->nr_zones),
nr_zones);
if (!nz)
break;
for (i = 0; i < nz && zone_idx < nr_zones; i++) {
ret = virtblk_parse_zone(vblk, &report->zones[i],
zone_idx, cb, data);
if (ret)
goto fail_report;
sector = virtio64_to_cpu(vblk->vdev,
report->zones[i].z_start) +
vblk->zone_sectors;
zone_idx++;
}
}
if (zone_idx > 0)
ret = zone_idx;
else
ret = -EINVAL;
fail_report:
mutex_unlock(&vblk->vdev_mutex);
kvfree(report);
return ret;
}
static void virtblk_revalidate_zones(struct virtio_blk *vblk)
{
u8 model;
virtio_cread(vblk->vdev, struct virtio_blk_config,
zoned.model, &model);
switch (model) {
default:
dev_err(&vblk->vdev->dev, "unknown zone model %d\n", model);
fallthrough;
case VIRTIO_BLK_Z_NONE:
case VIRTIO_BLK_Z_HA:
disk_set_zoned(vblk->disk, BLK_ZONED_NONE);
return;
case VIRTIO_BLK_Z_HM:
WARN_ON_ONCE(!vblk->zone_sectors);
if (!blk_revalidate_disk_zones(vblk->disk, NULL))
set_capacity_and_notify(vblk->disk, 0);
}
}
static int virtblk_probe_zoned_device(struct virtio_device *vdev,
struct virtio_blk *vblk,
struct request_queue *q)
{
u32 v, wg;
u8 model;
virtio_cread(vdev, struct virtio_blk_config,
zoned.model, &model);
switch (model) {
case VIRTIO_BLK_Z_NONE:
case VIRTIO_BLK_Z_HA:
/* Present the host-aware device as non-zoned */
return 0;
case VIRTIO_BLK_Z_HM:
break;
default:
dev_err(&vdev->dev, "unsupported zone model %d\n", model);
return -EINVAL;
}
dev_dbg(&vdev->dev, "probing host-managed zoned device\n");
disk_set_zoned(vblk->disk, BLK_ZONED_HM);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
virtio_cread(vdev, struct virtio_blk_config,
zoned.max_open_zones, &v);
disk_set_max_open_zones(vblk->disk, v);
dev_dbg(&vdev->dev, "max open zones = %u\n", v);
virtio_cread(vdev, struct virtio_blk_config,
zoned.max_active_zones, &v);
disk_set_max_active_zones(vblk->disk, v);
dev_dbg(&vdev->dev, "max active zones = %u\n", v);
virtio_cread(vdev, struct virtio_blk_config,
zoned.write_granularity, &wg);
if (!wg) {
dev_warn(&vdev->dev, "zero write granularity reported\n");
return -ENODEV;
}
blk_queue_physical_block_size(q, wg);
blk_queue_io_min(q, wg);
dev_dbg(&vdev->dev, "write granularity = %u\n", wg);
/*
* virtio ZBD specification doesn't require zones to be a power of
* two sectors in size, but the code in this driver expects that.
*/
virtio_cread(vdev, struct virtio_blk_config, zoned.zone_sectors,
&vblk->zone_sectors);
if (vblk->zone_sectors == 0 || !is_power_of_2(vblk->zone_sectors)) {
dev_err(&vdev->dev,
"zoned device with non power of two zone size %u\n",
vblk->zone_sectors);
return -ENODEV;
}
blk_queue_chunk_sectors(q, vblk->zone_sectors);
dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors);
if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
dev_warn(&vblk->vdev->dev,
"ignoring negotiated F_DISCARD for zoned device\n");
blk_queue_max_discard_sectors(q, 0);
}
virtio_cread(vdev, struct virtio_blk_config,
zoned.max_append_sectors, &v);
if (!v) {
dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
return -ENODEV;
}
if ((v << SECTOR_SHIFT) < wg) {
dev_err(&vdev->dev,
"write granularity %u exceeds max_append_sectors %u limit\n",
wg, v);
return -ENODEV;
}
blk_queue_max_zone_append_sectors(q, v);
dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
return blk_revalidate_disk_zones(vblk->disk, NULL);
}
#else
/*
* Zoned block device support is not configured in this kernel.
* Host-managed zoned devices can't be supported, but others are
* good to go as regular block devices.
*/
#define virtblk_report_zones NULL
static inline void virtblk_revalidate_zones(struct virtio_blk *vblk)
{
}
static inline int virtblk_probe_zoned_device(struct virtio_device *vdev,
struct virtio_blk *vblk, struct request_queue *q)
{
u8 model;
virtio_cread(vdev, struct virtio_blk_config, zoned.model, &model);
if (model == VIRTIO_BLK_Z_HM) {
dev_err(&vdev->dev,
"virtio_blk: zoned devices are not supported");
return -EOPNOTSUPP;
}
return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */
/* Return the ID (serial number) string for *disk in *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
struct virtio_blk *vblk = disk->private_data;
struct request_queue *q = vblk->disk->queue;
struct request *req;
struct virtblk_req *vbr;
int err;
req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
return PTR_ERR(req);
vbr = blk_mq_rq_to_pdu(req);
vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
vbr->out_hdr.sector = 0;
err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
if (err)
goto out;
blk_execute_rq(req, false);
err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status));
out:
blk_mq_free_request(req);
return err;
}
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
struct virtio_blk *vblk = bd->bd_disk->private_data;
int ret = 0;
mutex_lock(&vblk->vdev_mutex);
if (!vblk->vdev) {
ret = -ENXIO;
goto out;
}
/* see if the host passed in geometry config */
if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
virtio_cread(vblk->vdev, struct virtio_blk_config,
geometry.cylinders, &geo->cylinders);
virtio_cread(vblk->vdev, struct virtio_blk_config,
geometry.heads, &geo->heads);
virtio_cread(vblk->vdev, struct virtio_blk_config,
geometry.sectors, &geo->sectors);
} else {
/* some standard values, similar to sd */
geo->heads = 1 << 6;
geo->sectors = 1 << 5;
geo->cylinders = get_capacity(bd->bd_disk) >> 11;
}
out:
mutex_unlock(&vblk->vdev_mutex);
return ret;
}
static void virtblk_free_disk(struct gendisk *disk)
{
struct virtio_blk *vblk = disk->private_data;
ida_free(&vd_index_ida, vblk->index);
mutex_destroy(&vblk->vdev_mutex);
kfree(vblk);
}
static const struct block_device_operations virtblk_fops = {
.owner = THIS_MODULE,
.getgeo = virtblk_getgeo,
.free_disk = virtblk_free_disk,
.report_zones = virtblk_report_zones,
};
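/*
 * With PART_BITS == 4, each disk reserves 16 minor numbers (the disk itself
 * plus up to 15 partitions): index 0 -> minor 0, index 1 -> minor 16, ...
 */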
static int index_to_minor(int index)
{
return index << PART_BITS;
}
static int minor_to_index(int minor)
{
return minor >> PART_BITS;
}
static ssize_t serial_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
int err;
/* sysfs gives us a PAGE_SIZE buffer */
BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);
buf[VIRTIO_BLK_ID_BYTES] = '\0';
err = virtblk_get_id(disk, buf);
if (!err)
return strlen(buf);
if (err == -EIO) /* Unsupported? Make it empty. */
return 0;
return err;
}
static DEVICE_ATTR_RO(serial);
/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
struct virtio_device *vdev = vblk->vdev;
struct request_queue *q = vblk->disk->queue;
char cap_str_2[10], cap_str_10[10];
unsigned long long nblocks;
u64 capacity;
/* Host must always specify the capacity. */
virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
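	/* capacity is in 512-byte sectors; convert to logical blocks */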
nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);
string_get_size(nblocks, queue_logical_block_size(q),
STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
string_get_size(nblocks, queue_logical_block_size(q),
STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
dev_notice(&vdev->dev,
"[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
vblk->disk->disk_name,
resize ? "new size: " : "",
nblocks,
queue_logical_block_size(q),
cap_str_10,
cap_str_2);
set_capacity_and_notify(vblk->disk, capacity);
}
static void virtblk_config_changed_work(struct work_struct *work)
{
struct virtio_blk *vblk =
container_of(work, struct virtio_blk, config_work);
virtblk_revalidate_zones(vblk);
virtblk_update_capacity(vblk, true);
}
static void virtblk_config_changed(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
queue_work(virtblk_wq, &vblk->config_work);
}
static int init_vq(struct virtio_blk *vblk)
{
int err;
int i;
vq_callback_t **callbacks;
const char **names;
struct virtqueue **vqs;
unsigned short num_vqs;
unsigned int num_poll_vqs;
struct virtio_device *vdev = vblk->vdev;
struct irq_affinity desc = { 0, };
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
struct virtio_blk_config, num_queues,
&num_vqs);
if (err)
num_vqs = 1;
if (!err && !num_vqs) {
dev_err(&vdev->dev, "MQ advertised but zero queues reported\n");
return -EINVAL;
}
num_vqs = min_t(unsigned int,
min_not_zero(num_request_queues, nr_cpu_ids),
num_vqs);
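	/* Reserve at least one virtqueue with a callback for regular I/O. */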
num_poll_vqs = min_t(unsigned int, poll_queues, num_vqs - 1);
vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs;
vblk->io_queues[HCTX_TYPE_READ] = 0;
vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;
dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n",
vblk->io_queues[HCTX_TYPE_DEFAULT],
vblk->io_queues[HCTX_TYPE_READ],
vblk->io_queues[HCTX_TYPE_POLL]);
vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
if (!vblk->vqs)
return -ENOMEM;
names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
if (!names || !callbacks || !vqs) {
err = -ENOMEM;
goto out;
}
for (i = 0; i < num_vqs - num_poll_vqs; i++) {
callbacks[i] = virtblk_done;
snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
names[i] = vblk->vqs[i].name;
}
for (; i < num_vqs; i++) {
callbacks[i] = NULL;
snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%d", i);
names[i] = vblk->vqs[i].name;
}
/* Discover virtqueues and write information to configuration. */
err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
if (err)
goto out;
for (i = 0; i < num_vqs; i++) {
spin_lock_init(&vblk->vqs[i].lock);
vblk->vqs[i].vq = vqs[i];
}
vblk->num_vqs = num_vqs;
out:
kfree(vqs);
kfree(callbacks);
kfree(names);
if (err)
kfree(vblk->vqs);
return err;
}
/*
* Legacy naming scheme used for virtio devices. We are stuck with it for
* virtio blk but don't ever use it for any new driver.
*/
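/*
 * For example: index 0 -> "vda", 25 -> "vdz", 26 -> "vdaa", 701 -> "vdzz",
 * 702 -> "vdaaa" (bijective base-26, like spreadsheet column names).
 */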
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
const int base = 'z' - 'a' + 1;
char *begin = buf + strlen(prefix);
char *end = buf + buflen;
char *p;
int unit;
p = end - 1;
*p = '\0';
unit = base;
do {
if (p == begin)
return -EINVAL;
*--p = 'a' + (index % unit);
index = (index / unit) - 1;
} while (index >= 0);
memmove(begin, p, end - p);
memcpy(buf, prefix, strlen(prefix));
return 0;
}
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
u8 writeback;
int err;
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
struct virtio_blk_config, wce,
&writeback);
/*
* If WCE is not configurable and flush is not available,
* assume no writeback cache is in use.
*/
if (err)
writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);
return writeback;
}
static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
u8 writeback = virtblk_get_cache_mode(vdev);
struct virtio_blk *vblk = vdev->priv;
blk_queue_write_cache(vblk->disk->queue, writeback, false);
}
static const char *const virtblk_cache_types[] = {
"write through", "write back"
};
static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct gendisk *disk = dev_to_disk(dev);
struct virtio_blk *vblk = disk->private_data;
struct virtio_device *vdev = vblk->vdev;
int i;
BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
i = sysfs_match_string(virtblk_cache_types, buf);
if (i < 0)
return i;
virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
virtblk_update_cache_mode(vdev);
return count;
}
static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
struct virtio_blk *vblk = disk->private_data;
u8 writeback = virtblk_get_cache_mode(vblk->vdev);
BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
return sysfs_emit(buf, "%s\n", virtblk_cache_types[writeback]);
}
static DEVICE_ATTR_RW(cache_type);
static struct attribute *virtblk_attrs[] = {
&dev_attr_serial.attr,
&dev_attr_cache_type.attr,
NULL,
};
static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct gendisk *disk = dev_to_disk(dev);
struct virtio_blk *vblk = disk->private_data;
struct virtio_device *vdev = vblk->vdev;
if (a == &dev_attr_cache_type.attr &&
!virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
return S_IRUGO;
return a->mode;
}
static const struct attribute_group virtblk_attr_group = {
.attrs = virtblk_attrs,
.is_visible = virtblk_attrs_are_visible,
};
static const struct attribute_group *virtblk_attr_groups[] = {
&virtblk_attr_group,
NULL,
};
static void virtblk_map_queues(struct blk_mq_tag_set *set)
{
struct virtio_blk *vblk = set->driver_data;
int i, qoff;
for (i = 0, qoff = 0; i < set->nr_maps; i++) {
struct blk_mq_queue_map *map = &set->map[i];
map->nr_queues = vblk->io_queues[i];
map->queue_offset = qoff;
qoff += map->nr_queues;
if (map->nr_queues == 0)
continue;
/*
* Regular queues have interrupts and hence CPU affinity is
* defined by the core virtio code, but polling queues have
* no interrupts so we let the block layer assign CPU affinity.
*/
if (i == HCTX_TYPE_POLL)
blk_mq_map_queues(&set->map[i]);
else
blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
}
}
static void virtblk_complete_batch(struct io_comp_batch *iob)
{
struct request *req;
rq_list_for_each(&iob->req_list, req) {
virtblk_unmap_data(req, blk_mq_rq_to_pdu(req));
virtblk_cleanup_cmd(req);
}
blk_mq_end_request_batch(iob);
}
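/*
 * Reap completions from a poll virtqueue without interrupts; completed
 * requests are batched where possible, and stopped hw queues are restarted
 * once progress has been made.
 */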
static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct virtio_blk *vblk = hctx->queue->queuedata;
struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
struct virtblk_req *vbr;
unsigned long flags;
unsigned int len;
int found = 0;
spin_lock_irqsave(&vq->lock, flags);
while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
struct request *req = blk_mq_rq_from_pdu(vbr);
found++;
if (!blk_mq_complete_request_remote(req) &&
!blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
virtblk_complete_batch))
virtblk_request_done(req);
}
if (found)
blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
spin_unlock_irqrestore(&vq->lock, flags);
return found;
}
static const struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
.queue_rqs = virtio_queue_rqs,
.commit_rqs = virtio_commit_rqs,
.complete = virtblk_request_done,
.map_queues = virtblk_map_queues,
.poll = virtblk_poll,
};
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
static int virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
struct request_queue *q;
int err, index;
u32 v, blk_size, max_size, sg_elems, opt_io_size;
u32 max_discard_segs = 0;
u32 discard_granularity = 0;
u16 min_io_size;
u8 physical_block_exp, alignment_offset;
unsigned int queue_depth;
if (!vdev->config->get) {
dev_err(&vdev->dev, "%s failure: config access disabled\n",
__func__);
return -EINVAL;
}
err = ida_alloc_range(&vd_index_ida, 0,
minor_to_index(1 << MINORBITS) - 1, GFP_KERNEL);
if (err < 0)
goto out;
index = err;
/* We need to know how many segments before we allocate. */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
struct virtio_blk_config, seg_max,
&sg_elems);
/* We need at least one SG element, whatever they say. */
if (err || !sg_elems)
sg_elems = 1;
	/* Prevent integer overflows and honor max vq size; the -2 leaves room for the out and in headers */
sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);
vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
if (!vblk) {
err = -ENOMEM;
goto out_free_index;
}
mutex_init(&vblk->vdev_mutex);
vblk->vdev = vdev;
INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
err = init_vq(vblk);
if (err)
goto out_free_vblk;
/* Default queue sizing is to fill the ring. */
if (!virtblk_queue_depth) {
queue_depth = vblk->vqs[0].vq->num_free;
/* ... but without indirect descs, we use 2 descs per req */
if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
queue_depth /= 2;
} else {
queue_depth = virtblk_queue_depth;
}
memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
vblk->tag_set.ops = &virtio_mq_ops;
vblk->tag_set.queue_depth = queue_depth;
vblk->tag_set.numa_node = NUMA_NO_NODE;
vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
vblk->tag_set.cmd_size =
sizeof(struct virtblk_req) +
sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
vblk->tag_set.driver_data = vblk;
vblk->tag_set.nr_hw_queues = vblk->num_vqs;
vblk->tag_set.nr_maps = 1;
if (vblk->io_queues[HCTX_TYPE_POLL])
vblk->tag_set.nr_maps = 3;
err = blk_mq_alloc_tag_set(&vblk->tag_set);
if (err)
goto out_free_vq;
vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk);
if (IS_ERR(vblk->disk)) {
err = PTR_ERR(vblk->disk);
goto out_free_tags;
}
q = vblk->disk->queue;
virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
vblk->disk->major = major;
vblk->disk->first_minor = index_to_minor(index);
vblk->disk->minors = 1 << PART_BITS;
vblk->disk->private_data = vblk;
vblk->disk->fops = &virtblk_fops;
vblk->index = index;
/* configure queue flush support */
virtblk_update_cache_mode(vdev);
/* If disk is read-only in the host, the guest should obey */
if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
set_disk_ro(vblk->disk, 1);
/* We can handle whatever the host told us to handle. */
blk_queue_max_segments(q, sg_elems);
/* No real sector limit. */
blk_queue_max_hw_sectors(q, UINT_MAX);
max_size = virtio_max_dma_size(vdev);
/* Host can optionally specify maximum segment size and number of
* segments. */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
struct virtio_blk_config, size_max, &v);
if (!err)
max_size = min(max_size, v);
blk_queue_max_segment_size(q, max_size);
/* Host can optionally specify the block size of the device */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
struct virtio_blk_config, blk_size,
&blk_size);
if (!err) {
err = blk_validate_block_size(blk_size);
if (err) {
dev_err(&vdev->dev,
"virtio_blk: invalid block size: 0x%x\n",
blk_size);
goto out_cleanup_disk;
}
blk_queue_logical_block_size(q, blk_size);
} else
blk_size = queue_logical_block_size(q);
/* Use topology information if available */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, physical_block_exp,
&physical_block_exp);
if (!err && physical_block_exp)
blk_queue_physical_block_size(q,
blk_size * (1 << physical_block_exp));
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, alignment_offset,
&alignment_offset);
if (!err && alignment_offset)
blk_queue_alignment_offset(q, blk_size * alignment_offset);
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, min_io_size,
&min_io_size);
if (!err && min_io_size)
blk_queue_io_min(q, blk_size * min_io_size);
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, opt_io_size,
&opt_io_size);
if (!err && opt_io_size)
blk_queue_io_opt(q, blk_size * opt_io_size);
if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
virtio_cread(vdev, struct virtio_blk_config,
discard_sector_alignment, &discard_granularity);
virtio_cread(vdev, struct virtio_blk_config,
max_discard_sectors, &v);
blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);
virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
&max_discard_segs);
}
if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
virtio_cread(vdev, struct virtio_blk_config,
max_write_zeroes_sectors, &v);
blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
}
/* The discard and secure erase limits are combined since the Linux
* block layer uses the same limit for both commands.
*
* If both VIRTIO_BLK_F_SECURE_ERASE and VIRTIO_BLK_F_DISCARD features
* are negotiated, we will use the minimum between the limits.
*
* discard sector alignment is set to the minimum between discard_sector_alignment
* and secure_erase_sector_alignment.
*
	 * max discard segments is set to the minimum between max_discard_seg and
* max_secure_erase_seg.
*/
if (virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) {
virtio_cread(vdev, struct virtio_blk_config,
secure_erase_sector_alignment, &v);
/* secure_erase_sector_alignment should not be zero, the device should set a
* valid number of sectors.
*/
if (!v) {
dev_err(&vdev->dev,
"virtio_blk: secure_erase_sector_alignment can't be 0\n");
err = -EINVAL;
goto out_cleanup_disk;
}
discard_granularity = min_not_zero(discard_granularity, v);
virtio_cread(vdev, struct virtio_blk_config,
max_secure_erase_sectors, &v);
/* max_secure_erase_sectors should not be zero, the device should set a
* valid number of sectors.
*/
if (!v) {
dev_err(&vdev->dev,
"virtio_blk: max_secure_erase_sectors can't be 0\n");
err = -EINVAL;
goto out_cleanup_disk;
}
blk_queue_max_secure_erase_sectors(q, v);
virtio_cread(vdev, struct virtio_blk_config,
max_secure_erase_seg, &v);
/* max_secure_erase_seg should not be zero, the device should set a
* valid number of segments
*/
if (!v) {
dev_err(&vdev->dev,
"virtio_blk: max_secure_erase_seg can't be 0\n");
err = -EINVAL;
goto out_cleanup_disk;
}
max_discard_segs = min_not_zero(max_discard_segs, v);
}
if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD) ||
virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) {
/* max_discard_seg and discard_granularity will be 0 only
* if max_discard_seg and discard_sector_alignment fields in the virtio
* config are 0 and VIRTIO_BLK_F_SECURE_ERASE feature is not negotiated.
* In this case, we use default values.
*/
if (!max_discard_segs)
max_discard_segs = sg_elems;
blk_queue_max_discard_segments(q,
min(max_discard_segs, MAX_DISCARD_SEGMENTS));
if (discard_granularity)
q->limits.discard_granularity = discard_granularity << SECTOR_SHIFT;
else
q->limits.discard_granularity = blk_size;
}
virtblk_update_capacity(vblk, false);
virtio_device_ready(vdev);
/*
	 * All steps that follow use the VQs, so they need to be
* placed after the virtio_device_ready() call above.
*/
if (virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED)) {
err = virtblk_probe_zoned_device(vdev, vblk, q);
if (err)
goto out_cleanup_disk;
}
err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
if (err)
goto out_cleanup_disk;
return 0;
out_cleanup_disk:
put_disk(vblk->disk);
out_free_tags:
blk_mq_free_tag_set(&vblk->tag_set);
out_free_vq:
vdev->config->del_vqs(vdev);
kfree(vblk->vqs);
out_free_vblk:
kfree(vblk);
out_free_index:
ida_free(&vd_index_ida, index);
out:
return err;
}
static void virtblk_remove(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
/* Make sure no work handler is accessing the device. */
flush_work(&vblk->config_work);
del_gendisk(vblk->disk);
blk_mq_free_tag_set(&vblk->tag_set);
mutex_lock(&vblk->vdev_mutex);
/* Stop all the virtqueues. */
virtio_reset_device(vdev);
/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
vblk->vdev = NULL;
vdev->config->del_vqs(vdev);
kfree(vblk->vqs);
mutex_unlock(&vblk->vdev_mutex);
put_disk(vblk->disk);
}
#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
/* Ensure we don't receive any more interrupts */
virtio_reset_device(vdev);
/* Make sure no work handler is accessing the device. */
flush_work(&vblk->config_work);
blk_mq_quiesce_queue(vblk->disk->queue);
vdev->config->del_vqs(vdev);
kfree(vblk->vqs);
return 0;
}
static int virtblk_restore(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
int ret;
ret = init_vq(vdev->priv);
if (ret)
return ret;
virtio_device_ready(vdev);
blk_mq_unquiesce_queue(vblk->disk->queue);
return 0;
}
#endif
static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
{ 0 },
};
static unsigned int features_legacy[] = {
VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
VIRTIO_BLK_F_SECURE_ERASE,
};
static unsigned int features[] = {
VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
VIRTIO_BLK_F_SECURE_ERASE, VIRTIO_BLK_F_ZONED,
};
static struct virtio_driver virtio_blk = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.feature_table_legacy = features_legacy,
.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtblk_probe,
.remove = virtblk_remove,
.config_changed = virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
.freeze = virtblk_freeze,
.restore = virtblk_restore,
#endif
};
static int __init virtio_blk_init(void)
{
int error;
virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
if (!virtblk_wq)
return -ENOMEM;
major = register_blkdev(0, "virtblk");
if (major < 0) {
error = major;
goto out_destroy_workqueue;
}
error = register_virtio_driver(&virtio_blk);
if (error)
goto out_unregister_blkdev;
return 0;
out_unregister_blkdev:
unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
destroy_workqueue(virtblk_wq);
return error;
}
static void __exit virtio_blk_fini(void)
{
unregister_virtio_driver(&virtio_blk);
unregister_blkdev(major, "virtblk");
destroy_workqueue(virtblk_wq);
}
module_init(virtio_blk_init);
module_exit(virtio_blk_fini);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/block/virtio_blk.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Network block device - make block devices work over TCP
*
* Note that you can not swap over this thing, yet. Seems to work but
* deadlocks sometimes - you can not swap over TCP in general.
*
* Copyright 1997-2000, 2008 Pavel Machek <[email protected]>
* Parts copyright 2001 Steven Whitehouse <[email protected]>
*
* (part of code stolen from loop.c)
*/
#define pr_fmt(fmt) "nbd: " fmt
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>
#include <linux/uaccess.h>
#include <asm/types.h>
#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>
#define CREATE_TRACE_POINTS
#include <trace/events/nbd.h>
static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static struct workqueue_struct *nbd_del_wq;
static int nbd_total_devices = 0;
struct nbd_sock {
struct socket *sock;
struct mutex tx_lock;
struct request *pending;
int sent;
bool dead;
int fallback_index;
int cookie;
};
struct recv_thread_args {
struct work_struct work;
struct nbd_device *nbd;
int index;
};
struct link_dead_args {
struct work_struct work;
int index;
};
#define NBD_RT_TIMEDOUT 0
#define NBD_RT_DISCONNECT_REQUESTED 1
#define NBD_RT_DISCONNECTED 2
#define NBD_RT_HAS_PID_FILE 3
#define NBD_RT_HAS_CONFIG_REF 4
#define NBD_RT_BOUND 5
#define NBD_RT_DISCONNECT_ON_CLOSE 6
#define NBD_RT_HAS_BACKEND_FILE 7
#define NBD_DESTROY_ON_DISCONNECT 0
#define NBD_DISCONNECT_REQUESTED 1
struct nbd_config {
u32 flags;
unsigned long runtime_flags;
u64 dead_conn_timeout;
struct nbd_sock **socks;
int num_connections;
atomic_t live_connections;
wait_queue_head_t conn_wait;
atomic_t recv_threads;
wait_queue_head_t recv_wq;
unsigned int blksize_bits;
loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbg_dir;
#endif
};
static inline unsigned int nbd_blksize(struct nbd_config *config)
{
return 1u << config->blksize_bits;
}
struct nbd_device {
struct blk_mq_tag_set tag_set;
int index;
refcount_t config_refs;
refcount_t refs;
struct nbd_config *config;
struct mutex config_lock;
struct gendisk *disk;
struct workqueue_struct *recv_workq;
struct work_struct remove_work;
struct list_head list;
struct task_struct *task_setup;
unsigned long flags;
pid_t pid; /* pid of nbd-client, if attached */
char *backend;
};
#define NBD_CMD_REQUEUED 1
/*
 * This flag will be set if nbd_queue_rq() succeeds, and will be checked and
 * cleared on completion. Both setting and clearing of the flag are protected
* by cmd->lock.
*/
#define NBD_CMD_INFLIGHT 2
struct nbd_cmd {
struct nbd_device *nbd;
struct mutex lock;
int index;
int cookie;
int retries;
blk_status_t status;
unsigned long flags;
u32 cmd_cookie;
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif
#define nbd_name(nbd) ((nbd)->disk->disk_name)
#define NBD_DEF_BLKSIZE_BITS 10
static unsigned int nbds_max = 16;
static int max_part = 16;
static int part_shift;
static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
static void nbd_disconnect_and_put(struct nbd_device *nbd);
static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
return disk_to_dev(nbd->disk);
}
static void nbd_requeue_cmd(struct nbd_cmd *cmd)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
blk_mq_requeue_request(req, true);
}
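/*
 * A 64-bit request handle packs the per-command reissue cookie into the
 * upper 32 bits and the unique blk-mq tag into the lower 32 bits, so a
 * reply that arrives for a stale (already requeued) command can be
 * detected by a cookie mismatch.
 */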
#define NBD_COOKIE_BITS 32
static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
u32 tag = blk_mq_unique_tag(req);
u64 cookie = cmd->cmd_cookie;
return (cookie << NBD_COOKIE_BITS) | tag;
}
static u32 nbd_handle_to_tag(u64 handle)
{
return (u32)handle;
}
static u32 nbd_handle_to_cookie(u64 handle)
{
return (u32)(handle >> NBD_COOKIE_BITS);
}
static const char *nbdcmd_to_ascii(int cmd)
{
switch (cmd) {
case NBD_CMD_READ: return "read";
case NBD_CMD_WRITE: return "write";
case NBD_CMD_DISC: return "disconnect";
case NBD_CMD_FLUSH: return "flush";
case NBD_CMD_TRIM: return "trim/discard";
}
return "invalid";
}
static ssize_t pid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
return sprintf(buf, "%d\n", nbd->pid);
}
static const struct device_attribute pid_attr = {
.attr = { .name = "pid", .mode = 0444},
.show = pid_show,
};
static ssize_t backend_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
return sprintf(buf, "%s\n", nbd->backend ?: "");
}
static const struct device_attribute backend_attr = {
.attr = { .name = "backend", .mode = 0444},
.show = backend_show,
};
static void nbd_dev_remove(struct nbd_device *nbd)
{
struct gendisk *disk = nbd->disk;
del_gendisk(disk);
put_disk(disk);
blk_mq_free_tag_set(&nbd->tag_set);
/*
* Remove from idr after del_gendisk() completes, so if the same ID is
* reused, the following add_disk() will succeed.
*/
mutex_lock(&nbd_index_mutex);
idr_remove(&nbd_index_idr, nbd->index);
mutex_unlock(&nbd_index_mutex);
destroy_workqueue(nbd->recv_workq);
kfree(nbd);
}
static void nbd_dev_remove_work(struct work_struct *work)
{
nbd_dev_remove(container_of(work, struct nbd_device, remove_work));
}
static void nbd_put(struct nbd_device *nbd)
{
if (!refcount_dec_and_test(&nbd->refs))
return;
	/* Call del_gendisk() asynchronously to prevent deadlock */
if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
queue_work(nbd_del_wq, &nbd->remove_work);
else
nbd_dev_remove(nbd);
}
static int nbd_disconnected(struct nbd_config *config)
{
return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
}
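/*
 * Mark a socket dead: shut it down, drop it from the live-connection count,
 * and, if @notify is set and we are not already disconnecting, queue work
 * to handle the dead link.
 */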
static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
int notify)
{
if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
struct link_dead_args *args;
args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
if (args) {
INIT_WORK(&args->work, nbd_dead_link_work);
args->index = nbd->index;
queue_work(system_wq, &args->work);
}
}
if (!nsock->dead) {
kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
if (atomic_dec_return(&nbd->config->live_connections) == 0) {
if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
&nbd->config->runtime_flags)) {
set_bit(NBD_RT_DISCONNECTED,
&nbd->config->runtime_flags);
dev_info(nbd_to_dev(nbd),
"Disconnected due to user request.\n");
}
}
}
nsock->dead = true;
nsock->pending = NULL;
nsock->sent = 0;
}
static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
loff_t blksize)
{
if (!blksize)
blksize = 1u << NBD_DEF_BLKSIZE_BITS;
if (blk_validate_block_size(blksize))
return -EINVAL;
if (bytesize < 0)
return -EINVAL;
nbd->config->bytesize = bytesize;
nbd->config->blksize_bits = __ffs(blksize);
if (!nbd->pid)
return 0;
if (nbd->config->flags & NBD_FLAG_SEND_TRIM) {
nbd->disk->queue->limits.discard_granularity = blksize;
blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
}
blk_queue_logical_block_size(nbd->disk->queue, blksize);
blk_queue_physical_block_size(nbd->disk->queue, blksize);
if (max_part)
set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
if (!set_capacity_and_notify(nbd->disk, bytesize >> 9))
kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
return 0;
}
static void nbd_complete_rq(struct request *req)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
cmd->status ? "failed" : "done");
blk_mq_end_request(req, cmd->status);
}
/*
 * Forcibly shut down the sockets, causing all listeners to error out.
*/
static void sock_shutdown(struct nbd_device *nbd)
{
struct nbd_config *config = nbd->config;
int i;
if (config->num_connections == 0)
return;
if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
return;
for (i = 0; i < config->num_connections; i++) {
struct nbd_sock *nsock = config->socks[i];
mutex_lock(&nsock->tx_lock);
nbd_mark_nsock_dead(nbd, nsock, 0);
mutex_unlock(&nsock->tx_lock);
}
dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}
static u32 req_to_nbd_cmd_type(struct request *req)
{
switch (req_op(req)) {
case REQ_OP_DISCARD:
return NBD_CMD_TRIM;
case REQ_OP_FLUSH:
return NBD_CMD_FLUSH;
case REQ_OP_WRITE:
return NBD_CMD_WRITE;
case REQ_OP_READ:
return NBD_CMD_READ;
default:
return U32_MAX;
}
}
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
struct nbd_device *nbd = cmd->nbd;
struct nbd_config *config;
if (!mutex_trylock(&cmd->lock))
return BLK_EH_RESET_TIMER;
if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
mutex_unlock(&cmd->lock);
return BLK_EH_DONE;
}
if (!refcount_inc_not_zero(&nbd->config_refs)) {
cmd->status = BLK_STS_TIMEOUT;
__clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
mutex_unlock(&cmd->lock);
goto done;
}
config = nbd->config;
if (config->num_connections > 1 ||
(config->num_connections == 1 && nbd->tag_set.timeout)) {
dev_err_ratelimited(nbd_to_dev(nbd),
"Connection timed out, retrying (%d/%d alive)\n",
atomic_read(&config->live_connections),
config->num_connections);
/*
 * Hooray, we have more connections: requeue this IO and the
 * submit path will put it on a real connection. Or, if only one
 * connection is configured, the submit path will wait until
 * a new connection is reconfigured or until the dead timeout
 * expires.
 */
if (config->socks) {
if (cmd->index < config->num_connections) {
struct nbd_sock *nsock =
config->socks[cmd->index];
mutex_lock(&nsock->tx_lock);
/* We can have multiple outstanding requests, and
 * we don't want to mark the nsock dead if we've
 * already reconnected with a new socket, so
 * only mark it dead if it's the same socket we
 * were sent out on.
 */
if (cmd->cookie == nsock->cookie)
nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock);
}
mutex_unlock(&cmd->lock);
nbd_requeue_cmd(cmd);
nbd_config_put(nbd);
return BLK_EH_DONE;
}
}
if (!nbd->tag_set.timeout) {
/*
* Userspace sets timeout=0 to disable socket disconnection,
* so just warn and reset the timer.
*/
struct nbd_sock *nsock = config->socks[cmd->index];
cmd->retries++;
dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
(unsigned long long)blk_rq_pos(req) << 9,
blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);
mutex_lock(&nsock->tx_lock);
if (cmd->cookie != nsock->cookie) {
nbd_requeue_cmd(cmd);
mutex_unlock(&nsock->tx_lock);
mutex_unlock(&cmd->lock);
nbd_config_put(nbd);
return BLK_EH_DONE;
}
mutex_unlock(&nsock->tx_lock);
mutex_unlock(&cmd->lock);
nbd_config_put(nbd);
return BLK_EH_RESET_TIMER;
}
dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
cmd->status = BLK_STS_IOERR;
__clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
mutex_unlock(&cmd->lock);
sock_shutdown(nbd);
nbd_config_put(nbd);
done:
blk_mq_complete_request(req);
return BLK_EH_DONE;
}
/*
 * Send or receive a packet. Returns a positive value on success and a
 * negative value on failure, and never returns 0.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
struct iov_iter *iter, int msg_flags, int *sent)
{
struct nbd_config *config = nbd->config;
struct socket *sock = config->socks[index]->sock;
int result;
struct msghdr msg;
unsigned int noreclaim_flag;
if (unlikely(!sock)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Attempted %s on closed socket in sock_xmit\n",
(send ? "send" : "recv"));
return -EINVAL;
}
msg.msg_iter = *iter;
noreclaim_flag = memalloc_noreclaim_save();
do {
sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
sock->sk->sk_use_task_frag = false;
msg.msg_name = NULL;
msg.msg_namelen = 0;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = msg_flags | MSG_NOSIGNAL;
if (send)
result = sock_sendmsg(sock, &msg);
else
result = sock_recvmsg(sock, &msg, msg.msg_flags);
if (result <= 0) {
if (result == 0)
result = -EPIPE; /* short read */
break;
}
if (sent)
*sent += result;
} while (msg_data_left(&msg));
memalloc_noreclaim_restore(noreclaim_flag);
return result;
}
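/*
 * A note on the memalloc bracket above: if this nbd device backs swap or
 * dirty page cache, an allocation inside sock_sendmsg()/sock_recvmsg()
 * must not recurse into reclaim that would issue more I/O to the same
 * device. PF_MEMALLOC (via memalloc_noreclaim_save()) plus
 * __GFP_MEMALLOC, together with the sk_set_memalloc() calls done where
 * sockets are attached, lets the networking path dip into memory
 * reserves instead.
 */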
/*
 * Different settings for sk->sk_sndtimeo can result in different return
 * values (-EINTR vs. -ERESTARTSYS) if there is a signal pending when we
 * enter sendmsg, so treat both as an interruption.
 */
static inline int was_interrupted(int result)
{
return result == -ERESTARTSYS || result == -EINTR;
}
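/*
 * Sketch of the request wire format assembled by nbd_send_cmd() below
 * (28 bytes total, per the BUILD_BUG_ON in nbd_init(); all fields
 * big-endian). Field names follow the uses of struct nbd_request in this
 * file; the authoritative declaration lives in the UAPI header:
 *
 *   __be32 magic;   // NBD_REQUEST_MAGIC
 *   __be32 type;    // NBD_CMD_* | NBD_CMD_FLAG_*
 *   __be64 cookie;  // handle echoed back by the server in its reply
 *   __be64 from;    // byte offset, blk_rq_pos(req) << 9
 *   __be32 len;     // payload length in bytes
 */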
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
struct nbd_config *config = nbd->config;
struct nbd_sock *nsock = config->socks[index];
int result;
struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
struct iov_iter from;
unsigned long size = blk_rq_bytes(req);
struct bio *bio;
u64 handle;
u32 type;
u32 nbd_cmd_flags = 0;
int sent = nsock->sent, skip = 0;
iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
type = req_to_nbd_cmd_type(req);
if (type == U32_MAX)
return -EIO;
if (rq_data_dir(req) == WRITE &&
(config->flags & NBD_FLAG_READ_ONLY)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Write on read-only\n");
return -EIO;
}
if (req->cmd_flags & REQ_FUA)
nbd_cmd_flags |= NBD_CMD_FLAG_FUA;
/* We did a partial send previously, and we at least sent the whole
* request struct, so just go and send the rest of the pages in the
* request.
*/
if (sent) {
if (sent >= sizeof(request)) {
skip = sent - sizeof(request);
/* initialize handle for tracing purposes */
handle = nbd_cmd_handle(cmd);
goto send_pages;
}
iov_iter_advance(&from, sent);
} else {
cmd->cmd_cookie++;
}
cmd->index = index;
cmd->cookie = nsock->cookie;
cmd->retries = 0;
request.type = htonl(type | nbd_cmd_flags);
if (type != NBD_CMD_FLUSH) {
request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
request.len = htonl(size);
}
handle = nbd_cmd_handle(cmd);
request.cookie = cpu_to_be64(handle);
trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));
dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
req, nbdcmd_to_ascii(type),
(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
result = sock_xmit(nbd, index, 1, &from,
(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
trace_nbd_header_sent(req, handle);
if (result < 0) {
if (was_interrupted(result)) {
/* If we haven't sent anything we can just return BUSY;
 * however, if we have sent something we need to make
 * sure only this req is sent on this socket until it
 * is completely done.
 */
if (sent) {
nsock->pending = req;
nsock->sent = sent;
}
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
return BLK_STS_RESOURCE;
}
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Send control failed (result %d)\n", result);
return -EAGAIN;
}
send_pages:
if (type != NBD_CMD_WRITE)
goto out;
bio = req->bio;
while (bio) {
struct bio *next = bio->bi_next;
struct bvec_iter iter;
struct bio_vec bvec;
bio_for_each_segment(bvec, bio, iter) {
bool is_last = !next && bio_iter_last(bvec, iter);
int flags = is_last ? 0 : MSG_MORE;
dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
req, bvec.bv_len);
iov_iter_bvec(&from, ITER_SOURCE, &bvec, 1, bvec.bv_len);
if (skip) {
if (skip >= iov_iter_count(&from)) {
skip -= iov_iter_count(&from);
continue;
}
iov_iter_advance(&from, skip);
skip = 0;
}
result = sock_xmit(nbd, index, 1, &from, flags, &sent);
if (result < 0) {
if (was_interrupted(result)) {
/* We've already sent the header, we
* have no choice but to set pending and
* return BUSY.
*/
nsock->pending = req;
nsock->sent = sent;
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
return BLK_STS_RESOURCE;
}
dev_err(disk_to_dev(nbd->disk),
"Send data failed (result %d)\n",
result);
return -EAGAIN;
}
/*
* The completion might already have come in,
* so break for the last one instead of letting
* the iterator do it. This prevents use-after-free
* of the bio.
*/
if (is_last)
break;
}
bio = next;
}
out:
trace_nbd_payload_sent(req, handle);
nsock->pending = NULL;
nsock->sent = 0;
return 0;
}
static int nbd_read_reply(struct nbd_device *nbd, int index,
struct nbd_reply *reply)
{
struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
struct iov_iter to;
int result;
reply->magic = 0;
iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply));
result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
if (result < 0) {
if (!nbd_disconnected(nbd->config))
dev_err(disk_to_dev(nbd->disk),
"Receive control failed (result %d)\n", result);
return result;
}
if (ntohl(reply->magic) != NBD_REPLY_MAGIC) {
dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
(unsigned long)ntohl(reply->magic));
return -EPROTO;
}
return 0;
}
/* An ERR_PTR return means something went wrong, inform userspace */
static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
struct nbd_reply *reply)
{
int result;
struct nbd_cmd *cmd;
struct request *req = NULL;
u64 handle;
u16 hwq;
u32 tag;
int ret = 0;
handle = be64_to_cpu(reply->cookie);
tag = nbd_handle_to_tag(handle);
hwq = blk_mq_unique_tag_to_hwq(tag);
if (hwq < nbd->tag_set.nr_hw_queues)
req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
blk_mq_unique_tag_to_tag(tag));
if (!req || !blk_mq_request_started(req)) {
dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
tag, req);
return ERR_PTR(-ENOENT);
}
trace_nbd_header_received(req, handle);
cmd = blk_mq_rq_to_pdu(req);
mutex_lock(&cmd->lock);
if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)",
tag, cmd->status, cmd->flags);
ret = -ENOENT;
goto out;
}
if (cmd->index != index) {
dev_err(disk_to_dev(nbd->disk), "Unexpected reply %d from different sock %d (expected %d)",
tag, index, cmd->index);
ret = -ENOENT;
goto out;
}
if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
ret = -ENOENT;
goto out;
}
if (cmd->status != BLK_STS_OK) {
dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
req);
ret = -ENOENT;
goto out;
}
if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
req);
ret = -ENOENT;
goto out;
}
if (ntohl(reply->error)) {
dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
ntohl(reply->error));
cmd->status = BLK_STS_IOERR;
goto out;
}
dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
if (rq_data_dir(req) != WRITE) {
struct req_iterator iter;
struct bio_vec bvec;
struct iov_iter to;
rq_for_each_segment(bvec, req, iter) {
iov_iter_bvec(&to, ITER_DEST, &bvec, 1, bvec.bv_len);
result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
if (result < 0) {
dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
result);
/*
* If we've disconnected, we need to make sure we
* complete this request, otherwise error out
* and let the timeout stuff handle resubmitting
* this request onto another connection.
*/
if (nbd_disconnected(nbd->config)) {
cmd->status = BLK_STS_IOERR;
goto out;
}
ret = -EIO;
goto out;
}
dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
req, bvec.bv_len);
}
}
out:
trace_nbd_payload_received(req, handle);
mutex_unlock(&cmd->lock);
return ret ? ERR_PTR(ret) : cmd;
}
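/*
 * The 64-bit cookie round-trips everything needed to find the command
 * again. A plausible reading of the helpers used above (they are defined
 * elsewhere): the lower 32 bits carry the blk-mq unique tag (hw queue
 * number plus per-queue tag) and the upper 32 bits carry cmd->cmd_cookie,
 * which is bumped on every fresh send so that a stale reply arriving on
 * a replaced socket cannot complete a reissued request.
 */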
static void recv_work(struct work_struct *work)
{
struct recv_thread_args *args = container_of(work,
struct recv_thread_args,
work);
struct nbd_device *nbd = args->nbd;
struct nbd_config *config = nbd->config;
struct request_queue *q = nbd->disk->queue;
struct nbd_sock *nsock;
struct nbd_cmd *cmd;
struct request *rq;
while (1) {
struct nbd_reply reply;
if (nbd_read_reply(nbd, args->index, &reply))
break;
/*
 * Grab .q_usage_counter so the request pool won't go away; then no
 * request use-after-free is possible during nbd_handle_reply().
 * If the queue is frozen, there won't be any inflight requests, so
 * we don't need to handle the incoming garbage message.
 */
if (!percpu_ref_tryget(&q->q_usage_counter)) {
dev_err(disk_to_dev(nbd->disk), "%s: no io inflight\n",
__func__);
break;
}
cmd = nbd_handle_reply(nbd, args->index, &reply);
if (IS_ERR(cmd)) {
percpu_ref_put(&q->q_usage_counter);
break;
}
rq = blk_mq_rq_from_pdu(cmd);
if (likely(!blk_should_fake_timeout(rq->q))) {
bool complete;
mutex_lock(&cmd->lock);
complete = __test_and_clear_bit(NBD_CMD_INFLIGHT,
&cmd->flags);
mutex_unlock(&cmd->lock);
if (complete)
blk_mq_complete_request(rq);
}
percpu_ref_put(&q->q_usage_counter);
}
nsock = config->socks[args->index];
mutex_lock(&nsock->tx_lock);
nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock);
nbd_config_put(nbd);
atomic_dec(&config->recv_threads);
wake_up(&config->recv_wq);
kfree(args);
}
static bool nbd_clear_req(struct request *req, void *data)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
/* don't abort one completed request */
if (blk_mq_request_completed(req))
return true;
mutex_lock(&cmd->lock);
if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
mutex_unlock(&cmd->lock);
return true;
}
cmd->status = BLK_STS_IOERR;
mutex_unlock(&cmd->lock);
blk_mq_complete_request(req);
return true;
}
static void nbd_clear_que(struct nbd_device *nbd)
{
blk_mq_quiesce_queue(nbd->disk->queue);
blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
blk_mq_unquiesce_queue(nbd->disk->queue);
dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
static int find_fallback(struct nbd_device *nbd, int index)
{
struct nbd_config *config = nbd->config;
int new_index = -1;
struct nbd_sock *nsock = config->socks[index];
int fallback = nsock->fallback_index;
if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
return new_index;
if (config->num_connections <= 1) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Dead connection, failed to find a fallback\n");
return new_index;
}
if (fallback >= 0 && fallback < config->num_connections &&
!config->socks[fallback]->dead)
return fallback;
if (nsock->fallback_index < 0 ||
nsock->fallback_index >= config->num_connections ||
config->socks[nsock->fallback_index]->dead) {
int i;
for (i = 0; i < config->num_connections; i++) {
if (i == index)
continue;
if (!config->socks[i]->dead) {
new_index = i;
break;
}
}
nsock->fallback_index = new_index;
if (new_index < 0) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Dead connection, failed to find a fallback\n");
return new_index;
}
}
new_index = nsock->fallback_index;
return new_index;
}
static int wait_for_reconnect(struct nbd_device *nbd)
{
struct nbd_config *config = nbd->config;
if (!config->dead_conn_timeout)
return 0;
if (!wait_event_timeout(config->conn_wait,
test_bit(NBD_RT_DISCONNECTED,
&config->runtime_flags) ||
atomic_read(&config->live_connections) > 0,
config->dead_conn_timeout))
return 0;
return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
}
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
struct nbd_device *nbd = cmd->nbd;
struct nbd_config *config;
struct nbd_sock *nsock;
int ret;
if (!refcount_inc_not_zero(&nbd->config_refs)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Socks array is empty\n");
return -EINVAL;
}
config = nbd->config;
if (index >= config->num_connections) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Attempted send on invalid socket\n");
nbd_config_put(nbd);
return -EINVAL;
}
cmd->status = BLK_STS_OK;
again:
nsock = config->socks[index];
mutex_lock(&nsock->tx_lock);
if (nsock->dead) {
int old_index = index;
index = find_fallback(nbd, index);
mutex_unlock(&nsock->tx_lock);
if (index < 0) {
if (wait_for_reconnect(nbd)) {
index = old_index;
goto again;
}
/* All the sockets should already be down at this point,
 * we just want to make sure that DISCONNECTED is set so
 * any requests that come in that were queued waiting
 * for the reconnect timer don't trigger the timer again
 * and instead just error out.
 */
sock_shutdown(nbd);
nbd_config_put(nbd);
return -EIO;
}
goto again;
}
/* Handle the case that we have a pending request that was partially
* transmitted that _has_ to be serviced first. We need to call requeue
* here so that it gets put _after_ the request that is already on the
* dispatch list.
*/
blk_mq_start_request(req);
if (unlikely(nsock->pending && nsock->pending != req)) {
nbd_requeue_cmd(cmd);
ret = 0;
goto out;
}
/*
* Some failures are related to the link going down, so anything that
* returns EAGAIN can be retried on a different socket.
*/
ret = nbd_send_cmd(nbd, cmd, index);
/*
 * Access to this flag is protected by cmd->lock, thus it's safe to set
 * the flag after nbd_send_cmd() succeeds in sending the request to the
 * server.
 */
if (!ret)
__set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
else if (ret == -EAGAIN) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Request send failed, requeueing\n");
nbd_mark_nsock_dead(nbd, nsock, 1);
nbd_requeue_cmd(cmd);
ret = 0;
}
out:
mutex_unlock(&nsock->tx_lock);
nbd_config_put(nbd);
return ret;
}
static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
int ret;
/*
 * Since we look at the bios to send the request over the network we
 * need to make sure the completion work doesn't mark this request done
 * before we are done doing our send. This keeps us from dereferencing
 * freed data if we have particularly fast completions (i.e. we get the
 * completion before we exit sock_xmit on the last bvec) or in the case
 * that the server is misbehaving (or there was an error) before we're
 * done sending everything over the wire.
 */
mutex_lock(&cmd->lock);
clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
/* We can be called directly from the user space process, which means we
* could possibly have signals pending so our sendmsg will fail. In
* this case we need to return that we are busy, otherwise error out as
* appropriate.
*/
ret = nbd_handle_cmd(cmd, hctx->queue_num);
if (ret < 0)
ret = BLK_STS_IOERR;
else if (!ret)
ret = BLK_STS_OK;
mutex_unlock(&cmd->lock);
return ret;
}
static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
int *err)
{
struct socket *sock;
*err = 0;
sock = sockfd_lookup(fd, err);
if (!sock)
return NULL;
if (sock->ops->shutdown == sock_no_shutdown) {
dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
*err = -EINVAL;
sockfd_put(sock);
return NULL;
}
return sock;
}
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
bool netlink)
{
struct nbd_config *config = nbd->config;
struct socket *sock;
struct nbd_sock **socks;
struct nbd_sock *nsock;
int err;
/* Arg will be cast to int, check it to avoid overflow */
if (arg > INT_MAX)
return -EINVAL;
sock = nbd_get_socket(nbd, arg, &err);
if (!sock)
return err;
/*
* We need to make sure we don't get any errant requests while we're
* reallocating the ->socks array.
*/
blk_mq_freeze_queue(nbd->disk->queue);
if (!netlink && !nbd->task_setup &&
!test_bit(NBD_RT_BOUND, &config->runtime_flags))
nbd->task_setup = current;
if (!netlink &&
(nbd->task_setup != current ||
test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
dev_err(disk_to_dev(nbd->disk),
"Device being setup by another task");
err = -EBUSY;
goto put_socket;
}
nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
if (!nsock) {
err = -ENOMEM;
goto put_socket;
}
socks = krealloc(config->socks, (config->num_connections + 1) *
sizeof(struct nbd_sock *), GFP_KERNEL);
if (!socks) {
kfree(nsock);
err = -ENOMEM;
goto put_socket;
}
config->socks = socks;
nsock->fallback_index = -1;
nsock->dead = false;
mutex_init(&nsock->tx_lock);
nsock->sock = sock;
nsock->pending = NULL;
nsock->sent = 0;
nsock->cookie = 0;
socks[config->num_connections++] = nsock;
atomic_inc(&config->live_connections);
blk_mq_unfreeze_queue(nbd->disk->queue);
return 0;
put_socket:
blk_mq_unfreeze_queue(nbd->disk->queue);
sockfd_put(sock);
return err;
}
static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
struct nbd_config *config = nbd->config;
struct socket *sock, *old;
struct recv_thread_args *args;
int i;
int err;
sock = nbd_get_socket(nbd, arg, &err);
if (!sock)
return err;
args = kzalloc(sizeof(*args), GFP_KERNEL);
if (!args) {
sockfd_put(sock);
return -ENOMEM;
}
for (i = 0; i < config->num_connections; i++) {
struct nbd_sock *nsock = config->socks[i];
if (!nsock->dead)
continue;
mutex_lock(&nsock->tx_lock);
if (!nsock->dead) {
mutex_unlock(&nsock->tx_lock);
continue;
}
sk_set_memalloc(sock->sk);
if (nbd->tag_set.timeout)
sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
atomic_inc(&config->recv_threads);
refcount_inc(&nbd->config_refs);
old = nsock->sock;
nsock->fallback_index = -1;
nsock->sock = sock;
nsock->dead = false;
INIT_WORK(&args->work, recv_work);
args->index = i;
args->nbd = nbd;
nsock->cookie++;
mutex_unlock(&nsock->tx_lock);
sockfd_put(old);
clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
/* We take the tx_lock in an error path in the recv_work, so we
 * need to queue_work outside of the tx_lock.
 */
queue_work(nbd->recv_workq, &args->work);
atomic_inc(&config->live_connections);
wake_up(&config->conn_wait);
return 0;
}
sockfd_put(sock);
kfree(args);
return -ENOSPC;
}
static void nbd_bdev_reset(struct nbd_device *nbd)
{
if (disk_openers(nbd->disk) > 1)
return;
set_capacity(nbd->disk, 0);
}
static void nbd_parse_flags(struct nbd_device *nbd)
{
struct nbd_config *config = nbd->config;
if (config->flags & NBD_FLAG_READ_ONLY)
set_disk_ro(nbd->disk, true);
else
set_disk_ro(nbd->disk, false);
if (config->flags & NBD_FLAG_SEND_FLUSH) {
if (config->flags & NBD_FLAG_SEND_FUA)
blk_queue_write_cache(nbd->disk->queue, true, true);
else
blk_queue_write_cache(nbd->disk->queue, true, false);
}
else
blk_queue_write_cache(nbd->disk->queue, false, false);
}
static void send_disconnects(struct nbd_device *nbd)
{
struct nbd_config *config = nbd->config;
struct nbd_request request = {
.magic = htonl(NBD_REQUEST_MAGIC),
.type = htonl(NBD_CMD_DISC),
};
struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
struct iov_iter from;
int i, ret;
for (i = 0; i < config->num_connections; i++) {
struct nbd_sock *nsock = config->socks[i];
iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
mutex_lock(&nsock->tx_lock);
ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
if (ret < 0)
dev_err(disk_to_dev(nbd->disk),
"Send disconnect failed %d\n", ret);
mutex_unlock(&nsock->tx_lock);
}
}
static int nbd_disconnect(struct nbd_device *nbd)
{
struct nbd_config *config = nbd->config;
dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
send_disconnects(nbd);
return 0;
}
static void nbd_clear_sock(struct nbd_device *nbd)
{
sock_shutdown(nbd);
nbd_clear_que(nbd);
nbd->task_setup = NULL;
}
static void nbd_config_put(struct nbd_device *nbd)
{
if (refcount_dec_and_mutex_lock(&nbd->config_refs,
&nbd->config_lock)) {
struct nbd_config *config = nbd->config;
nbd_dev_dbg_close(nbd);
invalidate_disk(nbd->disk);
if (nbd->config->bytesize)
kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
&config->runtime_flags))
device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
nbd->pid = 0;
if (test_and_clear_bit(NBD_RT_HAS_BACKEND_FILE,
&config->runtime_flags)) {
device_remove_file(disk_to_dev(nbd->disk), &backend_attr);
kfree(nbd->backend);
nbd->backend = NULL;
}
nbd_clear_sock(nbd);
if (config->num_connections) {
int i;
for (i = 0; i < config->num_connections; i++) {
sockfd_put(config->socks[i]->sock);
kfree(config->socks[i]);
}
kfree(config->socks);
}
kfree(nbd->config);
nbd->config = NULL;
nbd->tag_set.timeout = 0;
nbd->disk->queue->limits.discard_granularity = 0;
blk_queue_max_discard_sectors(nbd->disk->queue, 0);
mutex_unlock(&nbd->config_lock);
nbd_put(nbd);
module_put(THIS_MODULE);
}
}
static int nbd_start_device(struct nbd_device *nbd)
{
struct nbd_config *config = nbd->config;
int num_connections = config->num_connections;
int error = 0, i;
if (nbd->pid)
return -EBUSY;
if (!config->socks)
return -EINVAL;
if (num_connections > 1 &&
!(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
return -EINVAL;
}
blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
nbd->pid = task_pid_nr(current);
nbd_parse_flags(nbd);
error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
if (error) {
dev_err(disk_to_dev(nbd->disk), "device_create_file failed for pid!\n");
return error;
}
set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);
nbd_dev_dbg_init(nbd);
for (i = 0; i < num_connections; i++) {
struct recv_thread_args *args;
args = kzalloc(sizeof(*args), GFP_KERNEL);
if (!args) {
sock_shutdown(nbd);
/*
 * If num_connections is m (m > 2), and the first n
 * (1 < n < m) kzallocs succeed but the (n + 1)th fails,
 * we still have n recv threads running. So add
 * flush_workqueue here to prevent those recv threads from
 * dropping the last config_refs and trying to destroy
 * the workqueue from inside the workqueue.
 */
if (i)
flush_workqueue(nbd->recv_workq);
return -ENOMEM;
}
sk_set_memalloc(config->socks[i]->sock->sk);
if (nbd->tag_set.timeout)
config->socks[i]->sock->sk->sk_sndtimeo =
nbd->tag_set.timeout;
atomic_inc(&config->recv_threads);
refcount_inc(&nbd->config_refs);
INIT_WORK(&args->work, recv_work);
args->nbd = nbd;
args->index = i;
queue_work(nbd->recv_workq, &args->work);
}
return nbd_set_size(nbd, config->bytesize, nbd_blksize(config));
}
static int nbd_start_device_ioctl(struct nbd_device *nbd)
{
struct nbd_config *config = nbd->config;
int ret;
ret = nbd_start_device(nbd);
if (ret)
return ret;
if (max_part)
set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
mutex_unlock(&nbd->config_lock);
ret = wait_event_interruptible(config->recv_wq,
atomic_read(&config->recv_threads) == 0);
if (ret) {
sock_shutdown(nbd);
nbd_clear_que(nbd);
}
flush_workqueue(nbd->recv_workq);
mutex_lock(&nbd->config_lock);
nbd_bdev_reset(nbd);
/* user requested, ignore socket errors */
if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
ret = 0;
if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
ret = -ETIMEDOUT;
return ret;
}
static void nbd_clear_sock_ioctl(struct nbd_device *nbd)
{
blk_mark_disk_dead(nbd->disk);
nbd_clear_sock(nbd);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
&nbd->config->runtime_flags))
nbd_config_put(nbd);
}
static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
{
nbd->tag_set.timeout = timeout * HZ;
if (timeout)
blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
else
blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
}
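/*
 * Example (illustrative): NBD_SET_TIMEOUT with arg == 10 arms a 10 * HZ
 * request timer, after which nbd_xmit_timeout() may shut the connection
 * down. arg == 0 leaves tag_set.timeout at 0: the block layer still runs
 * a 30-second timer, but nbd_xmit_timeout() then only warns and resets
 * it.
 */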
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
unsigned int cmd, unsigned long arg)
{
struct nbd_config *config = nbd->config;
loff_t bytesize;
switch (cmd) {
case NBD_DISCONNECT:
return nbd_disconnect(nbd);
case NBD_CLEAR_SOCK:
nbd_clear_sock_ioctl(nbd);
return 0;
case NBD_SET_SOCK:
return nbd_add_socket(nbd, arg, false);
case NBD_SET_BLKSIZE:
return nbd_set_size(nbd, config->bytesize, arg);
case NBD_SET_SIZE:
return nbd_set_size(nbd, arg, nbd_blksize(config));
case NBD_SET_SIZE_BLOCKS:
if (check_shl_overflow(arg, config->blksize_bits, &bytesize))
return -EINVAL;
return nbd_set_size(nbd, bytesize, nbd_blksize(config));
case NBD_SET_TIMEOUT:
nbd_set_cmd_timeout(nbd, arg);
return 0;
case NBD_SET_FLAGS:
config->flags = arg;
return 0;
case NBD_DO_IT:
return nbd_start_device_ioctl(nbd);
case NBD_CLEAR_QUE:
/*
* This is for compatibility only. The queue is always cleared
* by NBD_DO_IT or NBD_CLEAR_SOCK.
*/
return 0;
case NBD_PRINT_DEBUG:
/*
* For compatibility only, we no longer keep a list of
* outstanding requests.
*/
return 0;
}
return -ENOTTY;
}
static int nbd_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
struct nbd_device *nbd = bdev->bd_disk->private_data;
struct nbd_config *config = nbd->config;
int error = -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
/* The block layer will pass back some non-nbd ioctls in case we have
 * special handling for them, but we don't, so just return an error.
 */
if (_IOC_TYPE(cmd) != 0xab)
return -EINVAL;
mutex_lock(&nbd->config_lock);
/* Don't allow ioctl operations on an nbd device that was created with
 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
 */
if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
(cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
error = __nbd_ioctl(bdev, nbd, cmd, arg);
else
dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
mutex_unlock(&nbd->config_lock);
return error;
}
static struct nbd_config *nbd_alloc_config(void)
{
struct nbd_config *config;
if (!try_module_get(THIS_MODULE))
return ERR_PTR(-ENODEV);
config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
if (!config) {
module_put(THIS_MODULE);
return ERR_PTR(-ENOMEM);
}
atomic_set(&config->recv_threads, 0);
init_waitqueue_head(&config->recv_wq);
init_waitqueue_head(&config->conn_wait);
config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
atomic_set(&config->live_connections, 0);
return config;
}
static int nbd_open(struct gendisk *disk, blk_mode_t mode)
{
struct nbd_device *nbd;
int ret = 0;
mutex_lock(&nbd_index_mutex);
nbd = disk->private_data;
if (!nbd) {
ret = -ENXIO;
goto out;
}
if (!refcount_inc_not_zero(&nbd->refs)) {
ret = -ENXIO;
goto out;
}
if (!refcount_inc_not_zero(&nbd->config_refs)) {
struct nbd_config *config;
mutex_lock(&nbd->config_lock);
if (refcount_inc_not_zero(&nbd->config_refs)) {
mutex_unlock(&nbd->config_lock);
goto out;
}
config = nbd_alloc_config();
if (IS_ERR(config)) {
ret = PTR_ERR(config);
mutex_unlock(&nbd->config_lock);
goto out;
}
nbd->config = config;
refcount_set(&nbd->config_refs, 1);
refcount_inc(&nbd->refs);
mutex_unlock(&nbd->config_lock);
if (max_part)
set_bit(GD_NEED_PART_SCAN, &disk->state);
} else if (nbd_disconnected(nbd->config)) {
if (max_part)
set_bit(GD_NEED_PART_SCAN, &disk->state);
}
out:
mutex_unlock(&nbd_index_mutex);
return ret;
}
static void nbd_release(struct gendisk *disk)
{
struct nbd_device *nbd = disk->private_data;
if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
disk_openers(disk) == 0)
nbd_disconnect_and_put(nbd);
nbd_config_put(nbd);
nbd_put(nbd);
}
static const struct block_device_operations nbd_fops =
{
.owner = THIS_MODULE,
.open = nbd_open,
.release = nbd_release,
.ioctl = nbd_ioctl,
.compat_ioctl = nbd_ioctl,
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
struct nbd_device *nbd = s->private;
if (nbd->pid)
seq_printf(s, "recv: %d\n", nbd->pid);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(nbd_dbg_tasks);
static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
struct nbd_device *nbd = s->private;
u32 flags = nbd->config->flags;
seq_printf(s, "Hex: 0x%08x\n\n", flags);
seq_puts(s, "Known flags:\n");
if (flags & NBD_FLAG_HAS_FLAGS)
seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
if (flags & NBD_FLAG_READ_ONLY)
seq_puts(s, "NBD_FLAG_READ_ONLY\n");
if (flags & NBD_FLAG_SEND_FLUSH)
seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
if (flags & NBD_FLAG_SEND_FUA)
seq_puts(s, "NBD_FLAG_SEND_FUA\n");
if (flags & NBD_FLAG_SEND_TRIM)
seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
return 0;
}
DEFINE_SHOW_ATTRIBUTE(nbd_dbg_flags);
static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
struct dentry *dir;
struct nbd_config *config = nbd->config;
if (!nbd_dbg_dir)
return -EIO;
dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
if (IS_ERR(dir)) {
dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
nbd_name(nbd));
return -EIO;
}
config->dbg_dir = dir;
debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits);
debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);
return 0;
}
static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
debugfs_remove_recursive(nbd->config->dbg_dir);
}
static int nbd_dbg_init(void)
{
struct dentry *dbg_dir;
dbg_dir = debugfs_create_dir("nbd", NULL);
if (IS_ERR(dbg_dir))
return -EIO;
nbd_dbg_dir = dbg_dir;
return 0;
}
static void nbd_dbg_close(void)
{
debugfs_remove_recursive(nbd_dbg_dir);
}
#else /* IS_ENABLED(CONFIG_DEBUG_FS) */
static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
return 0;
}
static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}
static int nbd_dbg_init(void)
{
return 0;
}
static void nbd_dbg_close(void)
{
}
#endif
static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
cmd->nbd = set->driver_data;
cmd->flags = 0;
mutex_init(&cmd->lock);
return 0;
}
static const struct blk_mq_ops nbd_mq_ops = {
.queue_rq = nbd_queue_rq,
.complete = nbd_complete_rq,
.init_request = nbd_init_request,
.timeout = nbd_xmit_timeout,
};
static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
{
struct nbd_device *nbd;
struct gendisk *disk;
int err = -ENOMEM;
nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
if (!nbd)
goto out;
nbd->tag_set.ops = &nbd_mq_ops;
nbd->tag_set.nr_hw_queues = 1;
nbd->tag_set.queue_depth = 128;
nbd->tag_set.numa_node = NUMA_NO_NODE;
nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
BLK_MQ_F_BLOCKING;
nbd->tag_set.driver_data = nbd;
INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
nbd->backend = NULL;
err = blk_mq_alloc_tag_set(&nbd->tag_set);
if (err)
goto out_free_nbd;
mutex_lock(&nbd_index_mutex);
if (index >= 0) {
err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
GFP_KERNEL);
if (err == -ENOSPC)
err = -EEXIST;
} else {
err = idr_alloc(&nbd_index_idr, nbd, 0,
(MINORMASK >> part_shift) + 1, GFP_KERNEL);
if (err >= 0)
index = err;
}
nbd->index = index;
mutex_unlock(&nbd_index_mutex);
if (err < 0)
goto out_free_tags;
disk = blk_mq_alloc_disk(&nbd->tag_set, NULL);
if (IS_ERR(disk)) {
err = PTR_ERR(disk);
goto out_free_idr;
}
nbd->disk = disk;
nbd->recv_workq = alloc_workqueue("nbd%d-recv",
WQ_MEM_RECLAIM | WQ_HIGHPRI |
WQ_UNBOUND, 0, nbd->index);
if (!nbd->recv_workq) {
dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
err = -ENOMEM;
goto out_err_disk;
}
/*
* Tell the block layer that we are not a rotational device
*/
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
disk->queue->limits.discard_granularity = 0;
blk_queue_max_discard_sectors(disk->queue, 0);
blk_queue_max_segment_size(disk->queue, UINT_MAX);
blk_queue_max_segments(disk->queue, USHRT_MAX);
blk_queue_max_hw_sectors(disk->queue, 65536);
disk->queue->limits.max_sectors = 256;
mutex_init(&nbd->config_lock);
refcount_set(&nbd->config_refs, 0);
/*
 * Start out with zero references to keep other threads from using
 * this device until it is fully initialized.
 */
refcount_set(&nbd->refs, 0);
INIT_LIST_HEAD(&nbd->list);
disk->major = NBD_MAJOR;
disk->first_minor = index << part_shift;
disk->minors = 1 << part_shift;
disk->fops = &nbd_fops;
disk->private_data = nbd;
sprintf(disk->disk_name, "nbd%d", index);
err = add_disk(disk);
if (err)
goto out_free_work;
/*
* Now publish the device.
*/
refcount_set(&nbd->refs, refs);
nbd_total_devices++;
return nbd;
out_free_work:
destroy_workqueue(nbd->recv_workq);
out_err_disk:
put_disk(disk);
out_free_idr:
mutex_lock(&nbd_index_mutex);
idr_remove(&nbd_index_idr, index);
mutex_unlock(&nbd_index_mutex);
out_free_tags:
blk_mq_free_tag_set(&nbd->tag_set);
out_free_nbd:
kfree(nbd);
out:
return ERR_PTR(err);
}
static struct nbd_device *nbd_find_get_unused(void)
{
struct nbd_device *nbd;
int id;
lockdep_assert_held(&nbd_index_mutex);
idr_for_each_entry(&nbd_index_idr, nbd, id) {
if (refcount_read(&nbd->config_refs) ||
test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
continue;
if (refcount_inc_not_zero(&nbd->refs))
return nbd;
}
return NULL;
}
/* Netlink interface. */
static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
[NBD_ATTR_INDEX] = { .type = NLA_U32 },
[NBD_ATTR_SIZE_BYTES] = { .type = NLA_U64 },
[NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 },
[NBD_ATTR_TIMEOUT] = { .type = NLA_U64 },
[NBD_ATTR_SERVER_FLAGS] = { .type = NLA_U64 },
[NBD_ATTR_CLIENT_FLAGS] = { .type = NLA_U64 },
[NBD_ATTR_SOCKETS] = { .type = NLA_NESTED},
[NBD_ATTR_DEAD_CONN_TIMEOUT] = { .type = NLA_U64 },
[NBD_ATTR_DEVICE_LIST] = { .type = NLA_NESTED},
[NBD_ATTR_BACKEND_IDENTIFIER] = { .type = NLA_STRING},
};
static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
[NBD_SOCK_FD] = { .type = NLA_U32 },
};
/* We don't use this right now since we don't parse the incoming list, but we
* still want it here so userspace knows what to expect.
*/
static const struct nla_policy __attribute__((unused))
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
[NBD_DEVICE_INDEX] = { .type = NLA_U32 },
[NBD_DEVICE_CONNECTED] = { .type = NLA_U8 },
};
static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
{
struct nbd_config *config = nbd->config;
u64 bsize = nbd_blksize(config);
u64 bytes = config->bytesize;
if (info->attrs[NBD_ATTR_SIZE_BYTES])
bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
if (bytes != config->bytesize || bsize != nbd_blksize(config))
return nbd_set_size(nbd, bytes, bsize);
return 0;
}
static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
{
struct nbd_device *nbd;
struct nbd_config *config;
int index = -1;
int ret;
bool put_dev = false;
if (!netlink_capable(skb, CAP_SYS_ADMIN))
return -EPERM;
if (info->attrs[NBD_ATTR_INDEX]) {
index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
/*
 * A too-large first_minor can cause duplicate creation of
 * sysfs files/links, since index << part_shift might overflow, and
 * MKDEV() expects that first_minor fits in at most 20 bits.
 */
if (index < 0 || index > MINORMASK >> part_shift) {
pr_err("illegal input index %d\n", index);
return -EINVAL;
}
}
if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_SOCKETS)) {
pr_err("must specify at least one socket\n");
return -EINVAL;
}
if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_SIZE_BYTES)) {
pr_err("must specify a size in bytes for the device\n");
return -EINVAL;
}
again:
mutex_lock(&nbd_index_mutex);
if (index == -1) {
nbd = nbd_find_get_unused();
} else {
nbd = idr_find(&nbd_index_idr, index);
if (nbd) {
if ((test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) ||
!refcount_inc_not_zero(&nbd->refs)) {
mutex_unlock(&nbd_index_mutex);
pr_err("device at index %d is going down\n",
index);
return -EINVAL;
}
}
}
mutex_unlock(&nbd_index_mutex);
if (!nbd) {
nbd = nbd_dev_add(index, 2);
if (IS_ERR(nbd)) {
pr_err("failed to add new device\n");
return PTR_ERR(nbd);
}
}
mutex_lock(&nbd->config_lock);
if (refcount_read(&nbd->config_refs)) {
mutex_unlock(&nbd->config_lock);
nbd_put(nbd);
if (index == -1)
goto again;
pr_err("nbd%d already in use\n", index);
return -EBUSY;
}
if (WARN_ON(nbd->config)) {
mutex_unlock(&nbd->config_lock);
nbd_put(nbd);
return -EINVAL;
}
config = nbd_alloc_config();
if (IS_ERR(config)) {
mutex_unlock(&nbd->config_lock);
nbd_put(nbd);
pr_err("couldn't allocate config\n");
return PTR_ERR(config);
}
nbd->config = config;
refcount_set(&nbd->config_refs, 1);
set_bit(NBD_RT_BOUND, &config->runtime_flags);
ret = nbd_genl_size_set(info, nbd);
if (ret)
goto out;
if (info->attrs[NBD_ATTR_TIMEOUT])
nbd_set_cmd_timeout(nbd,
nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
config->dead_conn_timeout =
nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
config->dead_conn_timeout *= HZ;
}
if (info->attrs[NBD_ATTR_SERVER_FLAGS])
config->flags =
nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
/*
* We have 1 ref to keep the device around, and then 1
* ref for our current operation here, which will be
* inherited by the config. If we already have
* DESTROY_ON_DISCONNECT set then we know we don't have
* that extra ref already held so we don't need the
* put_dev.
*/
if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
&nbd->flags))
put_dev = true;
} else {
if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
&nbd->flags))
refcount_inc(&nbd->refs);
}
if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
&config->runtime_flags);
}
}
if (info->attrs[NBD_ATTR_SOCKETS]) {
struct nlattr *attr;
int rem, fd;
nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
rem) {
struct nlattr *socks[NBD_SOCK_MAX+1];
if (nla_type(attr) != NBD_SOCK_ITEM) {
pr_err("socks must be embedded in a SOCK_ITEM attr\n");
ret = -EINVAL;
goto out;
}
ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
attr,
nbd_sock_policy,
info->extack);
if (ret != 0) {
pr_err("error processing sock list\n");
ret = -EINVAL;
goto out;
}
if (!socks[NBD_SOCK_FD])
continue;
fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
ret = nbd_add_socket(nbd, fd, true);
if (ret)
goto out;
}
}
ret = nbd_start_device(nbd);
if (ret)
goto out;
if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
GFP_KERNEL);
if (!nbd->backend) {
ret = -ENOMEM;
goto out;
}
}
ret = device_create_file(disk_to_dev(nbd->disk), &backend_attr);
if (ret) {
dev_err(disk_to_dev(nbd->disk),
"device_create_file failed for backend!\n");
goto out;
}
set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags);
out:
mutex_unlock(&nbd->config_lock);
if (!ret) {
set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
refcount_inc(&nbd->config_refs);
nbd_connect_reply(info, nbd->index);
}
nbd_config_put(nbd);
if (put_dev)
nbd_put(nbd);
return ret;
}
static void nbd_disconnect_and_put(struct nbd_device *nbd)
{
mutex_lock(&nbd->config_lock);
nbd_disconnect(nbd);
sock_shutdown(nbd);
wake_up(&nbd->config->conn_wait);
/*
 * Make sure the recv threads have finished; then we can safely call
 * nbd_clear_que() to cancel the inflight I/Os.
 */
flush_workqueue(nbd->recv_workq);
nbd_clear_que(nbd);
nbd->task_setup = NULL;
mutex_unlock(&nbd->config_lock);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
&nbd->config->runtime_flags))
nbd_config_put(nbd);
}
static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
{
struct nbd_device *nbd;
int index;
if (!netlink_capable(skb, CAP_SYS_ADMIN))
return -EPERM;
if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_INDEX)) {
pr_err("must specify an index to disconnect\n");
return -EINVAL;
}
index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
mutex_lock(&nbd_index_mutex);
nbd = idr_find(&nbd_index_idr, index);
if (!nbd) {
mutex_unlock(&nbd_index_mutex);
pr_err("couldn't find device at index %d\n", index);
return -EINVAL;
}
if (!refcount_inc_not_zero(&nbd->refs)) {
mutex_unlock(&nbd_index_mutex);
pr_err("device at index %d is going down\n", index);
return -EINVAL;
}
mutex_unlock(&nbd_index_mutex);
if (!refcount_inc_not_zero(&nbd->config_refs))
goto put_nbd;
nbd_disconnect_and_put(nbd);
nbd_config_put(nbd);
put_nbd:
nbd_put(nbd);
return 0;
}
static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
{
struct nbd_device *nbd = NULL;
struct nbd_config *config;
int index;
int ret = 0;
bool put_dev = false;
if (!netlink_capable(skb, CAP_SYS_ADMIN))
return -EPERM;
if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_INDEX)) {
pr_err("must specify a device to reconfigure\n");
return -EINVAL;
}
index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
mutex_lock(&nbd_index_mutex);
nbd = idr_find(&nbd_index_idr, index);
if (!nbd) {
mutex_unlock(&nbd_index_mutex);
pr_err("couldn't find a device at index %d\n", index);
return -EINVAL;
}
if (nbd->backend) {
if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
if (nla_strcmp(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
nbd->backend)) {
mutex_unlock(&nbd_index_mutex);
dev_err(nbd_to_dev(nbd),
"backend image doesn't match with %s\n",
nbd->backend);
return -EINVAL;
}
} else {
mutex_unlock(&nbd_index_mutex);
dev_err(nbd_to_dev(nbd), "must specify backend\n");
return -EINVAL;
}
}
if (!refcount_inc_not_zero(&nbd->refs)) {
mutex_unlock(&nbd_index_mutex);
pr_err("device at index %d is going down\n", index);
return -EINVAL;
}
mutex_unlock(&nbd_index_mutex);
if (!refcount_inc_not_zero(&nbd->config_refs)) {
dev_err(nbd_to_dev(nbd),
"not configured, cannot reconfigure\n");
nbd_put(nbd);
return -EINVAL;
}
mutex_lock(&nbd->config_lock);
config = nbd->config;
if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
!nbd->pid) {
dev_err(nbd_to_dev(nbd),
"not configured, cannot reconfigure\n");
ret = -EINVAL;
goto out;
}
ret = nbd_genl_size_set(info, nbd);
if (ret)
goto out;
if (info->attrs[NBD_ATTR_TIMEOUT])
nbd_set_cmd_timeout(nbd,
nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
config->dead_conn_timeout =
nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
config->dead_conn_timeout *= HZ;
}
if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
&nbd->flags))
put_dev = true;
} else {
if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
&nbd->flags))
refcount_inc(&nbd->refs);
}
if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
&config->runtime_flags);
} else {
clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
&config->runtime_flags);
}
}
if (info->attrs[NBD_ATTR_SOCKETS]) {
struct nlattr *attr;
int rem, fd;
nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
rem) {
struct nlattr *socks[NBD_SOCK_MAX+1];
if (nla_type(attr) != NBD_SOCK_ITEM) {
pr_err("socks must be embedded in a SOCK_ITEM attr\n");
ret = -EINVAL;
goto out;
}
ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
attr,
nbd_sock_policy,
info->extack);
if (ret != 0) {
pr_err("error processing sock list\n");
ret = -EINVAL;
goto out;
}
if (!socks[NBD_SOCK_FD])
continue;
fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
ret = nbd_reconnect_socket(nbd, fd);
if (ret) {
if (ret == -ENOSPC)
ret = 0;
goto out;
}
dev_info(nbd_to_dev(nbd), "reconnected socket\n");
}
}
out:
mutex_unlock(&nbd->config_lock);
nbd_config_put(nbd);
nbd_put(nbd);
if (put_dev)
nbd_put(nbd);
return ret;
}
static const struct genl_small_ops nbd_connect_genl_ops[] = {
{
.cmd = NBD_CMD_CONNECT,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nbd_genl_connect,
},
{
.cmd = NBD_CMD_DISCONNECT,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nbd_genl_disconnect,
},
{
.cmd = NBD_CMD_RECONFIGURE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nbd_genl_reconfigure,
},
{
.cmd = NBD_CMD_STATUS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nbd_genl_status,
},
};
static const struct genl_multicast_group nbd_mcast_grps[] = {
{ .name = NBD_GENL_MCAST_GROUP_NAME, },
};
static struct genl_family nbd_genl_family __ro_after_init = {
.hdrsize = 0,
.name = NBD_GENL_FAMILY_NAME,
.version = NBD_GENL_VERSION,
.module = THIS_MODULE,
.small_ops = nbd_connect_genl_ops,
.n_small_ops = ARRAY_SIZE(nbd_connect_genl_ops),
.resv_start_op = NBD_CMD_STATUS + 1,
.maxattr = NBD_ATTR_MAX,
.netnsok = 1,
.policy = nbd_attr_policy,
.mcgrps = nbd_mcast_grps,
.n_mcgrps = ARRAY_SIZE(nbd_mcast_grps),
};
MODULE_ALIAS_GENL_FAMILY(NBD_GENL_FAMILY_NAME);
static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
{
struct nlattr *dev_opt;
u8 connected = 0;
int ret;
/* This is a little racy, but for status it's ok. The
* reason we don't take a ref here is because we can't
* take a ref in the index == -1 case as we would need
* to put under the nbd_index_mutex, which could
* deadlock if we are configured to remove ourselves
* once we're disconnected.
*/
if (refcount_read(&nbd->config_refs))
connected = 1;
dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
if (!dev_opt)
return -EMSGSIZE;
ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
if (ret)
return -EMSGSIZE;
ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
connected);
if (ret)
return -EMSGSIZE;
nla_nest_end(reply, dev_opt);
return 0;
}
static int status_cb(int id, void *ptr, void *data)
{
struct nbd_device *nbd = ptr;
return populate_nbd_status(nbd, (struct sk_buff *)data);
}
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *dev_list;
struct sk_buff *reply;
void *reply_head;
size_t msg_size;
int index = -1;
int ret = -ENOMEM;
if (info->attrs[NBD_ATTR_INDEX])
index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
mutex_lock(&nbd_index_mutex);
msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
nla_attr_size(sizeof(u8)));
msg_size *= (index == -1) ? nbd_total_devices : 1;
reply = genlmsg_new(msg_size, GFP_KERNEL);
if (!reply)
goto out;
reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
NBD_CMD_STATUS);
if (!reply_head) {
nlmsg_free(reply);
goto out;
}
dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
if (index == -1) {
ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
if (ret) {
nlmsg_free(reply);
goto out;
}
} else {
struct nbd_device *nbd;
nbd = idr_find(&nbd_index_idr, index);
if (nbd) {
ret = populate_nbd_status(nbd, reply);
if (ret) {
nlmsg_free(reply);
goto out;
}
}
}
nla_nest_end(reply, dev_list);
genlmsg_end(reply, reply_head);
ret = genlmsg_reply(reply, info);
out:
mutex_unlock(&nbd_index_mutex);
return ret;
}
static void nbd_connect_reply(struct genl_info *info, int index)
{
struct sk_buff *skb;
void *msg_head;
int ret;
skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
if (!skb)
return;
msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
NBD_CMD_CONNECT);
if (!msg_head) {
nlmsg_free(skb);
return;
}
ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
if (ret) {
nlmsg_free(skb);
return;
}
genlmsg_end(skb, msg_head);
genlmsg_reply(skb, info);
}
static void nbd_mcast_index(int index)
{
struct sk_buff *skb;
void *msg_head;
int ret;
skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
if (!skb)
return;
msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
NBD_CMD_LINK_DEAD);
if (!msg_head) {
nlmsg_free(skb);
return;
}
ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
if (ret) {
nlmsg_free(skb);
return;
}
genlmsg_end(skb, msg_head);
genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
}
static void nbd_dead_link_work(struct work_struct *work)
{
struct link_dead_args *args = container_of(work, struct link_dead_args,
work);
nbd_mcast_index(args->index);
kfree(args);
}
static int __init nbd_init(void)
{
int i;
BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
if (max_part < 0) {
pr_err("max_part must be >= 0\n");
return -EINVAL;
}
part_shift = 0;
if (max_part > 0) {
part_shift = fls(max_part);
/*
* Adjust max_part according to part_shift as it is exported
* to user space so that the user can know the max number of
* partitions the kernel should be able to manage.
*
* Note that -1 is required because partition 0 is reserved
* for the whole disk.
*/
max_part = (1UL << part_shift) - 1;
}
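/*
 * Worked example: max_part = 15 gives part_shift = fls(15) = 4 and
 * max_part recomputed to (1 << 4) - 1 = 15, i.e. minors 1..15 for
 * partitions plus minor 0 for the whole disk; nbd1 then starts at
 * first_minor = 1 << 4 = 16.
 */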
if ((1UL << part_shift) > DISK_MAX_PARTS)
return -EINVAL;
if (nbds_max > 1UL << (MINORBITS - part_shift))
return -EINVAL;
if (register_blkdev(NBD_MAJOR, "nbd"))
return -EIO;
nbd_del_wq = alloc_workqueue("nbd-del", WQ_UNBOUND, 0);
if (!nbd_del_wq) {
unregister_blkdev(NBD_MAJOR, "nbd");
return -ENOMEM;
}
if (genl_register_family(&nbd_genl_family)) {
destroy_workqueue(nbd_del_wq);
unregister_blkdev(NBD_MAJOR, "nbd");
return -EINVAL;
}
nbd_dbg_init();
for (i = 0; i < nbds_max; i++)
nbd_dev_add(i, 1);
return 0;
}
static int nbd_exit_cb(int id, void *ptr, void *data)
{
struct list_head *list = (struct list_head *)data;
struct nbd_device *nbd = ptr;
/* Skip nbd that is being removed asynchronously */
if (refcount_read(&nbd->refs))
list_add_tail(&nbd->list, list);
return 0;
}
static void __exit nbd_cleanup(void)
{
struct nbd_device *nbd;
LIST_HEAD(del_list);
/*
* Unregister netlink interface prior to waiting
* for the completion of netlink commands.
*/
genl_unregister_family(&nbd_genl_family);
nbd_dbg_close();
mutex_lock(&nbd_index_mutex);
idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
mutex_unlock(&nbd_index_mutex);
while (!list_empty(&del_list)) {
nbd = list_first_entry(&del_list, struct nbd_device, list);
list_del_init(&nbd->list);
if (refcount_read(&nbd->config_refs))
pr_err("possibly leaking nbd_config (ref %d)\n",
refcount_read(&nbd->config_refs));
if (refcount_read(&nbd->refs) != 1)
pr_err("possibly leaking a device\n");
nbd_put(nbd);
}
/* Also wait for nbd_dev_remove_work() to complete */
destroy_workqueue(nbd_del_wq);
idr_destroy(&nbd_index_idr);
unregister_blkdev(NBD_MAJOR, "nbd");
}
module_init(nbd_init);
module_exit(nbd_cleanup);
MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");
module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");
| linux-master | drivers/block/nbd.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for the SWIM3 (Super Woz Integrated Machine 3)
* floppy controller found on Power Macintoshes.
*
* Copyright (C) 1996 Paul Mackerras.
*/
/*
* TODO:
* handle 2 drives
* handle GCR disks
*/
#undef DEBUG
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/fd.h>
#include <linux/ioctl.h>
#include <linux/blk-mq.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/major.h>
#include <asm/io.h>
#include <asm/dbdma.h>
#include <asm/prom.h>
#include <linux/uaccess.h>
#include <asm/mediabay.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#define MAX_FLOPPIES 2
static DEFINE_MUTEX(swim3_mutex);
static struct gendisk *disks[MAX_FLOPPIES];
enum swim_state {
idle,
locating,
seeking,
settling,
do_transfer,
jogging,
available,
revalidating,
ejecting
};
#define REG(x) unsigned char x; char x ## _pad[15];
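/*
 * REG(data), for example, expands to "unsigned char data; char
 * data_pad[15];" so each register below occupies a 16-byte slot,
 * presumably matching the register spacing in the SWIM3's address map.
 */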
/*
* The names for these registers mostly represent speculation on my part.
* It will be interesting to see how close they are to the names Apple uses.
*/
struct swim3 {
REG(data);
REG(timer); /* counts down at 1MHz */
REG(error);
REG(mode);
REG(select); /* controls CA0, CA1, CA2 and LSTRB signals */
REG(setup);
REG(control); /* writing bits clears them */
REG(status); /* writing bits sets them in control */
REG(intr);
REG(nseek); /* # tracks to seek */
REG(ctrack); /* current track number */
REG(csect); /* current sector number */
REG(gap3); /* size of gap 3 in track format */
REG(sector); /* sector # to read or write */
REG(nsect); /* # sectors to read or write */
REG(intr_enable);
};
#define control_bic control
#define control_bis status
/* Bits in select register */
#define CA_MASK 7
#define LSTRB 8
/* Bits in control register */
#define DO_SEEK 0x80
#define FORMAT 0x40
#define SELECT 0x20
#define WRITE_SECTORS 0x10
#define DO_ACTION 0x08
#define DRIVE2_ENABLE 0x04
#define DRIVE_ENABLE 0x02
#define INTR_ENABLE 0x01
/* Bits in status register */
#define FIFO_1BYTE 0x80
#define FIFO_2BYTE 0x40
#define ERROR 0x20
#define DATA 0x08
#define RDDATA 0x04
#define INTR_PENDING 0x02
#define MARK_BYTE 0x01
/* Bits in intr and intr_enable registers */
#define ERROR_INTR 0x20
#define DATA_CHANGED 0x10
#define TRANSFER_DONE 0x08
#define SEEN_SECTOR 0x04
#define SEEK_DONE 0x02
#define TIMER_DONE 0x01
/* Bits in error register */
#define ERR_DATA_CRC 0x80
#define ERR_ADDR_CRC 0x40
#define ERR_OVERRUN 0x04
#define ERR_UNDERRUN 0x01
/* Bits in setup register */
#define S_SW_RESET 0x80
#define S_GCR_WRITE 0x40
#define S_IBM_DRIVE 0x20
#define S_TEST_MODE 0x10
#define S_FCLK_DIV2 0x08
#define S_GCR 0x04
#define S_COPY_PROT 0x02
#define S_INV_WDATA 0x01
/* Select values for swim3_action */
#define SEEK_POSITIVE 0
#define SEEK_NEGATIVE 4
#define STEP 1
#define MOTOR_ON 2
#define MOTOR_OFF 6
#define INDEX 3
#define EJECT 7
#define SETMFM 9
#define SETGCR 13
/* Select values for swim3_select and swim3_readbit */
#define STEP_DIR 0
#define STEPPING 1
#define MOTOR_ON 2
#define RELAX 3 /* also eject in progress */
#define READ_DATA_0 4
#define ONEMEG_DRIVE 5
#define SINGLE_SIDED 6 /* drive or diskette is 4MB type? */
#define DRIVE_PRESENT 7
#define DISK_IN 8
#define WRITE_PROT 9
#define TRACK_ZERO 10
#define TACHO 11
#define READ_DATA_1 12
#define GCR_MODE 13
#define SEEK_COMPLETE 14
#define TWOMEG_MEDIA 15
/* Definitions of values used in writing and formatting */
#define DATA_ESCAPE 0x99
#define GCR_SYNC_EXC 0x3f
#define GCR_SYNC_CONV 0x80
#define GCR_FIRST_MARK 0xd5
#define GCR_SECOND_MARK 0xaa
#define GCR_ADDR_MARK "\xd5\xaa\x00"
#define GCR_DATA_MARK "\xd5\xaa\x0b"
#define GCR_SLIP_BYTE "\x27\xaa"
#define GCR_SELF_SYNC "\x3f\xbf\x1e\x34\x3c\x3f"
#define DATA_99 "\x99\x99"
#define MFM_ADDR_MARK "\x99\xa1\x99\xa1\x99\xa1\x99\xfe"
#define MFM_INDEX_MARK "\x99\xc2\x99\xc2\x99\xc2\x99\xfc"
#define MFM_GAP_LEN 12
struct floppy_state {
enum swim_state state;
struct swim3 __iomem *swim3; /* hardware registers */
struct dbdma_regs __iomem *dma; /* DMA controller registers */
int swim3_intr; /* interrupt number for SWIM3 */
int dma_intr; /* interrupt number for DMA channel */
int cur_cyl; /* cylinder head is on, or -1 */
int cur_sector; /* last sector we saw go past */
int req_cyl; /* the cylinder for the current r/w request */
int head; /* head number ditto */
int req_sector; /* sector number ditto */
int scount; /* # sectors we're transferring at present */
int retries;
int settle_time;
int secpercyl; /* disk geometry information */
int secpertrack;
int total_secs;
int write_prot; /* 1 if write-protected, 0 if not, -1 dunno */
struct dbdma_cmd *dma_cmd;
int ref_count;
int expect_cyl;
struct timer_list timeout;
int timeout_pending;
int ejected;
wait_queue_head_t wait;
int wanted;
struct macio_dev *mdev;
char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
int index;
struct request *cur_req;
struct blk_mq_tag_set tag_set;
};
#define swim3_err(fmt, arg...) dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
#define swim3_warn(fmt, arg...) dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
#define swim3_info(fmt, arg...) dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
#ifdef DEBUG
#define swim3_dbg(fmt, arg...) dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
#else
#define swim3_dbg(fmt, arg...) do { } while(0)
#endif
static struct floppy_state floppy_states[MAX_FLOPPIES];
static int floppy_count = 0;
static DEFINE_SPINLOCK(swim3_lock);
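/*
* Judging by the DATA_ESCAPE define and the "insert CRC" / "stop
* writing" / "no escape for 512 bytes" annotations below, 0x99 acts
* as an escape prefix in MFM write data: the byte that follows is
* treated as a controller command rather than literal data. The
* preamble/postamble words are fed to the drive around the 512 data
* bytes of each sector written.
*/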
static unsigned short write_preamble[] = {
0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e, /* gap field */
0, 0, 0, 0, 0, 0, /* sync field */
0x99a1, 0x99a1, 0x99a1, 0x99fb, /* data address mark */
0x990f /* no escape for 512 bytes */
};
static unsigned short write_postamble[] = {
0x9904, /* insert CRC */
0x4e4e, 0x4e4e,
0x9908, /* stop writing */
0, 0, 0, 0, 0, 0
};
static void seek_track(struct floppy_state *fs, int n);
static void act(struct floppy_state *fs);
static void scan_timeout(struct timer_list *t);
static void seek_timeout(struct timer_list *t);
static void settle_timeout(struct timer_list *t);
static void xfer_timeout(struct timer_list *t);
static irqreturn_t swim3_interrupt(int irq, void *dev_id);
/*static void fd_dma_interrupt(int irq, void *dev_id);*/
static int grab_drive(struct floppy_state *fs, enum swim_state state,
int interruptible);
static void release_drive(struct floppy_state *fs);
static int fd_eject(struct floppy_state *fs);
static int floppy_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long param);
static int floppy_open(struct gendisk *disk, blk_mode_t mode);
static unsigned int floppy_check_events(struct gendisk *disk,
unsigned int clearing);
static int floppy_revalidate(struct gendisk *disk);
static bool swim3_end_request(struct floppy_state *fs, blk_status_t err, unsigned int nr_bytes)
{
struct request *req = fs->cur_req;
swim3_dbg(" end request, err=%d nr_bytes=%d, cur_req=%p\n",
err, nr_bytes, req);
if (err)
nr_bytes = blk_rq_cur_bytes(req);
if (blk_update_request(req, err, nr_bytes))
return true;
__blk_mq_end_request(req, err);
fs->cur_req = NULL;
return false;
}
static void swim3_select(struct floppy_state *fs, int sel)
{
struct swim3 __iomem *sw = fs->swim3;
out_8(&sw->select, RELAX);
if (sel & 8)
out_8(&sw->control_bis, SELECT);
else
out_8(&sw->control_bic, SELECT);
out_8(&sw->select, sel & CA_MASK);
}
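/*
* Perform a drive "action": put the action code on CA0-CA2/SELECT via
* swim3_select(), then pulse LSTRB (see the select register comment
* above) to latch it into the drive.
*/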
static void swim3_action(struct floppy_state *fs, int action)
{
struct swim3 __iomem *sw = fs->swim3;
swim3_select(fs, action);
udelay(1);
out_8(&sw->select, sw->select | LSTRB);
udelay(2);
out_8(&sw->select, sw->select & ~LSTRB);
udelay(1);
}
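/* Read one drive status bit; the DATA status line is active low. */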
static int swim3_readbit(struct floppy_state *fs, int bit)
{
struct swim3 __iomem *sw = fs->swim3;
int stat;
swim3_select(fs, bit);
udelay(1);
stat = in_8(&sw->status);
return (stat & DATA) == 0;
}
static blk_status_t swim3_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct floppy_state *fs = hctx->queue->queuedata;
struct request *req = bd->rq;
unsigned long x;
spin_lock_irq(&swim3_lock);
if (fs->cur_req || fs->state != idle) {
spin_unlock_irq(&swim3_lock);
return BLK_STS_DEV_RESOURCE;
}
blk_mq_start_request(req);
fs->cur_req = req;
if (fs->mdev->media_bay &&
check_media_bay(fs->mdev->media_bay) != MB_FD) {
swim3_dbg("%s", " media bay absent, dropping req\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
goto out;
}
if (fs->ejected) {
swim3_dbg("%s", " disk ejected\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
goto out;
}
if (rq_data_dir(req) == WRITE) {
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot) {
swim3_dbg("%s", " try to write, disk write protected\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
goto out;
}
}
/*
* Do not remove the cast. blk_rq_pos(req) is now a sector_t and can be
* 64 bits, but it will never go past 32 bits for this driver anyway, so
* we can safely cast it down and not have to do a 64/32 division
*/
fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
x = ((long)blk_rq_pos(req)) % fs->secpercyl;
fs->head = x / fs->secpertrack;
fs->req_sector = x % fs->secpertrack + 1;
fs->state = do_transfer;
fs->retries = 0;
act(fs);
out:
spin_unlock_irq(&swim3_lock);
return BLK_STS_OK;
}
static void set_timeout(struct floppy_state *fs, int nticks,
void (*proc)(struct timer_list *t))
{
if (fs->timeout_pending)
del_timer(&fs->timeout);
fs->timeout.expires = jiffies + nticks;
fs->timeout.function = proc;
add_timer(&fs->timeout);
fs->timeout_pending = 1;
}
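/*
* Note: set_timeout() does no locking of its own; as far as I can
* tell all callers (act() and the timer handlers) already hold
* swim3_lock, which is what keeps timeout_pending consistent.
*/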
static inline void scan_track(struct floppy_state *fs)
{
struct swim3 __iomem *sw = fs->swim3;
swim3_select(fs, READ_DATA_0);
in_8(&sw->intr); /* clear SEEN_SECTOR bit */
in_8(&sw->error);
out_8(&sw->intr_enable, SEEN_SECTOR);
out_8(&sw->control_bis, DO_ACTION);
/* enable intr when track found */
set_timeout(fs, HZ, scan_timeout); /* enable timeout */
}
static inline void seek_track(struct floppy_state *fs, int n)
{
struct swim3 __iomem *sw = fs->swim3;
if (n >= 0) {
swim3_action(fs, SEEK_POSITIVE);
sw->nseek = n;
} else {
swim3_action(fs, SEEK_NEGATIVE);
sw->nseek = -n;
}
fs->expect_cyl = (fs->cur_cyl >= 0)? fs->cur_cyl + n: -1;
swim3_select(fs, STEP);
in_8(&sw->error);
/* enable intr when seek finished */
out_8(&sw->intr_enable, SEEK_DONE);
out_8(&sw->control_bis, DO_SEEK);
set_timeout(fs, 3*HZ, seek_timeout); /* enable timeout */
fs->settle_time = 0;
}
/*
* XXX: this is a horrible hack, but at least allows ppc32 to get
* out of defining virt_to_bus, and this driver out of using the
* deprecated block layer bounce buffering for highmem addresses
* for no good reason.
*/
static unsigned long swim3_phys_to_bus(phys_addr_t paddr)
{
return paddr + PCI_DRAM_OFFSET;
}
static phys_addr_t swim3_bio_phys(struct bio *bio)
{
return page_to_phys(bio_page(bio)) + bio_offset(bio);
}
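/*
* Fill in one DBDMA descriptor. The controller reads descriptors in
* little-endian byte order, hence the cpu_to_le* conversions; clearing
* xfer_status marks the descriptor as not yet executed (the interrupt
* handler polls it to detect completion).
*/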
static inline void init_dma(struct dbdma_cmd *cp, int cmd,
phys_addr_t paddr, int count)
{
cp->req_count = cpu_to_le16(count);
cp->command = cpu_to_le16(cmd);
cp->phy_addr = cpu_to_le32(swim3_phys_to_bus(paddr));
cp->xfer_status = 0;
}
static inline void setup_transfer(struct floppy_state *fs)
{
int n;
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_cmd *cp = fs->dma_cmd;
struct dbdma_regs __iomem *dr = fs->dma;
struct request *req = fs->cur_req;
if (blk_rq_cur_sectors(req) <= 0) {
swim3_warn("%s", "Transfer 0 sectors ?\n");
return;
}
if (rq_data_dir(req) == WRITE)
n = 1;
else {
n = fs->secpertrack - fs->req_sector + 1;
if (n > blk_rq_cur_sectors(req))
n = blk_rq_cur_sectors(req);
}
swim3_dbg(" setup xfer at sect %d (of %d) head %d for %d\n",
fs->req_sector, fs->secpertrack, fs->head, n);
fs->scount = n;
swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
out_8(&sw->sector, fs->req_sector);
out_8(&sw->nsect, n);
out_8(&sw->gap3, 0);
out_le32(&dr->cmdptr, swim3_phys_to_bus(virt_to_phys(cp)));
if (rq_data_dir(req) == WRITE) {
/* Set up 3 dma commands: write preamble, data, postamble */
init_dma(cp, OUTPUT_MORE, virt_to_phys(write_preamble),
sizeof(write_preamble));
++cp;
init_dma(cp, OUTPUT_MORE, swim3_bio_phys(req->bio), 512);
++cp;
init_dma(cp, OUTPUT_LAST, virt_to_phys(write_postamble),
sizeof(write_postamble));
} else {
init_dma(cp, INPUT_LAST, swim3_bio_phys(req->bio), n * 512);
}
++cp;
out_le16(&cp->command, DBDMA_STOP);
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
in_8(&sw->error);
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
if (rq_data_dir(req) == WRITE)
out_8(&sw->control_bis, WRITE_SECTORS);
in_8(&sw->intr);
out_le32(&dr->control, (RUN << 16) | RUN);
/* enable intr when transfer complete */
out_8(&sw->intr_enable, TRANSFER_DONE);
out_8(&sw->control_bis, DO_ACTION);
set_timeout(fs, 2*HZ, xfer_timeout); /* enable timeout */
}
static void act(struct floppy_state *fs)
{
for (;;) {
swim3_dbg(" act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
fs->state, fs->req_cyl, fs->cur_cyl);
switch (fs->state) {
case idle:
return; /* XXX shouldn't get here */
case locating:
if (swim3_readbit(fs, TRACK_ZERO)) {
swim3_dbg("%s", " locate track 0\n");
fs->cur_cyl = 0;
if (fs->req_cyl == 0)
fs->state = do_transfer;
else
fs->state = seeking;
break;
}
scan_track(fs);
return;
case seeking:
if (fs->cur_cyl < 0) {
fs->expect_cyl = -1;
fs->state = locating;
break;
}
if (fs->req_cyl == fs->cur_cyl) {
swim3_warn("%s", "Whoops, seeking 0\n");
fs->state = do_transfer;
break;
}
seek_track(fs, fs->req_cyl - fs->cur_cyl);
return;
case settling:
/* check for SEEK_COMPLETE after 30ms */
fs->settle_time = (HZ + 32) / 33;
set_timeout(fs, fs->settle_time, settle_timeout);
return;
case do_transfer:
if (fs->cur_cyl != fs->req_cyl) {
if (fs->retries > 5) {
swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
fs->req_cyl, fs->cur_cyl);
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
return;
}
fs->state = seeking;
break;
}
setup_transfer(fs);
return;
case jogging:
seek_track(fs, -5);
return;
default:
swim3_err("Unknown state %d\n", fs->state);
return;
}
}
}
static void scan_timeout(struct timer_list *t)
{
struct floppy_state *fs = from_timer(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
swim3_dbg("* scan timeout, state=%d\n", fs->state);
spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
fs->cur_cyl = -1;
if (fs->retries > 5) {
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
} else {
fs->state = jogging;
act(fs);
}
spin_unlock_irqrestore(&swim3_lock, flags);
}
static void seek_timeout(struct timer_list *t)
{
struct floppy_state *fs = from_timer(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
swim3_dbg("* seek timeout, state=%d\n", fs->state);
spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_8(&sw->control_bic, DO_SEEK);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
swim3_err("%s", "Seek timeout\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
spin_unlock_irqrestore(&swim3_lock, flags);
}
static void settle_timeout(struct timer_list *t)
{
struct floppy_state *fs = from_timer(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
swim3_dbg("* settle timeout, state=%d\n", fs->state);
spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
if (swim3_readbit(fs, SEEK_COMPLETE)) {
out_8(&sw->select, RELAX);
fs->state = locating;
act(fs);
goto unlock;
}
out_8(&sw->select, RELAX);
if (fs->settle_time < 2*HZ) {
++fs->settle_time;
set_timeout(fs, 1, settle_timeout);
goto unlock;
}
swim3_err("%s", "Seek settle timeout\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
unlock:
spin_unlock_irqrestore(&swim3_lock, flags);
}
static void xfer_timeout(struct timer_list *t)
{
struct floppy_state *fs = from_timer(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_regs __iomem *dr = fs->dma;
unsigned long flags;
int n;
swim3_dbg("* xfer timeout, state=%d\n", fs->state);
spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_le32(&dr->control, RUN << 16);
/* We must wait a bit for dbdma to stop */
for (n = 0; (in_le32(&dr->status) & ACTIVE) && n < 1000; n++)
udelay(1);
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
out_8(&sw->select, RELAX);
swim3_err("Timeout %sing sector %ld\n",
(rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
(long)blk_rq_pos(fs->cur_req));
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
spin_unlock_irqrestore(&swim3_lock, flags);
}
static irqreturn_t swim3_interrupt(int irq, void *dev_id)
{
struct floppy_state *fs = (struct floppy_state *) dev_id;
struct swim3 __iomem *sw = fs->swim3;
int intr, err, n;
int stat, resid;
struct dbdma_regs __iomem *dr;
struct dbdma_cmd *cp;
unsigned long flags;
struct request *req = fs->cur_req;
swim3_dbg("* interrupt, state=%d\n", fs->state);
spin_lock_irqsave(&swim3_lock, flags);
intr = in_8(&sw->intr);
err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
if ((intr & ERROR_INTR) && fs->state != do_transfer)
swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
fs->state, rq_data_dir(req), intr, err);
switch (fs->state) {
case locating:
if (intr & SEEN_SECTOR) {
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
del_timer(&fs->timeout);
fs->timeout_pending = 0;
if (sw->ctrack == 0xff) {
swim3_err("%s", "Seen sector but cyl=ff?\n");
fs->cur_cyl = -1;
if (fs->retries > 5) {
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
} else {
fs->state = jogging;
act(fs);
}
break;
}
fs->cur_cyl = sw->ctrack;
fs->cur_sector = sw->csect;
if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
swim3_err("Expected cyl %d, got %d\n",
fs->expect_cyl, fs->cur_cyl);
fs->state = do_transfer;
act(fs);
}
break;
case seeking:
case jogging:
if (sw->nseek == 0) {
out_8(&sw->control_bic, DO_SEEK);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
del_timer(&fs->timeout);
fs->timeout_pending = 0;
if (fs->state == seeking)
++fs->retries;
fs->state = settling;
act(fs);
}
break;
case settling:
out_8(&sw->intr_enable, 0);
del_timer(&fs->timeout);
fs->timeout_pending = 0;
act(fs);
break;
case do_transfer:
if ((intr & (ERROR_INTR | TRANSFER_DONE)) == 0)
break;
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
out_8(&sw->select, RELAX);
del_timer(&fs->timeout);
fs->timeout_pending = 0;
dr = fs->dma;
cp = fs->dma_cmd;
if (rq_data_dir(req) == WRITE)
++cp;
/*
* Check that the main data transfer has finished.
* On writing, the swim3 sometimes doesn't use
* up all the bytes of the postamble, so we can still
* see DMA active here. That doesn't matter as long
* as all the sector data has been transferred.
*/
if ((intr & ERROR_INTR) == 0 && cp->xfer_status == 0) {
/* wait a little while for DMA to complete */
for (n = 0; n < 100; ++n) {
if (cp->xfer_status != 0)
break;
udelay(1);
barrier();
}
}
/* turn off DMA */
out_le32(&dr->control, (RUN | PAUSE) << 16);
stat = le16_to_cpu(cp->xfer_status);
resid = le16_to_cpu(cp->res_count);
if (intr & ERROR_INTR) {
n = fs->scount - 1 - resid / 512;
if (n > 0) {
blk_update_request(req, 0, n << 9);
fs->req_sector += n;
}
if (fs->retries < 5) {
++fs->retries;
act(fs);
} else {
swim3_err("Error %sing block %ld (err=%x)\n",
rq_data_dir(req) == WRITE? "writ": "read",
(long)blk_rq_pos(req), err);
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
}
} else {
if ((stat & ACTIVE) == 0 || resid != 0) {
/* must have been an error */
swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
swim3_err(" state=%d, dir=%x, intr=%x, err=%x\n",
fs->state, rq_data_dir(req), intr, err);
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
break;
}
fs->retries = 0;
if (swim3_end_request(fs, 0, fs->scount << 9)) {
fs->req_sector += fs->scount;
if (fs->req_sector > fs->secpertrack) {
fs->req_sector -= fs->secpertrack;
if (++fs->head > 1) {
fs->head = 0;
++fs->req_cyl;
}
}
act(fs);
} else
fs->state = idle;
}
break;
default:
swim3_err("Don't know what to do in state %d\n", fs->state);
}
spin_unlock_irqrestore(&swim3_lock, flags);
return IRQ_HANDLED;
}
/*
static void fd_dma_interrupt(int irq, void *dev_id)
{
}
*/
/* Called under the mutex to grab exclusive access to a drive */
static int grab_drive(struct floppy_state *fs, enum swim_state state,
int interruptible)
{
unsigned long flags;
swim3_dbg("%s", "-> grab drive\n");
spin_lock_irqsave(&swim3_lock, flags);
if (fs->state != idle && fs->state != available) {
++fs->wanted;
/* this will enable irqs in order to sleep */
if (!interruptible)
wait_event_lock_irq(fs->wait,
fs->state == available,
swim3_lock);
else if (wait_event_interruptible_lock_irq(fs->wait,
fs->state == available,
swim3_lock)) {
--fs->wanted;
spin_unlock_irqrestore(&swim3_lock, flags);
return -EINTR;
}
--fs->wanted;
}
fs->state = state;
spin_unlock_irqrestore(&swim3_lock, flags);
return 0;
}
static void release_drive(struct floppy_state *fs)
{
struct request_queue *q = disks[fs->index]->queue;
unsigned long flags;
swim3_dbg("%s", "-> release drive\n");
spin_lock_irqsave(&swim3_lock, flags);
fs->state = idle;
spin_unlock_irqrestore(&swim3_lock, flags);
blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);
}
static int fd_eject(struct floppy_state *fs)
{
int err, n;
err = grab_drive(fs, ejecting, 1);
if (err)
return err;
swim3_action(fs, EJECT);
for (n = 20; n > 0; --n) {
if (signal_pending(current)) {
err = -EINTR;
break;
}
swim3_select(fs, RELAX);
schedule_timeout_interruptible(1);
if (swim3_readbit(fs, DISK_IN) == 0)
break;
}
swim3_select(fs, RELAX);
udelay(150);
fs->ejected = 1;
release_drive(fs);
return err;
}
static struct floppy_struct floppy_type =
{ 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL }; /* 7 1.44MB 3.5" */
static int floppy_locked_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long param)
{
struct floppy_state *fs = bdev->bd_disk->private_data;
int err;
if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))
return -EPERM;
if (fs->mdev->media_bay &&
check_media_bay(fs->mdev->media_bay) != MB_FD)
return -ENXIO;
switch (cmd) {
case FDEJECT:
if (fs->ref_count != 1)
return -EBUSY;
err = fd_eject(fs);
return err;
case FDGETPRM:
if (copy_to_user((void __user *) param, &floppy_type,
sizeof(struct floppy_struct)))
return -EFAULT;
return 0;
}
return -ENOTTY;
}
static int floppy_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long param)
{
int ret;
mutex_lock(&swim3_mutex);
ret = floppy_locked_ioctl(bdev, mode, cmd, param);
mutex_unlock(&swim3_mutex);
return ret;
}
static int floppy_open(struct gendisk *disk, blk_mode_t mode)
{
struct floppy_state *fs = disk->private_data;
struct swim3 __iomem *sw = fs->swim3;
int n, err = 0;
if (fs->ref_count == 0) {
if (fs->mdev->media_bay &&
check_media_bay(fs->mdev->media_bay) != MB_FD)
return -ENXIO;
out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2);
out_8(&sw->control_bic, 0xff);
out_8(&sw->mode, 0x95);
udelay(10);
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bis, DRIVE_ENABLE | INTR_ENABLE);
swim3_action(fs, MOTOR_ON);
fs->write_prot = -1;
fs->cur_cyl = -1;
for (n = 0; n < 2 * HZ; ++n) {
if (n >= HZ/30 && swim3_readbit(fs, SEEK_COMPLETE))
break;
if (signal_pending(current)) {
err = -EINTR;
break;
}
swim3_select(fs, RELAX);
schedule_timeout_interruptible(1);
}
if (err == 0 && (swim3_readbit(fs, SEEK_COMPLETE) == 0
|| swim3_readbit(fs, DISK_IN) == 0))
err = -ENXIO;
swim3_action(fs, SETMFM);
swim3_select(fs, RELAX);
} else if (fs->ref_count == -1 || mode & BLK_OPEN_EXCL)
return -EBUSY;
if (err == 0 && !(mode & BLK_OPEN_NDELAY) &&
(mode & (BLK_OPEN_READ | BLK_OPEN_WRITE))) {
if (disk_check_media_change(disk))
floppy_revalidate(disk);
if (fs->ejected)
err = -ENXIO;
}
if (err == 0 && (mode & BLK_OPEN_WRITE)) {
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot)
err = -EROFS;
}
if (err) {
if (fs->ref_count == 0) {
swim3_action(fs, MOTOR_OFF);
out_8(&sw->control_bic, DRIVE_ENABLE | INTR_ENABLE);
swim3_select(fs, RELAX);
}
return err;
}
if (mode & BLK_OPEN_EXCL)
fs->ref_count = -1;
else
++fs->ref_count;
return 0;
}
static int floppy_unlocked_open(struct gendisk *disk, blk_mode_t mode)
{
int ret;
mutex_lock(&swim3_mutex);
ret = floppy_open(disk, mode);
mutex_unlock(&swim3_mutex);
return ret;
}
static void floppy_release(struct gendisk *disk)
{
struct floppy_state *fs = disk->private_data;
struct swim3 __iomem *sw = fs->swim3;
mutex_lock(&swim3_mutex);
if (fs->ref_count > 0)
--fs->ref_count;
else if (fs->ref_count == -1)
fs->ref_count = 0;
if (fs->ref_count == 0) {
swim3_action(fs, MOTOR_OFF);
out_8(&sw->control_bic, 0xff);
swim3_select(fs, RELAX);
}
mutex_unlock(&swim3_mutex);
}
static unsigned int floppy_check_events(struct gendisk *disk,
unsigned int clearing)
{
struct floppy_state *fs = disk->private_data;
return fs->ejected ? DISK_EVENT_MEDIA_CHANGE : 0;
}
static int floppy_revalidate(struct gendisk *disk)
{
struct floppy_state *fs = disk->private_data;
struct swim3 __iomem *sw;
int ret, n;
if (fs->mdev->media_bay &&
check_media_bay(fs->mdev->media_bay) != MB_FD)
return -ENXIO;
sw = fs->swim3;
grab_drive(fs, revalidating, 0);
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bis, DRIVE_ENABLE);
swim3_action(fs, MOTOR_ON); /* necessary? */
fs->write_prot = -1;
fs->cur_cyl = -1;
mdelay(1);
for (n = HZ; n > 0; --n) {
if (swim3_readbit(fs, SEEK_COMPLETE))
break;
if (signal_pending(current))
break;
swim3_select(fs, RELAX);
schedule_timeout_interruptible(1);
}
ret = swim3_readbit(fs, SEEK_COMPLETE) == 0
|| swim3_readbit(fs, DISK_IN) == 0;
if (ret)
swim3_action(fs, MOTOR_OFF);
else {
fs->ejected = 0;
swim3_action(fs, SETMFM);
}
swim3_select(fs, RELAX);
release_drive(fs);
return ret;
}
static const struct block_device_operations floppy_fops = {
.open = floppy_unlocked_open,
.release = floppy_release,
.ioctl = floppy_ioctl,
.check_events = floppy_check_events,
};
static const struct blk_mq_ops swim3_mq_ops = {
.queue_rq = swim3_queue_rq,
};
static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
{
struct floppy_state *fs = macio_get_drvdata(mdev);
struct swim3 __iomem *sw;
if (!fs)
return;
sw = fs->swim3;
if (mb_state != MB_FD)
return;
/* Clear state */
out_8(&sw->intr_enable, 0);
in_8(&sw->intr);
in_8(&sw->error);
}
static int swim3_add_device(struct macio_dev *mdev, int index)
{
struct device_node *swim = mdev->ofdev.dev.of_node;
struct floppy_state *fs = &floppy_states[index];
int rc = -EBUSY;
fs->mdev = mdev;
fs->index = index;
/* Check & Request resources */
if (macio_resource_count(mdev) < 2) {
swim3_err("%s", "No address in device-tree\n");
return -ENXIO;
}
if (macio_irq_count(mdev) < 1) {
swim3_err("%s", "No interrupt in device-tree\n");
return -ENXIO;
}
if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
swim3_err("%s", "Can't request mmio resource\n");
return -EBUSY;
}
if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
swim3_err("%s", "Can't request dma resource\n");
macio_release_resource(mdev, 0);
return -EBUSY;
}
dev_set_drvdata(&mdev->ofdev.dev, fs);
if (mdev->media_bay == NULL)
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
fs->state = idle;
fs->swim3 = (struct swim3 __iomem *)
ioremap(macio_resource_start(mdev, 0), 0x200);
if (fs->swim3 == NULL) {
swim3_err("%s", "Couldn't map mmio registers\n");
rc = -ENOMEM;
goto out_release;
}
fs->dma = (struct dbdma_regs __iomem *)
ioremap(macio_resource_start(mdev, 1), 0x200);
if (fs->dma == NULL) {
swim3_err("%s", "Couldn't map dma registers\n");
iounmap(fs->swim3);
rc = -ENOMEM;
goto out_release;
}
fs->swim3_intr = macio_irq(mdev, 0);
fs->dma_intr = macio_irq(mdev, 1);
fs->cur_cyl = -1;
fs->cur_sector = -1;
fs->secpercyl = 36;
fs->secpertrack = 18;
fs->total_secs = 2880;
init_waitqueue_head(&fs->wait);
fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
fs->dma_cmd[1].command = cpu_to_le16(DBDMA_STOP);
if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
swim3_mb_event(mdev, MB_FD);
if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
swim3_err("%s", "Couldn't request interrupt\n");
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
goto out_unmap;
}
timer_setup(&fs->timeout, NULL, 0);
swim3_info("SWIM3 floppy controller %s\n",
mdev->media_bay ? "in media bay" : "");
return 0;
out_unmap:
iounmap(fs->dma);
iounmap(fs->swim3);
out_release:
macio_release_resource(mdev, 0);
macio_release_resource(mdev, 1);
return rc;
}
static int swim3_attach(struct macio_dev *mdev,
const struct of_device_id *match)
{
struct floppy_state *fs;
struct gendisk *disk;
int rc;
if (floppy_count >= MAX_FLOPPIES)
return -ENXIO;
if (floppy_count == 0) {
rc = register_blkdev(FLOPPY_MAJOR, "fd");
if (rc)
return rc;
}
fs = &floppy_states[floppy_count];
memset(fs, 0, sizeof(*fs));
rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2,
BLK_MQ_F_SHOULD_MERGE);
if (rc)
goto out_unregister;
disk = blk_mq_alloc_disk(&fs->tag_set, fs);
if (IS_ERR(disk)) {
rc = PTR_ERR(disk);
goto out_free_tag_set;
}
rc = swim3_add_device(mdev, floppy_count);
if (rc)
goto out_cleanup_disk;
disk->major = FLOPPY_MAJOR;
disk->first_minor = floppy_count;
disk->minors = 1;
disk->fops = &floppy_fops;
disk->private_data = fs;
disk->events = DISK_EVENT_MEDIA_CHANGE;
disk->flags |= GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
sprintf(disk->disk_name, "fd%d", floppy_count);
set_capacity(disk, 2880);
rc = add_disk(disk);
if (rc)
goto out_cleanup_disk;
disks[floppy_count++] = disk;
return 0;
out_cleanup_disk:
put_disk(disk);
out_free_tag_set:
blk_mq_free_tag_set(&fs->tag_set);
out_unregister:
if (floppy_count == 0)
unregister_blkdev(FLOPPY_MAJOR, "fd");
return rc;
}
static const struct of_device_id swim3_match[] =
{
{
.name = "swim3",
},
{
.compatible = "ohare-swim3"
},
{
.compatible = "swim3"
},
{ /* end of list */ }
};
static struct macio_driver swim3_driver =
{
.driver = {
.name = "swim3",
.of_match_table = swim3_match,
},
.probe = swim3_attach,
#ifdef CONFIG_PMAC_MEDIABAY
.mediabay_event = swim3_mb_event,
#endif
#if 0
.suspend = swim3_suspend,
.resume = swim3_resume,
#endif
};
static int swim3_init(void)
{
macio_register_driver(&swim3_driver);
return 0;
}
module_init(swim3_init)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul Mackerras");
MODULE_ALIAS_BLOCKDEV_MAJOR(FLOPPY_MAJOR);
| linux-master | drivers/block/swim3.c |
/*
* Compressed RAM block device
*
* Copyright (C) 2008, 2009, 2010 Nitin Gupta
* 2012, 2013 Minchan Kim
*
* This code is released using a dual license strategy: BSD/GPL
* You can choose the licence that better fits your requirements.
*
* Released under the terms of 3-clause BSD License
* Released under the terms of GNU General Public License Version 2.0
*
*/
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
#include <linux/debugfs.h>
#include <linux/cpuhotplug.h>
#include <linux/part_stat.h>
#include "zram_drv.h"
static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);
static int zram_major;
static const char *default_compressor = CONFIG_ZRAM_DEF_COMP;
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
/*
* Pages that compress to a size equal to or greater than this are
* stored uncompressed in memory.
*/
static size_t huge_class_size;
static const struct block_device_operations zram_devops;
static void zram_free_page(struct zram *zram, size_t index);
static int zram_read_page(struct zram *zram, struct page *page, u32 index,
struct bio *parent);
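/*
* Each table entry embeds its own lock: the ZRAM_LOCK bit of the
* entry's flags word is used as a bit spinlock, so no separate lock
* array is needed.
*/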
static int zram_slot_trylock(struct zram *zram, u32 index)
{
return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
}
static void zram_slot_lock(struct zram *zram, u32 index)
{
bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
}
static void zram_slot_unlock(struct zram *zram, u32 index)
{
bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
}
static inline bool init_done(struct zram *zram)
{
return zram->disksize;
}
static inline struct zram *dev_to_zram(struct device *dev)
{
return (struct zram *)dev_to_disk(dev)->private_data;
}
static unsigned long zram_get_handle(struct zram *zram, u32 index)
{
return zram->table[index].handle;
}
static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
{
zram->table[index].handle = handle;
}
/* flag operations require table entry bit_spin_lock() being held */
static bool zram_test_flag(struct zram *zram, u32 index,
enum zram_pageflags flag)
{
return zram->table[index].flags & BIT(flag);
}
static void zram_set_flag(struct zram *zram, u32 index,
enum zram_pageflags flag)
{
zram->table[index].flags |= BIT(flag);
}
static void zram_clear_flag(struct zram *zram, u32 index,
enum zram_pageflags flag)
{
zram->table[index].flags &= ~BIT(flag);
}
static inline void zram_set_element(struct zram *zram, u32 index,
unsigned long element)
{
zram->table[index].element = element;
}
static unsigned long zram_get_element(struct zram *zram, u32 index)
{
return zram->table[index].element;
}
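/*
* The low ZRAM_FLAG_SHIFT bits of the flags word hold the compressed
* object size; the zram_pageflags and the compression priority live
* in the bits above it (see zram_set_priority() below).
*/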
static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
}
static void zram_set_obj_size(struct zram *zram,
u32 index, size_t size)
{
unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;
zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
}
static inline bool zram_allocated(struct zram *zram, u32 index)
{
return zram_get_obj_size(zram, index) ||
zram_test_flag(zram, index, ZRAM_SAME) ||
zram_test_flag(zram, index, ZRAM_WB);
}
#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
return bvec->bv_len != PAGE_SIZE;
}
#define ZRAM_PARTIAL_IO 1
#else
static inline bool is_partial_io(struct bio_vec *bvec)
{
return false;
}
#endif
static inline void zram_set_priority(struct zram *zram, u32 index, u32 prio)
{
prio &= ZRAM_COMP_PRIORITY_MASK;
/*
* Clear the previous priority value first, in case we are further
* recompressing an already recompressed page
*/
zram->table[index].flags &= ~(ZRAM_COMP_PRIORITY_MASK <<
ZRAM_COMP_PRIORITY_BIT1);
zram->table[index].flags |= (prio << ZRAM_COMP_PRIORITY_BIT1);
}
static inline u32 zram_get_priority(struct zram *zram, u32 index)
{
u32 prio = zram->table[index].flags >> ZRAM_COMP_PRIORITY_BIT1;
return prio & ZRAM_COMP_PRIORITY_MASK;
}
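/* Lock-free monotonic maximum: only ever raise max_used_pages, via cmpxchg. */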
static inline void update_used_max(struct zram *zram,
const unsigned long pages)
{
unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);
do {
if (cur_max >= pages)
return;
} while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
&cur_max, pages));
}
static inline void zram_fill_page(void *ptr, unsigned long len,
unsigned long value)
{
WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));
memset_l(ptr, value, len / sizeof(unsigned long));
}
static bool page_same_filled(void *ptr, unsigned long *element)
{
unsigned long *page;
unsigned long val;
unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
page = (unsigned long *)ptr;
val = page[0];
if (val != page[last_pos])
return false;
for (pos = 1; pos < last_pos; pos++) {
if (val != page[pos])
return false;
}
*element = val;
return true;
}
static ssize_t initstate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u32 val;
struct zram *zram = dev_to_zram(dev);
down_read(&zram->init_lock);
val = init_done(zram);
up_read(&zram->init_lock);
return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}
static ssize_t disksize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct zram *zram = dev_to_zram(dev);
return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}
static ssize_t mem_limit_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
u64 limit;
char *tmp;
struct zram *zram = dev_to_zram(dev);
limit = memparse(buf, &tmp);
if (buf == tmp) /* no chars parsed, invalid input */
return -EINVAL;
down_write(&zram->init_lock);
zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
up_write(&zram->init_lock);
return len;
}
static ssize_t mem_used_max_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
int err;
unsigned long val;
struct zram *zram = dev_to_zram(dev);
err = kstrtoul(buf, 10, &val);
if (err || val != 0)
return -EINVAL;
down_read(&zram->init_lock);
if (init_done(zram)) {
atomic_long_set(&zram->stats.max_used_pages,
zs_get_total_pages(zram->mem_pool));
}
up_read(&zram->init_lock);
return len;
}
/*
* Mark all pages which are older than or equal to cutoff as IDLE.
* Callers should hold the zram init lock in read mode.
*/
static void mark_idle(struct zram *zram, ktime_t cutoff)
{
int is_idle = 1;
unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
int index;
for (index = 0; index < nr_pages; index++) {
/*
* Do not mark ZRAM_UNDER_WB slots as ZRAM_IDLE, to close a race.
* See the comment in writeback_store.
*/
zram_slot_lock(zram, index);
if (zram_allocated(zram, index) &&
!zram_test_flag(zram, index, ZRAM_UNDER_WB)) {
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
is_idle = !cutoff || ktime_after(cutoff, zram->table[index].ac_time);
#endif
if (is_idle)
zram_set_flag(zram, index, ZRAM_IDLE);
}
zram_slot_unlock(zram, index);
}
}
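/*
* Sysfs handler for marking slots idle. Assuming the attribute is
* exposed as "idle" (the DEVICE_ATTR wiring is outside this excerpt),
* usage would look like:
*   echo all > /sys/block/zram0/idle
*   echo 3600 > /sys/block/zram0/idle   (with CONFIG_ZRAM_MEMORY_TRACKING)
* where the number is an age cutoff in seconds.
*/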
static ssize_t idle_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct zram *zram = dev_to_zram(dev);
ktime_t cutoff_time = 0;
ssize_t rv = -EINVAL;
if (!sysfs_streq(buf, "all")) {
/*
* If it did not parse as 'all', try to treat it as an integer
* when we have memory tracking enabled.
*/
u64 age_sec;
if (IS_ENABLED(CONFIG_ZRAM_MEMORY_TRACKING) && !kstrtoull(buf, 0, &age_sec))
cutoff_time = ktime_sub(ktime_get_boottime(),
ns_to_ktime(age_sec * NSEC_PER_SEC));
else
goto out;
}
down_read(&zram->init_lock);
if (!init_done(zram))
goto out_unlock;
/*
* A cutoff_time of 0 marks everything as idle; this is the
* "all" behavior.
*/
mark_idle(zram, cutoff_time);
rv = len;
out_unlock:
up_read(&zram->init_lock);
out:
return rv;
}
#ifdef CONFIG_ZRAM_WRITEBACK
static ssize_t writeback_limit_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct zram *zram = dev_to_zram(dev);
u64 val;
ssize_t ret = -EINVAL;
if (kstrtoull(buf, 10, &val))
return ret;
down_read(&zram->init_lock);
spin_lock(&zram->wb_limit_lock);
zram->wb_limit_enable = val;
spin_unlock(&zram->wb_limit_lock);
up_read(&zram->init_lock);
ret = len;
return ret;
}
static ssize_t writeback_limit_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
bool val;
struct zram *zram = dev_to_zram(dev);
down_read(&zram->init_lock);
spin_lock(&zram->wb_limit_lock);
val = zram->wb_limit_enable;
spin_unlock(&zram->wb_limit_lock);
up_read(&zram->init_lock);
return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
static ssize_t writeback_limit_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct zram *zram = dev_to_zram(dev);
u64 val;
ssize_t ret = -EINVAL;
if (kstrtoull(buf, 10, &val))
return ret;
down_read(&zram->init_lock);
spin_lock(&zram->wb_limit_lock);
zram->bd_wb_limit = val;
spin_unlock(&zram->wb_limit_lock);
up_read(&zram->init_lock);
ret = len;
return ret;
}
static ssize_t writeback_limit_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u64 val;
struct zram *zram = dev_to_zram(dev);
down_read(&zram->init_lock);
spin_lock(&zram->wb_limit_lock);
val = zram->bd_wb_limit;
spin_unlock(&zram->wb_limit_lock);
up_read(&zram->init_lock);
return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}
static void reset_bdev(struct zram *zram)
{
struct block_device *bdev;
if (!zram->backing_dev)
return;
bdev = zram->bdev;
blkdev_put(bdev, zram);
/* hope filp_close flushes all outstanding IO */
filp_close(zram->backing_dev, NULL);
zram->backing_dev = NULL;
zram->bdev = NULL;
zram->disk->fops = &zram_devops;
kvfree(zram->bitmap);
zram->bitmap = NULL;
}
static ssize_t backing_dev_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct file *file;
struct zram *zram = dev_to_zram(dev);
char *p;
ssize_t ret;
down_read(&zram->init_lock);
file = zram->backing_dev;
if (!file) {
memcpy(buf, "none\n", 5);
up_read(&zram->init_lock);
return 5;
}
p = file_path(file, buf, PAGE_SIZE - 1);
if (IS_ERR(p)) {
ret = PTR_ERR(p);
goto out;
}
ret = strlen(p);
memmove(buf, p, ret);
buf[ret++] = '\n';
out:
up_read(&zram->init_lock);
return ret;
}
static ssize_t backing_dev_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
char *file_name;
size_t sz;
struct file *backing_dev = NULL;
struct inode *inode;
struct address_space *mapping;
unsigned int bitmap_sz;
unsigned long nr_pages, *bitmap = NULL;
struct block_device *bdev = NULL;
int err;
struct zram *zram = dev_to_zram(dev);
file_name = kmalloc(PATH_MAX, GFP_KERNEL);
if (!file_name)
return -ENOMEM;
down_write(&zram->init_lock);
if (init_done(zram)) {
pr_info("Can't setup backing device for initialized device\n");
err = -EBUSY;
goto out;
}
strscpy(file_name, buf, PATH_MAX);
/* ignore trailing newline */
sz = strlen(file_name);
if (sz > 0 && file_name[sz - 1] == '\n')
file_name[sz - 1] = 0x00;
backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
if (IS_ERR(backing_dev)) {
err = PTR_ERR(backing_dev);
backing_dev = NULL;
goto out;
}
mapping = backing_dev->f_mapping;
inode = mapping->host;
/* Only block devices are supported at the moment */
if (!S_ISBLK(inode->i_mode)) {
err = -ENOTBLK;
goto out;
}
bdev = blkdev_get_by_dev(inode->i_rdev, BLK_OPEN_READ | BLK_OPEN_WRITE,
zram, NULL);
if (IS_ERR(bdev)) {
err = PTR_ERR(bdev);
bdev = NULL;
goto out;
}
nr_pages = i_size_read(inode) >> PAGE_SHIFT;
bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
if (!bitmap) {
err = -ENOMEM;
goto out;
}
reset_bdev(zram);
zram->bdev = bdev;
zram->backing_dev = backing_dev;
zram->bitmap = bitmap;
zram->nr_pages = nr_pages;
up_write(&zram->init_lock);
pr_info("setup backing device %s\n", file_name);
kfree(file_name);
return len;
out:
kvfree(bitmap);
if (bdev)
blkdev_put(bdev, zram);
if (backing_dev)
filp_close(backing_dev, NULL);
up_write(&zram->init_lock);
kfree(file_name);
return err;
}
static unsigned long alloc_block_bdev(struct zram *zram)
{
unsigned long blk_idx = 1;
retry:
/* skip bit 0 to avoid confusion with zram.handle == 0 */
blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
if (blk_idx == zram->nr_pages)
return 0;
if (test_and_set_bit(blk_idx, zram->bitmap))
goto retry;
atomic64_inc(&zram->stats.bd_count);
return blk_idx;
}
static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
{
int was_set;
was_set = test_and_clear_bit(blk_idx, zram->bitmap);
WARN_ON_ONCE(!was_set);
atomic64_dec(&zram->stats.bd_count);
}
static void read_from_bdev_async(struct zram *zram, struct page *page,
unsigned long entry, struct bio *parent)
{
struct bio *bio;
bio = bio_alloc(zram->bdev, 1, parent->bi_opf, GFP_NOIO);
bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
__bio_add_page(bio, page, PAGE_SIZE, 0);
bio_chain(bio, parent);
submit_bio(bio);
}
#define PAGE_WB_SIG "page_index="
#define PAGE_WRITEBACK 0
#define HUGE_WRITEBACK (1<<0)
#define IDLE_WRITEBACK (1<<1)
#define INCOMPRESSIBLE_WRITEBACK (1<<2)
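/*
* The writeback modes form a bitmask, so e.g. "huge_idle" selects
* IDLE_WRITEBACK | HUGE_WRITEBACK. Assuming this store is wired up as
* a "writeback" attribute, usage would look like:
*   echo idle > /sys/block/zram0/writeback
*   echo page_index=5 > /sys/block/zram0/writeback
*/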
static ssize_t writeback_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct zram *zram = dev_to_zram(dev);
unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
unsigned long index = 0;
struct bio bio;
struct bio_vec bio_vec;
struct page *page;
ssize_t ret = len;
int mode, err;
unsigned long blk_idx = 0;
if (sysfs_streq(buf, "idle"))
mode = IDLE_WRITEBACK;
else if (sysfs_streq(buf, "huge"))
mode = HUGE_WRITEBACK;
else if (sysfs_streq(buf, "huge_idle"))
mode = IDLE_WRITEBACK | HUGE_WRITEBACK;
else if (sysfs_streq(buf, "incompressible"))
mode = INCOMPRESSIBLE_WRITEBACK;
else {
if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1))
return -EINVAL;
if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) ||
index >= nr_pages)
return -EINVAL;
nr_pages = 1;
mode = PAGE_WRITEBACK;
}
down_read(&zram->init_lock);
if (!init_done(zram)) {
ret = -EINVAL;
goto release_init_lock;
}
if (!zram->backing_dev) {
ret = -ENODEV;
goto release_init_lock;
}
page = alloc_page(GFP_KERNEL);
if (!page) {
ret = -ENOMEM;
goto release_init_lock;
}
for (; nr_pages != 0; index++, nr_pages--) {
spin_lock(&zram->wb_limit_lock);
if (zram->wb_limit_enable && !zram->bd_wb_limit) {
spin_unlock(&zram->wb_limit_lock);
ret = -EIO;
break;
}
spin_unlock(&zram->wb_limit_lock);
if (!blk_idx) {
blk_idx = alloc_block_bdev(zram);
if (!blk_idx) {
ret = -ENOSPC;
break;
}
}
zram_slot_lock(zram, index);
if (!zram_allocated(zram, index))
goto next;
if (zram_test_flag(zram, index, ZRAM_WB) ||
zram_test_flag(zram, index, ZRAM_SAME) ||
zram_test_flag(zram, index, ZRAM_UNDER_WB))
goto next;
if (mode & IDLE_WRITEBACK &&
!zram_test_flag(zram, index, ZRAM_IDLE))
goto next;
if (mode & HUGE_WRITEBACK &&
!zram_test_flag(zram, index, ZRAM_HUGE))
goto next;
if (mode & INCOMPRESSIBLE_WRITEBACK &&
!zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
goto next;
/*
* Clearing ZRAM_UNDER_WB is the caller's duty.
* IOW, zram_free_page never clears it.
*/
zram_set_flag(zram, index, ZRAM_UNDER_WB);
/* Needed for the hugepage writeback race */
zram_set_flag(zram, index, ZRAM_IDLE);
zram_slot_unlock(zram, index);
if (zram_read_page(zram, page, index, NULL)) {
zram_slot_lock(zram, index);
zram_clear_flag(zram, index, ZRAM_UNDER_WB);
zram_clear_flag(zram, index, ZRAM_IDLE);
zram_slot_unlock(zram, index);
continue;
}
bio_init(&bio, zram->bdev, &bio_vec, 1,
REQ_OP_WRITE | REQ_SYNC);
bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
__bio_add_page(&bio, page, PAGE_SIZE, 0);
/*
* XXX: Single-page IO is inefficient for writes, but it is
* not bad as a starting point.
*/
err = submit_bio_wait(&bio);
if (err) {
zram_slot_lock(zram, index);
zram_clear_flag(zram, index, ZRAM_UNDER_WB);
zram_clear_flag(zram, index, ZRAM_IDLE);
zram_slot_unlock(zram, index);
/*
* BIO errors are not fatal, we continue and simply
* attempt to writeback the remaining objects (pages).
* At the same time we need to signal user-space that
* some writes (at least one, but also could be all of
* them) were not successful and we do so by returning
* the most recent BIO error.
*/
ret = err;
continue;
}
atomic64_inc(&zram->stats.bd_writes);
/*
* We released zram_slot_lock, so we need to check whether the slot
* changed. If the slot was simply freed, zram_allocated catches
* that easily.
* A subtler case is the slot being freed, reallocated and marked
* ZRAM_IDLE again. To close that race, idle_store does not mark a
* slot ZRAM_IDLE once it finds it ZRAM_UNDER_WB; thus we can close
* the race by checking the ZRAM_IDLE bit.
*/
zram_slot_lock(zram, index);
if (!zram_allocated(zram, index) ||
!zram_test_flag(zram, index, ZRAM_IDLE)) {
zram_clear_flag(zram, index, ZRAM_UNDER_WB);
zram_clear_flag(zram, index, ZRAM_IDLE);
goto next;
}
zram_free_page(zram, index);
zram_clear_flag(zram, index, ZRAM_UNDER_WB);
zram_set_flag(zram, index, ZRAM_WB);
zram_set_element(zram, index, blk_idx);
blk_idx = 0;
atomic64_inc(&zram->stats.pages_stored);
spin_lock(&zram->wb_limit_lock);
if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
spin_unlock(&zram->wb_limit_lock);
next:
zram_slot_unlock(zram, index);
}
if (blk_idx)
free_block_bdev(zram, blk_idx);
__free_page(page);
release_init_lock:
up_read(&zram->init_lock);
return ret;
}
struct zram_work {
struct work_struct work;
struct zram *zram;
unsigned long entry;
struct page *page;
int error;
};
static void zram_sync_read(struct work_struct *work)
{
struct zram_work *zw = container_of(work, struct zram_work, work);
struct bio_vec bv;
struct bio bio;
bio_init(&bio, zw->zram->bdev, &bv, 1, REQ_OP_READ);
bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9);
__bio_add_page(&bio, zw->page, PAGE_SIZE, 0);
zw->error = submit_bio_wait(&bio);
}
/*
* The block layer wants only one ->submit_bio to be active at a time,
* so chaining IO to a parent IO in the same context would deadlock.
* To avoid that, do the synchronous read in a worker thread context.
*/
static int read_from_bdev_sync(struct zram *zram, struct page *page,
unsigned long entry)
{
struct zram_work work;
work.page = page;
work.zram = zram;
work.entry = entry;
INIT_WORK_ONSTACK(&work.work, zram_sync_read);
queue_work(system_unbound_wq, &work.work);
flush_work(&work.work);
destroy_work_on_stack(&work.work);
return work.error;
}
static int read_from_bdev(struct zram *zram, struct page *page,
unsigned long entry, struct bio *parent)
{
atomic64_inc(&zram->stats.bd_reads);
if (!parent) {
if (WARN_ON_ONCE(!IS_ENABLED(ZRAM_PARTIAL_IO)))
return -EIO;
return read_from_bdev_sync(zram, page, entry);
}
read_from_bdev_async(zram, page, entry, parent);
return 0;
}
#else
static inline void reset_bdev(struct zram *zram) {};
static int read_from_bdev(struct zram *zram, struct page *page,
unsigned long entry, struct bio *parent)
{
return -EIO;
}
static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {};
#endif
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
static struct dentry *zram_debugfs_root;
static void zram_debugfs_create(void)
{
zram_debugfs_root = debugfs_create_dir("zram", NULL);
}
static void zram_debugfs_destroy(void)
{
debugfs_remove_recursive(zram_debugfs_root);
}
static void zram_accessed(struct zram *zram, u32 index)
{
zram_clear_flag(zram, index, ZRAM_IDLE);
zram->table[index].ac_time = ktime_get_boottime();
}
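/*
* Dump per-slot state, one line per allocated slot:
*   <index> <access time> <flags>
* where the flags are 's' (same-filled), 'w' (written back),
* 'h' (huge), 'i' (idle), 'r' (recompressed, i.e. non-default
* priority) and 'n' (incompressible), with '.' for a clear flag.
*/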
static ssize_t read_block_state(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
char *kbuf;
ssize_t index, written = 0;
struct zram *zram = file->private_data;
unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
struct timespec64 ts;
kbuf = kvmalloc(count, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
down_read(&zram->init_lock);
if (!init_done(zram)) {
up_read(&zram->init_lock);
kvfree(kbuf);
return -EINVAL;
}
for (index = *ppos; index < nr_pages; index++) {
int copied;
zram_slot_lock(zram, index);
if (!zram_allocated(zram, index))
goto next;
ts = ktime_to_timespec64(zram->table[index].ac_time);
copied = snprintf(kbuf + written, count,
"%12zd %12lld.%06lu %c%c%c%c%c%c\n",
index, (s64)ts.tv_sec,
ts.tv_nsec / NSEC_PER_USEC,
zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.',
zram_get_priority(zram, index) ? 'r' : '.',
zram_test_flag(zram, index,
ZRAM_INCOMPRESSIBLE) ? 'n' : '.');
if (count <= copied) {
zram_slot_unlock(zram, index);
break;
}
written += copied;
count -= copied;
next:
zram_slot_unlock(zram, index);
*ppos += 1;
}
up_read(&zram->init_lock);
if (copy_to_user(buf, kbuf, written))
written = -EFAULT;
kvfree(kbuf);
return written;
}
static const struct file_operations proc_zram_block_state_op = {
.open = simple_open,
.read = read_block_state,
.llseek = default_llseek,
};
static void zram_debugfs_register(struct zram *zram)
{
if (!zram_debugfs_root)
return;
zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name,
zram_debugfs_root);
debugfs_create_file("block_state", 0400, zram->debugfs_dir,
zram, &proc_zram_block_state_op);
}
static void zram_debugfs_unregister(struct zram *zram)
{
debugfs_remove_recursive(zram->debugfs_dir);
}
#else
static void zram_debugfs_create(void) {};
static void zram_debugfs_destroy(void) {};
static void zram_accessed(struct zram *zram, u32 index)
{
zram_clear_flag(zram, index, ZRAM_IDLE);
};
static void zram_debugfs_register(struct zram *zram) {};
static void zram_debugfs_unregister(struct zram *zram) {};
#endif
/*
* We switched to per-cpu streams and this attr is not needed anymore.
* However, we will keep it around for some time, because:
* a) we may revert per-cpu streams in the future
* b) it's visible to user space and we need to follow our 2-year
* retirement rule; but we already have a number of 'soon to be
* altered' attrs, so max_comp_streams needs to wait for the next
* layoff cycle.
*/
static ssize_t max_comp_streams_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}
static ssize_t max_comp_streams_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
return len;
}
static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)
{
/* Do not free statically defined compression algorithms */
if (zram->comp_algs[prio] != default_compressor)
kfree(zram->comp_algs[prio]);
zram->comp_algs[prio] = alg;
}
static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio, char *buf)
{
ssize_t sz;
down_read(&zram->init_lock);
sz = zcomp_available_show(zram->comp_algs[prio], buf);
up_read(&zram->init_lock);
return sz;
}
static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
{
char *compressor;
size_t sz;
sz = strlen(buf);
if (sz >= CRYPTO_MAX_ALG_NAME)
return -E2BIG;
compressor = kstrdup(buf, GFP_KERNEL);
if (!compressor)
return -ENOMEM;
/* ignore trailing newline */
if (sz > 0 && compressor[sz - 1] == '\n')
compressor[sz - 1] = 0x00;
if (!zcomp_available_algorithm(compressor)) {
kfree(compressor);
return -EINVAL;
}
down_write(&zram->init_lock);
if (init_done(zram)) {
up_write(&zram->init_lock);
kfree(compressor);
pr_info("Can't change algorithm for initialized device\n");
return -EBUSY;
}
comp_algorithm_set(zram, prio, compressor);
up_write(&zram->init_lock);
return 0;
}
static ssize_t comp_algorithm_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zram *zram = dev_to_zram(dev);
return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf);
}
static ssize_t comp_algorithm_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
{
struct zram *zram = dev_to_zram(dev);
int ret;
ret = __comp_algorithm_store(zram, ZRAM_PRIMARY_COMP, buf);
return ret ? ret : len;
}
#ifdef CONFIG_ZRAM_MULTI_COMP
static ssize_t recomp_algorithm_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zram *zram = dev_to_zram(dev);
ssize_t sz = 0;
u32 prio;
for (prio = ZRAM_SECONDARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
if (!zram->comp_algs[prio])
continue;
sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, "#%d: ", prio);
sz += __comp_algorithm_show(zram, prio, buf + sz);
}
return sz;
}
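/*
* Parses "key=value" pairs; "algo" is required and "priority" defaults
* to ZRAM_SECONDARY_COMP. Assuming the attribute name matches the
* store function, usage would look like:
*   echo "algo=<name> priority=<prio>" > /sys/block/zram0/recomp_algorithm
*/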
static ssize_t recomp_algorithm_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
{
struct zram *zram = dev_to_zram(dev);
int prio = ZRAM_SECONDARY_COMP;
char *args, *param, *val;
char *alg = NULL;
int ret;
args = skip_spaces(buf);
while (*args) {
args = next_arg(args, &param, &val);
if (!val || !*val)
return -EINVAL;
if (!strcmp(param, "algo")) {
alg = val;
continue;
}
if (!strcmp(param, "priority")) {
ret = kstrtoint(val, 10, &prio);
if (ret)
return ret;
continue;
}
}
if (!alg)
return -EINVAL;
if (prio < ZRAM_SECONDARY_COMP || prio >= ZRAM_MAX_COMPS)
return -EINVAL;
ret = __comp_algorithm_store(zram, prio, alg);
return ret ? ret : len;
}
#endif
static ssize_t compact_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct zram *zram = dev_to_zram(dev);
down_read(&zram->init_lock);
if (!init_done(zram)) {
up_read(&zram->init_lock);
return -EINVAL;
}
zs_compact(zram->mem_pool);
up_read(&zram->init_lock);
return len;
}
static ssize_t io_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct zram *zram = dev_to_zram(dev);
ssize_t ret;
down_read(&zram->init_lock);
ret = scnprintf(buf, PAGE_SIZE,
"%8llu %8llu 0 %8llu\n",
(u64)atomic64_read(&zram->stats.failed_reads),
(u64)atomic64_read(&zram->stats.failed_writes),
(u64)atomic64_read(&zram->stats.notify_free));
up_read(&zram->init_lock);
return ret;
}
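/*
* mm_stat columns, matching the scnprintf() below: orig_data_size,
* compr_data_size, mem_used_total, mem_limit, mem_used_max,
* same_pages, pages_compacted, huge_pages, huge_pages_since
* (sizes in bytes, the rest are page counts).
*/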
static ssize_t mm_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct zram *zram = dev_to_zram(dev);
struct zs_pool_stats pool_stats;
u64 orig_size, mem_used = 0;
long max_used;
ssize_t ret;
memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));
down_read(&zram->init_lock);
if (init_done(zram)) {
mem_used = zs_get_total_pages(zram->mem_pool);
zs_pool_stats(zram->mem_pool, &pool_stats);
}
orig_size = atomic64_read(&zram->stats.pages_stored);
max_used = atomic_long_read(&zram->stats.max_used_pages);
ret = scnprintf(buf, PAGE_SIZE,
"%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu %8llu\n",
orig_size << PAGE_SHIFT,
(u64)atomic64_read(&zram->stats.compr_data_size),
mem_used << PAGE_SHIFT,
zram->limit_pages << PAGE_SHIFT,
max_used << PAGE_SHIFT,
(u64)atomic64_read(&zram->stats.same_pages),
atomic_long_read(&pool_stats.pages_compacted),
(u64)atomic64_read(&zram->stats.huge_pages),
(u64)atomic64_read(&zram->stats.huge_pages_since));
up_read(&zram->init_lock);
return ret;
}
#ifdef CONFIG_ZRAM_WRITEBACK
#define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12)))
static ssize_t bd_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct zram *zram = dev_to_zram(dev);
ssize_t ret;
down_read(&zram->init_lock);
ret = scnprintf(buf, PAGE_SIZE,
"%8llu %8llu %8llu\n",
FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
up_read(&zram->init_lock);
return ret;
}
#endif
static ssize_t debug_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int version = 1;
struct zram *zram = dev_to_zram(dev);
ssize_t ret;
down_read(&zram->init_lock);
ret = scnprintf(buf, PAGE_SIZE,
"version: %d\n%8llu %8llu\n",
version,
(u64)atomic64_read(&zram->stats.writestall),
(u64)atomic64_read(&zram->stats.miss_free));
up_read(&zram->init_lock);
return ret;
}
static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RO(bd_stat);
#endif
static DEVICE_ATTR_RO(debug_stat);
static void zram_meta_free(struct zram *zram, u64 disksize)
{
size_t num_pages = disksize >> PAGE_SHIFT;
size_t index;
/* Free all pages that are still in this zram device */
for (index = 0; index < num_pages; index++)
zram_free_page(zram, index);
zs_destroy_pool(zram->mem_pool);
vfree(zram->table);
}
static bool zram_meta_alloc(struct zram *zram, u64 disksize)
{
size_t num_pages;
num_pages = disksize >> PAGE_SHIFT;
zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
if (!zram->table)
return false;
zram->mem_pool = zs_create_pool(zram->disk->disk_name);
if (!zram->mem_pool) {
vfree(zram->table);
return false;
}
if (!huge_class_size)
huge_class_size = zs_huge_class_size(zram->mem_pool);
return true;
}
/*
* To protect against concurrent access to the same index entry, the
* caller should hold that table entry's bit_spinlock to indicate
* that the entry is being accessed.
*/
static void zram_free_page(struct zram *zram, size_t index)
{
unsigned long handle;
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
zram->table[index].ac_time = 0;
#endif
if (zram_test_flag(zram, index, ZRAM_IDLE))
zram_clear_flag(zram, index, ZRAM_IDLE);
if (zram_test_flag(zram, index, ZRAM_HUGE)) {
zram_clear_flag(zram, index, ZRAM_HUGE);
atomic64_dec(&zram->stats.huge_pages);
}
if (zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE);
zram_set_priority(zram, index, 0);
if (zram_test_flag(zram, index, ZRAM_WB)) {
zram_clear_flag(zram, index, ZRAM_WB);
free_block_bdev(zram, zram_get_element(zram, index));
goto out;
}
/*
* No memory is allocated for same element filled pages.
* Simply clear same page flag.
*/
if (zram_test_flag(zram, index, ZRAM_SAME)) {
zram_clear_flag(zram, index, ZRAM_SAME);
atomic64_dec(&zram->stats.same_pages);
goto out;
}
handle = zram_get_handle(zram, index);
if (!handle)
return;
zs_free(zram->mem_pool, handle);
atomic64_sub(zram_get_obj_size(zram, index),
&zram->stats.compr_data_size);
out:
atomic64_dec(&zram->stats.pages_stored);
zram_set_handle(zram, index, 0);
zram_set_obj_size(zram, index, 0);
WARN_ON_ONCE(zram->table[index].flags &
~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
}
/*
* Reads (decompresses if needed) a page from zspool (zsmalloc).
* Corresponding ZRAM slot should be locked.
*/
static int zram_read_from_zspool(struct zram *zram, struct page *page,
u32 index)
{
struct zcomp_strm *zstrm;
unsigned long handle;
unsigned int size;
void *src, *dst;
u32 prio;
int ret;
handle = zram_get_handle(zram, index);
if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
unsigned long value;
void *mem;
value = handle ? zram_get_element(zram, index) : 0;
mem = kmap_atomic(page);
zram_fill_page(mem, PAGE_SIZE, value);
kunmap_atomic(mem);
return 0;
}
size = zram_get_obj_size(zram, index);
if (size != PAGE_SIZE) {
prio = zram_get_priority(zram, index);
zstrm = zcomp_stream_get(zram->comps[prio]);
}
src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
if (size == PAGE_SIZE) {
dst = kmap_atomic(page);
memcpy(dst, src, PAGE_SIZE);
kunmap_atomic(dst);
ret = 0;
} else {
dst = kmap_atomic(page);
ret = zcomp_decompress(zstrm, src, size, dst);
kunmap_atomic(dst);
zcomp_stream_put(zram->comps[prio]);
}
zs_unmap_object(zram->mem_pool, handle);
return ret;
}
static int zram_read_page(struct zram *zram, struct page *page, u32 index,
struct bio *parent)
{
int ret;
zram_slot_lock(zram, index);
if (!zram_test_flag(zram, index, ZRAM_WB)) {
/* Slot should be locked throughout the function call */
ret = zram_read_from_zspool(zram, page, index);
zram_slot_unlock(zram, index);
} else {
/*
* The slot should be unlocked before reading from the backing
* device.
*/
zram_slot_unlock(zram, index);
ret = read_from_bdev(zram, page, zram_get_element(zram, index),
parent);
}
/* Should NEVER happen. Return bio error if it does. */
if (WARN_ON(ret < 0))
pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
return ret;
}
/*
* Use a temporary buffer to decompress the page, as the decompressor
* always expects a full page for the output.
*/
static int zram_bvec_read_partial(struct zram *zram, struct bio_vec *bvec,
u32 index, int offset)
{
struct page *page = alloc_page(GFP_NOIO);
int ret;
if (!page)
return -ENOMEM;
ret = zram_read_page(zram, page, index, NULL);
if (likely(!ret))
memcpy_to_bvec(bvec, page_address(page) + offset);
__free_page(page);
return ret;
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
u32 index, int offset, struct bio *bio)
{
if (is_partial_io(bvec))
return zram_bvec_read_partial(zram, bvec, index, offset);
return zram_read_page(zram, bvec->bv_page, index, bio);
}
static int zram_write_page(struct zram *zram, struct page *page, u32 index)
{
int ret = 0;
unsigned long alloced_pages;
unsigned long handle = -ENOMEM;
unsigned int comp_len = 0;
void *src, *dst, *mem;
struct zcomp_strm *zstrm;
unsigned long element = 0;
enum zram_pageflags flags = 0;
mem = kmap_atomic(page);
if (page_same_filled(mem, &element)) {
kunmap_atomic(mem);
/* Free memory associated with this sector now. */
flags = ZRAM_SAME;
atomic64_inc(&zram->stats.same_pages);
goto out;
}
kunmap_atomic(mem);
compress_again:
zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
src = kmap_atomic(page);
ret = zcomp_compress(zstrm, src, &comp_len);
kunmap_atomic(src);
if (unlikely(ret)) {
zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
pr_err("Compression failed! err=%d\n", ret);
zs_free(zram->mem_pool, handle);
return ret;
}
if (comp_len >= huge_class_size)
comp_len = PAGE_SIZE;
/*
* Handle allocation has 2 paths:
* a) the fast path is executed with preemption disabled (for
* per-cpu streams) and has the __GFP_DIRECT_RECLAIM bit clear,
* since we can't sleep;
* b) the slow path enables preemption and attempts to allocate
* the page with the __GFP_DIRECT_RECLAIM bit set. We have to
* put the per-cpu compression stream and, thus, re-do the
* compression once the handle is allocated.
*
* If we have a non-error handle here then we are coming from
* the slow path and the handle has already been allocated.
*/
if (IS_ERR_VALUE(handle))
handle = zs_malloc(zram->mem_pool, comp_len,
__GFP_KSWAPD_RECLAIM |
__GFP_NOWARN |
__GFP_HIGHMEM |
__GFP_MOVABLE);
if (IS_ERR_VALUE(handle)) {
zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
atomic64_inc(&zram->stats.writestall);
handle = zs_malloc(zram->mem_pool, comp_len,
GFP_NOIO | __GFP_HIGHMEM |
__GFP_MOVABLE);
if (IS_ERR_VALUE(handle))
return PTR_ERR((void *)handle);
if (comp_len != PAGE_SIZE)
goto compress_again;
/*
* If the page is not compressible, we must re-acquire the
* per-cpu stream before executing the code below. The
* zcomp_stream_get() call disables CPU hotplug and grabs the
* zstrm buffer back, so that the dereferences of the zstrm
* variable below remain valid.
*/
zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
}
alloced_pages = zs_get_total_pages(zram->mem_pool);
update_used_max(zram, alloced_pages);
if (zram->limit_pages && alloced_pages > zram->limit_pages) {
zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
zs_free(zram->mem_pool, handle);
return -ENOMEM;
}
dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
src = zstrm->buffer;
if (comp_len == PAGE_SIZE)
src = kmap_atomic(page);
memcpy(dst, src, comp_len);
if (comp_len == PAGE_SIZE)
kunmap_atomic(src);
zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
zs_unmap_object(zram->mem_pool, handle);
atomic64_add(comp_len, &zram->stats.compr_data_size);
out:
/*
* Free memory associated with this sector
* before overwriting unused sectors.
*/
zram_slot_lock(zram, index);
zram_free_page(zram, index);
if (comp_len == PAGE_SIZE) {
zram_set_flag(zram, index, ZRAM_HUGE);
atomic64_inc(&zram->stats.huge_pages);
atomic64_inc(&zram->stats.huge_pages_since);
}
if (flags) {
zram_set_flag(zram, index, flags);
zram_set_element(zram, index, element);
} else {
zram_set_handle(zram, index, handle);
zram_set_obj_size(zram, index, comp_len);
}
zram_slot_unlock(zram, index);
/* Update stats */
atomic64_inc(&zram->stats.pages_stored);
return ret;
}
/*
* This is a partial IO. Read the full page before writing the changes.
*/
static int zram_bvec_write_partial(struct zram *zram, struct bio_vec *bvec,
u32 index, int offset, struct bio *bio)
{
struct page *page = alloc_page(GFP_NOIO);
int ret;
if (!page)
return -ENOMEM;
ret = zram_read_page(zram, page, index, bio);
if (!ret) {
memcpy_from_bvec(page_address(page) + offset, bvec);
ret = zram_write_page(zram, page, index);
}
__free_page(page);
return ret;
}
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
u32 index, int offset, struct bio *bio)
{
if (is_partial_io(bvec))
return zram_bvec_write_partial(zram, bvec, index, offset, bio);
return zram_write_page(zram, bvec->bv_page, index);
}
#ifdef CONFIG_ZRAM_MULTI_COMP
/*
* This function will decompress (unless it's ZRAM_HUGE) the page and then
* attempt to compress it using the provided compression algorithm priority
* (which is potentially more effective).
*
* Corresponding ZRAM slot should be locked.
*/
static int zram_recompress(struct zram *zram, u32 index, struct page *page,
u32 threshold, u32 prio, u32 prio_max)
{
struct zcomp_strm *zstrm = NULL;
unsigned long handle_old;
unsigned long handle_new;
unsigned int comp_len_old;
unsigned int comp_len_new;
unsigned int class_index_old;
unsigned int class_index_new;
u32 num_recomps = 0;
void *src, *dst;
int ret;
handle_old = zram_get_handle(zram, index);
if (!handle_old)
return -EINVAL;
comp_len_old = zram_get_obj_size(zram, index);
/*
* Do not recompress objects that are already "small enough".
*/
if (comp_len_old < threshold)
return 0;
ret = zram_read_from_zspool(zram, page, index);
if (ret)
return ret;
class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old);
/*
* Iterate the secondary comp algorithms list (in order of priority)
* and try to recompress the page.
*/
for (; prio < prio_max; prio++) {
if (!zram->comps[prio])
continue;
/*
* Skip if the object is already re-compressed with a higher
* priority algorithm (or same algorithm).
*/
if (prio <= zram_get_priority(zram, index))
continue;
num_recomps++;
zstrm = zcomp_stream_get(zram->comps[prio]);
src = kmap_atomic(page);
ret = zcomp_compress(zstrm, src, &comp_len_new);
kunmap_atomic(src);
if (ret) {
zcomp_stream_put(zram->comps[prio]);
return ret;
}
class_index_new = zs_lookup_class_index(zram->mem_pool,
comp_len_new);
/* Continue until we make progress */
if (class_index_new >= class_index_old ||
(threshold && comp_len_new >= threshold)) {
zcomp_stream_put(zram->comps[prio]);
continue;
}
/* Recompression was successful so break out */
break;
}
/*
* We did not try to recompress, e.g. when we have only one
* secondary algorithm and the page is already recompressed
* using that algorithm.
*/
if (!zstrm)
return 0;
if (class_index_new >= class_index_old) {
/*
* Secondary algorithms failed to re-compress the page
* in a way that would save memory, mark the object as
* incompressible so that we will not try to compress
* it again.
*
* We need to make sure that all secondary algorithms have
* failed, so we test if the number of recompressions matches
* the number of active secondary algorithms.
*/
if (num_recomps == zram->num_active_comps - 1)
zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
return 0;
}
/* Successful recompression but above threshold */
if (threshold && comp_len_new >= threshold)
return 0;
/*
* No direct reclaim (slow path) for handle allocation and no
* re-compression attempt (unlike in zram_bvec_write()) since
* we already have stored that object in zsmalloc. If we cannot
* alloc memory for recompressed object then we bail out and
* simply keep the old (existing) object in zsmalloc.
*/
handle_new = zs_malloc(zram->mem_pool, comp_len_new,
__GFP_KSWAPD_RECLAIM |
__GFP_NOWARN |
__GFP_HIGHMEM |
__GFP_MOVABLE);
if (IS_ERR_VALUE(handle_new)) {
zcomp_stream_put(zram->comps[prio]);
return PTR_ERR((void *)handle_new);
}
dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO);
memcpy(dst, zstrm->buffer, comp_len_new);
zcomp_stream_put(zram->comps[prio]);
zs_unmap_object(zram->mem_pool, handle_new);
zram_free_page(zram, index);
zram_set_handle(zram, index, handle_new);
zram_set_obj_size(zram, index, comp_len_new);
zram_set_priority(zram, index, prio);
atomic64_add(comp_len_new, &zram->stats.compr_data_size);
atomic64_inc(&zram->stats.pages_stored);
return 0;
}
#define RECOMPRESS_IDLE (1 << 0)
#define RECOMPRESS_HUGE (1 << 1)
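/*
 * Example usage of the recompress attribute parsed below (illustrative,
 * not from the source; the threshold and algorithm name are made up):
 *
 *	echo "type=idle threshold=3000 algo=zstd" > /sys/block/zram0/recompress
 */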
static ssize_t recompress_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
u32 prio = ZRAM_SECONDARY_COMP, prio_max = ZRAM_MAX_COMPS;
struct zram *zram = dev_to_zram(dev);
unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
char *args, *param, *val, *algo = NULL;
u32 mode = 0, threshold = 0;
unsigned long index;
struct page *page;
ssize_t ret;
args = skip_spaces(buf);
while (*args) {
args = next_arg(args, &param, &val);
if (!val || !*val)
return -EINVAL;
if (!strcmp(param, "type")) {
if (!strcmp(val, "idle"))
mode = RECOMPRESS_IDLE;
if (!strcmp(val, "huge"))
mode = RECOMPRESS_HUGE;
if (!strcmp(val, "huge_idle"))
mode = RECOMPRESS_IDLE | RECOMPRESS_HUGE;
continue;
}
if (!strcmp(param, "threshold")) {
/*
* We will re-compress only idle objects equal to or
* greater in size than the watermark.
*/
ret = kstrtouint(val, 10, &threshold);
if (ret)
return ret;
continue;
}
if (!strcmp(param, "algo")) {
algo = val;
continue;
}
}
if (threshold >= huge_class_size)
return -EINVAL;
down_read(&zram->init_lock);
if (!init_done(zram)) {
ret = -EINVAL;
goto release_init_lock;
}
if (algo) {
bool found = false;
for (; prio < ZRAM_MAX_COMPS; prio++) {
if (!zram->comp_algs[prio])
continue;
if (!strcmp(zram->comp_algs[prio], algo)) {
prio_max = min(prio + 1, ZRAM_MAX_COMPS);
found = true;
break;
}
}
if (!found) {
ret = -EINVAL;
goto release_init_lock;
}
}
page = alloc_page(GFP_KERNEL);
if (!page) {
ret = -ENOMEM;
goto release_init_lock;
}
ret = len;
for (index = 0; index < nr_pages; index++) {
int err = 0;
zram_slot_lock(zram, index);
if (!zram_allocated(zram, index))
goto next;
if (mode & RECOMPRESS_IDLE &&
!zram_test_flag(zram, index, ZRAM_IDLE))
goto next;
if (mode & RECOMPRESS_HUGE &&
!zram_test_flag(zram, index, ZRAM_HUGE))
goto next;
if (zram_test_flag(zram, index, ZRAM_WB) ||
zram_test_flag(zram, index, ZRAM_UNDER_WB) ||
zram_test_flag(zram, index, ZRAM_SAME) ||
zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
goto next;
err = zram_recompress(zram, index, page, threshold,
prio, prio_max);
next:
zram_slot_unlock(zram, index);
if (err) {
ret = err;
break;
}
cond_resched();
}
__free_page(page);
release_init_lock:
up_read(&zram->init_lock);
return ret;
}
#endif
static void zram_bio_discard(struct zram *zram, struct bio *bio)
{
size_t n = bio->bi_iter.bi_size;
u32 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
u32 offset = (bio->bi_iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
SECTOR_SHIFT;
/*
* zram manages data in physical block size units. Because the logical
* block size isn't identical to the physical block size on some
* architectures, we could get a discard request pointing to a specific
* offset within a certain physical block. Although we could handle such
* a request by reading that physical block, decompressing, partially
* zeroing, re-compressing and re-storing it, this isn't reasonable
* because our intent with a discard request is to save memory. So
* skipping this logical block is appropriate here.
*/
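/*
 * Worked example (illustrative, assuming 4K pages): a discard starting
 * at sector 9 gives index = 9 >> 3 = 1 and offset = (9 & 7) << 9 = 512,
 * so the partial head below is skipped and freeing starts at index 2.
 */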
if (offset) {
if (n <= (PAGE_SIZE - offset))
return;
n -= (PAGE_SIZE - offset);
index++;
}
while (n >= PAGE_SIZE) {
zram_slot_lock(zram, index);
zram_free_page(zram, index);
zram_slot_unlock(zram, index);
atomic64_inc(&zram->stats.notify_free);
index++;
n -= PAGE_SIZE;
}
bio_endio(bio);
}
static void zram_bio_read(struct zram *zram, struct bio *bio)
{
unsigned long start_time = bio_start_io_acct(bio);
struct bvec_iter iter = bio->bi_iter;
do {
u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
SECTOR_SHIFT;
struct bio_vec bv = bio_iter_iovec(bio, iter);
bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
atomic64_inc(&zram->stats.failed_reads);
bio->bi_status = BLK_STS_IOERR;
break;
}
flush_dcache_page(bv.bv_page);
zram_slot_lock(zram, index);
zram_accessed(zram, index);
zram_slot_unlock(zram, index);
bio_advance_iter_single(bio, &iter, bv.bv_len);
} while (iter.bi_size);
bio_end_io_acct(bio, start_time);
bio_endio(bio);
}
static void zram_bio_write(struct zram *zram, struct bio *bio)
{
unsigned long start_time = bio_start_io_acct(bio);
struct bvec_iter iter = bio->bi_iter;
do {
u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
SECTOR_SHIFT;
struct bio_vec bv = bio_iter_iovec(bio, iter);
bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
atomic64_inc(&zram->stats.failed_writes);
bio->bi_status = BLK_STS_IOERR;
break;
}
zram_slot_lock(zram, index);
zram_accessed(zram, index);
zram_slot_unlock(zram, index);
bio_advance_iter_single(bio, &iter, bv.bv_len);
} while (iter.bi_size);
bio_end_io_acct(bio, start_time);
bio_endio(bio);
}
/*
* Handler function for all zram I/O requests.
*/
static void zram_submit_bio(struct bio *bio)
{
struct zram *zram = bio->bi_bdev->bd_disk->private_data;
switch (bio_op(bio)) {
case REQ_OP_READ:
zram_bio_read(zram, bio);
break;
case REQ_OP_WRITE:
zram_bio_write(zram, bio);
break;
case REQ_OP_DISCARD:
case REQ_OP_WRITE_ZEROES:
zram_bio_discard(zram, bio);
break;
default:
WARN_ON_ONCE(1);
bio_endio(bio);
}
}
static void zram_slot_free_notify(struct block_device *bdev,
unsigned long index)
{
struct zram *zram;
zram = bdev->bd_disk->private_data;
atomic64_inc(&zram->stats.notify_free);
if (!zram_slot_trylock(zram, index)) {
atomic64_inc(&zram->stats.miss_free);
return;
}
zram_free_page(zram, index);
zram_slot_unlock(zram, index);
}
static void zram_destroy_comps(struct zram *zram)
{
u32 prio;
for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) {
struct zcomp *comp = zram->comps[prio];
zram->comps[prio] = NULL;
if (!comp)
continue;
zcomp_destroy(comp);
zram->num_active_comps--;
}
}
static void zram_reset_device(struct zram *zram)
{
down_write(&zram->init_lock);
zram->limit_pages = 0;
if (!init_done(zram)) {
up_write(&zram->init_lock);
return;
}
set_capacity_and_notify(zram->disk, 0);
part_stat_set_all(zram->disk->part0, 0);
/* All in-flight I/O on every CPU has completed, so it is safe to free */
zram_meta_free(zram, zram->disksize);
zram->disksize = 0;
zram_destroy_comps(zram);
memset(&zram->stats, 0, sizeof(zram->stats));
reset_bdev(zram);
comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
up_write(&zram->init_lock);
}
static ssize_t disksize_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
u64 disksize;
struct zcomp *comp;
struct zram *zram = dev_to_zram(dev);
int err;
u32 prio;
disksize = memparse(buf, NULL);
if (!disksize)
return -EINVAL;
down_write(&zram->init_lock);
if (init_done(zram)) {
pr_info("Cannot change disksize for initialized device\n");
err = -EBUSY;
goto out_unlock;
}
disksize = PAGE_ALIGN(disksize);
if (!zram_meta_alloc(zram, disksize)) {
err = -ENOMEM;
goto out_unlock;
}
for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) {
if (!zram->comp_algs[prio])
continue;
comp = zcomp_create(zram->comp_algs[prio]);
if (IS_ERR(comp)) {
pr_err("Cannot initialise %s compressing backend\n",
zram->comp_algs[prio]);
err = PTR_ERR(comp);
goto out_free_comps;
}
zram->comps[prio] = comp;
zram->num_active_comps++;
}
zram->disksize = disksize;
set_capacity_and_notify(zram->disk, zram->disksize >> SECTOR_SHIFT);
up_write(&zram->init_lock);
return len;
out_free_comps:
zram_destroy_comps(zram);
zram_meta_free(zram, disksize);
out_unlock:
up_write(&zram->init_lock);
return err;
}
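/*
 * Example (illustrative): memparse() accepts k/m/g suffixes, so the
 * device can be sized from the shell before first use:
 *
 *	echo 256M > /sys/block/zram0/disksize
 */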
static ssize_t reset_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
int ret;
unsigned short do_reset;
struct zram *zram;
struct gendisk *disk;
ret = kstrtou16(buf, 10, &do_reset);
if (ret)
return ret;
if (!do_reset)
return -EINVAL;
zram = dev_to_zram(dev);
disk = zram->disk;
mutex_lock(&disk->open_mutex);
/* Do not reset an active device or claimed device */
if (disk_openers(disk) || zram->claim) {
mutex_unlock(&disk->open_mutex);
return -EBUSY;
}
/* From now on, no one can open /dev/zram[0-9] */
zram->claim = true;
mutex_unlock(&disk->open_mutex);
/* Make sure all the pending I/O are finished */
sync_blockdev(disk->part0);
zram_reset_device(zram);
mutex_lock(&disk->open_mutex);
zram->claim = false;
mutex_unlock(&disk->open_mutex);
return len;
}
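/*
 * Example (illustrative): any non-zero value written to the reset
 * attribute triggers a device reset:
 *
 *	echo 1 > /sys/block/zram0/reset
 */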
static int zram_open(struct gendisk *disk, blk_mode_t mode)
{
struct zram *zram = disk->private_data;
WARN_ON(!mutex_is_locked(&disk->open_mutex));
/* zram was claimed to reset so open request fails */
if (zram->claim)
return -EBUSY;
return 0;
}
static const struct block_device_operations zram_devops = {
.open = zram_open,
.submit_bio = zram_submit_bio,
.swap_slot_free_notify = zram_slot_free_notify,
.owner = THIS_MODULE
};
static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_WO(idle);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RW(backing_dev);
static DEVICE_ATTR_WO(writeback);
static DEVICE_ATTR_RW(writeback_limit);
static DEVICE_ATTR_RW(writeback_limit_enable);
#endif
#ifdef CONFIG_ZRAM_MULTI_COMP
static DEVICE_ATTR_RW(recomp_algorithm);
static DEVICE_ATTR_WO(recompress);
#endif
static struct attribute *zram_disk_attrs[] = {
&dev_attr_disksize.attr,
&dev_attr_initstate.attr,
&dev_attr_reset.attr,
&dev_attr_compact.attr,
&dev_attr_mem_limit.attr,
&dev_attr_mem_used_max.attr,
&dev_attr_idle.attr,
&dev_attr_max_comp_streams.attr,
&dev_attr_comp_algorithm.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
&dev_attr_backing_dev.attr,
&dev_attr_writeback.attr,
&dev_attr_writeback_limit.attr,
&dev_attr_writeback_limit_enable.attr,
#endif
&dev_attr_io_stat.attr,
&dev_attr_mm_stat.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
&dev_attr_bd_stat.attr,
#endif
&dev_attr_debug_stat.attr,
#ifdef CONFIG_ZRAM_MULTI_COMP
&dev_attr_recomp_algorithm.attr,
&dev_attr_recompress.attr,
#endif
NULL,
};
ATTRIBUTE_GROUPS(zram_disk);
/*
* Allocate and initialize a new zram device. The function returns
* a '>= 0' device_id upon success, and a negative value otherwise.
*/
static int zram_add(void)
{
struct zram *zram;
int ret, device_id;
zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
if (!zram)
return -ENOMEM;
ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
if (ret < 0)
goto out_free_dev;
device_id = ret;
init_rwsem(&zram->init_lock);
#ifdef CONFIG_ZRAM_WRITEBACK
spin_lock_init(&zram->wb_limit_lock);
#endif
/* gendisk structure */
zram->disk = blk_alloc_disk(NUMA_NO_NODE);
if (!zram->disk) {
pr_err("Error allocating disk structure for device %d\n",
device_id);
ret = -ENOMEM;
goto out_free_idr;
}
zram->disk->major = zram_major;
zram->disk->first_minor = device_id;
zram->disk->minors = 1;
zram->disk->flags |= GENHD_FL_NO_PART;
zram->disk->fops = &zram_devops;
zram->disk->private_data = zram;
snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
set_capacity(zram->disk, 0);
/* zram devices sort of resemble non-rotational disks */
blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue);
/*
* To ensure that we always get PAGE_SIZE aligned
* and n*PAGE_SIZED sized I/O requests.
*/
blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
blk_queue_logical_block_size(zram->disk->queue,
ZRAM_LOGICAL_BLOCK_SIZE);
blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
/*
* zram_bio_discard() will clear all logical blocks if logical block
* size is identical with physical block size(PAGE_SIZE). But if it is
* different, we will skip discarding some parts of logical blocks in
* the part of the request range which isn't aligned to physical block
* size. So we can't ensure that all discarded logical blocks are
* zeroed.
*/
if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
if (ret)
goto out_cleanup_disk;
comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
zram_debugfs_register(zram);
pr_info("Added device: %s\n", zram->disk->disk_name);
return device_id;
out_cleanup_disk:
put_disk(zram->disk);
out_free_idr:
idr_remove(&zram_index_idr, device_id);
out_free_dev:
kfree(zram);
return ret;
}
static int zram_remove(struct zram *zram)
{
bool claimed;
mutex_lock(&zram->disk->open_mutex);
if (disk_openers(zram->disk)) {
mutex_unlock(&zram->disk->open_mutex);
return -EBUSY;
}
claimed = zram->claim;
if (!claimed)
zram->claim = true;
mutex_unlock(&zram->disk->open_mutex);
zram_debugfs_unregister(zram);
if (claimed) {
/*
* If we were claimed by reset_store(), del_gendisk() will
* wait until reset_store() is done, so there is nothing to do.
*/
;
} else {
/* Make sure all the pending I/O are finished */
sync_blockdev(zram->disk->part0);
zram_reset_device(zram);
}
pr_info("Removed device: %s\n", zram->disk->disk_name);
del_gendisk(zram->disk);
/* del_gendisk drains pending reset_store */
WARN_ON_ONCE(claimed && zram->claim);
/*
* disksize_store() may be called in between zram_reset_device()
* and del_gendisk(), so run the last reset to avoid leaking
* anything allocated with disksize_store().
*/
zram_reset_device(zram);
put_disk(zram->disk);
kfree(zram);
return 0;
}
/* zram-control sysfs attributes */
/*
* NOTE: hot_add attribute is not the usual read-only sysfs attribute, in the
* sense that reading from this file does alter the state of your system -- it
* creates a new un-initialized zram device and returns back this device's
* device_id (or an error code if it fails to create a new device).
*/
static ssize_t hot_add_show(const struct class *class,
const struct class_attribute *attr,
char *buf)
{
int ret;
mutex_lock(&zram_index_mutex);
ret = zram_add();
mutex_unlock(&zram_index_mutex);
if (ret < 0)
return ret;
return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}
/* This attribute must be set to 0400, so CLASS_ATTR_RO() can not be used */
static struct class_attribute class_attr_hot_add =
__ATTR(hot_add, 0400, hot_add_show, NULL);
static ssize_t hot_remove_store(const struct class *class,
const struct class_attribute *attr,
const char *buf,
size_t count)
{
struct zram *zram;
int ret, dev_id;
/* dev_id is gendisk->first_minor, which is `int' */
ret = kstrtoint(buf, 10, &dev_id);
if (ret)
return ret;
if (dev_id < 0)
return -EINVAL;
mutex_lock(&zram_index_mutex);
zram = idr_find(&zram_index_idr, dev_id);
if (zram) {
ret = zram_remove(zram);
if (!ret)
idr_remove(&zram_index_idr, dev_id);
} else {
ret = -ENODEV;
}
mutex_unlock(&zram_index_mutex);
return ret ? ret : count;
}
static CLASS_ATTR_WO(hot_remove);
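/*
 * Example usage (illustrative):
 *
 *	cat /sys/class/zram-control/hot_add	# prints the new device id
 *	echo 4 > /sys/class/zram-control/hot_remove
 */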
static struct attribute *zram_control_class_attrs[] = {
&class_attr_hot_add.attr,
&class_attr_hot_remove.attr,
NULL,
};
ATTRIBUTE_GROUPS(zram_control_class);
static struct class zram_control_class = {
.name = "zram-control",
.class_groups = zram_control_class_groups,
};
static int zram_remove_cb(int id, void *ptr, void *data)
{
WARN_ON_ONCE(zram_remove(ptr));
return 0;
}
static void destroy_devices(void)
{
class_unregister(&zram_control_class);
idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
zram_debugfs_destroy();
idr_destroy(&zram_index_idr);
unregister_blkdev(zram_major, "zram");
cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
}
static int __init zram_init(void)
{
int ret;
BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > BITS_PER_LONG);
ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
zcomp_cpu_up_prepare, zcomp_cpu_dead);
if (ret < 0)
return ret;
ret = class_register(&zram_control_class);
if (ret) {
pr_err("Unable to register zram-control class\n");
cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
return ret;
}
zram_debugfs_create();
zram_major = register_blkdev(0, "zram");
if (zram_major <= 0) {
pr_err("Unable to get major number\n");
class_unregister(&zram_control_class);
cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
return -EBUSY;
}
while (num_devices != 0) {
mutex_lock(&zram_index_mutex);
ret = zram_add();
mutex_unlock(&zram_index_mutex);
if (ret < 0)
goto out_error;
num_devices--;
}
return 0;
out_error:
destroy_devices();
return ret;
}
static void __exit zram_exit(void)
{
destroy_devices();
}
module_init(zram_init);
module_exit(zram_exit);
module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <[email protected]>");
MODULE_DESCRIPTION("Compressed RAM Block Device");
| linux-master | drivers/block/zram/zram_drv.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2014 Sergey Senozhatsky.
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/crypto.h>
#include "zcomp.h"
static const char * const backends[] = {
#if IS_ENABLED(CONFIG_CRYPTO_LZO)
"lzo",
"lzo-rle",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_LZ4)
"lz4",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_LZ4HC)
"lz4hc",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_842)
"842",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_ZSTD)
"zstd",
#endif
};
static void zcomp_strm_free(struct zcomp_strm *zstrm)
{
if (!IS_ERR_OR_NULL(zstrm->tfm))
crypto_free_comp(zstrm->tfm);
free_pages((unsigned long)zstrm->buffer, 1);
zstrm->tfm = NULL;
zstrm->buffer = NULL;
}
/*
* Initialize the zcomp_strm structure: ->tfm is initialized by the
* backend, and ->buffer is allocated here. Return a negative value on error.
*/
static int zcomp_strm_init(struct zcomp_strm *zstrm, struct zcomp *comp)
{
zstrm->tfm = crypto_alloc_comp(comp->name, 0, 0);
/*
* Allocate 2 pages: 1 for compressed data, plus 1 extra for the
* case when the compressed size is larger than the original one.
*/
zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
if (IS_ERR_OR_NULL(zstrm->tfm) || !zstrm->buffer) {
zcomp_strm_free(zstrm);
return -ENOMEM;
}
return 0;
}
bool zcomp_available_algorithm(const char *comp)
{
/*
* The crypto API does not ignore a trailing newline symbol,
* so make sure you don't supply a string containing one.
* This also means that we permit zcomp initialisation
* with any compression algorithm known to the crypto API.
*/
return crypto_has_comp(comp, 0, 0) == 1;
}
/* show available compressors */
ssize_t zcomp_available_show(const char *comp, char *buf)
{
bool known_algorithm = false;
ssize_t sz = 0;
int i;
for (i = 0; i < ARRAY_SIZE(backends); i++) {
if (!strcmp(comp, backends[i])) {
known_algorithm = true;
sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
"[%s] ", backends[i]);
} else {
sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
"%s ", backends[i]);
}
}
/*
* An out-of-tree module known to the crypto API, or a missing
* entry in `backends'.
*/
if (!known_algorithm && crypto_has_comp(comp, 0, 0) == 1)
sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
"[%s] ", comp);
sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
return sz;
}
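/*
 * Example output (illustrative; the exact list depends on the kernel
 * config), with "lzo-rle" currently selected:
 *
 *	lzo [lzo-rle] lz4 zstd
 */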
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
local_lock(&comp->stream->lock);
return this_cpu_ptr(comp->stream);
}
void zcomp_stream_put(struct zcomp *comp)
{
local_unlock(&comp->stream->lock);
}
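/*
 * Typical usage pattern (illustrative sketch, mirroring zram_drv.c):
 *
 *	zstrm = zcomp_stream_get(comp);
 *	ret = zcomp_compress(zstrm, src, &dst_len);
 *	zcomp_stream_put(comp);
 */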
int zcomp_compress(struct zcomp_strm *zstrm,
const void *src, unsigned int *dst_len)
{
/*
* Our dst memory (zstrm->buffer) is always `2 * PAGE_SIZE' sized
* because sometimes we can end up having bigger compressed data
* due to various reasons: for example compression algorithms tend
* to add some padding to the compressed buffer. Speaking of padding,
* comp algorithm `842' pads the compressed length to a multiple of 8
* and returns -ENOSPC when the dst memory is not big enough, which
* is not something that ZRAM wants to see. We can handle the
* `compressed_size > PAGE_SIZE' case easily in ZRAM, but when we
* receive -ERRNO from the compressing backend we can't help it
* anymore. To make `842' happy we need to tell the exact size of
* the dst buffer, and zram_drv will take care of the fact that the
* compressed buffer is too big.
*/
*dst_len = PAGE_SIZE * 2;
return crypto_comp_compress(zstrm->tfm,
src, PAGE_SIZE,
zstrm->buffer, dst_len);
}
int zcomp_decompress(struct zcomp_strm *zstrm,
const void *src, unsigned int src_len, void *dst)
{
unsigned int dst_len = PAGE_SIZE;
return crypto_comp_decompress(zstrm->tfm,
src, src_len,
dst, &dst_len);
}
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
{
struct zcomp *comp = hlist_entry(node, struct zcomp, node);
struct zcomp_strm *zstrm;
int ret;
zstrm = per_cpu_ptr(comp->stream, cpu);
local_lock_init(&zstrm->lock);
ret = zcomp_strm_init(zstrm, comp);
if (ret)
pr_err("Can't allocate a compression stream\n");
return ret;
}
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
struct zcomp *comp = hlist_entry(node, struct zcomp, node);
struct zcomp_strm *zstrm;
zstrm = per_cpu_ptr(comp->stream, cpu);
zcomp_strm_free(zstrm);
return 0;
}
static int zcomp_init(struct zcomp *comp)
{
int ret;
comp->stream = alloc_percpu(struct zcomp_strm);
if (!comp->stream)
return -ENOMEM;
ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
if (ret < 0)
goto cleanup;
return 0;
cleanup:
free_percpu(comp->stream);
return ret;
}
void zcomp_destroy(struct zcomp *comp)
{
cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
free_percpu(comp->stream);
kfree(comp);
}
/*
* Search available compressors for the requested algorithm,
* allocate a new zcomp and initialize it. Return a compressing
* backend pointer or ERR_PTR if things went bad: ERR_PTR(-EINVAL)
* if the requested algorithm is not supported, ERR_PTR(-ENOMEM) in
* case of an allocation error, or any other error potentially
* returned by zcomp_init().
*/
struct zcomp *zcomp_create(const char *alg)
{
struct zcomp *comp;
int error;
/*
* The crypto API will execute /sbin/modprobe if the compression module
* is not loaded yet. We must do it here, otherwise we would end up
* calling /sbin/modprobe under the CPU hot-plug lock.
*/
if (!zcomp_available_algorithm(alg))
return ERR_PTR(-EINVAL);
comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
if (!comp)
return ERR_PTR(-ENOMEM);
comp->name = alg;
error = zcomp_init(comp);
if (error) {
kfree(comp);
return ERR_PTR(error);
}
return comp;
}
| linux-master | drivers/block/zram/zcomp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* null_blk trace related helpers.
*
* Copyright (C) 2020 Western Digital Corporation or its affiliates.
*/
#include "trace.h"
/*
* Helper to use for all null_blk traces to extract disk name.
*/
const char *nullb_trace_disk_name(struct trace_seq *p, char *name)
{
const char *ret = trace_seq_buffer_ptr(p);
if (name && *name)
trace_seq_printf(p, "disk=%s, ", name);
trace_seq_putc(p, 0);
return ret;
}
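/*
 * E.g. (illustrative): for name "nullb0" the returned string starts with
 * "disk=nullb0, "; a NULL or empty name yields an empty string.
 */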
| linux-master | drivers/block/null_blk/trace.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include "null_blk.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
#undef pr_fmt
#define pr_fmt(fmt) "null_blk: " fmt
static inline sector_t mb_to_sects(unsigned long mb)
{
return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
}
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
return sect >> ilog2(dev->zone_size_sects);
}
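/*
 * Worked example (illustrative): with the default 256 MB zone size
 * (524288 sectors, so ilog2 = 19), sector 1048576 falls in zone
 * 1048576 >> 19 = 2.
 */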
static inline void null_lock_zone_res(struct nullb_device *dev)
{
if (dev->need_zone_res_mgmt)
spin_lock_irq(&dev->zone_res_lock);
}
static inline void null_unlock_zone_res(struct nullb_device *dev)
{
if (dev->need_zone_res_mgmt)
spin_unlock_irq(&dev->zone_res_lock);
}
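/*
 * Note on the per-zone locking below (an inference from the structure,
 * not stated in the source): memory-backed devices may sleep while
 * copying data under the zone lock, so they use a mutex; otherwise an
 * IRQ-safe spinlock suffices.
 */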
static inline void null_init_zone_lock(struct nullb_device *dev,
struct nullb_zone *zone)
{
if (!dev->memory_backed)
spin_lock_init(&zone->spinlock);
else
mutex_init(&zone->mutex);
}
static inline void null_lock_zone(struct nullb_device *dev,
struct nullb_zone *zone)
{
if (!dev->memory_backed)
spin_lock_irq(&zone->spinlock);
else
mutex_lock(&zone->mutex);
}
static inline void null_unlock_zone(struct nullb_device *dev,
struct nullb_zone *zone)
{
if (!dev->memory_backed)
spin_unlock_irq(&zone->spinlock);
else
mutex_unlock(&zone->mutex);
}
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
{
sector_t dev_capacity_sects, zone_capacity_sects;
struct nullb_zone *zone;
sector_t sector = 0;
unsigned int i;
if (!is_power_of_2(dev->zone_size)) {
pr_err("zone_size must be power-of-two\n");
return -EINVAL;
}
if (dev->zone_size > dev->size) {
pr_err("Zone size larger than device capacity\n");
return -EINVAL;
}
if (!dev->zone_capacity)
dev->zone_capacity = dev->zone_size;
if (dev->zone_capacity > dev->zone_size) {
pr_err("zone capacity (%lu MB) larger than zone size (%lu MB)\n",
dev->zone_capacity, dev->zone_size);
return -EINVAL;
}
zone_capacity_sects = mb_to_sects(dev->zone_capacity);
dev_capacity_sects = mb_to_sects(dev->size);
dev->zone_size_sects = mb_to_sects(dev->zone_size);
dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
>> ilog2(dev->zone_size_sects);
dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
GFP_KERNEL | __GFP_ZERO);
if (!dev->zones)
return -ENOMEM;
spin_lock_init(&dev->zone_res_lock);
if (dev->zone_nr_conv >= dev->nr_zones) {
dev->zone_nr_conv = dev->nr_zones - 1;
pr_info("changed the number of conventional zones to %u",
dev->zone_nr_conv);
}
/* Max active zones has to be < number of seq zones in order to be enforceable */
if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
dev->zone_max_active = 0;
pr_info("zone_max_active limit disabled, limit >= zone count\n");
}
/* Max open zones has to be <= max active zones */
if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
dev->zone_max_open = dev->zone_max_active;
pr_info("changed the maximum number of open zones to %u\n",
dev->nr_zones);
} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
dev->zone_max_open = 0;
pr_info("zone_max_open limit disabled, limit >= zone count\n");
}
dev->need_zone_res_mgmt = dev->zone_max_active || dev->zone_max_open;
dev->imp_close_zone_no = dev->zone_nr_conv;
for (i = 0; i < dev->zone_nr_conv; i++) {
zone = &dev->zones[i];
null_init_zone_lock(dev, zone);
zone->start = sector;
zone->len = dev->zone_size_sects;
zone->capacity = zone->len;
zone->wp = zone->start + zone->len;
zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
zone->cond = BLK_ZONE_COND_NOT_WP;
sector += dev->zone_size_sects;
}
for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
zone = &dev->zones[i];
null_init_zone_lock(dev, zone);
zone->start = zone->wp = sector;
if (zone->start + dev->zone_size_sects > dev_capacity_sects)
zone->len = dev_capacity_sects - zone->start;
else
zone->len = dev->zone_size_sects;
zone->capacity =
min_t(sector_t, zone->len, zone_capacity_sects);
zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
zone->cond = BLK_ZONE_COND_EMPTY;
sector += dev->zone_size_sects;
}
return 0;
}
int null_register_zoned_dev(struct nullb *nullb)
{
struct nullb_device *dev = nullb->dev;
struct request_queue *q = nullb->q;
disk_set_zoned(nullb->disk, BLK_ZONED_HM);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
blk_queue_chunk_sectors(q, dev->zone_size_sects);
nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
disk_set_max_open_zones(nullb->disk, dev->zone_max_open);
disk_set_max_active_zones(nullb->disk, dev->zone_max_active);
if (queue_is_mq(q))
return blk_revalidate_disk_zones(nullb->disk, NULL);
return 0;
}
void null_free_zoned_dev(struct nullb_device *dev)
{
kvfree(dev->zones);
dev->zones = NULL;
}
int null_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data)
{
struct nullb *nullb = disk->private_data;
struct nullb_device *dev = nullb->dev;
unsigned int first_zone, i;
struct nullb_zone *zone;
struct blk_zone blkz;
int error;
first_zone = null_zone_no(dev, sector);
if (first_zone >= dev->nr_zones)
return 0;
nr_zones = min(nr_zones, dev->nr_zones - first_zone);
trace_nullb_report_zones(nullb, nr_zones);
memset(&blkz, 0, sizeof(struct blk_zone));
zone = &dev->zones[first_zone];
for (i = 0; i < nr_zones; i++, zone++) {
/*
* Stacked DM target drivers will remap the zone information by
* modifying the zone information passed to the report callback.
* So use a local copy to avoid corruption of the device zone
* array.
*/
null_lock_zone(dev, zone);
blkz.start = zone->start;
blkz.len = zone->len;
blkz.wp = zone->wp;
blkz.type = zone->type;
blkz.cond = zone->cond;
blkz.capacity = zone->capacity;
null_unlock_zone(dev, zone);
error = cb(&blkz, i, data);
if (error)
return error;
}
return nr_zones;
}
/*
* This is called in the case of memory backing from null_process_cmd()
* with the target zone already locked.
*/
size_t null_zone_valid_read_len(struct nullb *nullb,
sector_t sector, unsigned int len)
{
struct nullb_device *dev = nullb->dev;
struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
unsigned int nr_sectors = len >> SECTOR_SHIFT;
/* Read must be below the write pointer position */
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
sector + nr_sectors <= zone->wp)
return len;
if (sector > zone->wp)
return 0;
return (zone->wp - sector) << SECTOR_SHIFT;
}
static blk_status_t __null_close_zone(struct nullb_device *dev,
struct nullb_zone *zone)
{
switch (zone->cond) {
case BLK_ZONE_COND_CLOSED:
/* close operation on closed is not an error */
return BLK_STS_OK;
case BLK_ZONE_COND_IMP_OPEN:
dev->nr_zones_imp_open--;
break;
case BLK_ZONE_COND_EXP_OPEN:
dev->nr_zones_exp_open--;
break;
case BLK_ZONE_COND_EMPTY:
case BLK_ZONE_COND_FULL:
default:
return BLK_STS_IOERR;
}
if (zone->wp == zone->start) {
zone->cond = BLK_ZONE_COND_EMPTY;
} else {
zone->cond = BLK_ZONE_COND_CLOSED;
dev->nr_zones_closed++;
}
return BLK_STS_OK;
}
static void null_close_imp_open_zone(struct nullb_device *dev)
{
struct nullb_zone *zone;
unsigned int zno, i;
zno = dev->imp_close_zone_no;
if (zno >= dev->nr_zones)
zno = dev->zone_nr_conv;
for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
zone = &dev->zones[zno];
zno++;
if (zno >= dev->nr_zones)
zno = dev->zone_nr_conv;
if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
__null_close_zone(dev, zone);
dev->imp_close_zone_no = zno;
return;
}
}
}
static blk_status_t null_check_active(struct nullb_device *dev)
{
if (!dev->zone_max_active)
return BLK_STS_OK;
if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
dev->nr_zones_closed < dev->zone_max_active)
return BLK_STS_OK;
return BLK_STS_ZONE_ACTIVE_RESOURCE;
}
static blk_status_t null_check_open(struct nullb_device *dev)
{
if (!dev->zone_max_open)
return BLK_STS_OK;
if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
return BLK_STS_OK;
if (dev->nr_zones_imp_open) {
if (null_check_active(dev) == BLK_STS_OK) {
null_close_imp_open_zone(dev);
return BLK_STS_OK;
}
}
return BLK_STS_ZONE_OPEN_RESOURCE;
}
/*
* This function matches the manage open zone resources function in the ZBC standard,
* with the addition of max active zones support (added in the ZNS standard).
*
* The function determines if a zone can transition to implicit open or explicit open,
* while maintaining the max open zone (and max active zone) limit(s). It may close an
* implicit open zone in order to make additional zone resources available.
*
* ZBC states that an implicit open zone shall be closed only if there is not
* room within the open limit. However, with the addition of an active limit,
* it is not certain that closing an implicit open zone will allow a new zone
* to be opened, since we might already be at the active limit capacity.
*/
static blk_status_t null_check_zone_resources(struct nullb_device *dev,
struct nullb_zone *zone)
{
blk_status_t ret;
switch (zone->cond) {
case BLK_ZONE_COND_EMPTY:
ret = null_check_active(dev);
if (ret != BLK_STS_OK)
return ret;
fallthrough;
case BLK_ZONE_COND_CLOSED:
return null_check_open(dev);
default:
/* Should never be called for other states */
WARN_ON(1);
return BLK_STS_IOERR;
}
}
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
unsigned int nr_sectors, bool append)
{
struct nullb_device *dev = cmd->nq->dev;
unsigned int zno = null_zone_no(dev, sector);
struct nullb_zone *zone = &dev->zones[zno];
blk_status_t ret;
trace_nullb_zone_op(cmd, zno, zone->cond);
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
if (append)
return BLK_STS_IOERR;
return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
}
null_lock_zone(dev, zone);
if (zone->cond == BLK_ZONE_COND_FULL ||
zone->cond == BLK_ZONE_COND_READONLY ||
zone->cond == BLK_ZONE_COND_OFFLINE) {
/* Cannot write to the zone */
ret = BLK_STS_IOERR;
goto unlock;
}
/*
* Regular writes must be at the write pointer position.
* Zone append writes are automatically issued at the write
* pointer and the position returned using the request or BIO
* sector.
*/
if (append) {
sector = zone->wp;
if (dev->queue_mode == NULL_Q_MQ)
cmd->rq->__sector = sector;
else
cmd->bio->bi_iter.bi_sector = sector;
} else if (sector != zone->wp) {
ret = BLK_STS_IOERR;
goto unlock;
}
if (zone->wp + nr_sectors > zone->start + zone->capacity) {
ret = BLK_STS_IOERR;
goto unlock;
}
if (zone->cond == BLK_ZONE_COND_CLOSED ||
zone->cond == BLK_ZONE_COND_EMPTY) {
null_lock_zone_res(dev);
ret = null_check_zone_resources(dev, zone);
if (ret != BLK_STS_OK) {
null_unlock_zone_res(dev);
goto unlock;
}
if (zone->cond == BLK_ZONE_COND_CLOSED) {
dev->nr_zones_closed--;
dev->nr_zones_imp_open++;
} else if (zone->cond == BLK_ZONE_COND_EMPTY) {
dev->nr_zones_imp_open++;
}
if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
zone->cond = BLK_ZONE_COND_IMP_OPEN;
null_unlock_zone_res(dev);
}
ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
if (ret != BLK_STS_OK)
goto unlock;
zone->wp += nr_sectors;
if (zone->wp == zone->start + zone->capacity) {
null_lock_zone_res(dev);
if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
dev->nr_zones_exp_open--;
else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
dev->nr_zones_imp_open--;
zone->cond = BLK_ZONE_COND_FULL;
null_unlock_zone_res(dev);
}
ret = BLK_STS_OK;
unlock:
null_unlock_zone(dev, zone);
return ret;
}
static blk_status_t null_open_zone(struct nullb_device *dev,
struct nullb_zone *zone)
{
blk_status_t ret = BLK_STS_OK;
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return BLK_STS_IOERR;
null_lock_zone_res(dev);
switch (zone->cond) {
case BLK_ZONE_COND_EXP_OPEN:
/* open operation on exp open is not an error */
goto unlock;
case BLK_ZONE_COND_EMPTY:
ret = null_check_zone_resources(dev, zone);
if (ret != BLK_STS_OK)
goto unlock;
break;
case BLK_ZONE_COND_IMP_OPEN:
dev->nr_zones_imp_open--;
break;
case BLK_ZONE_COND_CLOSED:
ret = null_check_zone_resources(dev, zone);
if (ret != BLK_STS_OK)
goto unlock;
dev->nr_zones_closed--;
break;
case BLK_ZONE_COND_FULL:
default:
ret = BLK_STS_IOERR;
goto unlock;
}
zone->cond = BLK_ZONE_COND_EXP_OPEN;
dev->nr_zones_exp_open++;
unlock:
null_unlock_zone_res(dev);
return ret;
}
static blk_status_t null_close_zone(struct nullb_device *dev,
struct nullb_zone *zone)
{
blk_status_t ret;
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return BLK_STS_IOERR;
null_lock_zone_res(dev);
ret = __null_close_zone(dev, zone);
null_unlock_zone_res(dev);
return ret;
}
static blk_status_t null_finish_zone(struct nullb_device *dev,
struct nullb_zone *zone)
{
blk_status_t ret = BLK_STS_OK;
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return BLK_STS_IOERR;
null_lock_zone_res(dev);
switch (zone->cond) {
case BLK_ZONE_COND_FULL:
/* finish operation on full is not an error */
goto unlock;
case BLK_ZONE_COND_EMPTY:
ret = null_check_zone_resources(dev, zone);
if (ret != BLK_STS_OK)
goto unlock;
break;
case BLK_ZONE_COND_IMP_OPEN:
dev->nr_zones_imp_open--;
break;
case BLK_ZONE_COND_EXP_OPEN:
dev->nr_zones_exp_open--;
break;
case BLK_ZONE_COND_CLOSED:
ret = null_check_zone_resources(dev, zone);
if (ret != BLK_STS_OK)
goto unlock;
dev->nr_zones_closed--;
break;
default:
ret = BLK_STS_IOERR;
goto unlock;
}
zone->cond = BLK_ZONE_COND_FULL;
zone->wp = zone->start + zone->len;
unlock:
null_unlock_zone_res(dev);
return ret;
}
static blk_status_t null_reset_zone(struct nullb_device *dev,
struct nullb_zone *zone)
{
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return BLK_STS_IOERR;
null_lock_zone_res(dev);
switch (zone->cond) {
case BLK_ZONE_COND_EMPTY:
/* reset operation on empty is not an error */
null_unlock_zone_res(dev);
return BLK_STS_OK;
case BLK_ZONE_COND_IMP_OPEN:
dev->nr_zones_imp_open--;
break;
case BLK_ZONE_COND_EXP_OPEN:
dev->nr_zones_exp_open--;
break;
case BLK_ZONE_COND_CLOSED:
dev->nr_zones_closed--;
break;
case BLK_ZONE_COND_FULL:
break;
default:
null_unlock_zone_res(dev);
return BLK_STS_IOERR;
}
zone->cond = BLK_ZONE_COND_EMPTY;
zone->wp = zone->start;
null_unlock_zone_res(dev);
if (dev->memory_backed)
return null_handle_discard(dev, zone->start, zone->len);
return BLK_STS_OK;
}
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_op op,
sector_t sector)
{
struct nullb_device *dev = cmd->nq->dev;
unsigned int zone_no;
struct nullb_zone *zone;
blk_status_t ret;
size_t i;
if (op == REQ_OP_ZONE_RESET_ALL) {
for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
zone = &dev->zones[i];
null_lock_zone(dev, zone);
if (zone->cond != BLK_ZONE_COND_EMPTY &&
zone->cond != BLK_ZONE_COND_READONLY &&
zone->cond != BLK_ZONE_COND_OFFLINE) {
null_reset_zone(dev, zone);
trace_nullb_zone_op(cmd, i, zone->cond);
}
null_unlock_zone(dev, zone);
}
return BLK_STS_OK;
}
zone_no = null_zone_no(dev, sector);
zone = &dev->zones[zone_no];
null_lock_zone(dev, zone);
if (zone->cond == BLK_ZONE_COND_READONLY ||
zone->cond == BLK_ZONE_COND_OFFLINE) {
ret = BLK_STS_IOERR;
goto unlock;
}
switch (op) {
case REQ_OP_ZONE_RESET:
ret = null_reset_zone(dev, zone);
break;
case REQ_OP_ZONE_OPEN:
ret = null_open_zone(dev, zone);
break;
case REQ_OP_ZONE_CLOSE:
ret = null_close_zone(dev, zone);
break;
case REQ_OP_ZONE_FINISH:
ret = null_finish_zone(dev, zone);
break;
default:
ret = BLK_STS_NOTSUPP;
break;
}
if (ret == BLK_STS_OK)
trace_nullb_zone_op(cmd, zone_no, zone->cond);
unlock:
null_unlock_zone(dev, zone);
return ret;
}
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
sector_t sector, sector_t nr_sectors)
{
struct nullb_device *dev;
struct nullb_zone *zone;
blk_status_t sts;
switch (op) {
case REQ_OP_WRITE:
return null_zone_write(cmd, sector, nr_sectors, false);
case REQ_OP_ZONE_APPEND:
return null_zone_write(cmd, sector, nr_sectors, true);
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_RESET_ALL:
case REQ_OP_ZONE_OPEN:
case REQ_OP_ZONE_CLOSE:
case REQ_OP_ZONE_FINISH:
return null_zone_mgmt(cmd, op, sector);
default:
dev = cmd->nq->dev;
zone = &dev->zones[null_zone_no(dev, sector)];
if (zone->cond == BLK_ZONE_COND_OFFLINE)
return BLK_STS_IOERR;
null_lock_zone(dev, zone);
sts = null_process_cmd(cmd, op, sector, nr_sectors);
null_unlock_zone(dev, zone);
return sts;
}
}
/*
* Set a zone to the read-only or offline condition.
*/
static void null_set_zone_cond(struct nullb_device *dev,
struct nullb_zone *zone, enum blk_zone_cond cond)
{
if (WARN_ON_ONCE(cond != BLK_ZONE_COND_READONLY &&
cond != BLK_ZONE_COND_OFFLINE))
return;
null_lock_zone(dev, zone);
/*
* If the read-only condition is requested again for zones that are
* already read-only, restore the normal empty condition. Do the same
* if the offline condition is requested for zones that are already
* offline. Otherwise, set the specified condition on the zone, finishing
* it beforehand to free up zone resources.
*/
if (zone->cond == cond) {
zone->cond = BLK_ZONE_COND_EMPTY;
zone->wp = zone->start;
if (dev->memory_backed)
null_handle_discard(dev, zone->start, zone->len);
} else {
if (zone->cond != BLK_ZONE_COND_READONLY &&
zone->cond != BLK_ZONE_COND_OFFLINE)
null_finish_zone(dev, zone);
zone->cond = cond;
zone->wp = (sector_t)-1;
}
null_unlock_zone(dev, zone);
}
/*
* Identify the zone containing the sector written to the configfs file,
* then set the given condition on that zone.
*/
ssize_t zone_cond_store(struct nullb_device *dev, const char *page,
size_t count, enum blk_zone_cond cond)
{
unsigned long long sector;
unsigned int zone_no;
int ret;
if (!dev->zoned) {
pr_err("null_blk device is not zoned\n");
return -EINVAL;
}
if (!dev->zones) {
pr_err("null_blk device is not yet powered\n");
return -EINVAL;
}
ret = kstrtoull(page, 0, &sector);
if (ret < 0)
return ret;
zone_no = null_zone_no(dev, sector);
if (zone_no >= dev->nr_zones) {
pr_err("Sector out of range\n");
return -EINVAL;
}
if (dev->zones[zone_no].type == BLK_ZONE_TYPE_CONVENTIONAL) {
pr_err("Can not change condition of conventional zones\n");
return -EINVAL;
}
null_set_zone_cond(dev, &dev->zones[zone_no], cond);
return count;
}
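/*
 * Example usage (illustrative; assumes a zone_readonly configfs attribute
 * wired to this helper, which is not shown here): put the zone containing
 * sector 524288 into the read-only condition, or back to empty if it is
 * already read-only:
 *
 *	echo 524288 > /sys/kernel/config/nullb/nullb0/zone_readonly
 */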
| linux-master | drivers/block/null_blk/zoned.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Add configfs and memory store: Kyungchan Koh <[email protected]> and
* Shaohua Li <[email protected]>
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/init.h>
#include "null_blk.h"
#undef pr_fmt
#define pr_fmt(fmt) "null_blk: " fmt
#define FREE_BATCH 16
#define TICKS_PER_SEC 50ULL
#define TIMER_INTERVAL (NSEC_PER_SEC / TICKS_PER_SEC)
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static DECLARE_FAULT_ATTR(null_timeout_attr);
static DECLARE_FAULT_ATTR(null_requeue_attr);
static DECLARE_FAULT_ATTR(null_init_hctx_attr);
#endif
static inline u64 mb_per_tick(int mbps)
{
return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}
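/*
 * E.g. (illustrative): with 50 ticks per second, mb_per_tick(1) =
 * (1 << 20) / 50 * 1 = 20971 bytes per 20 ms tick, i.e. roughly 1 MB/s.
 */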
/*
* Status flags for nullb_device.
*
* CONFIGURED: Device has been configured and turned on. Cannot reconfigure.
* UP: Device is currently on and visible in userspace.
* THROTTLED: Device is being throttled.
* CACHE: Device is using a write-back cache.
*/
enum nullb_device_flags {
NULLB_DEV_FL_CONFIGURED = 0,
NULLB_DEV_FL_UP = 1,
NULLB_DEV_FL_THROTTLED = 2,
NULLB_DEV_FL_CACHE = 3,
};
#define MAP_SZ ((PAGE_SIZE >> SECTOR_SHIFT) + 2)
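/* E.g. (illustrative): with 4K pages, MAP_SZ = (4096 >> 9) + 2 = 10 bits. */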
/*
* nullb_page is a page in memory for nullb devices.
*
* @page: The page holding the data.
* @bitmap: The bitmap represents which sector in the page has data.
* Each bit represents one block size. For example, sector 8
* will use the 7th bit.
* The highest 2 bits of the bitmap are for special purposes: LOCK means the
* cache page is being flushed to storage; FREE means the cache page is freed
* and should be skipped when flushing to storage. Please see
* null_make_cache_space().
*/
struct nullb_page {
struct page *page;
DECLARE_BITMAP(bitmap, MAP_SZ);
};
#define NULLB_PAGE_LOCK (MAP_SZ - 1)
#define NULLB_PAGE_FREE (MAP_SZ - 2)
static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
static struct blk_mq_tag_set tag_set;
enum {
NULL_IRQ_NONE = 0,
NULL_IRQ_SOFTIRQ = 1,
NULL_IRQ_TIMER = 2,
};
static bool g_virt_boundary = false;
module_param_named(virt_boundary, g_virt_boundary, bool, 0444);
MODULE_PARM_DESC(virt_boundary, "Require a virtual boundary for the device. Default: False");
static int g_no_sched;
module_param_named(no_sched, g_no_sched, int, 0444);
MODULE_PARM_DESC(no_sched, "No io scheduler");
static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");
static int g_poll_queues = 1;
module_param_named(poll_queues, g_poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of IOPOLL submission queues");
static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
/*
* For more details about fault injection, please refer to
* Documentation/fault-injection/fault-injection.rst.
*/
static char g_timeout_str[80];
module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);
MODULE_PARM_DESC(timeout, "Fault injection. timeout=<interval>,<probability>,<space>,<times>");
static char g_requeue_str[80];
module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
MODULE_PARM_DESC(requeue, "Fault injection. requeue=<interval>,<probability>,<space>,<times>");
static char g_init_hctx_str[80];
module_param_string(init_hctx, g_init_hctx_str, sizeof(g_init_hctx_str), 0444);
MODULE_PARM_DESC(init_hctx, "Fault injection to fail hctx init. init_hctx=<interval>,<probability>,<space>,<times>");
#endif
static int g_queue_mode = NULL_Q_MQ;
static int null_param_store_val(const char *str, int *val, int min, int max)
{
int ret, new_val;
ret = kstrtoint(str, 10, &new_val);
if (ret)
return -EINVAL;
if (new_val < min || new_val > max)
return -EINVAL;
*val = new_val;
return 0;
}
static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}
static const struct kernel_param_ops null_queue_mode_param_ops = {
.set = null_set_queue_mode,
.get = param_get_int,
};
device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
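/*
 * Example (illustrative): load the driver in multiqueue mode with four
 * submission queues:
 *
 *	modprobe null_blk queue_mode=2 submit_queues=4
 */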
static int g_gb = 250;
module_param_named(gb, g_gb, int, 0444);
MODULE_PARM_DESC(gb, "Size in GB");
static int g_bs = 512;
module_param_named(bs, g_bs, int, 0444);
MODULE_PARM_DESC(bs, "Block size (in bytes)");
static int g_max_sectors;
module_param_named(max_sectors, g_max_sectors, int, 0444);
MODULE_PARM_DESC(max_sectors, "Maximum size of a command (in 512B sectors)");
static unsigned int nr_devices = 1;
module_param(nr_devices, uint, 0444);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");
static bool g_blocking;
module_param_named(blocking, g_blocking, bool, 0444);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
static bool shared_tags;
module_param(shared_tags, bool, 0444);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
static bool g_shared_tag_bitmap;
module_param_named(shared_tag_bitmap, g_shared_tag_bitmap, bool, 0444);
MODULE_PARM_DESC(shared_tag_bitmap, "Use shared tag bitmap for all submission queues for blk-mq");
static int g_irqmode = NULL_IRQ_SOFTIRQ;
static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
NULL_IRQ_TIMER);
}
static const struct kernel_param_ops null_irqmode_param_ops = {
.set = null_set_irqmode,
.get = param_get_int,
};
device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
static bool g_memory_backed;
module_param_named(memory_backed, g_memory_backed, bool, 0444);
MODULE_PARM_DESC(memory_backed, "Create a memory-backed block device. Default: false");
static bool g_discard;
module_param_named(discard, g_discard, bool, 0444);
MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed null_blk device). Default: false");
static unsigned long g_cache_size;
module_param_named(cache_size, g_cache_size, ulong, 0444);
MODULE_PARM_DESC(mbps, "Cache size in MiB for memory-backed device. Default: 0 (none)");
static unsigned int g_mbps;
module_param_named(mbps, g_mbps, uint, 0444);
MODULE_PARM_DESC(mbps, "Limit maximum bandwidth (in MiB/s). Default: 0 (no limit)");
static bool g_zoned;
module_param_named(zoned, g_zoned, bool, 0444);
MODULE_PARM_DESC(zoned, "Make the device a host-managed zoned block device. Default: false");
static unsigned long g_zone_size = 256;
module_param_named(zone_size, g_zone_size, ulong, 0444);
MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be a power of two. Default: 256");
static unsigned long g_zone_capacity;
module_param_named(zone_capacity, g_zone_capacity, ulong, 0444);
MODULE_PARM_DESC(zone_capacity, "Zone capacity in MB when block device is zoned. Can be less than or equal to zone size. Default: Zone size");
static unsigned int g_zone_nr_conv;
module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0");
static unsigned int g_zone_max_open;
module_param_named(zone_max_open, g_zone_max_open, uint, 0444);
MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones when block device is zoned. Default: 0 (no limit)");
static unsigned int g_zone_max_active;
module_param_named(zone_max_active, g_zone_max_active, uint, 0444);
MODULE_PARM_DESC(zone_max_active, "Maximum number of active zones when block device is zoned. Default: 0 (no limit)");
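/*
* Illustrative module load example (parameter names as defined above,
* values arbitrary):
*   modprobe null_blk nr_devices=2 queue_mode=2 gb=4 bs=4096 memory_backed=1
* creates two 4GB memory-backed blk-mq devices with a 4KB block size.
*/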
static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
static int null_add_dev(struct nullb_device *dev);
static struct nullb *null_find_dev_by_name(const char *name);
static void null_free_device_storage(struct nullb_device *dev, bool is_cache);
static inline struct nullb_device *to_nullb_device(struct config_item *item)
{
return item ? container_of(to_config_group(item), struct nullb_device, group) : NULL;
}
static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", val);
}
static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
char *page)
{
return snprintf(page, PAGE_SIZE, "%lu\n", val);
}
static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", val);
}
static ssize_t nullb_device_uint_attr_store(unsigned int *val,
const char *page, size_t count)
{
unsigned int tmp;
int result;
result = kstrtouint(page, 0, &tmp);
if (result < 0)
return result;
*val = tmp;
return count;
}
static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
const char *page, size_t count)
{
int result;
unsigned long tmp;
result = kstrtoul(page, 0, &tmp);
if (result < 0)
return result;
*val = tmp;
return count;
}
static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
size_t count)
{
bool tmp;
int result;
result = kstrtobool(page, &tmp);
if (result < 0)
return result;
*val = tmp;
return count;
}
/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
#define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY) \
static ssize_t \
nullb_device_##NAME##_show(struct config_item *item, char *page) \
{ \
return nullb_device_##TYPE##_attr_show( \
to_nullb_device(item)->NAME, page); \
} \
static ssize_t \
nullb_device_##NAME##_store(struct config_item *item, const char *page, \
size_t count) \
{ \
int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY;\
struct nullb_device *dev = to_nullb_device(item); \
TYPE new_value = 0; \
int ret; \
\
ret = nullb_device_##TYPE##_attr_store(&new_value, page, count);\
if (ret < 0) \
return ret; \
if (apply_fn) \
ret = apply_fn(dev, new_value); \
else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags)) \
ret = -EBUSY; \
if (ret < 0) \
return ret; \
dev->NAME = new_value; \
return count; \
} \
CONFIGFS_ATTR(nullb_device_, NAME);
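/*
* For example, NULLB_DEVICE_ATTR(size, ulong, NULL) expands to
* nullb_device_size_show()/nullb_device_size_store() operating on
* dev->size plus the CONFIGFS_ATTR() wiring; a non-NULL APPLY callback
* gets to validate and apply the new value before it is stored.
*/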
static int nullb_update_nr_hw_queues(struct nullb_device *dev,
unsigned int submit_queues,
unsigned int poll_queues)
{
struct blk_mq_tag_set *set;
int ret, nr_hw_queues;
if (!dev->nullb)
return 0;
/*
* Make sure at least one submit queue exists.
*/
if (!submit_queues)
return -EINVAL;
/*
* Make sure that null_init_hctx() does not access nullb->queues[] past
* the end of that array.
*/
if (submit_queues > nr_cpu_ids || poll_queues > g_poll_queues)
return -EINVAL;
/*
* Keep previous and new queue numbers in nullb_device for reference in
* the callback function null_map_queues().
*/
dev->prev_submit_queues = dev->submit_queues;
dev->prev_poll_queues = dev->poll_queues;
dev->submit_queues = submit_queues;
dev->poll_queues = poll_queues;
set = dev->nullb->tag_set;
nr_hw_queues = submit_queues + poll_queues;
blk_mq_update_nr_hw_queues(set, nr_hw_queues);
ret = set->nr_hw_queues == nr_hw_queues ? 0 : -ENOMEM;
if (ret) {
/* on error, revert the queue numbers */
dev->submit_queues = dev->prev_submit_queues;
dev->poll_queues = dev->prev_poll_queues;
}
return ret;
}
static int nullb_apply_submit_queues(struct nullb_device *dev,
unsigned int submit_queues)
{
return nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
}
static int nullb_apply_poll_queues(struct nullb_device *dev,
unsigned int poll_queues)
{
return nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
}
NULLB_DEVICE_ATTR(size, ulong, NULL);
NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL);
NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues);
NULLB_DEVICE_ATTR(poll_queues, uint, nullb_apply_poll_queues);
NULLB_DEVICE_ATTR(home_node, uint, NULL);
NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
NULLB_DEVICE_ATTR(blocksize, uint, NULL);
NULLB_DEVICE_ATTR(max_sectors, uint, NULL);
NULLB_DEVICE_ATTR(irqmode, uint, NULL);
NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL);
NULLB_DEVICE_ATTR(index, uint, NULL);
NULLB_DEVICE_ATTR(blocking, bool, NULL);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool, NULL);
NULLB_DEVICE_ATTR(memory_backed, bool, NULL);
NULLB_DEVICE_ATTR(discard, bool, NULL);
NULLB_DEVICE_ATTR(mbps, uint, NULL);
NULLB_DEVICE_ATTR(cache_size, ulong, NULL);
NULLB_DEVICE_ATTR(zoned, bool, NULL);
NULLB_DEVICE_ATTR(zone_size, ulong, NULL);
NULLB_DEVICE_ATTR(zone_capacity, ulong, NULL);
NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_open, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_active, uint, NULL);
NULLB_DEVICE_ATTR(virt_boundary, bool, NULL);
NULLB_DEVICE_ATTR(no_sched, bool, NULL);
NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
}
static ssize_t nullb_device_power_store(struct config_item *item,
const char *page, size_t count)
{
struct nullb_device *dev = to_nullb_device(item);
bool newp = false;
ssize_t ret;
ret = nullb_device_bool_attr_store(&newp, page, count);
if (ret < 0)
return ret;
if (!dev->power && newp) {
if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
return count;
ret = null_add_dev(dev);
if (ret) {
clear_bit(NULLB_DEV_FL_UP, &dev->flags);
return ret;
}
set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
dev->power = newp;
} else if (dev->power && !newp) {
if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
mutex_lock(&lock);
dev->power = newp;
null_del_dev(dev->nullb);
mutex_unlock(&lock);
}
clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
}
return count;
}
CONFIGFS_ATTR(nullb_device_, power);
static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
{
struct nullb_device *t_dev = to_nullb_device(item);
return badblocks_show(&t_dev->badblocks, page, 0);
}
static ssize_t nullb_device_badblocks_store(struct config_item *item,
const char *page, size_t count)
{
struct nullb_device *t_dev = to_nullb_device(item);
char *orig, *buf, *tmp;
u64 start, end;
int ret;
orig = kstrndup(page, count, GFP_KERNEL);
if (!orig)
return -ENOMEM;
buf = strstrip(orig);
ret = -EINVAL;
if (buf[0] != '+' && buf[0] != '-')
goto out;
tmp = strchr(&buf[1], '-');
if (!tmp)
goto out;
*tmp = '\0';
ret = kstrtoull(buf + 1, 0, &start);
if (ret)
goto out;
ret = kstrtoull(tmp + 1, 0, &end);
if (ret)
goto out;
ret = -EINVAL;
if (start > end)
goto out;
/* enable badblocks */
cmpxchg(&t_dev->badblocks.shift, -1, 0);
if (buf[0] == '+')
ret = badblocks_set(&t_dev->badblocks, start,
end - start + 1, 1);
else
ret = badblocks_clear(&t_dev->badblocks, start,
end - start + 1);
if (ret == 0)
ret = count;
out:
kfree(orig);
return ret;
}
CONFIGFS_ATTR(nullb_device_, badblocks);
static ssize_t nullb_device_zone_readonly_store(struct config_item *item,
const char *page, size_t count)
{
struct nullb_device *dev = to_nullb_device(item);
return zone_cond_store(dev, page, count, BLK_ZONE_COND_READONLY);
}
CONFIGFS_ATTR_WO(nullb_device_, zone_readonly);
static ssize_t nullb_device_zone_offline_store(struct config_item *item,
const char *page, size_t count)
{
struct nullb_device *dev = to_nullb_device(item);
return zone_cond_store(dev, page, count, BLK_ZONE_COND_OFFLINE);
}
CONFIGFS_ATTR_WO(nullb_device_, zone_offline);
static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_size,
&nullb_device_attr_completion_nsec,
&nullb_device_attr_submit_queues,
&nullb_device_attr_poll_queues,
&nullb_device_attr_home_node,
&nullb_device_attr_queue_mode,
&nullb_device_attr_blocksize,
&nullb_device_attr_max_sectors,
&nullb_device_attr_irqmode,
&nullb_device_attr_hw_queue_depth,
&nullb_device_attr_index,
&nullb_device_attr_blocking,
&nullb_device_attr_use_per_node_hctx,
&nullb_device_attr_power,
&nullb_device_attr_memory_backed,
&nullb_device_attr_discard,
&nullb_device_attr_mbps,
&nullb_device_attr_cache_size,
&nullb_device_attr_badblocks,
&nullb_device_attr_zoned,
&nullb_device_attr_zone_size,
&nullb_device_attr_zone_capacity,
&nullb_device_attr_zone_nr_conv,
&nullb_device_attr_zone_max_open,
&nullb_device_attr_zone_max_active,
&nullb_device_attr_zone_readonly,
&nullb_device_attr_zone_offline,
&nullb_device_attr_virt_boundary,
&nullb_device_attr_no_sched,
&nullb_device_attr_shared_tag_bitmap,
NULL,
};
static void nullb_device_release(struct config_item *item)
{
struct nullb_device *dev = to_nullb_device(item);
null_free_device_storage(dev, false);
null_free_dev(dev);
}
static struct configfs_item_operations nullb_device_ops = {
.release = nullb_device_release,
};
static const struct config_item_type nullb_device_type = {
.ct_item_ops = &nullb_device_ops,
.ct_attrs = nullb_device_attrs,
.ct_owner = THIS_MODULE,
};
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static void nullb_add_fault_config(struct nullb_device *dev)
{
fault_config_init(&dev->timeout_config, "timeout_inject");
fault_config_init(&dev->requeue_config, "requeue_inject");
fault_config_init(&dev->init_hctx_fault_config, "init_hctx_fault_inject");
configfs_add_default_group(&dev->timeout_config.group, &dev->group);
configfs_add_default_group(&dev->requeue_config.group, &dev->group);
configfs_add_default_group(&dev->init_hctx_fault_config.group, &dev->group);
}
#else
static void nullb_add_fault_config(struct nullb_device *dev)
{
}
#endif
static struct config_group *nullb_group_make_group(struct config_group *group,
const char *name)
{
struct nullb_device *dev;
if (null_find_dev_by_name(name))
return ERR_PTR(-EEXIST);
dev = null_alloc_dev();
if (!dev)
return ERR_PTR(-ENOMEM);
config_group_init_type_name(&dev->group, name, &nullb_device_type);
nullb_add_fault_config(dev);
return &dev->group;
}
static void
nullb_group_drop_item(struct config_group *group, struct config_item *item)
{
struct nullb_device *dev = to_nullb_device(item);
if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
mutex_lock(&lock);
dev->power = false;
null_del_dev(dev->nullb);
mutex_unlock(&lock);
}
config_item_put(item);
}
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE,
"badblocks,blocking,blocksize,cache_size,"
"completion_nsec,discard,home_node,hw_queue_depth,"
"irqmode,max_sectors,mbps,memory_backed,no_sched,"
"poll_queues,power,queue_mode,shared_tag_bitmap,size,"
"submit_queues,use_per_node_hctx,virt_boundary,zoned,"
"zone_capacity,zone_max_active,zone_max_open,"
"zone_nr_conv,zone_offline,zone_readonly,zone_size\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
static struct configfs_attribute *nullb_group_attrs[] = {
&memb_group_attr_features,
NULL,
};
static struct configfs_group_operations nullb_group_ops = {
.make_group = nullb_group_make_group,
.drop_item = nullb_group_drop_item,
};
static const struct config_item_type nullb_group_type = {
.ct_group_ops = &nullb_group_ops,
.ct_attrs = nullb_group_attrs,
.ct_owner = THIS_MODULE,
};
static struct configfs_subsystem nullb_subsys = {
.su_group = {
.cg_item = {
.ci_namebuf = "nullb",
.ci_type = &nullb_group_type,
},
},
};
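/*
* Sketch of the resulting configfs interface (assuming configfs is
* mounted at the conventional /sys/kernel/config):
*   mkdir /sys/kernel/config/nullb/mydisk
*   echo 4096 > /sys/kernel/config/nullb/mydisk/blocksize
*   echo 1 > /sys/kernel/config/nullb/mydisk/power
* creates and powers up a device whose disk name is "mydisk".
*/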
static inline int null_cache_active(struct nullb *nullb)
{
return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
}
static struct nullb_device *null_alloc_dev(void)
{
struct nullb_device *dev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
dev->timeout_config.attr = null_timeout_attr;
dev->requeue_config.attr = null_requeue_attr;
dev->init_hctx_fault_config.attr = null_init_hctx_attr;
#endif
INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
if (badblocks_init(&dev->badblocks, 0)) {
kfree(dev);
return NULL;
}
dev->size = g_gb * 1024;
dev->completion_nsec = g_completion_nsec;
dev->submit_queues = g_submit_queues;
dev->prev_submit_queues = g_submit_queues;
dev->poll_queues = g_poll_queues;
dev->prev_poll_queues = g_poll_queues;
dev->home_node = g_home_node;
dev->queue_mode = g_queue_mode;
dev->blocksize = g_bs;
dev->max_sectors = g_max_sectors;
dev->irqmode = g_irqmode;
dev->hw_queue_depth = g_hw_queue_depth;
dev->blocking = g_blocking;
dev->memory_backed = g_memory_backed;
dev->discard = g_discard;
dev->cache_size = g_cache_size;
dev->mbps = g_mbps;
dev->use_per_node_hctx = g_use_per_node_hctx;
dev->zoned = g_zoned;
dev->zone_size = g_zone_size;
dev->zone_capacity = g_zone_capacity;
dev->zone_nr_conv = g_zone_nr_conv;
dev->zone_max_open = g_zone_max_open;
dev->zone_max_active = g_zone_max_active;
dev->virt_boundary = g_virt_boundary;
dev->no_sched = g_no_sched;
dev->shared_tag_bitmap = g_shared_tag_bitmap;
return dev;
}
static void null_free_dev(struct nullb_device *dev)
{
if (!dev)
return;
null_free_zoned_dev(dev);
badblocks_exit(&dev->badblocks);
kfree(dev);
}
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
clear_bit_unlock(tag, nq->tag_map);
if (waitqueue_active(&nq->wait))
wake_up(&nq->wait);
}
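/*
* Lock-free tag allocation: find_first_zero_bit() proposes a candidate and
* test_and_set_bit_lock() claims it; retry if another CPU won the race.
*/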
static unsigned int get_tag(struct nullb_queue *nq)
{
unsigned int tag;
do {
tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
if (tag >= nq->queue_depth)
return -1U;
} while (test_and_set_bit_lock(tag, nq->tag_map));
return tag;
}
static void free_cmd(struct nullb_cmd *cmd)
{
put_tag(cmd->nq, cmd->tag);
}
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);
static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
struct nullb_cmd *cmd;
unsigned int tag;
tag = get_tag(nq);
if (tag != -1U) {
cmd = &nq->cmds[tag];
cmd->tag = tag;
cmd->error = BLK_STS_OK;
cmd->nq = nq;
if (nq->dev->irqmode == NULL_IRQ_TIMER) {
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
cmd->timer.function = null_cmd_timer_expired;
}
return cmd;
}
return NULL;
}
static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, struct bio *bio)
{
struct nullb_cmd *cmd;
DEFINE_WAIT(wait);
do {
/*
* This avoids multiple return statements, multiple calls to
* __alloc_cmd() and a fast path call to prepare_to_wait().
*/
cmd = __alloc_cmd(nq);
if (cmd) {
cmd->bio = bio;
return cmd;
}
prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
io_schedule();
finish_wait(&nq->wait, &wait);
} while (1);
}
static void end_cmd(struct nullb_cmd *cmd)
{
int queue_mode = cmd->nq->dev->queue_mode;
switch (queue_mode) {
case NULL_Q_MQ:
blk_mq_end_request(cmd->rq, cmd->error);
return;
case NULL_Q_BIO:
cmd->bio->bi_status = cmd->error;
bio_endio(cmd->bio);
break;
}
free_cmd(cmd);
}
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
end_cmd(container_of(timer, struct nullb_cmd, timer));
return HRTIMER_NORESTART;
}
static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
ktime_t kt = cmd->nq->dev->completion_nsec;
hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}
static void null_complete_rq(struct request *rq)
{
end_cmd(blk_mq_rq_to_pdu(rq));
}
static struct nullb_page *null_alloc_page(void)
{
struct nullb_page *t_page;
t_page = kmalloc(sizeof(struct nullb_page), GFP_NOIO);
if (!t_page)
return NULL;
t_page->page = alloc_pages(GFP_NOIO, 0);
if (!t_page->page) {
kfree(t_page);
return NULL;
}
memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
return t_page;
}
static void null_free_page(struct nullb_page *t_page)
{
__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
return;
__free_page(t_page->page);
kfree(t_page);
}
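/*
* The page bitmap carries two state flags (NULLB_PAGE_LOCK and
* NULLB_PAGE_FREE) above the per-sector bits, hence the MAP_SZ - 2
* search limit below.
*/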
static bool null_page_empty(struct nullb_page *page)
{
int size = MAP_SZ - 2;
return find_first_bit(page->bitmap, size) == size;
}
static void null_free_sector(struct nullb *nullb, sector_t sector,
bool is_cache)
{
unsigned int sector_bit;
u64 idx;
struct nullb_page *t_page, *ret;
struct radix_tree_root *root;
root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
idx = sector >> PAGE_SECTORS_SHIFT;
sector_bit = (sector & SECTOR_MASK);
t_page = radix_tree_lookup(root, idx);
if (t_page) {
__clear_bit(sector_bit, t_page->bitmap);
if (null_page_empty(t_page)) {
ret = radix_tree_delete_item(root, idx, t_page);
WARN_ON(ret != t_page);
null_free_page(ret);
if (is_cache)
nullb->dev->curr_cache -= PAGE_SIZE;
}
}
}
static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
struct nullb_page *t_page, bool is_cache)
{
struct radix_tree_root *root;
root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
if (radix_tree_insert(root, idx, t_page)) {
null_free_page(t_page);
t_page = radix_tree_lookup(root, idx);
WARN_ON(!t_page || t_page->page->index != idx);
} else if (is_cache)
nullb->dev->curr_cache += PAGE_SIZE;
return t_page;
}
static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
{
unsigned long pos = 0;
int nr_pages;
struct nullb_page *ret, *t_pages[FREE_BATCH];
struct radix_tree_root *root;
root = is_cache ? &dev->cache : &dev->data;
do {
int i;
nr_pages = radix_tree_gang_lookup(root,
(void **)t_pages, pos, FREE_BATCH);
for (i = 0; i < nr_pages; i++) {
pos = t_pages[i]->page->index;
ret = radix_tree_delete_item(root, pos, t_pages[i]);
WARN_ON(ret != t_pages[i]);
null_free_page(ret);
}
pos++;
} while (nr_pages == FREE_BATCH);
if (is_cache)
dev->curr_cache = 0;
}
static struct nullb_page *__null_lookup_page(struct nullb *nullb,
sector_t sector, bool for_write, bool is_cache)
{
unsigned int sector_bit;
u64 idx;
struct nullb_page *t_page;
struct radix_tree_root *root;
idx = sector >> PAGE_SECTORS_SHIFT;
sector_bit = (sector & SECTOR_MASK);
root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
t_page = radix_tree_lookup(root, idx);
WARN_ON(t_page && t_page->page->index != idx);
if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
return t_page;
return NULL;
}
static struct nullb_page *null_lookup_page(struct nullb *nullb,
sector_t sector, bool for_write, bool ignore_cache)
{
struct nullb_page *page = NULL;
if (!ignore_cache)
page = __null_lookup_page(nullb, sector, for_write, true);
if (page)
return page;
return __null_lookup_page(nullb, sector, for_write, false);
}
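/*
* Look up, or allocate and insert, the page backing @sector. May drop and
* re-acquire nullb->lock around the allocation (see the sparse annotations
* below), so callers must not rely on state read under the earlier hold.
*/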
static struct nullb_page *null_insert_page(struct nullb *nullb,
sector_t sector, bool ignore_cache)
__releases(&nullb->lock)
__acquires(&nullb->lock)
{
u64 idx;
struct nullb_page *t_page;
t_page = null_lookup_page(nullb, sector, true, ignore_cache);
if (t_page)
return t_page;
spin_unlock_irq(&nullb->lock);
t_page = null_alloc_page();
if (!t_page)
goto out_lock;
if (radix_tree_preload(GFP_NOIO))
goto out_freepage;
spin_lock_irq(&nullb->lock);
idx = sector >> PAGE_SECTORS_SHIFT;
t_page->page->index = idx;
t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
radix_tree_preload_end();
return t_page;
out_freepage:
null_free_page(t_page);
out_lock:
spin_lock_irq(&nullb->lock);
return null_lookup_page(nullb, sector, true, ignore_cache);
}
static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
{
int i;
unsigned int offset;
u64 idx;
struct nullb_page *t_page, *ret;
void *dst, *src;
idx = c_page->page->index;
t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
null_free_page(c_page);
if (t_page && null_page_empty(t_page)) {
ret = radix_tree_delete_item(&nullb->dev->data,
idx, t_page);
null_free_page(t_page);
}
return 0;
}
if (!t_page)
return -ENOMEM;
src = kmap_local_page(c_page->page);
dst = kmap_local_page(t_page->page);
for (i = 0; i < PAGE_SECTORS;
i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
if (test_bit(i, c_page->bitmap)) {
offset = (i << SECTOR_SHIFT);
memcpy(dst + offset, src + offset,
nullb->dev->blocksize);
__set_bit(i, t_page->bitmap);
}
}
kunmap_local(dst);
kunmap_local(src);
ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
null_free_page(ret);
nullb->dev->curr_cache -= PAGE_SIZE;
return 0;
}
static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{
int i, err, nr_pages;
struct nullb_page *c_pages[FREE_BATCH];
unsigned long flushed = 0, one_round;
again:
if ((nullb->dev->cache_size * 1024 * 1024) >
nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
return 0;
nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
(void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
/*
* null_flush_cache_page() could drop nullb->lock before it is done with
* the c_pages. To avoid that race, lock each page here so it cannot be
* freed while it is being flushed.
*/
for (i = 0; i < nr_pages; i++) {
nullb->cache_flush_pos = c_pages[i]->page->index;
/*
* Skip any page that another thread is already flushing to disk.
*/
if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
c_pages[i] = NULL;
else
__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
}
one_round = 0;
for (i = 0; i < nr_pages; i++) {
if (c_pages[i] == NULL)
continue;
err = null_flush_cache_page(nullb, c_pages[i]);
if (err)
return err;
one_round++;
}
flushed += one_round << PAGE_SHIFT;
if (n > flushed) {
if (nr_pages == 0)
nullb->cache_flush_pos = 0;
if (one_round == 0) {
/* give other threads a chance */
spin_unlock_irq(&nullb->lock);
spin_lock_irq(&nullb->lock);
}
goto again;
}
return 0;
}
static int copy_to_nullb(struct nullb *nullb, struct page *source,
unsigned int off, sector_t sector, size_t n, bool is_fua)
{
size_t temp, count = 0;
unsigned int offset;
struct nullb_page *t_page;
while (count < n) {
temp = min_t(size_t, nullb->dev->blocksize, n - count);
if (null_cache_active(nullb) && !is_fua)
null_make_cache_space(nullb, PAGE_SIZE);
offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
t_page = null_insert_page(nullb, sector,
!null_cache_active(nullb) || is_fua);
if (!t_page)
return -ENOSPC;
memcpy_page(t_page->page, offset, source, off + count, temp);
__set_bit(sector & SECTOR_MASK, t_page->bitmap);
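/* a FUA write went straight to the data store; drop any stale cached copy */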
if (is_fua)
null_free_sector(nullb, sector, true);
count += temp;
sector += temp >> SECTOR_SHIFT;
}
return 0;
}
static int copy_from_nullb(struct nullb *nullb, struct page *dest,
unsigned int off, sector_t sector, size_t n)
{
size_t temp, count = 0;
unsigned int offset;
struct nullb_page *t_page;
while (count < n) {
temp = min_t(size_t, nullb->dev->blocksize, n - count);
offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
t_page = null_lookup_page(nullb, sector, false,
!null_cache_active(nullb));
if (t_page)
memcpy_page(dest, off + count, t_page->page, offset,
temp);
else
zero_user(dest, off + count, temp);
count += temp;
sector += temp >> SECTOR_SHIFT;
}
return 0;
}
static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
unsigned int len, unsigned int off)
{
memset_page(page, off, 0xff, len);
}
blk_status_t null_handle_discard(struct nullb_device *dev,
sector_t sector, sector_t nr_sectors)
{
struct nullb *nullb = dev->nullb;
size_t n = nr_sectors << SECTOR_SHIFT;
size_t temp;
spin_lock_irq(&nullb->lock);
while (n > 0) {
temp = min_t(size_t, n, dev->blocksize);
null_free_sector(nullb, sector, false);
if (null_cache_active(nullb))
null_free_sector(nullb, sector, true);
sector += temp >> SECTOR_SHIFT;
n -= temp;
}
spin_unlock_irq(&nullb->lock);
return BLK_STS_OK;
}
static int null_handle_flush(struct nullb *nullb)
{
int err;
if (!null_cache_active(nullb))
return 0;
spin_lock_irq(&nullb->lock);
while (true) {
err = null_make_cache_space(nullb,
nullb->dev->cache_size * 1024 * 1024);
if (err || nullb->dev->curr_cache == 0)
break;
}
WARN_ON(!radix_tree_empty(&nullb->dev->cache));
spin_unlock_irq(&nullb->lock);
return err;
}
static int null_transfer(struct nullb *nullb, struct page *page,
unsigned int len, unsigned int off, bool is_write, sector_t sector,
bool is_fua)
{
struct nullb_device *dev = nullb->dev;
unsigned int valid_len = len;
int err = 0;
if (!is_write) {
if (dev->zoned)
valid_len = null_zone_valid_read_len(nullb,
sector, len);
if (valid_len) {
err = copy_from_nullb(nullb, page, off,
sector, valid_len);
off += valid_len;
len -= valid_len;
}
if (len)
nullb_fill_pattern(nullb, page, len, off);
flush_dcache_page(page);
} else {
flush_dcache_page(page);
err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
}
return err;
}
static int null_handle_rq(struct nullb_cmd *cmd)
{
struct request *rq = cmd->rq;
struct nullb *nullb = cmd->nq->dev->nullb;
int err;
unsigned int len;
sector_t sector = blk_rq_pos(rq);
struct req_iterator iter;
struct bio_vec bvec;
spin_lock_irq(&nullb->lock);
rq_for_each_segment(bvec, rq, iter) {
len = bvec.bv_len;
err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
op_is_write(req_op(rq)), sector,
rq->cmd_flags & REQ_FUA);
if (err) {
spin_unlock_irq(&nullb->lock);
return err;
}
sector += len >> SECTOR_SHIFT;
}
spin_unlock_irq(&nullb->lock);
return 0;
}
static int null_handle_bio(struct nullb_cmd *cmd)
{
struct bio *bio = cmd->bio;
struct nullb *nullb = cmd->nq->dev->nullb;
int err;
unsigned int len;
sector_t sector = bio->bi_iter.bi_sector;
struct bio_vec bvec;
struct bvec_iter iter;
spin_lock_irq(&nullb->lock);
bio_for_each_segment(bvec, bio, iter) {
len = bvec.bv_len;
err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
op_is_write(bio_op(bio)), sector,
bio->bi_opf & REQ_FUA);
if (err) {
spin_unlock_irq(&nullb->lock);
return err;
}
sector += len >> SECTOR_SHIFT;
}
spin_unlock_irq(&nullb->lock);
return 0;
}
static void null_stop_queue(struct nullb *nullb)
{
struct request_queue *q = nullb->q;
if (nullb->dev->queue_mode == NULL_Q_MQ)
blk_mq_stop_hw_queues(q);
}
static void null_restart_queue_async(struct nullb *nullb)
{
struct request_queue *q = nullb->q;
if (nullb->dev->queue_mode == NULL_Q_MQ)
blk_mq_start_stopped_hw_queues(q, true);
}
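/*
* Bandwidth throttling: nullb_bwtimer_fn() below refills ->cur_bytes once
* per TIMER_INTERVAL; each request subtracts its size here, and when the
* budget goes negative the queue is stopped until the timer restarts it.
*/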
static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
{
struct nullb_device *dev = cmd->nq->dev;
struct nullb *nullb = dev->nullb;
blk_status_t sts = BLK_STS_OK;
struct request *rq = cmd->rq;
if (!hrtimer_active(&nullb->bw_timer))
hrtimer_restart(&nullb->bw_timer);
if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) {
null_stop_queue(nullb);
/* race with timer */
if (atomic_long_read(&nullb->cur_bytes) > 0)
null_restart_queue_async(nullb);
/* requeue request */
sts = BLK_STS_DEV_RESOURCE;
}
return sts;
}
static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
sector_t sector,
sector_t nr_sectors)
{
struct badblocks *bb = &cmd->nq->dev->badblocks;
sector_t first_bad;
int bad_sectors;
if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors))
return BLK_STS_IOERR;
return BLK_STS_OK;
}
static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
enum req_op op,
sector_t sector,
sector_t nr_sectors)
{
struct nullb_device *dev = cmd->nq->dev;
int err;
if (op == REQ_OP_DISCARD)
return null_handle_discard(dev, sector, nr_sectors);
if (dev->queue_mode == NULL_Q_BIO)
err = null_handle_bio(cmd);
else
err = null_handle_rq(cmd);
return errno_to_blk_status(err);
}
static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd)
{
struct nullb_device *dev = cmd->nq->dev;
struct bio *bio;
if (dev->memory_backed)
return;
if (dev->queue_mode == NULL_Q_BIO && bio_op(cmd->bio) == REQ_OP_READ) {
zero_fill_bio(cmd->bio);
} else if (req_op(cmd->rq) == REQ_OP_READ) {
__rq_for_each_bio(bio, cmd->rq)
zero_fill_bio(bio);
}
}
static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
{
/*
* Since root privileges are required to configure the null_blk
* driver, it is fine that this driver does not initialize the
* data buffers of read commands. Zero-initialize these buffers
* anyway if KMSAN is enabled to prevent KMSAN from complaining
* about null_blk not initializing read data buffers.
*/
if (IS_ENABLED(CONFIG_KMSAN))
nullb_zero_read_cmd_buffer(cmd);
/* Complete IO by inline, softirq or timer */
switch (cmd->nq->dev->irqmode) {
case NULL_IRQ_SOFTIRQ:
switch (cmd->nq->dev->queue_mode) {
case NULL_Q_MQ:
blk_mq_complete_request(cmd->rq);
break;
case NULL_Q_BIO:
/*
* XXX: no proper submitting cpu information available.
*/
end_cmd(cmd);
break;
}
break;
case NULL_IRQ_NONE:
end_cmd(cmd);
break;
case NULL_IRQ_TIMER:
null_cmd_end_timer(cmd);
break;
}
}
blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
sector_t sector, unsigned int nr_sectors)
{
struct nullb_device *dev = cmd->nq->dev;
blk_status_t ret;
if (dev->badblocks.shift != -1) {
ret = null_handle_badblocks(cmd, sector, nr_sectors);
if (ret != BLK_STS_OK)
return ret;
}
if (dev->memory_backed)
return null_handle_memory_backed(cmd, op, sector, nr_sectors);
return BLK_STS_OK;
}
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
sector_t nr_sectors, enum req_op op)
{
struct nullb_device *dev = cmd->nq->dev;
struct nullb *nullb = dev->nullb;
blk_status_t sts;
if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
sts = null_handle_throttled(cmd);
if (sts != BLK_STS_OK)
return sts;
}
if (op == REQ_OP_FLUSH) {
cmd->error = errno_to_blk_status(null_handle_flush(nullb));
goto out;
}
if (dev->zoned)
sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors);
else
sts = null_process_cmd(cmd, op, sector, nr_sectors);
/* Do not overwrite errors (e.g. timeout errors) */
if (cmd->error == BLK_STS_OK)
cmd->error = sts;
out:
nullb_complete_cmd(cmd);
return BLK_STS_OK;
}
static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
{
struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
unsigned int mbps = nullb->dev->mbps;
if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
return HRTIMER_NORESTART;
atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
null_restart_queue_async(nullb);
hrtimer_forward_now(&nullb->bw_timer, timer_interval);
return HRTIMER_RESTART;
}
static void nullb_setup_bwtimer(struct nullb *nullb)
{
ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
nullb->bw_timer.function = nullb_bwtimer_fn;
atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
int index = 0;
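/* spread CPUs over queues: index = cpu / ceil(nr_cpu_ids / nr_queues) */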
if (nullb->nr_queues != 1)
index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);
return &nullb->queues[index];
}
static void null_submit_bio(struct bio *bio)
{
sector_t sector = bio->bi_iter.bi_sector;
sector_t nr_sectors = bio_sectors(bio);
struct nullb *nullb = bio->bi_bdev->bd_disk->private_data;
struct nullb_queue *nq = nullb_to_queue(nullb);
null_handle_cmd(alloc_cmd(nq, bio), sector, nr_sectors, bio_op(bio));
}
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static bool should_timeout_request(struct request *rq)
{
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
struct nullb_device *dev = cmd->nq->dev;
return should_fail(&dev->timeout_config.attr, 1);
}
static bool should_requeue_request(struct request *rq)
{
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
struct nullb_device *dev = cmd->nq->dev;
return should_fail(&dev->requeue_config.attr, 1);
}
static bool should_init_hctx_fail(struct nullb_device *dev)
{
return should_fail(&dev->init_hctx_fault_config.attr, 1);
}
#else
static bool should_timeout_request(struct request *rq)
{
return false;
}
static bool should_requeue_request(struct request *rq)
{
return false;
}
static bool should_init_hctx_fail(struct nullb_device *dev)
{
return false;
}
#endif
static void null_map_queues(struct blk_mq_tag_set *set)
{
struct nullb *nullb = set->driver_data;
int i, qoff;
unsigned int submit_queues = g_submit_queues;
unsigned int poll_queues = g_poll_queues;
if (nullb) {
struct nullb_device *dev = nullb->dev;
/*
* Refer nr_hw_queues of the tag set to check if the expected
* number of hardware queues are prepared. If block layer failed
* to prepare them, use previous numbers of submit queues and
* poll queues to map queues.
*/
if (set->nr_hw_queues ==
dev->submit_queues + dev->poll_queues) {
submit_queues = dev->submit_queues;
poll_queues = dev->poll_queues;
} else if (set->nr_hw_queues ==
dev->prev_submit_queues + dev->prev_poll_queues) {
submit_queues = dev->prev_submit_queues;
poll_queues = dev->prev_poll_queues;
} else {
pr_warn("tag set has unexpected nr_hw_queues: %d\n",
set->nr_hw_queues);
WARN_ON_ONCE(true);
submit_queues = 1;
poll_queues = 0;
}
}
for (i = 0, qoff = 0; i < set->nr_maps; i++) {
struct blk_mq_queue_map *map = &set->map[i];
switch (i) {
case HCTX_TYPE_DEFAULT:
map->nr_queues = submit_queues;
break;
case HCTX_TYPE_READ:
map->nr_queues = 0;
continue;
case HCTX_TYPE_POLL:
map->nr_queues = poll_queues;
break;
}
map->queue_offset = qoff;
qoff += map->nr_queues;
blk_mq_map_queues(map);
}
}
static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct nullb_queue *nq = hctx->driver_data;
LIST_HEAD(list);
int nr = 0;
struct request *rq;
spin_lock(&nq->poll_lock);
list_splice_init(&nq->poll_list, &list);
list_for_each_entry(rq, &list, queuelist)
blk_mq_set_request_complete(rq);
spin_unlock(&nq->poll_lock);
while (!list_empty(&list)) {
struct nullb_cmd *cmd;
struct request *req;
req = list_first_entry(&list, struct request, queuelist);
list_del_init(&req->queuelist);
cmd = blk_mq_rq_to_pdu(req);
cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
blk_rq_sectors(req));
if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
blk_mq_end_request_batch))
end_cmd(cmd);
nr++;
}
return nr;
}
static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
{
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
if (hctx->type == HCTX_TYPE_POLL) {
struct nullb_queue *nq = hctx->driver_data;
spin_lock(&nq->poll_lock);
/* The request may have completed meanwhile. */
if (blk_mq_request_completed(rq)) {
spin_unlock(&nq->poll_lock);
return BLK_EH_DONE;
}
list_del_init(&rq->queuelist);
spin_unlock(&nq->poll_lock);
}
pr_info("rq %p timed out\n", rq);
/*
* If the device is marked as blocking (i.e. memory backed or zoned
* device), the submission path may be blocked waiting for resources
* and cause real timeouts. For these real timeouts, the submission
* path will complete the request using blk_mq_complete_request().
* Only fake timeouts need to execute blk_mq_complete_request() here.
*/
cmd->error = BLK_STS_TIMEOUT;
if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL)
blk_mq_complete_request(rq);
return BLK_EH_DONE;
}
static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request *rq = bd->rq;
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
struct nullb_queue *nq = hctx->driver_data;
sector_t nr_sectors = blk_rq_sectors(rq);
sector_t sector = blk_rq_pos(rq);
const bool is_poll = hctx->type == HCTX_TYPE_POLL;
might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
if (!is_poll && nq->dev->irqmode == NULL_IRQ_TIMER) {
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cmd->timer.function = null_cmd_timer_expired;
}
cmd->rq = rq;
cmd->error = BLK_STS_OK;
cmd->nq = nq;
cmd->fake_timeout = should_timeout_request(rq) ||
blk_should_fake_timeout(rq->q);
blk_mq_start_request(rq);
if (should_requeue_request(rq)) {
/*
* Alternate between hitting the core BUSY path and the
* driver-driven requeue path.
*/
nq->requeue_selection++;
if (nq->requeue_selection & 1)
return BLK_STS_RESOURCE;
blk_mq_requeue_request(rq, true);
return BLK_STS_OK;
}
if (is_poll) {
spin_lock(&nq->poll_lock);
list_add_tail(&rq->queuelist, &nq->poll_list);
spin_unlock(&nq->poll_lock);
return BLK_STS_OK;
}
if (cmd->fake_timeout)
return BLK_STS_OK;
return null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
}
static void cleanup_queue(struct nullb_queue *nq)
{
bitmap_free(nq->tag_map);
kfree(nq->cmds);
}
static void cleanup_queues(struct nullb *nullb)
{
int i;
for (i = 0; i < nullb->nr_queues; i++)
cleanup_queue(&nullb->queues[i]);
kfree(nullb->queues);
}
static void null_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
struct nullb_queue *nq = hctx->driver_data;
struct nullb *nullb = nq->dev->nullb;
nullb->nr_queues--;
}
static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
init_waitqueue_head(&nq->wait);
nq->queue_depth = nullb->queue_depth;
nq->dev = nullb->dev;
INIT_LIST_HEAD(&nq->poll_list);
spin_lock_init(&nq->poll_lock);
}
static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
unsigned int hctx_idx)
{
struct nullb *nullb = hctx->queue->queuedata;
struct nullb_queue *nq;
if (should_init_hctx_fail(nullb->dev))
return -EFAULT;
nq = &nullb->queues[hctx_idx];
hctx->driver_data = nq;
null_init_queue(nullb, nq);
nullb->nr_queues++;
return 0;
}
static const struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
.complete = null_complete_rq,
.timeout = null_timeout_rq,
.poll = null_poll,
.map_queues = null_map_queues,
.init_hctx = null_init_hctx,
.exit_hctx = null_exit_hctx,
};
static void null_del_dev(struct nullb *nullb)
{
struct nullb_device *dev;
if (!nullb)
return;
dev = nullb->dev;
ida_simple_remove(&nullb_indexes, nullb->index);
list_del_init(&nullb->list);
del_gendisk(nullb->disk);
if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
hrtimer_cancel(&nullb->bw_timer);
atomic_long_set(&nullb->cur_bytes, LONG_MAX);
null_restart_queue_async(nullb);
}
put_disk(nullb->disk);
if (dev->queue_mode == NULL_Q_MQ &&
nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
cleanup_queues(nullb);
if (null_cache_active(nullb))
null_free_device_storage(nullb->dev, true);
kfree(nullb);
dev->nullb = NULL;
}
static void null_config_discard(struct nullb *nullb)
{
if (!nullb->dev->discard)
return;
if (!nullb->dev->memory_backed) {
nullb->dev->discard = false;
pr_info("discard option is ignored without memory backing\n");
return;
}
if (nullb->dev->zoned) {
nullb->dev->discard = false;
pr_info("discard option is ignored in zoned mode\n");
return;
}
nullb->q->limits.discard_granularity = nullb->dev->blocksize;
blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
}
static const struct block_device_operations null_bio_ops = {
.owner = THIS_MODULE,
.submit_bio = null_submit_bio,
.report_zones = null_report_zones,
};
static const struct block_device_operations null_rq_ops = {
.owner = THIS_MODULE,
.report_zones = null_report_zones,
};
static int setup_commands(struct nullb_queue *nq)
{
struct nullb_cmd *cmd;
int i;
nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
if (!nq->cmds)
return -ENOMEM;
nq->tag_map = bitmap_zalloc(nq->queue_depth, GFP_KERNEL);
if (!nq->tag_map) {
kfree(nq->cmds);
return -ENOMEM;
}
for (i = 0; i < nq->queue_depth; i++) {
cmd = &nq->cmds[i];
cmd->tag = -1U;
}
return 0;
}
static int setup_queues(struct nullb *nullb)
{
int nqueues = nr_cpu_ids;
if (g_poll_queues)
nqueues += g_poll_queues;
nullb->queues = kcalloc(nqueues, sizeof(struct nullb_queue),
GFP_KERNEL);
if (!nullb->queues)
return -ENOMEM;
nullb->queue_depth = nullb->dev->hw_queue_depth;
return 0;
}
static int init_driver_queues(struct nullb *nullb)
{
struct nullb_queue *nq;
int i, ret = 0;
for (i = 0; i < nullb->dev->submit_queues; i++) {
nq = &nullb->queues[i];
null_init_queue(nullb, nq);
ret = setup_commands(nq);
if (ret)
return ret;
nullb->nr_queues++;
}
return 0;
}
static int null_gendisk_register(struct nullb *nullb)
{
sector_t size = ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT;
struct gendisk *disk = nullb->disk;
set_capacity(disk, size);
disk->major = null_major;
disk->first_minor = nullb->index;
disk->minors = 1;
if (queue_is_mq(nullb->q))
disk->fops = &null_rq_ops;
else
disk->fops = &null_bio_ops;
disk->private_data = nullb;
strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
if (nullb->dev->zoned) {
int ret = null_register_zoned_dev(nullb);
if (ret)
return ret;
}
return add_disk(disk);
}
static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
unsigned int flags = BLK_MQ_F_SHOULD_MERGE;
int hw_queues, numa_node;
unsigned int queue_depth;
int poll_queues;
if (nullb) {
hw_queues = nullb->dev->submit_queues;
poll_queues = nullb->dev->poll_queues;
queue_depth = nullb->dev->hw_queue_depth;
numa_node = nullb->dev->home_node;
if (nullb->dev->no_sched)
flags |= BLK_MQ_F_NO_SCHED;
if (nullb->dev->shared_tag_bitmap)
flags |= BLK_MQ_F_TAG_HCTX_SHARED;
if (nullb->dev->blocking)
flags |= BLK_MQ_F_BLOCKING;
} else {
hw_queues = g_submit_queues;
poll_queues = g_poll_queues;
queue_depth = g_hw_queue_depth;
numa_node = g_home_node;
if (g_no_sched)
flags |= BLK_MQ_F_NO_SCHED;
if (g_shared_tag_bitmap)
flags |= BLK_MQ_F_TAG_HCTX_SHARED;
if (g_blocking)
flags |= BLK_MQ_F_BLOCKING;
}
set->ops = &null_mq_ops;
set->cmd_size = sizeof(struct nullb_cmd);
set->flags = flags;
set->driver_data = nullb;
set->nr_hw_queues = hw_queues;
set->queue_depth = queue_depth;
set->numa_node = numa_node;
if (poll_queues) {
set->nr_hw_queues += poll_queues;
set->nr_maps = 3;
} else {
set->nr_maps = 1;
}
return blk_mq_alloc_tag_set(set);
}
static int null_validate_conf(struct nullb_device *dev)
{
if (dev->queue_mode == NULL_Q_RQ) {
pr_err("legacy IO path is no longer available\n");
return -EINVAL;
}
dev->blocksize = round_down(dev->blocksize, 512);
dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
if (dev->submit_queues != nr_online_nodes)
dev->submit_queues = nr_online_nodes;
} else if (dev->submit_queues > nr_cpu_ids)
dev->submit_queues = nr_cpu_ids;
else if (dev->submit_queues == 0)
dev->submit_queues = 1;
dev->prev_submit_queues = dev->submit_queues;
if (dev->poll_queues > g_poll_queues)
dev->poll_queues = g_poll_queues;
dev->prev_poll_queues = dev->poll_queues;
dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);
/* Do memory allocation, so set blocking */
if (dev->memory_backed)
dev->blocking = true;
else /* cache is meaningless */
dev->cache_size = 0;
dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
dev->cache_size);
dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
/* a bio-based queue cannot be stopped, so throttling is unsupported */
if (dev->queue_mode == NULL_Q_BIO)
dev->mbps = 0;
if (dev->zoned &&
(!dev->zone_size || !is_power_of_2(dev->zone_size))) {
pr_err("zone_size must be power-of-two\n");
return -EINVAL;
}
return 0;
}
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static bool __null_setup_fault(struct fault_attr *attr, char *str)
{
if (!str[0])
return true;
if (!setup_fault_attr(attr, str))
return false;
attr->verbose = 0;
return true;
}
#endif
static bool null_setup_fault(void)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
return false;
if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
return false;
if (!__null_setup_fault(&null_init_hctx_attr, g_init_hctx_str))
return false;
#endif
return true;
}
static int null_add_dev(struct nullb_device *dev)
{
struct nullb *nullb;
int rv;
rv = null_validate_conf(dev);
if (rv)
return rv;
nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
if (!nullb) {
rv = -ENOMEM;
goto out;
}
nullb->dev = dev;
dev->nullb = nullb;
spin_lock_init(&nullb->lock);
rv = setup_queues(nullb);
if (rv)
goto out_free_nullb;
if (dev->queue_mode == NULL_Q_MQ) {
if (shared_tags) {
nullb->tag_set = &tag_set;
rv = 0;
} else {
nullb->tag_set = &nullb->__tag_set;
rv = null_init_tag_set(nullb, nullb->tag_set);
}
if (rv)
goto out_cleanup_queues;
nullb->tag_set->timeout = 5 * HZ;
nullb->disk = blk_mq_alloc_disk(nullb->tag_set, nullb);
if (IS_ERR(nullb->disk)) {
rv = PTR_ERR(nullb->disk);
goto out_cleanup_tags;
}
nullb->q = nullb->disk->queue;
} else if (dev->queue_mode == NULL_Q_BIO) {
rv = -ENOMEM;
nullb->disk = blk_alloc_disk(nullb->dev->home_node);
if (!nullb->disk)
goto out_cleanup_queues;
nullb->q = nullb->disk->queue;
rv = init_driver_queues(nullb);
if (rv)
goto out_cleanup_disk;
}
if (dev->mbps) {
set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
nullb_setup_bwtimer(nullb);
}
if (dev->cache_size > 0) {
set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
blk_queue_write_cache(nullb->q, true, true);
}
if (dev->zoned) {
rv = null_init_zoned_dev(dev, nullb->q);
if (rv)
goto out_cleanup_disk;
}
nullb->q->queuedata = nullb;
blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
mutex_lock(&lock);
rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
if (rv < 0) {
mutex_unlock(&lock);
goto out_cleanup_zone;
}
nullb->index = rv;
dev->index = rv;
mutex_unlock(&lock);
blk_queue_logical_block_size(nullb->q, dev->blocksize);
blk_queue_physical_block_size(nullb->q, dev->blocksize);
if (!dev->max_sectors)
dev->max_sectors = queue_max_hw_sectors(nullb->q);
dev->max_sectors = min(dev->max_sectors, BLK_DEF_MAX_SECTORS);
blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
if (dev->virt_boundary)
blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1);
null_config_discard(nullb);
if (config_item_name(&dev->group.cg_item)) {
/* Use configfs dir name as the device name */
snprintf(nullb->disk_name, sizeof(nullb->disk_name),
"%s", config_item_name(&dev->group.cg_item));
} else {
sprintf(nullb->disk_name, "nullb%d", nullb->index);
}
rv = null_gendisk_register(nullb);
if (rv)
goto out_ida_free;
mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);
mutex_unlock(&lock);
pr_info("disk %s created\n", nullb->disk_name);
return 0;
out_ida_free:
ida_free(&nullb_indexes, nullb->index);
out_cleanup_zone:
null_free_zoned_dev(dev);
out_cleanup_disk:
put_disk(nullb->disk);
out_cleanup_tags:
if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
cleanup_queues(nullb);
out_free_nullb:
kfree(nullb);
dev->nullb = NULL;
out:
return rv;
}
static struct nullb *null_find_dev_by_name(const char *name)
{
struct nullb *nullb = NULL, *nb;
mutex_lock(&lock);
list_for_each_entry(nb, &nullb_list, list) {
if (strcmp(nb->disk_name, name) == 0) {
nullb = nb;
break;
}
}
mutex_unlock(&lock);
return nullb;
}
static int null_create_dev(void)
{
struct nullb_device *dev;
int ret;
dev = null_alloc_dev();
if (!dev)
return -ENOMEM;
ret = null_add_dev(dev);
if (ret) {
null_free_dev(dev);
return ret;
}
return 0;
}
static void null_destroy_dev(struct nullb *nullb)
{
struct nullb_device *dev = nullb->dev;
null_del_dev(nullb);
null_free_device_storage(dev, false);
null_free_dev(dev);
}
static int __init null_init(void)
{
int ret = 0;
unsigned int i;
struct nullb *nullb;
if (g_bs > PAGE_SIZE) {
pr_warn("invalid block size\n");
pr_warn("defaults block size to %lu\n", PAGE_SIZE);
g_bs = PAGE_SIZE;
}
if (g_max_sectors > BLK_DEF_MAX_SECTORS) {
pr_warn("invalid max sectors\n");
pr_warn("defaults max sectors to %u\n", BLK_DEF_MAX_SECTORS);
g_max_sectors = BLK_DEF_MAX_SECTORS;
}
if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
pr_err("invalid home_node value\n");
g_home_node = NUMA_NO_NODE;
}
if (!null_setup_fault())
return -EINVAL;
if (g_queue_mode == NULL_Q_RQ) {
pr_err("legacy IO path is no longer available\n");
return -EINVAL;
}
if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
if (g_submit_queues != nr_online_nodes) {
pr_warn("submit_queues param is set to %u.\n",
nr_online_nodes);
g_submit_queues = nr_online_nodes;
}
} else if (g_submit_queues > nr_cpu_ids) {
g_submit_queues = nr_cpu_ids;
} else if (g_submit_queues <= 0) {
g_submit_queues = 1;
}
if (g_queue_mode == NULL_Q_MQ && shared_tags) {
ret = null_init_tag_set(NULL, &tag_set);
if (ret)
return ret;
}
config_group_init(&nullb_subsys.su_group);
mutex_init(&nullb_subsys.su_mutex);
ret = configfs_register_subsystem(&nullb_subsys);
if (ret)
goto err_tagset;
mutex_init(&lock);
null_major = register_blkdev(0, "nullb");
if (null_major < 0) {
ret = null_major;
goto err_conf;
}
for (i = 0; i < nr_devices; i++) {
ret = null_create_dev();
if (ret)
goto err_dev;
}
pr_info("module loaded\n");
return 0;
err_dev:
while (!list_empty(&nullb_list)) {
nullb = list_entry(nullb_list.next, struct nullb, list);
null_destroy_dev(nullb);
}
unregister_blkdev(null_major, "nullb");
err_conf:
configfs_unregister_subsystem(&nullb_subsys);
err_tagset:
if (g_queue_mode == NULL_Q_MQ && shared_tags)
blk_mq_free_tag_set(&tag_set);
return ret;
}
static void __exit null_exit(void)
{
struct nullb *nullb;
configfs_unregister_subsystem(&nullb_subsys);
unregister_blkdev(null_major, "nullb");
mutex_lock(&lock);
while (!list_empty(&nullb_list)) {
nullb = list_entry(nullb_list.next, struct nullb, list);
null_destroy_dev(nullb);
}
mutex_unlock(&lock);
if (g_queue_mode == NULL_Q_MQ && shared_tags)
blk_mq_free_tag_set(&tag_set);
}
module_init(null_init);
module_exit(null_exit);
MODULE_AUTHOR("Jens Axboe <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/block/null_blk/main.c |
/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
/*
* aoemain.c
* Module initialization routines, discover timer
*/
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include "aoe.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sam Hopkins <[email protected]>");
MODULE_DESCRIPTION("AoE block/char driver for 2.6.2 and newer 2.6 kernels");
MODULE_VERSION(VERSION);
static struct timer_list timer;
struct workqueue_struct *aoe_wq;
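/*
* Runs once a minute: re-arms itself and broadcasts an AoE config query
* with wildcard addresses (major 0xffff, minor 0xff) so that new targets
* are discovered.
*/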
static void discover_timer(struct timer_list *t)
{
mod_timer(t, jiffies + HZ * 60); /* one minute */
aoecmd_cfg(0xffff, 0xff);
}
static void __exit
aoe_exit(void)
{
del_timer_sync(&timer);
aoenet_exit();
unregister_blkdev(AOE_MAJOR, DEVICE_NAME);
aoecmd_exit();
aoechr_exit();
aoedev_exit();
aoeblk_exit(); /* free cache after de-allocating bufs */
destroy_workqueue(aoe_wq);
}
static int __init
aoe_init(void)
{
int ret;
aoe_wq = alloc_workqueue("aoe_wq", 0, 0);
if (!aoe_wq)
return -ENOMEM;
ret = aoedev_init();
if (ret)
goto dev_fail;
ret = aoechr_init();
if (ret)
goto chr_fail;
ret = aoeblk_init();
if (ret)
goto blk_fail;
ret = aoenet_init();
if (ret)
goto net_fail;
ret = aoecmd_init();
if (ret)
goto cmd_fail;
ret = register_blkdev(AOE_MAJOR, DEVICE_NAME);
if (ret < 0) {
printk(KERN_ERR "aoe: can't register major\n");
goto blkreg_fail;
}
printk(KERN_INFO "aoe: AoE v%s initialised.\n", VERSION);
timer_setup(&timer, discover_timer, 0);
discover_timer(&timer);
return 0;
blkreg_fail:
aoecmd_exit();
cmd_fail:
aoenet_exit();
net_fail:
aoeblk_exit();
blk_fail:
aoechr_exit();
chr_fail:
aoedev_exit();
dev_fail:
destroy_workqueue(aoe_wq);
printk(KERN_INFO "aoe: initialisation failure.\n");
return ret;
}
module_init(aoe_init);
module_exit(aoe_exit);
| linux-master | drivers/block/aoe/aoemain.c |
/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
/*
* aoedev.c
* AoE device utility functions; maintains device list.
*/
#include <linux/hdreg.h>
#include <linux/blk-mq.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/kdev_t.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include "aoe.h"
static void freetgt(struct aoedev *d, struct aoetgt *t);
static void skbpoolfree(struct aoedev *d);
static int aoe_dyndevs = 1;
module_param(aoe_dyndevs, int, 0644);
MODULE_PARM_DESC(aoe_dyndevs, "Use dynamic minor numbers for devices.");
static struct aoedev *devlist;
static DEFINE_SPINLOCK(devlist_lock);
/* Because some systems will have one, many, or no
* - partitions,
* - slots per shelf,
* - or shelves,
* we need some flexibility in the way the minor numbers
* are allocated. So they are dynamic.
*/
#define N_DEVS ((1U<<MINORBITS)/AOE_PARTITIONS)
static DEFINE_SPINLOCK(used_minors_lock);
static DECLARE_BITMAP(used_minors, N_DEVS);
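/*
* With the usual MINORBITS of 20 and AOE_PARTITIONS of 16 (assumed here),
* N_DEVS allows 65536 dynamically numbered devices.
*/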
static int
minor_get_dyn(ulong *sysminor)
{
ulong flags;
ulong n;
int error = 0;
spin_lock_irqsave(&used_minors_lock, flags);
n = find_first_zero_bit(used_minors, N_DEVS);
if (n < N_DEVS)
set_bit(n, used_minors);
else
error = -1;
spin_unlock_irqrestore(&used_minors_lock, flags);
*sysminor = n * AOE_PARTITIONS;
return error;
}
static int
minor_get_static(ulong *sysminor, ulong aoemaj, int aoemin)
{
ulong flags;
ulong n;
int error = 0;
enum {
/* for backwards compatibility when !aoe_dyndevs,
* a static number of supported slots per shelf */
NPERSHELF = 16,
};
if (aoemin >= NPERSHELF) {
pr_err("aoe: %s %d slots per shelf\n",
"static minor device numbers support only",
NPERSHELF);
error = -1;
goto out;
}
n = aoemaj * NPERSHELF + aoemin;
if (n >= N_DEVS) {
pr_err("aoe: %s with e%ld.%d\n",
"cannot use static minor device numbers",
aoemaj, aoemin);
error = -1;
goto out;
}
spin_lock_irqsave(&used_minors_lock, flags);
if (test_bit(n, used_minors)) {
pr_err("aoe: %s %lu\n",
"existing device already has static minor number",
n);
error = -1;
} else
set_bit(n, used_minors);
spin_unlock_irqrestore(&used_minors_lock, flags);
*sysminor = n * AOE_PARTITIONS;
out:
return error;
}
static int
minor_get(ulong *sysminor, ulong aoemaj, int aoemin)
{
if (aoe_dyndevs)
return minor_get_dyn(sysminor);
else
return minor_get_static(sysminor, aoemaj, aoemin);
}
static void
minor_free(ulong minor)
{
ulong flags;
minor /= AOE_PARTITIONS;
BUG_ON(minor >= N_DEVS);
spin_lock_irqsave(&used_minors_lock, flags);
BUG_ON(!test_bit(minor, used_minors));
clear_bit(minor, used_minors);
spin_unlock_irqrestore(&used_minors_lock, flags);
}
/*
* Users who grab a pointer to the device with aoedev_by_aoeaddr
* automatically get a reference count and must be responsible
* for performing a aoedev_put. With the addition of async
* kthread processing I'm no longer confident that we can
* guarantee consistency in the face of device flushes.
*
* For the time being, we only bother to add extra references for
* frames sitting on the iocq. When the kthreads finish processing
* these frames, they will aoedev_put the device.
*/
void
aoedev_put(struct aoedev *d)
{
ulong flags;
spin_lock_irqsave(&devlist_lock, flags);
d->ref--;
spin_unlock_irqrestore(&devlist_lock, flags);
}
static void
dummy_timer(struct timer_list *t)
{
struct aoedev *d;
d = from_timer(d, t, timer);
if (d->flags & DEVFL_TKILL)
return;
d->timer.expires = jiffies + HZ;
add_timer(&d->timer);
}
static void
aoe_failip(struct aoedev *d)
{
struct request *rq;
struct aoe_req *req;
struct bio *bio;
aoe_failbuf(d, d->ip.buf);
rq = d->ip.rq;
if (rq == NULL)
return;
req = blk_mq_rq_to_pdu(rq);
while ((bio = d->ip.nxbio)) {
bio->bi_status = BLK_STS_IOERR;
d->ip.nxbio = bio->bi_next;
req->nr_bios--;
}
if (!req->nr_bios)
aoe_end_request(d, rq, 0);
}
static void
downdev_frame(struct list_head *pos)
{
struct frame *f;
f = list_entry(pos, struct frame, head);
list_del(pos);
if (f->buf) {
f->buf->nframesout--;
aoe_failbuf(f->t->d, f->buf);
}
aoe_freetframe(f);
}
void
aoedev_downdev(struct aoedev *d)
{
struct aoetgt *t, **tt, **te;
struct list_head *head, *pos, *nx;
int i;
d->flags &= ~DEVFL_UP;
/* clean out active and to-be-retransmitted buffers */
for (i = 0; i < NFACTIVE; i++) {
head = &d->factive[i];
list_for_each_safe(pos, nx, head)
downdev_frame(pos);
}
head = &d->rexmitq;
list_for_each_safe(pos, nx, head)
downdev_frame(pos);
/* reset window dressings */
tt = d->targets;
te = tt + d->ntargets;
for (; tt < te && (t = *tt); tt++) {
aoecmd_wreset(t);
t->nout = 0;
}
/* clean out the in-process request (if any) */
aoe_failip(d);
/* fast fail all pending I/O */
if (d->blkq) {
/* UP is cleared, freeze+quiesce to ensure all are errored */
blk_mq_freeze_queue(d->blkq);
blk_mq_quiesce_queue(d->blkq);
blk_mq_unquiesce_queue(d->blkq);
blk_mq_unfreeze_queue(d->blkq);
}
if (d->gd)
set_capacity(d->gd, 0);
}
/* return whether the user asked for this particular
* device to be flushed
*/
static int
user_req(char *s, size_t slen, struct aoedev *d)
{
const char *p;
size_t lim;
if (!d->gd)
return 0;
p = kbasename(d->gd->disk_name);
lim = sizeof(d->gd->disk_name);
lim -= p - d->gd->disk_name;
if (slen < lim)
lim = slen;
return !strncmp(s, p, lim);
}
static void
freedev(struct aoedev *d)
{
struct aoetgt **t, **e;
int freeing = 0;
unsigned long flags;
spin_lock_irqsave(&d->lock, flags);
if (d->flags & DEVFL_TKILL
&& !(d->flags & DEVFL_FREEING)) {
d->flags |= DEVFL_FREEING;
freeing = 1;
}
spin_unlock_irqrestore(&d->lock, flags);
if (!freeing)
return;
del_timer_sync(&d->timer);
if (d->gd) {
aoedisk_rm_debugfs(d);
del_gendisk(d->gd);
put_disk(d->gd);
blk_mq_free_tag_set(&d->tag_set);
}
t = d->targets;
e = t + d->ntargets;
for (; t < e && *t; t++)
freetgt(d, *t);
mempool_destroy(d->bufpool);
skbpoolfree(d);
minor_free(d->sysminor);
spin_lock_irqsave(&d->lock, flags);
d->flags |= DEVFL_FREED;
spin_unlock_irqrestore(&d->lock, flags);
}
enum flush_parms {
NOT_EXITING = 0,
EXITING = 1,
};
static int
flush(const char __user *str, size_t cnt, int exiting)
{
ulong flags;
struct aoedev *d, **dd;
char buf[16];
int all = 0;
int specified = 0; /* flush a specific device */
unsigned int skipflags;
skipflags = DEVFL_GDALLOC | DEVFL_NEWSIZE | DEVFL_TKILL;
if (!exiting && cnt >= 3) {
if (cnt > sizeof buf)
cnt = sizeof buf;
if (copy_from_user(buf, str, cnt))
return -EFAULT;
all = !strncmp(buf, "all", 3);
if (!all)
specified = 1;
}
flush_workqueue(aoe_wq);
/* pass one: do aoedev_downdev, which might sleep */
restart1:
spin_lock_irqsave(&devlist_lock, flags);
for (d = devlist; d; d = d->next) {
spin_lock(&d->lock);
if (d->flags & DEVFL_TKILL)
goto cont;
if (exiting) {
/* unconditionally take each device down */
} else if (specified) {
if (!user_req(buf, cnt, d))
goto cont;
} else if ((!all && (d->flags & DEVFL_UP))
|| d->flags & skipflags
|| d->nopen
|| d->ref)
goto cont;
spin_unlock(&d->lock);
spin_unlock_irqrestore(&devlist_lock, flags);
aoedev_downdev(d);
d->flags |= DEVFL_TKILL;
goto restart1;
cont:
spin_unlock(&d->lock);
}
spin_unlock_irqrestore(&devlist_lock, flags);
/* pass two: call freedev, which might sleep,
* for aoedevs marked with DEVFL_TKILL
*/
restart2:
spin_lock_irqsave(&devlist_lock, flags);
for (d = devlist; d; d = d->next) {
spin_lock(&d->lock);
if (d->flags & DEVFL_TKILL
&& !(d->flags & DEVFL_FREEING)) {
spin_unlock(&d->lock);
spin_unlock_irqrestore(&devlist_lock, flags);
freedev(d);
goto restart2;
}
spin_unlock(&d->lock);
}
/* pass three: remove aoedevs marked with DEVFL_FREED */
for (dd = &devlist, d = *dd; d; d = *dd) {
struct aoedev *doomed = NULL;
spin_lock(&d->lock);
if (d->flags & DEVFL_FREED) {
*dd = d->next;
doomed = d;
} else {
dd = &d->next;
}
spin_unlock(&d->lock);
if (doomed) {
kfree(doomed->targets);
kfree(doomed);
}
}
spin_unlock_irqrestore(&devlist_lock, flags);
return 0;
}
int
aoedev_flush(const char __user *str, size_t cnt)
{
return flush(str, cnt, NOT_EXITING);
}
/* This has been confirmed to occur once with Tms=3*1000 due to the
* driver changing link and not processing its transmit ring. The
* problem is hard enough to solve by returning an error that I'm
* still punting on "solving" this.
*/
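/* The loop below polls dataref up to Tms / Sms = 30000 / 250 = 120
* times, i.e. for at most 30 seconds, before giving up and leaking
* the skb rather than freeing memory a NIC may still reference.
*/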
static void
skbfree(struct sk_buff *skb)
{
enum { Sms = 250, Tms = 30 * 1000};
int i = Tms / Sms;
if (skb == NULL)
return;
while (atomic_read(&skb_shinfo(skb)->dataref) != 1 && i-- > 0)
msleep(Sms);
if (i < 0) {
printk(KERN_ERR
"aoe: %s holds ref: %s\n",
skb->dev ? skb->dev->name : "netif",
"cannot free skb -- memory leaked.");
return;
}
skb->truesize -= skb->data_len;
skb_shinfo(skb)->nr_frags = skb->data_len = 0;
skb_trim(skb, 0);
dev_kfree_skb(skb);
}
static void
skbpoolfree(struct aoedev *d)
{
struct sk_buff *skb, *tmp;
skb_queue_walk_safe(&d->skbpool, skb, tmp)
skbfree(skb);
__skb_queue_head_init(&d->skbpool);
}
/* find it or allocate it */
struct aoedev *
aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
{
struct aoedev *d;
int i;
ulong flags;
ulong sysminor = 0;
spin_lock_irqsave(&devlist_lock, flags);
for (d = devlist; d; d = d->next)
if (d->aoemajor == maj && d->aoeminor == min) {
spin_lock(&d->lock);
if (d->flags & DEVFL_TKILL) {
spin_unlock(&d->lock);
d = NULL;
goto out;
}
d->ref++;
spin_unlock(&d->lock);
break;
}
if (d || !do_alloc || minor_get(&sysminor, maj, min) < 0)
goto out;
d = kcalloc(1, sizeof *d, GFP_ATOMIC);
if (!d)
goto out;
d->targets = kcalloc(NTARGETS, sizeof(*d->targets), GFP_ATOMIC);
if (!d->targets) {
kfree(d);
d = NULL;
goto out;
}
d->ntargets = NTARGETS;
INIT_WORK(&d->work, aoecmd_sleepwork);
spin_lock_init(&d->lock);
INIT_LIST_HEAD(&d->rq_list);
skb_queue_head_init(&d->skbpool);
timer_setup(&d->timer, dummy_timer, 0);
d->timer.expires = jiffies + HZ;
add_timer(&d->timer);
d->bufpool = NULL; /* defer to aoeblk_gdalloc */
d->tgt = d->targets;
d->ref = 1;
for (i = 0; i < NFACTIVE; i++)
INIT_LIST_HEAD(&d->factive[i]);
INIT_LIST_HEAD(&d->rexmitq);
d->sysminor = sysminor;
d->aoemajor = maj;
d->aoeminor = min;
d->rttavg = RTTAVG_INIT;
d->rttdev = RTTDEV_INIT;
d->next = devlist;
devlist = d;
out:
spin_unlock_irqrestore(&devlist_lock, flags);
return d;
}
static void
freetgt(struct aoedev *d, struct aoetgt *t)
{
struct frame *f;
struct list_head *pos, *nx, *head;
struct aoeif *ifp;
for (ifp = t->ifs; ifp < &t->ifs[NAOEIFS]; ++ifp) {
if (!ifp->nd)
break;
dev_put(ifp->nd);
}
head = &t->ffree;
list_for_each_safe(pos, nx, head) {
list_del(pos);
f = list_entry(pos, struct frame, head);
skbfree(f->skb);
kfree(f);
}
kfree(t);
}
void
aoedev_exit(void)
{
flush_workqueue(aoe_wq);
flush(NULL, 0, EXITING);
}
int __init
aoedev_init(void)
{
return 0;
}
| linux-master | drivers/block/aoe/aoedev.c |
/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
/*
* aoenet.c
* Ethernet portion of AoE driver
*/
#include <linux/gfp.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>
#include <net/net_namespace.h>
#include <asm/unaligned.h>
#include "aoe.h"
#define NECODES 5
static char *aoe_errlist[] =
{
"no such error",
"unrecognized command code",
"bad argument parameter",
"device unavailable",
"config string present",
"unsupported version"
};
enum {
IFLISTSZ = 1024,
};
static char aoe_iflist[IFLISTSZ];
module_param_string(aoe_iflist, aoe_iflist, IFLISTSZ, 0600);
MODULE_PARM_DESC(aoe_iflist, "aoe_iflist=dev1[,dev2...]");
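/* Illustrative usage (interface names are hypothetical):
*
*	modprobe aoe aoe_iflist="eth0 eth1"
*
* An empty list (the default) makes is_aoe_netif() accept every
* interface; note the list is tokenized on whitespace below.
*/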
static wait_queue_head_t txwq;
static struct ktstate kts;
#ifndef MODULE
static int __init aoe_iflist_setup(char *str)
{
strncpy(aoe_iflist, str, IFLISTSZ);
aoe_iflist[IFLISTSZ - 1] = '\0';
return 1;
}
__setup("aoe_iflist=", aoe_iflist_setup);
#endif
static spinlock_t txlock;
static struct sk_buff_head skbtxq;
/* enters with txlock held */
static int
tx(int id) __must_hold(&txlock)
{
struct sk_buff *skb;
struct net_device *ifp;
while ((skb = skb_dequeue(&skbtxq))) {
spin_unlock_irq(&txlock);
ifp = skb->dev;
if (dev_queue_xmit(skb) == NET_XMIT_DROP && net_ratelimit())
pr_warn("aoe: packet could not be sent on %s. %s\n",
ifp ? ifp->name : "netif",
"consider increasing tx_queue_len");
spin_lock_irq(&txlock);
}
return 0;
}
int
is_aoe_netif(struct net_device *ifp)
{
register char *p, *q;
register int len;
if (aoe_iflist[0] == '\0')
return 1;
p = aoe_iflist + strspn(aoe_iflist, WHITESPACE);
for (; *p; p = q + strspn(q, WHITESPACE)) {
q = p + strcspn(p, WHITESPACE);
if (q != p)
len = q - p;
else
len = strlen(p); /* last token in aoe_iflist */
if (strlen(ifp->name) == len && !strncmp(ifp->name, p, len))
return 1;
if (q == p)
break;
}
return 0;
}
int
set_aoe_iflist(const char __user *user_str, size_t size)
{
if (size >= IFLISTSZ)
return -EINVAL;
if (copy_from_user(aoe_iflist, user_str, size)) {
printk(KERN_INFO "aoe: copy from user failed\n");
return -EFAULT;
}
aoe_iflist[size] = 0x00;
return 0;
}
void
aoenet_xmit(struct sk_buff_head *queue)
{
struct sk_buff *skb, *tmp;
ulong flags;
skb_queue_walk_safe(queue, skb, tmp) {
__skb_unlink(skb, queue);
spin_lock_irqsave(&txlock, flags);
skb_queue_tail(&skbtxq, skb);
spin_unlock_irqrestore(&txlock, flags);
wake_up(&txwq);
}
}
/*
* (1) len doesn't include the header by default. I want this.
*/
static int
aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt, struct net_device *orig_dev)
{
struct aoe_hdr *h;
struct aoe_atahdr *ah;
u32 n;
int sn;
if (dev_net(ifp) != &init_net)
goto exit;
skb = skb_share_check(skb, GFP_ATOMIC);
if (skb == NULL)
return 0;
if (!is_aoe_netif(ifp))
goto exit;
skb_push(skb, ETH_HLEN); /* (1) */
sn = sizeof(*h) + sizeof(*ah);
if (skb->len >= sn) {
sn -= skb_headlen(skb);
if (sn > 0 && !__pskb_pull_tail(skb, sn))
goto exit;
}
h = (struct aoe_hdr *) skb->data;
n = get_unaligned_be32(&h->tag);
if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31))
goto exit;
if (h->verfl & AOEFL_ERR) {
n = h->err;
if (n > NECODES)
n = 0;
if (net_ratelimit())
printk(KERN_ERR
"%s%d.%d@%s; ecode=%d '%s'\n",
"aoe: error packet from ",
get_unaligned_be16(&h->major),
h->minor, skb->dev->name,
h->err, aoe_errlist[n]);
goto exit;
}
switch (h->cmd) {
case AOECMD_ATA:
/* ata_rsp may keep skb for later processing or give it back */
skb = aoecmd_ata_rsp(skb);
break;
case AOECMD_CFG:
aoecmd_cfg_rsp(skb);
break;
default:
if (h->cmd >= AOECMD_VEND_MIN)
break; /* don't complain about vendor commands */
pr_info("aoe: unknown AoE command type 0x%02x\n", h->cmd);
break;
}
if (!skb)
return 0;
exit:
dev_kfree_skb(skb);
return 0;
}
static struct packet_type aoe_pt __read_mostly = {
.type = __constant_htons(ETH_P_AOE),
.func = aoenet_rcv,
};
int __init
aoenet_init(void)
{
skb_queue_head_init(&skbtxq);
init_waitqueue_head(&txwq);
spin_lock_init(&txlock);
kts.lock = &txlock;
kts.fn = tx;
kts.waitq = &txwq;
kts.id = 0;
snprintf(kts.name, sizeof(kts.name), "aoe_tx%d", kts.id);
if (aoe_ktstart(&kts))
return -EAGAIN;
dev_add_pack(&aoe_pt);
return 0;
}
void
aoenet_exit(void)
{
aoe_ktstop(&kts);
skb_queue_purge(&skbtxq);
dev_remove_pack(&aoe_pt);
}
| linux-master | drivers/block/aoe/aoenet.c |
/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
/*
* aoeblk.c
* block device routines
*/
#include <linux/kernel.h>
#include <linux/hdreg.h>
#include <linux/blk-mq.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <scsi/sg.h>
#include "aoe.h"
static DEFINE_MUTEX(aoeblk_mutex);
static struct kmem_cache *buf_pool_cache;
static struct dentry *aoe_debugfs_dir;
/* GPFS needs a larger value than the default. */
static int aoe_maxsectors;
module_param(aoe_maxsectors, int, 0644);
MODULE_PARM_DESC(aoe_maxsectors,
"When nonzero, set the maximum number of sectors per I/O request");
static ssize_t aoedisk_show_state(struct device *dev,
struct device_attribute *attr, char *page)
{
struct gendisk *disk = dev_to_disk(dev);
struct aoedev *d = disk->private_data;
return sysfs_emit(page, "%s%s\n",
(d->flags & DEVFL_UP) ? "up" : "down",
(d->flags & DEVFL_KICKME) ? ",kickme" :
(d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : "");
/* I'd rather see nopen exported so we can ditch closewait */
}
static ssize_t aoedisk_show_mac(struct device *dev,
struct device_attribute *attr, char *page)
{
struct gendisk *disk = dev_to_disk(dev);
struct aoedev *d = disk->private_data;
struct aoetgt *t = d->targets[0];
if (t == NULL)
return sysfs_emit(page, "none\n");
return sysfs_emit(page, "%pm\n", t->addr);
}
static ssize_t aoedisk_show_netif(struct device *dev,
struct device_attribute *attr, char *page)
{
struct gendisk *disk = dev_to_disk(dev);
struct aoedev *d = disk->private_data;
struct net_device *nds[8], **nd, **nnd, **ne;
struct aoetgt **t, **te;
struct aoeif *ifp, *e;
char *p;
memset(nds, 0, sizeof nds);
nd = nds;
ne = nd + ARRAY_SIZE(nds);
t = d->targets;
te = t + d->ntargets;
for (; t < te && *t; t++) {
ifp = (*t)->ifs;
e = ifp + NAOEIFS;
for (; ifp < e && ifp->nd; ifp++) {
for (nnd = nds; nnd < nd; nnd++)
if (*nnd == ifp->nd)
break;
if (nnd == nd && nd != ne)
*nd++ = ifp->nd;
}
}
ne = nd;
nd = nds;
if (*nd == NULL)
return sysfs_emit(page, "none\n");
for (p = page; nd < ne; nd++)
p += scnprintf(p, PAGE_SIZE - (p-page), "%s%s",
p == page ? "" : ",", (*nd)->name);
p += scnprintf(p, PAGE_SIZE - (p-page), "\n");
return p-page;
}
/* firmware version */
static ssize_t aoedisk_show_fwver(struct device *dev,
struct device_attribute *attr, char *page)
{
struct gendisk *disk = dev_to_disk(dev);
struct aoedev *d = disk->private_data;
return sysfs_emit(page, "0x%04x\n", (unsigned int) d->fw_ver);
}
static ssize_t aoedisk_show_payload(struct device *dev,
struct device_attribute *attr, char *page)
{
struct gendisk *disk = dev_to_disk(dev);
struct aoedev *d = disk->private_data;
return sysfs_emit(page, "%lu\n", d->maxbcnt);
}
static int aoe_debugfs_show(struct seq_file *s, void *ignored)
{
struct aoedev *d;
struct aoetgt **t, **te;
struct aoeif *ifp, *ife;
unsigned long flags;
char c;
d = s->private;
seq_printf(s, "rttavg: %d rttdev: %d\n",
d->rttavg >> RTTSCALE,
d->rttdev >> RTTDSCALE);
seq_printf(s, "nskbpool: %d\n", skb_queue_len(&d->skbpool));
seq_printf(s, "kicked: %ld\n", d->kicked);
seq_printf(s, "maxbcnt: %ld\n", d->maxbcnt);
seq_printf(s, "ref: %ld\n", d->ref);
spin_lock_irqsave(&d->lock, flags);
t = d->targets;
te = t + d->ntargets;
for (; t < te && *t; t++) {
c = '\t';
seq_printf(s, "falloc: %ld\n", (*t)->falloc);
seq_printf(s, "ffree: %p\n",
list_empty(&(*t)->ffree) ? NULL : (*t)->ffree.next);
seq_printf(s, "%pm:%d:%d:%d\n", (*t)->addr, (*t)->nout,
(*t)->maxout, (*t)->nframes);
seq_printf(s, "\tssthresh:%d\n", (*t)->ssthresh);
seq_printf(s, "\ttaint:%d\n", (*t)->taint);
seq_printf(s, "\tr:%d\n", (*t)->rpkts);
seq_printf(s, "\tw:%d\n", (*t)->wpkts);
ifp = (*t)->ifs;
ife = ifp + ARRAY_SIZE((*t)->ifs);
for (; ifp < ife && ifp->nd; ifp++) {
seq_printf(s, "%c%s", c, ifp->nd->name);
c = ',';
}
seq_puts(s, "\n");
}
spin_unlock_irqrestore(&d->lock, flags);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(aoe_debugfs);
static DEVICE_ATTR(state, 0444, aoedisk_show_state, NULL);
static DEVICE_ATTR(mac, 0444, aoedisk_show_mac, NULL);
static DEVICE_ATTR(netif, 0444, aoedisk_show_netif, NULL);
static struct device_attribute dev_attr_firmware_version = {
.attr = { .name = "firmware-version", .mode = 0444 },
.show = aoedisk_show_fwver,
};
static DEVICE_ATTR(payload, 0444, aoedisk_show_payload, NULL);
static struct attribute *aoe_attrs[] = {
&dev_attr_state.attr,
&dev_attr_mac.attr,
&dev_attr_netif.attr,
&dev_attr_firmware_version.attr,
&dev_attr_payload.attr,
NULL,
};
static const struct attribute_group aoe_attr_group = {
.attrs = aoe_attrs,
};
static const struct attribute_group *aoe_attr_groups[] = {
&aoe_attr_group,
NULL,
};
static void
aoedisk_add_debugfs(struct aoedev *d)
{
char *p;
if (aoe_debugfs_dir == NULL)
return;
p = strchr(d->gd->disk_name, '/');
if (p == NULL)
p = d->gd->disk_name;
else
p++;
BUG_ON(*p == '\0');
d->debugfs = debugfs_create_file(p, 0444, aoe_debugfs_dir, d,
&aoe_debugfs_fops);
}
void
aoedisk_rm_debugfs(struct aoedev *d)
{
debugfs_remove(d->debugfs);
d->debugfs = NULL;
}
static int
aoeblk_open(struct gendisk *disk, blk_mode_t mode)
{
struct aoedev *d = disk->private_data;
ulong flags;
if (!virt_addr_valid(d)) {
pr_crit("aoe: invalid device pointer in %s\n",
__func__);
WARN_ON(1);
return -ENODEV;
}
if (!(d->flags & DEVFL_UP) || d->flags & DEVFL_TKILL)
return -ENODEV;
mutex_lock(&aoeblk_mutex);
spin_lock_irqsave(&d->lock, flags);
if (d->flags & DEVFL_UP && !(d->flags & DEVFL_TKILL)) {
d->nopen++;
spin_unlock_irqrestore(&d->lock, flags);
mutex_unlock(&aoeblk_mutex);
return 0;
}
spin_unlock_irqrestore(&d->lock, flags);
mutex_unlock(&aoeblk_mutex);
return -ENODEV;
}
static void
aoeblk_release(struct gendisk *disk)
{
struct aoedev *d = disk->private_data;
ulong flags;
spin_lock_irqsave(&d->lock, flags);
if (--d->nopen == 0) {
spin_unlock_irqrestore(&d->lock, flags);
aoecmd_cfg(d->aoemajor, d->aoeminor);
return;
}
spin_unlock_irqrestore(&d->lock, flags);
}
static blk_status_t aoeblk_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct aoedev *d = hctx->queue->queuedata;
spin_lock_irq(&d->lock);
if ((d->flags & DEVFL_UP) == 0) {
pr_info_ratelimited("aoe: device %ld.%d is not up\n",
d->aoemajor, d->aoeminor);
spin_unlock_irq(&d->lock);
blk_mq_start_request(bd->rq);
return BLK_STS_IOERR;
}
list_add_tail(&bd->rq->queuelist, &d->rq_list);
aoecmd_work(d);
spin_unlock_irq(&d->lock);
return BLK_STS_OK;
}
static int
aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct aoedev *d = bdev->bd_disk->private_data;
if ((d->flags & DEVFL_UP) == 0) {
printk(KERN_ERR "aoe: disk not up\n");
return -ENODEV;
}
geo->cylinders = d->geo.cylinders;
geo->heads = d->geo.heads;
geo->sectors = d->geo.sectors;
return 0;
}
static int
aoeblk_ioctl(struct block_device *bdev, blk_mode_t mode, uint cmd, ulong arg)
{
struct aoedev *d;
if (!arg)
return -EINVAL;
d = bdev->bd_disk->private_data;
if ((d->flags & DEVFL_UP) == 0) {
pr_err("aoe: disk not up\n");
return -ENODEV;
}
if (cmd == HDIO_GET_IDENTITY) {
if (!copy_to_user((void __user *) arg, &d->ident,
sizeof(d->ident)))
return 0;
return -EFAULT;
}
/* udev calls scsi_id, which uses SG_IO, resulting in noise */
if (cmd != SG_IO)
pr_info("aoe: unknown ioctl 0x%x\n", cmd);
return -ENOTTY;
}
static const struct block_device_operations aoe_bdops = {
.open = aoeblk_open,
.release = aoeblk_release,
.ioctl = aoeblk_ioctl,
.compat_ioctl = blkdev_compat_ptr_ioctl,
.getgeo = aoeblk_getgeo,
.owner = THIS_MODULE,
};
static const struct blk_mq_ops aoeblk_mq_ops = {
.queue_rq = aoeblk_queue_rq,
};
/* blk_mq_alloc_disk and add_disk can sleep */
void
aoeblk_gdalloc(void *vp)
{
struct aoedev *d = vp;
struct gendisk *gd;
mempool_t *mp;
struct blk_mq_tag_set *set;
ulong flags;
int late = 0;
int err;
spin_lock_irqsave(&d->lock, flags);
if (d->flags & DEVFL_GDALLOC
&& !(d->flags & DEVFL_TKILL)
&& !(d->flags & DEVFL_GD_NOW))
d->flags |= DEVFL_GD_NOW;
else
late = 1;
spin_unlock_irqrestore(&d->lock, flags);
if (late)
return;
mp = mempool_create(MIN_BUFS, mempool_alloc_slab, mempool_free_slab,
buf_pool_cache);
if (mp == NULL) {
printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n",
d->aoemajor, d->aoeminor);
goto err;
}
set = &d->tag_set;
set->ops = &aoeblk_mq_ops;
set->cmd_size = sizeof(struct aoe_req);
set->nr_hw_queues = 1;
set->queue_depth = 128;
set->numa_node = NUMA_NO_NODE;
set->flags = BLK_MQ_F_SHOULD_MERGE;
err = blk_mq_alloc_tag_set(set);
if (err) {
pr_err("aoe: cannot allocate tag set for %ld.%d\n",
d->aoemajor, d->aoeminor);
goto err_mempool;
}
gd = blk_mq_alloc_disk(set, d);
if (IS_ERR(gd)) {
pr_err("aoe: cannot allocate block queue for %ld.%d\n",
d->aoemajor, d->aoeminor);
goto err_tagset;
}
spin_lock_irqsave(&d->lock, flags);
WARN_ON(!(d->flags & DEVFL_GD_NOW));
WARN_ON(!(d->flags & DEVFL_GDALLOC));
WARN_ON(d->flags & DEVFL_TKILL);
WARN_ON(d->gd);
WARN_ON(d->flags & DEVFL_UP);
blk_queue_max_hw_sectors(gd->queue, BLK_DEF_MAX_SECTORS);
blk_queue_io_opt(gd->queue, SZ_2M);
d->bufpool = mp;
d->blkq = gd->queue;
d->gd = gd;
if (aoe_maxsectors)
blk_queue_max_hw_sectors(gd->queue, aoe_maxsectors);
gd->major = AOE_MAJOR;
gd->first_minor = d->sysminor;
gd->minors = AOE_PARTITIONS;
gd->fops = &aoe_bdops;
gd->private_data = d;
set_capacity(gd, d->ssize);
snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
d->aoemajor, d->aoeminor);
d->flags &= ~DEVFL_GDALLOC;
d->flags |= DEVFL_UP;
spin_unlock_irqrestore(&d->lock, flags);
err = device_add_disk(NULL, gd, aoe_attr_groups);
if (err)
goto out_disk_cleanup;
aoedisk_add_debugfs(d);
spin_lock_irqsave(&d->lock, flags);
WARN_ON(!(d->flags & DEVFL_GD_NOW));
d->flags &= ~DEVFL_GD_NOW;
spin_unlock_irqrestore(&d->lock, flags);
return;
out_disk_cleanup:
put_disk(gd);
err_tagset:
blk_mq_free_tag_set(set);
err_mempool:
mempool_destroy(mp);
err:
spin_lock_irqsave(&d->lock, flags);
d->flags &= ~DEVFL_GD_NOW;
queue_work(aoe_wq, &d->work);
spin_unlock_irqrestore(&d->lock, flags);
}
void
aoeblk_exit(void)
{
debugfs_remove_recursive(aoe_debugfs_dir);
aoe_debugfs_dir = NULL;
kmem_cache_destroy(buf_pool_cache);
}
int __init
aoeblk_init(void)
{
buf_pool_cache = kmem_cache_create("aoe_bufs",
sizeof(struct buf),
0, 0, NULL);
if (buf_pool_cache == NULL)
return -ENOMEM;
aoe_debugfs_dir = debugfs_create_dir("aoe", NULL);
return 0;
}
| linux-master | drivers/block/aoe/aoeblk.c |
/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
/*
* aoecmd.c
* Filesystem request handling methods
*/
#include <linux/ata.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/blk-mq.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <net/net_namespace.h>
#include <asm/unaligned.h>
#include <linux/uio.h>
#include "aoe.h"
#define MAXIOC (8192) /* default meant to avoid most soft lockups */
static void ktcomplete(struct frame *, struct sk_buff *);
static int count_targets(struct aoedev *d, int *untainted);
static struct buf *nextbuf(struct aoedev *);
static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");
static int aoe_maxout = 64;
module_param(aoe_maxout, int, 0644);
MODULE_PARM_DESC(aoe_maxout,
"Only aoe_maxout outstanding packets for every MAC on eX.Y.");
/* The number of online cpus during module initialization gives us a
* convenient heuristic cap on the parallelism used for ktio threads
* doing I/O completion. It is not important that the cap equal the
* actual number of running CPUs at any given time, but because of CPU
* hotplug, we take care to use ncpus instead of using
* num_online_cpus() after module initialization.
*/
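/* Sketch of how a completed response is routed to a ktio thread
* (see ktcomplete() below): the device minor picks a queue,
*
*	id = f->t->d->aoeminor % ncpus;
*
* so frames for one device normally complete on one thread,
* preserving per-device ordering while spreading devices over CPUs.
*/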
static int ncpus;
/* mutex lock used for synchronization while thread spawning */
static DEFINE_MUTEX(ktio_spawn_lock);
static wait_queue_head_t *ktiowq;
static struct ktstate *kts;
/* io completion queue */
struct iocq_ktio {
struct list_head head;
spinlock_t lock;
};
static struct iocq_ktio *iocq;
static struct page *empty_page;
static struct sk_buff *
new_skb(ulong len)
{
struct sk_buff *skb;
skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);
if (skb) {
skb_reserve(skb, MAX_HEADER);
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb->protocol = __constant_htons(ETH_P_AOE);
skb_checksum_none_assert(skb);
}
return skb;
}
static struct frame *
getframe_deferred(struct aoedev *d, u32 tag)
{
struct list_head *head, *pos, *nx;
struct frame *f;
head = &d->rexmitq;
list_for_each_safe(pos, nx, head) {
f = list_entry(pos, struct frame, head);
if (f->tag == tag) {
list_del(pos);
return f;
}
}
return NULL;
}
static struct frame *
getframe(struct aoedev *d, u32 tag)
{
struct frame *f;
struct list_head *head, *pos, *nx;
u32 n;
n = tag % NFACTIVE;
head = &d->factive[n];
list_for_each_safe(pos, nx, head) {
f = list_entry(pos, struct frame, head);
if (f->tag == tag) {
list_del(pos);
return f;
}
}
return NULL;
}
/*
* Leave the top bit clear so we have tagspace for userland.
* The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
* This driver reserves tag -1 to mean "unused frame."
*/
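/* Worked example of the tag layout produced by newtag() below:
*
*	tag = (++lasttag & 0x7fff) << 16 | (jiffies & 0xffff)
*
* Bits 16..30 carry a sequence number (the top bit stays clear for
* userland), and bits 0..15 carry the xmit tick that tsince()
* later subtracts from the current jiffies to estimate age.
*/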
static int
newtag(struct aoedev *d)
{
register ulong n;
n = jiffies & 0xffff;
return n | (++d->lasttag & 0x7fff) << 16;
}
static u32
aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
{
u32 host_tag = newtag(d);
memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
memcpy(h->dst, t->addr, sizeof h->dst);
h->type = __constant_cpu_to_be16(ETH_P_AOE);
h->verfl = AOE_HVER;
h->major = cpu_to_be16(d->aoemajor);
h->minor = d->aoeminor;
h->cmd = AOECMD_ATA;
h->tag = cpu_to_be32(host_tag);
return host_tag;
}
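/* Scatter the 48-bit LBA across the six single-byte AoE ATA header
* fields, least-significant byte first.
*/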
static inline void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
ah->lba0 = lba;
ah->lba1 = lba >>= 8;
ah->lba2 = lba >>= 8;
ah->lba3 = lba >>= 8;
ah->lba4 = lba >>= 8;
ah->lba5 = lba >>= 8;
}
static struct aoeif *
ifrotate(struct aoetgt *t)
{
struct aoeif *ifp;
ifp = t->ifp;
ifp++;
if (ifp >= &t->ifs[NAOEIFS] || ifp->nd == NULL)
ifp = t->ifs;
if (ifp->nd == NULL)
return NULL;
return t->ifp = ifp;
}
static void
skb_pool_put(struct aoedev *d, struct sk_buff *skb)
{
__skb_queue_tail(&d->skbpool, skb);
}
static struct sk_buff *
skb_pool_get(struct aoedev *d)
{
struct sk_buff *skb = skb_peek(&d->skbpool);
if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
__skb_unlink(skb, &d->skbpool);
return skb;
}
if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
(skb = new_skb(ETH_ZLEN)))
return skb;
return NULL;
}
void
aoe_freetframe(struct frame *f)
{
struct aoetgt *t;
t = f->t;
f->buf = NULL;
memset(&f->iter, 0, sizeof(f->iter));
f->r_skb = NULL;
f->flags = 0;
list_add(&f->head, &t->ffree);
}
static struct frame *
newtframe(struct aoedev *d, struct aoetgt *t)
{
struct frame *f;
struct sk_buff *skb;
struct list_head *pos;
if (list_empty(&t->ffree)) {
if (t->falloc >= NSKBPOOLMAX*2)
return NULL;
f = kcalloc(1, sizeof(*f), GFP_ATOMIC);
if (f == NULL)
return NULL;
t->falloc++;
f->t = t;
} else {
pos = t->ffree.next;
list_del(pos);
f = list_entry(pos, struct frame, head);
}
skb = f->skb;
if (skb == NULL) {
f->skb = skb = new_skb(ETH_ZLEN);
if (!skb) {
bail: aoe_freetframe(f);
return NULL;
}
}
if (atomic_read(&skb_shinfo(skb)->dataref) != 1) {
skb = skb_pool_get(d);
if (skb == NULL)
goto bail;
skb_pool_put(d, f->skb);
f->skb = skb;
}
skb->truesize -= skb->data_len;
skb_shinfo(skb)->nr_frags = skb->data_len = 0;
skb_trim(skb, 0);
return f;
}
static struct frame *
newframe(struct aoedev *d)
{
struct frame *f;
struct aoetgt *t, **tt;
int totout = 0;
int use_tainted;
int has_untainted;
if (!d->targets || !d->targets[0]) {
printk(KERN_ERR "aoe: NULL TARGETS!\n");
return NULL;
}
tt = d->tgt; /* last used target */
for (use_tainted = 0, has_untainted = 0;;) {
tt++;
if (tt >= &d->targets[d->ntargets] || !*tt)
tt = d->targets;
t = *tt;
if (!t->taint) {
has_untainted = 1;
totout += t->nout;
}
if (t->nout < t->maxout
&& (use_tainted || !t->taint)
&& t->ifp->nd) {
f = newtframe(d, t);
if (f) {
ifrotate(t);
d->tgt = tt;
return f;
}
}
if (tt == d->tgt) { /* we've looped and found nada */
if (!use_tainted && !has_untainted)
use_tainted = 1;
else
break;
}
}
if (totout == 0) {
d->kicked++;
d->flags |= DEVFL_KICKME;
}
return NULL;
}
static void
skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)
{
int frag = 0;
struct bio_vec bv;
__bio_for_each_segment(bv, bio, iter, iter)
skb_fill_page_desc(skb, frag++, bv.bv_page,
bv.bv_offset, bv.bv_len);
}
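/* Hash an outgoing frame onto one of the NFACTIVE active lists by
* tag, so getframe() can later find it with a short list walk.
*/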
static void
fhash(struct frame *f)
{
struct aoedev *d = f->t->d;
u32 n;
n = f->tag % NFACTIVE;
list_add_tail(&f->head, &d->factive[n]);
}
static void
ata_rw_frameinit(struct frame *f)
{
struct aoetgt *t;
struct aoe_hdr *h;
struct aoe_atahdr *ah;
struct sk_buff *skb;
char writebit, extbit;
skb = f->skb;
h = (struct aoe_hdr *) skb_mac_header(skb);
ah = (struct aoe_atahdr *) (h + 1);
skb_put(skb, sizeof(*h) + sizeof(*ah));
memset(h, 0, skb->len);
writebit = 0x10;
extbit = 0x4;
t = f->t;
f->tag = aoehdr_atainit(t->d, t, h);
fhash(f);
t->nout++;
f->waited = 0;
f->waited_total = 0;
/* set up ata header */
ah->scnt = f->iter.bi_size >> 9;
put_lba(ah, f->iter.bi_sector);
if (t->d->flags & DEVFL_EXT) {
ah->aflags |= AOEAFL_EXT;
} else {
extbit = 0;
ah->lba3 &= 0x0f;
ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */
}
if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
skb_fillup(skb, f->buf->bio, f->iter);
ah->aflags |= AOEAFL_WRITE;
skb->len += f->iter.bi_size;
skb->data_len = f->iter.bi_size;
skb->truesize += f->iter.bi_size;
t->wpkts++;
} else {
t->rpkts++;
writebit = 0;
}
ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;
skb->dev = t->ifp->nd;
}
static int
aoecmd_ata_rw(struct aoedev *d)
{
struct frame *f;
struct buf *buf;
struct sk_buff *skb;
struct sk_buff_head queue;
buf = nextbuf(d);
if (buf == NULL)
return 0;
f = newframe(d);
if (f == NULL)
return 0;
/* initialize the headers & frame */
f->buf = buf;
f->iter = buf->iter;
f->iter.bi_size = min_t(unsigned long,
d->maxbcnt ?: DEFAULTBCNT,
f->iter.bi_size);
bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size);
if (!buf->iter.bi_size)
d->ip.buf = NULL;
/* mark all tracking fields and load out */
buf->nframesout += 1;
ata_rw_frameinit(f);
skb = skb_clone(f->skb, GFP_ATOMIC);
if (skb) {
f->sent = ktime_get();
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
}
return 1;
}
/* some callers cannot sleep, and they can call this function,
* transmitting the packets later, when interrupts are on
*/
static void
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
{
struct aoe_hdr *h;
struct aoe_cfghdr *ch;
struct sk_buff *skb;
struct net_device *ifp;
rcu_read_lock();
for_each_netdev_rcu(&init_net, ifp) {
dev_hold(ifp);
if (!is_aoe_netif(ifp))
goto cont;
skb = new_skb(sizeof *h + sizeof *ch);
if (skb == NULL) {
printk(KERN_INFO "aoe: skb alloc failure\n");
goto cont;
}
skb_put(skb, sizeof *h + sizeof *ch);
skb->dev = ifp;
__skb_queue_tail(queue, skb);
h = (struct aoe_hdr *) skb_mac_header(skb);
memset(h, 0, sizeof *h + sizeof *ch);
memset(h->dst, 0xff, sizeof h->dst);
memcpy(h->src, ifp->dev_addr, sizeof h->src);
h->type = __constant_cpu_to_be16(ETH_P_AOE);
h->verfl = AOE_HVER;
h->major = cpu_to_be16(aoemajor);
h->minor = aoeminor;
h->cmd = AOECMD_CFG;
cont:
dev_put(ifp);
}
rcu_read_unlock();
}
static void
resend(struct aoedev *d, struct frame *f)
{
struct sk_buff *skb;
struct sk_buff_head queue;
struct aoe_hdr *h;
struct aoetgt *t;
char buf[128];
u32 n;
t = f->t;
n = newtag(d);
skb = f->skb;
if (ifrotate(t) == NULL) {
/* probably can't happen, but set it up to fail anyway */
pr_info("aoe: resend: no interfaces to rotate to.\n");
ktcomplete(f, NULL);
return;
}
h = (struct aoe_hdr *) skb_mac_header(skb);
if (!(f->flags & FFL_PROBE)) {
snprintf(buf, sizeof(buf),
"%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
"retransmit", d->aoemajor, d->aoeminor,
f->tag, jiffies, n,
h->src, h->dst, t->nout);
aoechr_error(buf);
}
f->tag = n;
fhash(f);
h->tag = cpu_to_be32(n);
memcpy(h->dst, t->addr, sizeof h->dst);
memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
skb->dev = t->ifp->nd;
skb = skb_clone(skb, GFP_ATOMIC);
if (skb == NULL)
return;
f->sent = ktime_get();
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
}
static int
tsince_hr(struct frame *f)
{
u64 delta = ktime_to_ns(ktime_sub(ktime_get(), f->sent));
/* delta is normally under 4.2 seconds, avoid 64-bit division */
if (likely(delta <= UINT_MAX))
return (u32)delta / NSEC_PER_USEC;
/* avoid overflow after 71 minutes */
if (delta > ((u64)INT_MAX * NSEC_PER_USEC))
return INT_MAX;
return div_u64(delta, NSEC_PER_USEC);
}
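/* Estimate the age of a tag in microseconds from the 16-bit xmit
* tick stored in its low bits by newtag(); used when the frame
* itself can no longer be found.
*/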
static int
tsince(u32 tag)
{
int n;
n = jiffies & 0xffff;
n -= tag & 0xffff;
if (n < 0)
n += 1<<16;
return jiffies_to_usecs(n + 1);
}
static struct aoeif *
getif(struct aoetgt *t, struct net_device *nd)
{
struct aoeif *p, *e;
p = t->ifs;
e = p + NAOEIFS;
for (; p < e; p++)
if (p->nd == nd)
return p;
return NULL;
}
static void
ejectif(struct aoetgt *t, struct aoeif *ifp)
{
struct aoeif *e;
struct net_device *nd;
ulong n;
nd = ifp->nd;
e = t->ifs + NAOEIFS - 1;
n = (e - ifp) * sizeof *ifp;
memmove(ifp, ifp+1, n);
e->nd = NULL;
dev_put(nd);
}
static struct frame *
reassign_frame(struct frame *f)
{
struct frame *nf;
struct sk_buff *skb;
nf = newframe(f->t->d);
if (!nf)
return NULL;
if (nf->t == f->t) {
aoe_freetframe(nf);
return NULL;
}
skb = nf->skb;
nf->skb = f->skb;
nf->buf = f->buf;
nf->iter = f->iter;
nf->waited = 0;
nf->waited_total = f->waited_total;
nf->sent = f->sent;
f->skb = skb;
return nf;
}
static void
probe(struct aoetgt *t)
{
struct aoedev *d;
struct frame *f;
struct sk_buff *skb;
struct sk_buff_head queue;
size_t n, m;
int frag;
d = t->d;
f = newtframe(d, t);
if (!f) {
pr_err("%s %pm for e%ld.%d: %s\n",
"aoe: cannot probe remote address",
t->addr,
(long) d->aoemajor, d->aoeminor,
"no frame available");
return;
}
f->flags |= FFL_PROBE;
ifrotate(t);
f->iter.bi_size = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
ata_rw_frameinit(f);
skb = f->skb;
for (frag = 0, n = f->iter.bi_size; n > 0; ++frag, n -= m) {
if (n < PAGE_SIZE)
m = n;
else
m = PAGE_SIZE;
skb_fill_page_desc(skb, frag, empty_page, 0, m);
}
skb->len += f->iter.bi_size;
skb->data_len = f->iter.bi_size;
skb->truesize += f->iter.bi_size;
skb = skb_clone(f->skb, GFP_ATOMIC);
if (skb) {
f->sent = ktime_get();
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
}
}
static long
rto(struct aoedev *d)
{
long t;
t = 2 * d->rttavg >> RTTSCALE;
t += 8 * d->rttdev >> RTTDSCALE;
if (t == 0)
t = 1;
return t;
}
static void
rexmit_deferred(struct aoedev *d)
{
struct aoetgt *t;
struct frame *f;
struct frame *nf;
struct list_head *pos, *nx, *head;
int since;
int untainted;
count_targets(d, &untainted);
head = &d->rexmitq;
list_for_each_safe(pos, nx, head) {
f = list_entry(pos, struct frame, head);
t = f->t;
if (t->taint) {
if (!(f->flags & FFL_PROBE)) {
nf = reassign_frame(f);
if (nf) {
if (t->nout_probes == 0
&& untainted > 0) {
probe(t);
t->nout_probes++;
}
list_replace(&f->head, &nf->head);
pos = &nf->head;
aoe_freetframe(f);
f = nf;
t = f->t;
}
} else if (untainted < 1) {
/* don't probe w/o other untainted aoetgts */
goto stop_probe;
} else if (tsince_hr(f) < t->taint * rto(d)) {
/* reprobe slowly when taint is high */
continue;
}
} else if (f->flags & FFL_PROBE) {
stop_probe: /* don't probe untainted aoetgts */
list_del(pos);
aoe_freetframe(f);
/* leaving d->kicked, because this is routine */
f->t->d->flags |= DEVFL_KICKME;
continue;
}
if (t->nout >= t->maxout)
continue;
list_del(pos);
t->nout++;
if (f->flags & FFL_PROBE)
t->nout_probes++;
since = tsince_hr(f);
f->waited += since;
f->waited_total += since;
resend(d, f);
}
}
/* An aoetgt accumulates demerits quickly, and successful
* probing redeems the aoetgt slowly.
*/
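/* Worked example of scorn() below: taint moves 0 -> 3 -> 12 -> 39,
* i.e. roughly taint' = 3 * (taint + 1) per demerit, saturating at
* MAX_TAINT, while each successful response in ktiocomplete() only
* decrements it by one.
*/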
static void
scorn(struct aoetgt *t)
{
int n;
n = t->taint++;
t->taint += t->taint * 2;
if (n > t->taint)
t->taint = n;
if (t->taint > MAX_TAINT)
t->taint = MAX_TAINT;
}
static int
count_targets(struct aoedev *d, int *untainted)
{
int i, good;
for (i = good = 0; i < d->ntargets && d->targets[i]; ++i)
if (d->targets[i]->taint == 0)
good++;
if (untainted)
*untainted = good;
return i;
}
static void
rexmit_timer(struct timer_list *timer)
{
struct aoedev *d;
struct aoetgt *t;
struct aoeif *ifp;
struct frame *f;
struct list_head *head, *pos, *nx;
LIST_HEAD(flist);
register long timeout;
ulong flags, n;
int i;
int utgts; /* number of aoetgt descriptors (not slots) */
int since;
d = from_timer(d, timer, timer);
spin_lock_irqsave(&d->lock, flags);
/* timeout based on observed timings and variations */
timeout = rto(d);
utgts = count_targets(d, NULL);
if (d->flags & DEVFL_TKILL) {
spin_unlock_irqrestore(&d->lock, flags);
return;
}
/* collect all frames to rexmit into flist */
for (i = 0; i < NFACTIVE; i++) {
head = &d->factive[i];
list_for_each_safe(pos, nx, head) {
f = list_entry(pos, struct frame, head);
if (tsince_hr(f) < timeout)
break; /* end of expired frames */
/* move to flist for later processing */
list_move_tail(pos, &flist);
}
}
/* process expired frames */
while (!list_empty(&flist)) {
pos = flist.next;
f = list_entry(pos, struct frame, head);
since = tsince_hr(f);
n = f->waited_total + since;
n /= USEC_PER_SEC;
if (aoe_deadsecs
&& n > aoe_deadsecs
&& !(f->flags & FFL_PROBE)) {
/* Waited too long. Device failure.
* Hang all frames on first hash bucket for downdev
* to clean up.
*/
list_splice(&flist, &d->factive[0]);
aoedev_downdev(d);
goto out;
}
t = f->t;
n = f->waited + since;
n /= USEC_PER_SEC;
if (aoe_deadsecs && utgts > 0
&& (n > aoe_deadsecs / utgts || n > HARD_SCORN_SECS))
scorn(t); /* avoid this target */
if (t->maxout != 1) {
t->ssthresh = t->maxout / 2;
t->maxout = 1;
}
if (f->flags & FFL_PROBE) {
t->nout_probes--;
} else {
ifp = getif(t, f->skb->dev);
if (ifp && ++ifp->lost > (t->nframes << 1)
&& (ifp != t->ifs || t->ifs[1].nd)) {
ejectif(t, ifp);
ifp = NULL;
}
}
list_move_tail(pos, &d->rexmitq);
t->nout--;
}
rexmit_deferred(d);
out:
if ((d->flags & DEVFL_KICKME) && d->blkq) {
d->flags &= ~DEVFL_KICKME;
blk_mq_run_hw_queues(d->blkq, true);
}
d->timer.expires = jiffies + TIMERTICK;
add_timer(&d->timer);
spin_unlock_irqrestore(&d->lock, flags);
}
static void
bufinit(struct buf *buf, struct request *rq, struct bio *bio)
{
memset(buf, 0, sizeof(*buf));
buf->rq = rq;
buf->bio = bio;
buf->iter = bio->bi_iter;
}
static struct buf *
nextbuf(struct aoedev *d)
{
struct request *rq;
struct request_queue *q;
struct aoe_req *req;
struct buf *buf;
struct bio *bio;
q = d->blkq;
if (q == NULL)
return NULL; /* initializing */
if (d->ip.buf)
return d->ip.buf;
rq = d->ip.rq;
if (rq == NULL) {
rq = list_first_entry_or_null(&d->rq_list, struct request,
queuelist);
if (rq == NULL)
return NULL;
list_del_init(&rq->queuelist);
blk_mq_start_request(rq);
d->ip.rq = rq;
d->ip.nxbio = rq->bio;
req = blk_mq_rq_to_pdu(rq);
req->nr_bios = 0;
__rq_for_each_bio(bio, rq)
req->nr_bios++;
}
buf = mempool_alloc(d->bufpool, GFP_ATOMIC);
if (buf == NULL) {
pr_err("aoe: nextbuf: unable to mempool_alloc!\n");
return NULL;
}
bio = d->ip.nxbio;
bufinit(buf, rq, bio);
bio = bio->bi_next;
d->ip.nxbio = bio;
if (bio == NULL)
d->ip.rq = NULL;
return d->ip.buf = buf;
}
/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{
rexmit_deferred(d);
while (aoecmd_ata_rw(d))
;
}
/* this function performs work that has been deferred until sleeping is OK
*/
void
aoecmd_sleepwork(struct work_struct *work)
{
struct aoedev *d = container_of(work, struct aoedev, work);
if (d->flags & DEVFL_GDALLOC)
aoeblk_gdalloc(d);
if (d->flags & DEVFL_NEWSIZE) {
set_capacity_and_notify(d->gd, d->ssize);
spin_lock_irq(&d->lock);
d->flags |= DEVFL_UP;
d->flags &= ~DEVFL_NEWSIZE;
spin_unlock_irq(&d->lock);
}
}
static void
ata_ident_fixstring(u16 *id, int ns)
{
u16 s;
while (ns-- > 0) {
s = *id;
*id++ = s >> 8 | s << 8;
}
}
static void
ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
{
u64 ssize;
u16 n;
/* word 83: command set supported */
n = get_unaligned_le16(&id[83 << 1]);
/* word 86: command set/feature enabled */
n |= get_unaligned_le16(&id[86 << 1]);
if (n & (1<<10)) { /* bit 10: LBA 48 */
d->flags |= DEVFL_EXT;
/* word 100: number lba48 sectors */
ssize = get_unaligned_le64(&id[100 << 1]);
/* set as in ide-disk.c:init_idedisk_capacity */
d->geo.cylinders = ssize;
d->geo.cylinders /= (255 * 63);
d->geo.heads = 255;
d->geo.sectors = 63;
} else {
d->flags &= ~DEVFL_EXT;
/* number lba28 sectors */
ssize = get_unaligned_le32(&id[60 << 1]);
/* NOTE: obsolete in ATA 6 */
d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
d->geo.heads = get_unaligned_le16(&id[55 << 1]);
d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
}
ata_ident_fixstring((u16 *) &id[10<<1], 10); /* serial */
ata_ident_fixstring((u16 *) &id[23<<1], 4); /* firmware */
ata_ident_fixstring((u16 *) &id[27<<1], 20); /* model */
memcpy(d->ident, id, sizeof(d->ident));
if (d->ssize != ssize)
printk(KERN_INFO
"aoe: %pm e%ld.%d v%04x has %llu sectors\n",
t->addr,
d->aoemajor, d->aoeminor,
d->fw_ver, (long long)ssize);
d->ssize = ssize;
d->geo.start = 0;
if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
return;
if (d->gd != NULL)
d->flags |= DEVFL_NEWSIZE;
else
d->flags |= DEVFL_GDALLOC;
queue_work(aoe_wq, &d->work);
}
static void
calc_rttavg(struct aoedev *d, struct aoetgt *t, int rtt)
{
register long n;
n = rtt;
/* cf. Congestion Avoidance and Control, Jacobson & Karels, 1988 */
n -= d->rttavg >> RTTSCALE;
d->rttavg += n;
if (n < 0)
n = -n;
n -= d->rttdev >> RTTDSCALE;
d->rttdev += n;
if (!t || t->maxout >= t->nframes)
return;
if (t->maxout < t->ssthresh)
t->maxout += 1;
else if (t->nout == t->maxout && t->next_cwnd-- == 0) {
t->maxout += 1;
t->next_cwnd = t->maxout;
}
}
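/* A sketch of the arithmetic above (the same scaled-accumulator
* trick as Jacobson/Karels): with A = rttavg and D = rttdev kept
* scaled,
*
*	A += rtt - A/2^RTTSCALE		(smoothed mean)
*	D += |err| - D/2^RTTDSCALE	(smoothed deviation)
*
* and rto() unscales them as 2*A/2^RTTSCALE + 8*D/2^RTTDSCALE,
* all in the microseconds reported by tsince_hr().
*/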
static struct aoetgt *
gettgt(struct aoedev *d, char *addr)
{
struct aoetgt **t, **e;
t = d->targets;
e = t + d->ntargets;
for (; t < e && *t; t++)
if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
return *t;
return NULL;
}
static void
bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
{
int soff = 0;
struct bio_vec bv;
iter.bi_size = cnt;
__bio_for_each_segment(bv, bio, iter, iter) {
char *p = bvec_kmap_local(&bv);
skb_copy_bits(skb, soff, p, bv.bv_len);
kunmap_local(p);
soff += bv.bv_len;
}
}
void
aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
{
struct bio *bio;
int bok;
struct request_queue *q;
blk_status_t err = BLK_STS_OK;
q = d->blkq;
if (rq == d->ip.rq)
d->ip.rq = NULL;
do {
bio = rq->bio;
bok = !fastfail && !bio->bi_status;
if (!bok)
err = BLK_STS_IOERR;
} while (blk_update_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
__blk_mq_end_request(rq, err);
/* cf. https://lore.kernel.org/lkml/[email protected]/ */
if (!fastfail)
blk_mq_run_hw_queues(q, true);
}
static void
aoe_end_buf(struct aoedev *d, struct buf *buf)
{
struct request *rq = buf->rq;
struct aoe_req *req = blk_mq_rq_to_pdu(rq);
if (buf == d->ip.buf)
d->ip.buf = NULL;
mempool_free(buf, d->bufpool);
if (--req->nr_bios == 0)
aoe_end_request(d, rq, 0);
}
static void
ktiocomplete(struct frame *f)
{
struct aoe_hdr *hin, *hout;
struct aoe_atahdr *ahin, *ahout;
struct buf *buf;
struct sk_buff *skb;
struct aoetgt *t;
struct aoeif *ifp;
struct aoedev *d;
long n;
int untainted;
if (f == NULL)
return;
t = f->t;
d = t->d;
skb = f->r_skb;
buf = f->buf;
if (f->flags & FFL_PROBE)
goto out;
if (!skb) /* just fail the buf. */
goto noskb;
hout = (struct aoe_hdr *) skb_mac_header(f->skb);
ahout = (struct aoe_atahdr *) (hout+1);
hin = (struct aoe_hdr *) skb->data;
skb_pull(skb, sizeof(*hin));
ahin = (struct aoe_atahdr *) skb->data;
skb_pull(skb, sizeof(*ahin));
if (ahin->cmdstat & 0xa9) { /* these bits cleared on success */
pr_err("aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n",
ahout->cmdstat, ahin->cmdstat,
d->aoemajor, d->aoeminor);
noskb: if (buf)
buf->bio->bi_status = BLK_STS_IOERR;
goto out;
}
n = ahout->scnt << 9;
switch (ahout->cmdstat) {
case ATA_CMD_PIO_READ:
case ATA_CMD_PIO_READ_EXT:
if (skb->len < n) {
pr_err("%s e%ld.%d. skb->len=%d need=%ld\n",
"aoe: runt data size in read from",
(long) d->aoemajor, d->aoeminor,
skb->len, n);
buf->bio->bi_status = BLK_STS_IOERR;
break;
}
if (n > f->iter.bi_size) {
pr_err_ratelimited("%s e%ld.%d. bytes=%ld need=%u\n",
"aoe: too-large data size in read from",
(long) d->aoemajor, d->aoeminor,
n, f->iter.bi_size);
buf->bio->bi_status = BLK_STS_IOERR;
break;
}
bvcpy(skb, f->buf->bio, f->iter, n);
fallthrough;
case ATA_CMD_PIO_WRITE:
case ATA_CMD_PIO_WRITE_EXT:
spin_lock_irq(&d->lock);
ifp = getif(t, skb->dev);
if (ifp)
ifp->lost = 0;
spin_unlock_irq(&d->lock);
break;
case ATA_CMD_ID_ATA:
if (skb->len < 512) {
pr_info("%s e%ld.%d. skb->len=%d need=512\n",
"aoe: runt data size in ataid from",
(long) d->aoemajor, d->aoeminor,
skb->len);
break;
}
if (skb_linearize(skb))
break;
spin_lock_irq(&d->lock);
ataid_complete(d, t, skb->data);
spin_unlock_irq(&d->lock);
break;
default:
pr_info("aoe: unrecognized ata command %2.2Xh for %d.%d\n",
ahout->cmdstat,
be16_to_cpu(get_unaligned(&hin->major)),
hin->minor);
}
out:
spin_lock_irq(&d->lock);
if (t->taint > 0
&& --t->taint > 0
&& t->nout_probes == 0) {
count_targets(d, &untainted);
if (untainted > 0) {
probe(t);
t->nout_probes++;
}
}
aoe_freetframe(f);
if (buf && --buf->nframesout == 0 && buf->iter.bi_size == 0)
aoe_end_buf(d, buf);
spin_unlock_irq(&d->lock);
aoedev_put(d);
dev_kfree_skb(skb);
}
/* Enters with iocq.lock held.
* Returns true iff responses needing processing remain.
*/
static int
ktio(int id)
{
struct frame *f;
struct list_head *pos;
int i;
int actual_id;
for (i = 0; ; ++i) {
if (i == MAXIOC)
return 1;
if (list_empty(&iocq[id].head))
return 0;
pos = iocq[id].head.next;
list_del(pos);
f = list_entry(pos, struct frame, head);
spin_unlock_irq(&iocq[id].lock);
ktiocomplete(f);
/* Figure out if extra threads are required. */
actual_id = f->t->d->aoeminor % ncpus;
if (!kts[actual_id].active) {
BUG_ON(id != 0);
mutex_lock(&ktio_spawn_lock);
if (!kts[actual_id].active
&& aoe_ktstart(&kts[actual_id]) == 0)
kts[actual_id].active = 1;
mutex_unlock(&ktio_spawn_lock);
}
spin_lock_irq(&iocq[id].lock);
}
}
static int
kthread(void *vp)
{
struct ktstate *k;
DECLARE_WAITQUEUE(wait, current);
int more;
k = vp;
current->flags |= PF_NOFREEZE;
set_user_nice(current, -10);
complete(&k->rendez); /* tell spawner we're running */
do {
spin_lock_irq(k->lock);
more = k->fn(k->id);
if (!more) {
add_wait_queue(k->waitq, &wait);
__set_current_state(TASK_INTERRUPTIBLE);
}
spin_unlock_irq(k->lock);
if (!more) {
schedule();
remove_wait_queue(k->waitq, &wait);
} else
cond_resched();
} while (!kthread_should_stop());
complete(&k->rendez); /* tell spawner we're stopping */
return 0;
}
void
aoe_ktstop(struct ktstate *k)
{
kthread_stop(k->task);
wait_for_completion(&k->rendez);
}
int
aoe_ktstart(struct ktstate *k)
{
struct task_struct *task;
init_completion(&k->rendez);
task = kthread_run(kthread, k, "%s", k->name);
if (task == NULL || IS_ERR(task))
return -ENOMEM;
k->task = task;
wait_for_completion(&k->rendez); /* allow kthread to start */
init_completion(&k->rendez); /* for waiting for exit later */
return 0;
}
/* pass it off to kthreads for processing */
static void
ktcomplete(struct frame *f, struct sk_buff *skb)
{
int id;
ulong flags;
f->r_skb = skb;
id = f->t->d->aoeminor % ncpus;
spin_lock_irqsave(&iocq[id].lock, flags);
if (!kts[id].active) {
spin_unlock_irqrestore(&iocq[id].lock, flags);
/* The thread with id has not been spawned yet,
* so delegate the work to the main thread and
* try spawning a new thread.
*/
id = 0;
spin_lock_irqsave(&iocq[id].lock, flags);
}
list_add_tail(&f->head, &iocq[id].head);
spin_unlock_irqrestore(&iocq[id].lock, flags);
wake_up(&ktiowq[id]);
}
struct sk_buff *
aoecmd_ata_rsp(struct sk_buff *skb)
{
struct aoedev *d;
struct aoe_hdr *h;
struct frame *f;
u32 n;
ulong flags;
char ebuf[128];
u16 aoemajor;
h = (struct aoe_hdr *) skb->data;
aoemajor = be16_to_cpu(get_unaligned(&h->major));
d = aoedev_by_aoeaddr(aoemajor, h->minor, 0);
if (d == NULL) {
snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
"for unknown device %d.%d\n",
aoemajor, h->minor);
aoechr_error(ebuf);
return skb;
}
spin_lock_irqsave(&d->lock, flags);
n = be32_to_cpu(get_unaligned(&h->tag));
f = getframe(d, n);
if (f) {
calc_rttavg(d, f->t, tsince_hr(f));
f->t->nout--;
if (f->flags & FFL_PROBE)
f->t->nout_probes--;
} else {
f = getframe_deferred(d, n);
if (f) {
calc_rttavg(d, NULL, tsince_hr(f));
} else {
calc_rttavg(d, NULL, tsince(n));
spin_unlock_irqrestore(&d->lock, flags);
aoedev_put(d);
snprintf(ebuf, sizeof(ebuf),
"%15s e%d.%d tag=%08x@%08lx s=%pm d=%pm\n",
"unexpected rsp",
get_unaligned_be16(&h->major),
h->minor,
get_unaligned_be32(&h->tag),
jiffies,
h->src,
h->dst);
aoechr_error(ebuf);
return skb;
}
}
aoecmd_work(d);
spin_unlock_irqrestore(&d->lock, flags);
ktcomplete(f, skb);
/*
* Note here that we do not perform an aoedev_put, as we are
* leaving this reference for the ktio to release.
*/
return NULL;
}
void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
struct sk_buff_head queue;
__skb_queue_head_init(&queue);
aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
aoenet_xmit(&queue);
}
struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
struct aoe_hdr *h;
struct aoe_atahdr *ah;
struct frame *f;
struct sk_buff *skb;
struct aoetgt *t;
f = newframe(d);
if (f == NULL)
return NULL;
t = *d->tgt;
/* initialize the headers & frame */
skb = f->skb;
h = (struct aoe_hdr *) skb_mac_header(skb);
ah = (struct aoe_atahdr *) (h+1);
skb_put(skb, sizeof *h + sizeof *ah);
memset(h, 0, skb->len);
f->tag = aoehdr_atainit(d, t, h);
fhash(f);
t->nout++;
f->waited = 0;
f->waited_total = 0;
/* set up ata header */
ah->scnt = 1;
ah->cmdstat = ATA_CMD_ID_ATA;
ah->lba3 = 0xa0;
skb->dev = t->ifp->nd;
d->rttavg = RTTAVG_INIT;
d->rttdev = RTTDEV_INIT;
d->timer.function = rexmit_timer;
skb = skb_clone(skb, GFP_ATOMIC);
if (skb)
f->sent = ktime_get();
return skb;
}
static struct aoetgt **
grow_targets(struct aoedev *d)
{
ulong oldn, newn;
struct aoetgt **tt;
oldn = d->ntargets;
newn = oldn * 2;
tt = kcalloc(newn, sizeof(*d->targets), GFP_ATOMIC);
if (!tt)
return NULL;
memmove(tt, d->targets, sizeof(*d->targets) * oldn);
d->tgt = tt + (d->tgt - d->targets);
kfree(d->targets);
d->targets = tt;
d->ntargets = newn;
return &d->targets[oldn];
}
static struct aoetgt *
addtgt(struct aoedev *d, char *addr, ulong nframes)
{
struct aoetgt *t, **tt, **te;
tt = d->targets;
te = tt + d->ntargets;
for (; tt < te && *tt; tt++)
;
if (tt == te) {
tt = grow_targets(d);
if (!tt)
goto nomem;
}
t = kzalloc(sizeof(*t), GFP_ATOMIC);
if (!t)
goto nomem;
t->nframes = nframes;
t->d = d;
memcpy(t->addr, addr, sizeof t->addr);
t->ifp = t->ifs;
aoecmd_wreset(t);
t->maxout = t->nframes / 2;
INIT_LIST_HEAD(&t->ffree);
return *tt = t;
nomem:
pr_info("aoe: cannot allocate memory to add target\n");
return NULL;
}
static void
setdbcnt(struct aoedev *d)
{
struct aoetgt **t, **e;
int bcnt = 0;
t = d->targets;
e = t + d->ntargets;
for (; t < e && *t; t++)
if (bcnt == 0 || bcnt > (*t)->minbcnt)
bcnt = (*t)->minbcnt;
if (bcnt != d->maxbcnt) {
d->maxbcnt = bcnt;
pr_info("aoe: e%ld.%d: setting %d byte data frames\n",
d->aoemajor, d->aoeminor, bcnt);
}
}
static void
setifbcnt(struct aoetgt *t, struct net_device *nd, int bcnt)
{
struct aoedev *d;
struct aoeif *p, *e;
int minbcnt;
d = t->d;
minbcnt = bcnt;
p = t->ifs;
e = p + NAOEIFS;
for (; p < e; p++) {
if (p->nd == NULL)
break; /* end of the valid interfaces */
if (p->nd == nd) {
p->bcnt = bcnt; /* we're updating */
nd = NULL;
} else if (minbcnt > p->bcnt)
minbcnt = p->bcnt; /* find the min interface */
}
if (nd) {
if (p == e) {
pr_err("aoe: device setifbcnt failure; too many interfaces.\n");
return;
}
dev_hold(nd);
p->nd = nd;
p->bcnt = bcnt;
}
t->minbcnt = minbcnt;
setdbcnt(d);
}
void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
struct aoedev *d;
struct aoe_hdr *h;
struct aoe_cfghdr *ch;
struct aoetgt *t;
ulong flags, aoemajor;
struct sk_buff *sl;
struct sk_buff_head queue;
u16 n;
sl = NULL;
h = (struct aoe_hdr *) skb_mac_header(skb);
ch = (struct aoe_cfghdr *) (h+1);
/*
* Enough people have their dip switches set backwards to
* warrant a loud message for this special case.
*/
aoemajor = get_unaligned_be16(&h->major);
if (aoemajor == 0xfff) {
printk(KERN_ERR "aoe: Warning: shelf address is all ones. "
"Check shelf dip switches.\n");
return;
}
if (aoemajor == 0xffff) {
pr_info("aoe: e%ld.%d: broadcast shelf number invalid\n",
aoemajor, (int) h->minor);
return;
}
if (h->minor == 0xff) {
pr_info("aoe: e%ld.%d: broadcast slot number invalid\n",
aoemajor, (int) h->minor);
return;
}
n = be16_to_cpu(ch->bufcnt);
if (n > aoe_maxout) /* keep it reasonable */
n = aoe_maxout;
d = aoedev_by_aoeaddr(aoemajor, h->minor, 1);
if (d == NULL) {
pr_info("aoe: device allocation failure\n");
return;
}
spin_lock_irqsave(&d->lock, flags);
t = gettgt(d, h->src);
if (t) {
t->nframes = n;
if (n < t->maxout)
aoecmd_wreset(t);
} else {
t = addtgt(d, h->src, n);
if (!t)
goto bail;
}
n = skb->dev->mtu;
n -= sizeof(struct aoe_hdr) + sizeof(struct aoe_atahdr);
n /= 512;
if (n > ch->scnt)
n = ch->scnt;
n = n ? n * 512 : DEFAULTBCNT;
setifbcnt(t, skb->dev, n);
/* don't change users' perspective */
if (d->nopen == 0) {
d->fw_ver = be16_to_cpu(ch->fwver);
sl = aoecmd_ata_id(d);
}
bail:
spin_unlock_irqrestore(&d->lock, flags);
aoedev_put(d);
if (sl) {
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, sl);
aoenet_xmit(&queue);
}
}
void
aoecmd_wreset(struct aoetgt *t)
{
t->maxout = 1;
t->ssthresh = t->nframes / 2;
t->next_cwnd = t->nframes;
}
void
aoecmd_cleanslate(struct aoedev *d)
{
struct aoetgt **t, **te;
d->rttavg = RTTAVG_INIT;
d->rttdev = RTTDEV_INIT;
d->maxbcnt = 0;
t = d->targets;
te = t + d->ntargets;
for (; t < te && *t; t++)
aoecmd_wreset(*t);
}
void
aoe_failbuf(struct aoedev *d, struct buf *buf)
{
if (buf == NULL)
return;
buf->iter.bi_size = 0;
buf->bio->bi_status = BLK_STS_IOERR;
if (buf->nframesout == 0)
aoe_end_buf(d, buf);
}
void
aoe_flush_iocq(void)
{
int i;
for (i = 0; i < ncpus; i++) {
if (kts[i].active)
aoe_flush_iocq_by_index(i);
}
}
void
aoe_flush_iocq_by_index(int id)
{
struct frame *f;
struct aoedev *d;
LIST_HEAD(flist);
struct list_head *pos;
struct sk_buff *skb;
ulong flags;
spin_lock_irqsave(&iocq[id].lock, flags);
list_splice_init(&iocq[id].head, &flist);
spin_unlock_irqrestore(&iocq[id].lock, flags);
while (!list_empty(&flist)) {
pos = flist.next;
list_del(pos);
f = list_entry(pos, struct frame, head);
d = f->t->d;
skb = f->r_skb;
spin_lock_irqsave(&d->lock, flags);
if (f->buf) {
f->buf->nframesout--;
aoe_failbuf(d, f->buf);
}
aoe_freetframe(f);
spin_unlock_irqrestore(&d->lock, flags);
dev_kfree_skb(skb);
aoedev_put(d);
}
}
int __init
aoecmd_init(void)
{
void *p;
int i;
int ret;
/* get_zeroed_page returns page with ref count 1 */
p = (void *) get_zeroed_page(GFP_KERNEL);
if (!p)
return -ENOMEM;
empty_page = virt_to_page(p);
ncpus = num_online_cpus();
iocq = kcalloc(ncpus, sizeof(struct iocq_ktio), GFP_KERNEL);
if (!iocq)
return -ENOMEM;
kts = kcalloc(ncpus, sizeof(struct ktstate), GFP_KERNEL);
if (!kts) {
ret = -ENOMEM;
goto kts_fail;
}
ktiowq = kcalloc(ncpus, sizeof(wait_queue_head_t), GFP_KERNEL);
if (!ktiowq) {
ret = -ENOMEM;
goto ktiowq_fail;
}
for (i = 0; i < ncpus; i++) {
INIT_LIST_HEAD(&iocq[i].head);
spin_lock_init(&iocq[i].lock);
init_waitqueue_head(&ktiowq[i]);
snprintf(kts[i].name, sizeof(kts[i].name), "aoe_ktio%d", i);
kts[i].fn = ktio;
kts[i].waitq = &ktiowq[i];
kts[i].lock = &iocq[i].lock;
kts[i].id = i;
kts[i].active = 0;
}
kts[0].active = 1;
if (aoe_ktstart(&kts[0])) {
ret = -ENOMEM;
goto ktstart_fail;
}
return 0;
ktstart_fail:
kfree(ktiowq);
ktiowq_fail:
kfree(kts);
kts_fail:
kfree(iocq);
return ret;
}
void
aoecmd_exit(void)
{
int i;
for (i = 0; i < ncpus; i++)
if (kts[i].active)
aoe_ktstop(&kts[i]);
aoe_flush_iocq();
/* Free up the iocq and thread-specific configuration
* allocated during startup.
*/
kfree(iocq);
kfree(kts);
kfree(ktiowq);
free_page((unsigned long) page_address(empty_page));
empty_page = NULL;
}
| linux-master | drivers/block/aoe/aoecmd.c |
/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
/*
* aoechr.c
* AoE character device driver
*/
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include "aoe.h"
enum {
//MINOR_STAT = 1, (moved to sysfs)
MINOR_ERR = 2,
MINOR_DISCOVER,
MINOR_INTERFACES,
MINOR_REVALIDATE,
MINOR_FLUSH,
MSGSZ = 2048,
NMSG = 100, /* message backlog to retain */
};
struct aoe_chardev {
ulong minor;
char name[32];
};
enum { EMFL_VALID = 1 };
struct ErrMsg {
short flags;
short len;
char *msg;
};
static DEFINE_MUTEX(aoechr_mutex);
/* A ring buffer of error messages, to be read through
* "/dev/etherd/err". When no messages are present,
* readers will block waiting for messages to appear.
*/
static struct ErrMsg emsgs[NMSG];
static int emsgs_head_idx, emsgs_tail_idx;
static struct completion emsgs_comp;
static spinlock_t emsgs_lock;
static int nblocked_emsgs_readers;
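/* Sketch of the ring discipline (illustrative): aoechr_error()
* produces at emsgs_tail_idx and aoechr_read() consumes at
* emsgs_head_idx, both modulo NMSG; when the tail catches a slot
* still marked EMFL_VALID, the new message is silently dropped
* rather than overwriting an unread one.
*/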
static struct aoe_chardev chardevs[] = {
{ MINOR_ERR, "err" },
{ MINOR_DISCOVER, "discover" },
{ MINOR_INTERFACES, "interfaces" },
{ MINOR_REVALIDATE, "revalidate" },
{ MINOR_FLUSH, "flush" },
};
static char *aoe_devnode(const struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev));
}
static const struct class aoe_class = {
.name = "aoe",
.devnode = aoe_devnode,
};
static int
discover(void)
{
aoecmd_cfg(0xffff, 0xff);
return 0;
}
static int
interfaces(const char __user *str, size_t size)
{
if (set_aoe_iflist(str, size)) {
printk(KERN_ERR
"aoe: could not set interface list: too many interfaces\n");
return -EINVAL;
}
return 0;
}
static int
revalidate(const char __user *str, size_t size)
{
int major, minor, n;
ulong flags;
struct aoedev *d;
struct sk_buff *skb;
char buf[16];
if (size >= sizeof buf)
return -EINVAL;
if (copy_from_user(buf, str, size))
return -EFAULT;
buf[size] = '\0';
n = sscanf(buf, "e%d.%d", &major, &minor);
if (n != 2) {
pr_err("aoe: invalid device specification %s\n", buf);
return -EINVAL;
}
d = aoedev_by_aoeaddr(major, minor, 0);
if (!d)
return -EINVAL;
spin_lock_irqsave(&d->lock, flags);
aoecmd_cleanslate(d);
aoecmd_cfg(major, minor);
loop:
skb = aoecmd_ata_id(d);
spin_unlock_irqrestore(&d->lock, flags);
/* try again if we are able to sleep a bit,
* otherwise give up this revalidation
*/
if (!skb && !msleep_interruptible(250)) {
spin_lock_irqsave(&d->lock, flags);
goto loop;
}
aoedev_put(d);
if (skb) {
struct sk_buff_head queue;
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
}
return 0;
}
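/* Queue an error message for readers of /dev/etherd/err. May be
 * called from atomic context, hence the GFP_ATOMIC copy; if the ring
 * is full (the tail slot is still marked valid) the message is
 * silently dropped. Any blocked reader is woken via the completion.
 */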
void
aoechr_error(char *msg)
{
struct ErrMsg *em;
char *mp;
ulong flags, n;
n = strlen(msg);
spin_lock_irqsave(&emsgs_lock, flags);
em = emsgs + emsgs_tail_idx;
if ((em->flags & EMFL_VALID)) {
bail: spin_unlock_irqrestore(&emsgs_lock, flags);
return;
}
mp = kmemdup(msg, n, GFP_ATOMIC);
if (!mp)
goto bail;
em->msg = mp;
em->flags |= EMFL_VALID;
em->len = n;
emsgs_tail_idx++;
emsgs_tail_idx %= ARRAY_SIZE(emsgs);
spin_unlock_irqrestore(&emsgs_lock, flags);
if (nblocked_emsgs_readers)
complete(&emsgs_comp);
}
static ssize_t
aoechr_write(struct file *filp, const char __user *buf, size_t cnt, loff_t *offp)
{
int ret = -EINVAL;
switch ((unsigned long) filp->private_data) {
default:
printk(KERN_INFO "aoe: can't write to that file.\n");
break;
case MINOR_DISCOVER:
ret = discover();
break;
case MINOR_INTERFACES:
ret = interfaces(buf, cnt);
break;
case MINOR_REVALIDATE:
ret = revalidate(buf, cnt);
break;
case MINOR_FLUSH:
ret = aoedev_flush(buf, cnt);
break;
}
if (ret == 0)
ret = cnt;
return ret;
}
static int
aoechr_open(struct inode *inode, struct file *filp)
{
int n, i;
mutex_lock(&aoechr_mutex);
n = iminor(inode);
filp->private_data = (void *) (unsigned long) n;
for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
if (chardevs[i].minor == n) {
mutex_unlock(&aoechr_mutex);
return 0;
}
mutex_unlock(&aoechr_mutex);
return -EINVAL;
}
static int
aoechr_rel(struct inode *inode, struct file *filp)
{
return 0;
}
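/* Only the "err" minor is readable. Unless O_NDELAY is set, block
 * until a message is queued, then deliver exactly one message; a
 * buffer too small for the next message yields -EAGAIN rather than
 * a partial read.
 */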
static ssize_t
aoechr_read(struct file *filp, char __user *buf, size_t cnt, loff_t *off)
{
unsigned long n;
char *mp;
struct ErrMsg *em;
ssize_t len;
ulong flags;
n = (unsigned long) filp->private_data;
if (n != MINOR_ERR)
return -EFAULT;
spin_lock_irqsave(&emsgs_lock, flags);
for (;;) {
em = emsgs + emsgs_head_idx;
if ((em->flags & EMFL_VALID) != 0)
break;
if (filp->f_flags & O_NDELAY) {
spin_unlock_irqrestore(&emsgs_lock, flags);
return -EAGAIN;
}
nblocked_emsgs_readers++;
spin_unlock_irqrestore(&emsgs_lock, flags);
n = wait_for_completion_interruptible(&emsgs_comp);
spin_lock_irqsave(&emsgs_lock, flags);
nblocked_emsgs_readers--;
if (n) {
spin_unlock_irqrestore(&emsgs_lock, flags);
return -ERESTARTSYS;
}
}
if (em->len > cnt) {
spin_unlock_irqrestore(&emsgs_lock, flags);
return -EAGAIN;
}
mp = em->msg;
len = em->len;
em->msg = NULL;
em->flags &= ~EMFL_VALID;
emsgs_head_idx++;
emsgs_head_idx %= ARRAY_SIZE(emsgs);
spin_unlock_irqrestore(&emsgs_lock, flags);
n = copy_to_user(buf, mp, len);
kfree(mp);
return n == 0 ? len : -EFAULT;
}
static const struct file_operations aoe_fops = {
.write = aoechr_write,
.read = aoechr_read,
.open = aoechr_open,
.release = aoechr_rel,
.owner = THIS_MODULE,
.llseek = noop_llseek,
};
int __init
aoechr_init(void)
{
int n, i;
n = register_chrdev(AOE_MAJOR, "aoechr", &aoe_fops);
if (n < 0) {
printk(KERN_ERR "aoe: can't register char device\n");
return n;
}
init_completion(&emsgs_comp);
spin_lock_init(&emsgs_lock);
n = class_register(&aoe_class);
if (n) {
unregister_chrdev(AOE_MAJOR, "aoechr");
return n;
}
for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
device_create(&aoe_class, NULL,
MKDEV(AOE_MAJOR, chardevs[i].minor), NULL,
chardevs[i].name);
return 0;
}
void
aoechr_exit(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
device_destroy(&aoe_class, MKDEV(AOE_MAJOR, chardevs[i].minor));
class_unregister(&aoe_class);
unregister_chrdev(AOE_MAJOR, "aoechr");
}
| linux-master | drivers/block/aoe/aoechr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Xenbus code for blkif backend
Copyright (C) 2005 Rusty Russell <[email protected]>
Copyright (C) 2005 XenSource Ltd
*/
#define pr_fmt(fmt) "xen-blkback: " fmt
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include "common.h"
/* On the XenBus the max length of 'ring-ref%u'. */
#define RINGREF_NAME_LEN (20)
struct backend_info {
struct xenbus_device *dev;
struct xen_blkif *blkif;
struct xenbus_watch backend_watch;
unsigned major;
unsigned minor;
char *mode;
};
static struct kmem_cache *xen_blkif_cachep;
static void connect(struct backend_info *);
static int connect_ring(struct backend_info *);
static void backend_changed(struct xenbus_watch *, const char *,
const char *);
static void xen_blkif_free(struct xen_blkif *blkif);
static void xen_vbd_free(struct xen_vbd *vbd);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be)
{
return be->dev;
}
/*
* The last request could free the device from softirq context and
* xen_blkif_free() can sleep.
*/
static void xen_blkif_deferred_free(struct work_struct *work)
{
struct xen_blkif *blkif;
blkif = container_of(work, struct xen_blkif, free_work);
xen_blkif_free(blkif);
}
static int blkback_name(struct xen_blkif *blkif, char *buf)
{
char *devpath, *devname;
struct xenbus_device *dev = blkif->be->dev;
devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
if (IS_ERR(devpath))
return PTR_ERR(devpath);
devname = strstr(devpath, "/dev/");
if (devname != NULL)
devname += strlen("/dev/");
else
devname = devpath;
snprintf(buf, TASK_COMM_LEN, "%d.%s", blkif->domid, devname);
kfree(devpath);
return 0;
}
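/*
 * Called whenever the connection state may have changed. Once the rings
 * are mapped and the vbd is open, switch the device to Connected, flush
 * and invalidate the backing device's page cache, and spawn one xenblkd
 * kthread per ring; if starting any thread fails, stop the ones already
 * running.
 */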
static void xen_update_blkif_status(struct xen_blkif *blkif)
{
int err;
char name[TASK_COMM_LEN];
struct xen_blkif_ring *ring;
int i;
/* Not ready to connect? */
if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev)
return;
/* Already connected? */
if (blkif->be->dev->state == XenbusStateConnected)
return;
/* Attempt to connect: exit if we fail to. */
connect(blkif->be);
if (blkif->be->dev->state != XenbusStateConnected)
return;
err = blkback_name(blkif, name);
if (err) {
xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
return;
}
err = sync_blockdev(blkif->vbd.bdev);
if (err) {
xenbus_dev_error(blkif->be->dev, err, "block flush");
return;
}
invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);
for (i = 0; i < blkif->nr_rings; i++) {
ring = &blkif->rings[i];
ring->xenblkd = kthread_run(xen_blkif_schedule, ring, "%s-%d", name, i);
if (IS_ERR(ring->xenblkd)) {
err = PTR_ERR(ring->xenblkd);
ring->xenblkd = NULL;
xenbus_dev_fatal(blkif->be->dev, err,
"start %s-%d xenblkd", name, i);
goto out;
}
}
return;
out:
while (--i >= 0) {
ring = &blkif->rings[i];
kthread_stop(ring->xenblkd);
}
return;
}
static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
{
unsigned int r;
blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring),
GFP_KERNEL);
if (!blkif->rings)
return -ENOMEM;
for (r = 0; r < blkif->nr_rings; r++) {
struct xen_blkif_ring *ring = &blkif->rings[r];
spin_lock_init(&ring->blk_ring_lock);
init_waitqueue_head(&ring->wq);
INIT_LIST_HEAD(&ring->pending_free);
INIT_LIST_HEAD(&ring->persistent_purge_list);
INIT_WORK(&ring->persistent_purge_work, xen_blkbk_unmap_purged_grants);
gnttab_page_cache_init(&ring->free_pages);
spin_lock_init(&ring->pending_free_lock);
init_waitqueue_head(&ring->pending_free_wq);
init_waitqueue_head(&ring->shutdown_wq);
ring->blkif = blkif;
ring->st_print = jiffies;
ring->active = true;
}
return 0;
}
/* Enable the persistent grants feature. */
static bool feature_persistent = true;
module_param(feature_persistent, bool, 0644);
MODULE_PARM_DESC(feature_persistent, "Enables the persistent grants feature");
static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
struct xen_blkif *blkif;
BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
if (!blkif)
return ERR_PTR(-ENOMEM);
blkif->domid = domid;
atomic_set(&blkif->refcnt, 1);
init_completion(&blkif->drain_complete);
/*
* Because freeing back to the cache may be deferred, it is not
* safe to unload the module (and hence destroy the cache) until
* this has completed. To prevent premature unloading, take an
* extra module reference here and release only when the object
* has been freed back to the cache.
*/
__module_get(THIS_MODULE);
INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);
return blkif;
}
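/*
 * Map the frontend's shared ring pages and bind the event channel.
 * The producer/consumer indices are read once and validated, so a
 * buggy or malicious frontend cannot make the backend walk past the
 * end of the ring.
 */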
static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
unsigned int nr_grefs, unsigned int evtchn)
{
int err;
struct xen_blkif *blkif = ring->blkif;
const struct blkif_common_sring *sring_common;
RING_IDX rsp_prod, req_prod;
unsigned int size;
/* Already connected through? */
if (ring->irq)
return 0;
err = xenbus_map_ring_valloc(blkif->be->dev, gref, nr_grefs,
&ring->blk_ring);
if (err < 0)
return err;
sring_common = (struct blkif_common_sring *)ring->blk_ring;
rsp_prod = READ_ONCE(sring_common->rsp_prod);
req_prod = READ_ONCE(sring_common->req_prod);
switch (blkif->blk_protocol) {
case BLKIF_PROTOCOL_NATIVE:
{
struct blkif_sring *sring_native =
(struct blkif_sring *)ring->blk_ring;
BACK_RING_ATTACH(&ring->blk_rings.native, sring_native,
rsp_prod, XEN_PAGE_SIZE * nr_grefs);
size = __RING_SIZE(sring_native, XEN_PAGE_SIZE * nr_grefs);
break;
}
case BLKIF_PROTOCOL_X86_32:
{
struct blkif_x86_32_sring *sring_x86_32 =
(struct blkif_x86_32_sring *)ring->blk_ring;
BACK_RING_ATTACH(&ring->blk_rings.x86_32, sring_x86_32,
rsp_prod, XEN_PAGE_SIZE * nr_grefs);
size = __RING_SIZE(sring_x86_32, XEN_PAGE_SIZE * nr_grefs);
break;
}
case BLKIF_PROTOCOL_X86_64:
{
struct blkif_x86_64_sring *sring_x86_64 =
(struct blkif_x86_64_sring *)ring->blk_ring;
BACK_RING_ATTACH(&ring->blk_rings.x86_64, sring_x86_64,
rsp_prod, XEN_PAGE_SIZE * nr_grefs);
size = __RING_SIZE(sring_x86_64, XEN_PAGE_SIZE * nr_grefs);
break;
}
default:
BUG();
}
err = -EIO;
if (req_prod - rsp_prod > size)
goto fail;
err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->be->dev,
evtchn, xen_blkif_be_int, 0, "blkif-backend", ring);
if (err < 0)
goto fail;
ring->irq = err;
return 0;
fail:
xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
ring->blk_rings.common.sring = NULL;
return err;
}
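/*
 * Stop the ring kthreads and release per-ring resources. Rings that
 * still have in-flight I/O are skipped and -EBUSY is returned so the
 * caller can retry; the ring array itself is freed only once every
 * ring is idle.
 */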
static int xen_blkif_disconnect(struct xen_blkif *blkif)
{
struct pending_req *req, *n;
unsigned int j, r;
bool busy = false;
for (r = 0; r < blkif->nr_rings; r++) {
struct xen_blkif_ring *ring = &blkif->rings[r];
unsigned int i = 0;
if (!ring->active)
continue;
if (ring->xenblkd) {
kthread_stop(ring->xenblkd);
ring->xenblkd = NULL;
wake_up(&ring->shutdown_wq);
}
/* The above kthread_stop() guarantees that at this point we
* don't have any discard_io or other_io requests. So, checking
* for inflight IO is enough.
*/
if (atomic_read(&ring->inflight) > 0) {
busy = true;
continue;
}
if (ring->irq) {
unbind_from_irqhandler(ring->irq, ring);
ring->irq = 0;
}
if (ring->blk_rings.common.sring) {
xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
ring->blk_rings.common.sring = NULL;
}
/* Remove all persistent grants and the cache of ballooned pages. */
xen_blkbk_free_caches(ring);
/* Check that there is no request in use */
list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
list_del(&req->free_list);
for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
kfree(req->segments[j]);
for (j = 0; j < MAX_INDIRECT_PAGES; j++)
kfree(req->indirect_pages[j]);
kfree(req);
i++;
}
BUG_ON(atomic_read(&ring->persistent_gnt_in_use) != 0);
BUG_ON(!list_empty(&ring->persistent_purge_list));
BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
BUG_ON(ring->free_pages.num_pages != 0);
BUG_ON(ring->persistent_gnt_c != 0);
WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
ring->active = false;
}
if (busy)
return -EBUSY;
blkif->nr_ring_pages = 0;
/*
* blkif->rings was allocated in connect_ring, so we should free it in
* here.
*/
kfree(blkif->rings);
blkif->rings = NULL;
blkif->nr_rings = 0;
return 0;
}
static void xen_blkif_free(struct xen_blkif *blkif)
{
WARN_ON(xen_blkif_disconnect(blkif));
xen_vbd_free(&blkif->vbd);
kfree(blkif->be->mode);
kfree(blkif->be);
/* Make sure everything is drained before shutting down */
kmem_cache_free(xen_blkif_cachep, blkif);
module_put(THIS_MODULE);
}
int __init xen_blkif_interface_init(void)
{
xen_blkif_cachep = kmem_cache_create("blkif_cache",
sizeof(struct xen_blkif),
0, 0, NULL);
if (!xen_blkif_cachep)
return -ENOMEM;
return 0;
}
void xen_blkif_interface_fini(void)
{
kmem_cache_destroy(xen_blkif_cachep);
xen_blkif_cachep = NULL;
}
/*
* sysfs interface for VBD I/O requests
*/
#define VBD_SHOW_ALLRING(name, format) \
static ssize_t show_##name(struct device *_dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct xenbus_device *dev = to_xenbus_device(_dev); \
struct backend_info *be = dev_get_drvdata(&dev->dev); \
struct xen_blkif *blkif = be->blkif; \
unsigned int i; \
unsigned long long result = 0; \
\
if (!blkif->rings) \
goto out; \
\
for (i = 0; i < blkif->nr_rings; i++) { \
struct xen_blkif_ring *ring = &blkif->rings[i]; \
\
result += ring->st_##name; \
} \
\
out: \
return sprintf(buf, format, result); \
} \
static DEVICE_ATTR(name, 0444, show_##name, NULL)
VBD_SHOW_ALLRING(oo_req, "%llu\n");
VBD_SHOW_ALLRING(rd_req, "%llu\n");
VBD_SHOW_ALLRING(wr_req, "%llu\n");
VBD_SHOW_ALLRING(f_req, "%llu\n");
VBD_SHOW_ALLRING(ds_req, "%llu\n");
VBD_SHOW_ALLRING(rd_sect, "%llu\n");
VBD_SHOW_ALLRING(wr_sect, "%llu\n");
static struct attribute *xen_vbdstat_attrs[] = {
&dev_attr_oo_req.attr,
&dev_attr_rd_req.attr,
&dev_attr_wr_req.attr,
&dev_attr_f_req.attr,
&dev_attr_ds_req.attr,
&dev_attr_rd_sect.attr,
&dev_attr_wr_sect.attr,
NULL
};
static const struct attribute_group xen_vbdstat_group = {
.name = "statistics",
.attrs = xen_vbdstat_attrs,
};
#define VBD_SHOW(name, format, args...) \
static ssize_t show_##name(struct device *_dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct xenbus_device *dev = to_xenbus_device(_dev); \
struct backend_info *be = dev_get_drvdata(&dev->dev); \
\
return sprintf(buf, format, ##args); \
} \
static DEVICE_ATTR(name, 0444, show_##name, NULL)
VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
VBD_SHOW(mode, "%s\n", be->mode);
static int xenvbd_sysfs_addif(struct xenbus_device *dev)
{
int error;
error = device_create_file(&dev->dev, &dev_attr_physical_device);
if (error)
goto fail1;
error = device_create_file(&dev->dev, &dev_attr_mode);
if (error)
goto fail2;
error = sysfs_create_group(&dev->dev.kobj, &xen_vbdstat_group);
if (error)
goto fail3;
return 0;
fail3: sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
fail2: device_remove_file(&dev->dev, &dev_attr_mode);
fail1: device_remove_file(&dev->dev, &dev_attr_physical_device);
return error;
}
static void xenvbd_sysfs_delif(struct xenbus_device *dev)
{
sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
device_remove_file(&dev->dev, &dev_attr_mode);
device_remove_file(&dev->dev, &dev_attr_physical_device);
}
static void xen_vbd_free(struct xen_vbd *vbd)
{
if (vbd->bdev)
blkdev_put(vbd->bdev, NULL);
vbd->bdev = NULL;
}
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
unsigned major, unsigned minor, int readonly,
int cdrom)
{
struct xen_vbd *vbd;
struct block_device *bdev;
vbd = &blkif->vbd;
vbd->handle = handle;
vbd->readonly = readonly;
vbd->type = 0;
vbd->pdevice = MKDEV(major, minor);
bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
BLK_OPEN_READ : BLK_OPEN_WRITE, NULL, NULL);
if (IS_ERR(bdev)) {
pr_warn("xen_vbd_create: device %08x could not be opened\n",
vbd->pdevice);
return -ENOENT;
}
vbd->bdev = bdev;
if (vbd->bdev->bd_disk == NULL) {
pr_warn("xen_vbd_create: device %08x doesn't exist\n",
vbd->pdevice);
xen_vbd_free(vbd);
return -ENOENT;
}
vbd->size = vbd_sz(vbd);
if (cdrom || disk_to_cdi(vbd->bdev->bd_disk))
vbd->type |= VDISK_CDROM;
if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
vbd->type |= VDISK_REMOVABLE;
if (bdev_write_cache(bdev))
vbd->flush_support = true;
if (bdev_max_secure_erase_sectors(bdev))
vbd->discard_secure = true;
pr_debug("Successful creation of handle=%04x (dom=%u)\n",
handle, blkif->domid);
return 0;
}
static void xen_blkbk_remove(struct xenbus_device *dev)
{
struct backend_info *be = dev_get_drvdata(&dev->dev);
pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
if (be->major || be->minor)
xenvbd_sysfs_delif(dev);
if (be->backend_watch.node) {
unregister_xenbus_watch(&be->backend_watch);
kfree(be->backend_watch.node);
be->backend_watch.node = NULL;
}
dev_set_drvdata(&dev->dev, NULL);
if (be->blkif) {
xen_blkif_disconnect(be->blkif);
/* Put the reference we set in xen_blkif_alloc(). */
xen_blkif_put(be->blkif);
}
}
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
struct backend_info *be, int state)
{
struct xenbus_device *dev = be->dev;
int err;
err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
"%d", state);
if (err)
dev_warn(&dev->dev, "writing feature-flush-cache (%d)", err);
return err;
}
static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
{
struct xenbus_device *dev = be->dev;
struct xen_blkif *blkif = be->blkif;
int err;
int state = 0;
struct block_device *bdev = be->blkif->vbd.bdev;
if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
return;
if (bdev_max_discard_sectors(bdev)) {
err = xenbus_printf(xbt, dev->nodename,
"discard-granularity", "%u",
bdev_discard_granularity(bdev));
if (err) {
dev_warn(&dev->dev, "writing discard-granularity (%d)", err);
return;
}
err = xenbus_printf(xbt, dev->nodename,
"discard-alignment", "%u",
bdev_discard_alignment(bdev));
if (err) {
dev_warn(&dev->dev, "writing discard-alignment (%d)", err);
return;
}
state = 1;
/* Optional. */
err = xenbus_printf(xbt, dev->nodename,
"discard-secure", "%d",
blkif->vbd.discard_secure);
if (err) {
dev_warn(&dev->dev, "writing discard-secure (%d)", err);
return;
}
}
err = xenbus_printf(xbt, dev->nodename, "feature-discard",
"%d", state);
if (err)
dev_warn(&dev->dev, "writing feature-discard (%d)", err);
}
int xen_blkbk_barrier(struct xenbus_transaction xbt,
struct backend_info *be, int state)
{
struct xenbus_device *dev = be->dev;
int err;
err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
"%d", state);
if (err)
dev_warn(&dev->dev, "writing feature-barrier (%d)", err);
return err;
}
/*
* Entry point to this code when a new device is created. Allocate the basic
* structures, and watch the store waiting for the hotplug scripts to tell us
* the device's physical major and minor numbers. Switch to InitWait.
*/
static int xen_blkbk_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
int err;
struct backend_info *be = kzalloc(sizeof(struct backend_info),
GFP_KERNEL);
/* match the pr_debug in xen_blkbk_remove */
pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
if (!be) {
xenbus_dev_fatal(dev, -ENOMEM,
"allocating backend structure");
return -ENOMEM;
}
be->dev = dev;
dev_set_drvdata(&dev->dev, be);
be->blkif = xen_blkif_alloc(dev->otherend_id);
if (IS_ERR(be->blkif)) {
err = PTR_ERR(be->blkif);
be->blkif = NULL;
xenbus_dev_fatal(dev, err, "creating block interface");
goto fail;
}
err = xenbus_printf(XBT_NIL, dev->nodename,
"feature-max-indirect-segments", "%u",
MAX_INDIRECT_SEGMENTS);
if (err)
dev_warn(&dev->dev,
"writing %s/feature-max-indirect-segments (%d)",
dev->nodename, err);
/* Multi-queue: advertise how many queues are supported by us.*/
err = xenbus_printf(XBT_NIL, dev->nodename,
"multi-queue-max-queues", "%u", xenblk_max_queues);
if (err)
pr_warn("Error writing multi-queue-max-queues\n");
/* setup back pointer */
be->blkif->be = be;
err = xenbus_watch_pathfmt(dev, &be->backend_watch, NULL,
backend_changed,
"%s/%s", dev->nodename, "physical-device");
if (err)
goto fail;
err = xenbus_printf(XBT_NIL, dev->nodename, "max-ring-page-order", "%u",
xen_blkif_max_ring_order);
if (err)
pr_warn("%s write out 'max-ring-page-order' failed\n", __func__);
err = xenbus_switch_state(dev, XenbusStateInitWait);
if (err)
goto fail;
return 0;
fail:
pr_warn("%s failed\n", __func__);
xen_blkbk_remove(dev);
return err;
}
/*
* Callback received when the hotplug scripts have placed the physical-device
* node. Read it and the mode node, and create a vbd. If the frontend is
* ready, connect.
*/
static void backend_changed(struct xenbus_watch *watch,
const char *path, const char *token)
{
int err;
unsigned major;
unsigned minor;
struct backend_info *be
= container_of(watch, struct backend_info, backend_watch);
struct xenbus_device *dev = be->dev;
int cdrom = 0;
unsigned long handle;
char *device_type;
pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
&major, &minor);
if (XENBUS_EXIST_ERR(err)) {
/*
* Since this watch will fire once immediately after it is
* registered, we expect this. Ignore it, and wait for the
* hotplug scripts.
*/
return;
}
if (err != 2) {
xenbus_dev_fatal(dev, err, "reading physical-device");
return;
}
if (be->major | be->minor) {
if (be->major != major || be->minor != minor)
pr_warn("changing physical device (from %x:%x to %x:%x) not supported.\n",
be->major, be->minor, major, minor);
return;
}
be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
if (IS_ERR(be->mode)) {
err = PTR_ERR(be->mode);
be->mode = NULL;
xenbus_dev_fatal(dev, err, "reading mode");
return;
}
device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL);
if (!IS_ERR(device_type)) {
cdrom = strcmp(device_type, "cdrom") == 0;
kfree(device_type);
}
/* Front end dir is a number, which is used as the handle. */
err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
if (err) {
kfree(be->mode);
be->mode = NULL;
return;
}
be->major = major;
be->minor = minor;
err = xen_vbd_create(be->blkif, handle, major, minor,
!strchr(be->mode, 'w'), cdrom);
if (err)
xenbus_dev_fatal(dev, err, "creating vbd structure");
else {
err = xenvbd_sysfs_addif(dev);
if (err) {
xen_vbd_free(&be->blkif->vbd);
xenbus_dev_fatal(dev, err, "creating sysfs entries");
}
}
if (err) {
kfree(be->mode);
be->mode = NULL;
be->major = 0;
be->minor = 0;
} else {
/* We're potentially connected now */
xen_update_blkif_status(be->blkif);
}
}
/*
* Callback received when the frontend's state changes.
*/
static void frontend_changed(struct xenbus_device *dev,
enum xenbus_state frontend_state)
{
struct backend_info *be = dev_get_drvdata(&dev->dev);
int err;
pr_debug("%s %p %s\n", __func__, dev, xenbus_strstate(frontend_state));
switch (frontend_state) {
case XenbusStateInitialising:
if (dev->state == XenbusStateClosed) {
pr_info("%s: prepare for reconnect\n", dev->nodename);
xenbus_switch_state(dev, XenbusStateInitWait);
}
break;
case XenbusStateInitialised:
case XenbusStateConnected:
/*
* Ensure we connect even when two watches fire in
* close succession and we miss the intermediate value
* of frontend_state.
*/
if (dev->state == XenbusStateConnected)
break;
/*
* Enforce precondition before potential leak point.
* xen_blkif_disconnect() is idempotent.
*/
err = xen_blkif_disconnect(be->blkif);
if (err) {
xenbus_dev_fatal(dev, err, "pending I/O");
break;
}
err = connect_ring(be);
if (err) {
/*
* Clean up so that memory resources can be used by
* other devices. connect_ring has already reported the error.
*/
xen_blkif_disconnect(be->blkif);
break;
}
xen_update_blkif_status(be->blkif);
break;
case XenbusStateClosing:
xenbus_switch_state(dev, XenbusStateClosing);
break;
case XenbusStateClosed:
xen_blkif_disconnect(be->blkif);
xenbus_switch_state(dev, XenbusStateClosed);
if (xenbus_dev_is_online(dev))
break;
fallthrough;
/* if not online */
case XenbusStateUnknown:
/* implies xen_blkif_disconnect() via xen_blkbk_remove() */
device_unregister(&dev->dev);
break;
default:
xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
frontend_state);
break;
}
}
/* Once a memory pressure is detected, squeeze free page pools for a while. */
static unsigned int buffer_squeeze_duration_ms = 10;
module_param_named(buffer_squeeze_duration_ms,
buffer_squeeze_duration_ms, int, 0644);
MODULE_PARM_DESC(buffer_squeeze_duration_ms,
"Duration in ms to squeeze pages buffer when a memory pressure is detected");
/*
* Callback received when the memory pressure is detected.
*/
static void reclaim_memory(struct xenbus_device *dev)
{
struct backend_info *be = dev_get_drvdata(&dev->dev);
if (!be)
return;
be->blkif->buffer_squeeze_end = jiffies +
msecs_to_jiffies(buffer_squeeze_duration_ms);
}
/* ** Connection ** */
/*
* Write the physical details regarding the block device to the store, and
* switch to Connected state.
*/
static void connect(struct backend_info *be)
{
struct xenbus_transaction xbt;
int err;
struct xenbus_device *dev = be->dev;
pr_debug("%s %s\n", __func__, dev->otherend);
/* Supply the information about the device the frontend needs */
again:
err = xenbus_transaction_start(&xbt);
if (err) {
xenbus_dev_fatal(dev, err, "starting transaction");
return;
}
/* It is OK if we can't advertise these features. */
xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
xen_blkbk_discard(xbt, be);
xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
be->blkif->vbd.feature_gnt_persistent_parm);
if (err) {
xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
dev->nodename);
goto abort;
}
err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
(unsigned long long)vbd_sz(&be->blkif->vbd));
if (err) {
xenbus_dev_fatal(dev, err, "writing %s/sectors",
dev->nodename);
goto abort;
}
/* FIXME: use a typename instead */
err = xenbus_printf(xbt, dev->nodename, "info", "%u",
be->blkif->vbd.type |
(be->blkif->vbd.readonly ? VDISK_READONLY : 0));
if (err) {
xenbus_dev_fatal(dev, err, "writing %s/info",
dev->nodename);
goto abort;
}
err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
(unsigned long)
bdev_logical_block_size(be->blkif->vbd.bdev));
if (err) {
xenbus_dev_fatal(dev, err, "writing %s/sector-size",
dev->nodename);
goto abort;
}
err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u",
bdev_physical_block_size(be->blkif->vbd.bdev));
if (err)
xenbus_dev_error(dev, err, "writing %s/physical-sector-size",
dev->nodename);
err = xenbus_transaction_end(xbt, 0);
if (err == -EAGAIN)
goto again;
if (err)
xenbus_dev_fatal(dev, err, "ending transaction");
err = xenbus_switch_state(dev, XenbusStateConnected);
if (err)
xenbus_dev_fatal(dev, err, "%s: switching to Connected state",
dev->nodename);
return;
abort:
xenbus_transaction_end(xbt, 1);
}
/*
* Each ring may span multiple pages, depending on "ring-page-order".
*/
static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
{
unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
struct pending_req *req, *n;
int err, i, j;
struct xen_blkif *blkif = ring->blkif;
struct xenbus_device *dev = blkif->be->dev;
unsigned int nr_grefs, evtchn;
err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
&evtchn);
if (err != 1) {
err = -EINVAL;
xenbus_dev_fatal(dev, err, "reading %s/event-channel", dir);
return err;
}
nr_grefs = blkif->nr_ring_pages;
if (unlikely(!nr_grefs)) {
WARN_ON(true);
return -EINVAL;
}
for (i = 0; i < nr_grefs; i++) {
char ring_ref_name[RINGREF_NAME_LEN];
if (blkif->multi_ref)
snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
else {
WARN_ON(i != 0);
snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref");
}
err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
"%u", &ring_ref[i]);
if (err != 1) {
err = -EINVAL;
xenbus_dev_fatal(dev, err, "reading %s/%s",
dir, ring_ref_name);
return err;
}
}
err = -ENOMEM;
for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
goto fail;
list_add_tail(&req->free_list, &ring->pending_free);
for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
req->segments[j] = kzalloc(sizeof(*req->segments[0]), GFP_KERNEL);
if (!req->segments[j])
goto fail;
}
for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
GFP_KERNEL);
if (!req->indirect_pages[j])
goto fail;
}
}
/* Map the shared frame, irq etc. */
err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
if (err) {
xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
goto fail;
}
return 0;
fail:
list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
list_del(&req->free_list);
for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
if (!req->segments[j])
break;
kfree(req->segments[j]);
}
for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
if (!req->indirect_pages[j])
break;
kfree(req->indirect_pages[j]);
}
kfree(req);
}
return err;
}
static int connect_ring(struct backend_info *be)
{
struct xenbus_device *dev = be->dev;
struct xen_blkif *blkif = be->blkif;
char protocol[64] = "";
int err, i;
char *xspath;
size_t xspathsize;
const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
unsigned int requested_num_queues = 0;
unsigned int ring_page_order;
pr_debug("%s %s\n", __func__, dev->otherend);
blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
err = xenbus_scanf(XBT_NIL, dev->otherend, "protocol",
"%63s", protocol);
if (err <= 0)
strcpy(protocol, "unspecified, assuming default");
else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
else {
xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
return -ENOSYS;
}
blkif->vbd.feature_gnt_persistent_parm = feature_persistent;
blkif->vbd.feature_gnt_persistent =
blkif->vbd.feature_gnt_persistent_parm &&
xenbus_read_unsigned(dev->otherend, "feature-persistent", 0);
blkif->vbd.overflow_max_grants = 0;
/*
* Read the number of hardware queues from frontend.
*/
requested_num_queues = xenbus_read_unsigned(dev->otherend,
"multi-queue-num-queues",
1);
if (requested_num_queues > xenblk_max_queues
|| requested_num_queues == 0) {
/* Buggy or malicious guest. */
xenbus_dev_fatal(dev, err,
"guest requested %u queues, exceeding the maximum of %u.",
requested_num_queues, xenblk_max_queues);
return -ENOSYS;
}
blkif->nr_rings = requested_num_queues;
if (xen_blkif_alloc_rings(blkif))
return -ENOMEM;
pr_info("%s: using %d queues, protocol %d (%s) %s\n", dev->nodename,
blkif->nr_rings, blkif->blk_protocol, protocol,
blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");
err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
&ring_page_order);
if (err != 1) {
blkif->nr_ring_pages = 1;
blkif->multi_ref = false;
} else if (ring_page_order <= xen_blkif_max_ring_order) {
blkif->nr_ring_pages = 1 << ring_page_order;
blkif->multi_ref = true;
} else {
err = -EINVAL;
xenbus_dev_fatal(dev, err,
"requested ring page order %d exceed max:%d",
ring_page_order,
xen_blkif_max_ring_order);
return err;
}
if (blkif->nr_rings == 1)
return read_per_ring_refs(&blkif->rings[0], dev->otherend);
else {
xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
xspath = kmalloc(xspathsize, GFP_KERNEL);
if (!xspath) {
xenbus_dev_fatal(dev, -ENOMEM, "reading ring references");
return -ENOMEM;
}
for (i = 0; i < blkif->nr_rings; i++) {
memset(xspath, 0, xspathsize);
snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, i);
err = read_per_ring_refs(&blkif->rings[i], xspath);
if (err) {
kfree(xspath);
return err;
}
}
kfree(xspath);
}
return 0;
}
static const struct xenbus_device_id xen_blkbk_ids[] = {
{ "vbd" },
{ "" }
};
static struct xenbus_driver xen_blkbk_driver = {
.ids = xen_blkbk_ids,
.probe = xen_blkbk_probe,
.remove = xen_blkbk_remove,
.otherend_changed = frontend_changed,
.allow_rebind = true,
.reclaim_memory = reclaim_memory,
};
int xen_blkif_xenbus_init(void)
{
return xenbus_register_backend(&xen_blkbk_driver);
}
void xen_blkif_xenbus_fini(void)
{
xenbus_unregister_driver(&xen_blkbk_driver);
}
| linux-master | drivers/block/xen-blkback/xenbus.c |
/******************************************************************************
*
* Back-end of the driver for virtual block devices. This portion of the
* driver exports a 'unified' block-device interface that can be accessed
* by any operating system that implements a compatible front end. A
* reference front-end implementation can be found in:
* drivers/block/xen-blkfront.c
*
* Copyright (c) 2003-2004, Keir Fraser & Steve Hand
* Copyright (c) 2005, Christopher Clark
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#define pr_fmt(fmt) "xen-blkback: " fmt
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>
#include "common.h"
/*
* Maximum number of unused free pages to keep in the internal buffer.
* Setting this to a value too low will reduce memory used in each backend,
* but can have a performance penalty.
*
* A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but it
* can be set lower at the cost of performance on some I/O-intensive
* workloads.
*/
static int max_buffer_pages = 1024;
module_param_named(max_buffer_pages, max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in each block backend buffer");
/*
* Maximum number of grants to map persistently in blkback. For maximum
* performance this should be the total number of grants that can be used
* to fill the ring, but since this might become too high, especially with
* the use of indirect descriptors, we set it to a value that provides good
* performance without using too much memory.
*
* When the list of persistent grants is full we clean it up using an LRU
* algorithm.
*/
static int max_pgrants = 1056;
module_param_named(max_persistent_grants, max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
"Maximum number of grants to map persistently");
/*
* How long a persistent grant is allowed to remain allocated without being in
* use. The time is in seconds, 0 means indefinitely long.
*/
static unsigned int pgrant_timeout = 60;
module_param_named(persistent_grant_unused_seconds, pgrant_timeout,
uint, 0644);
MODULE_PARM_DESC(persistent_grant_unused_seconds,
"Time in seconds an unused persistent grant is allowed to "
"remain allocated. Default is 60, 0 means unlimited.");
/*
* Maximum number of rings/queues blkback supports; allow as many queues as
* there are CPUs if the user has not specified a value.
*/
unsigned int xenblk_max_queues;
module_param_named(max_queues, xenblk_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
"Maximum number of hardware queues per virtual disk." \
"By default it is the number of online CPUs.");
/*
* Maximum order of pages to be used for the shared ring between front and
* backend, 4KB page granularity is used.
*/
unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
/*
* The LRU mechanism to clean the lists of persistent grants needs to
* be executed periodically. The time interval between consecutive executions
* of the purge mechanism is set in ms.
*/
#define LRU_INTERVAL 100
/*
* When the persistent grants list is full we will remove unused grants
* from the list. The percent number of grants to be removed at each LRU
* execution.
*/
#define LRU_PERCENT_CLEAN 5
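/*
 * Example with the defaults above: max_pgrants = 1056, so each purge
 * pass tries to free (1056 / 100) * 5 = 50 grants on top of however
 * many the tree holds beyond max_pgrants (see purge_persistent_gnt()).
 */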
/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);
#define BLKBACK_INVALID_HANDLE (~0)
static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
{
return pgrant_timeout && (jiffies - persistent_gnt->last_used >=
HZ * pgrant_timeout);
}
#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
struct blkif_request *req,
struct pending_req *pending_req);
static void make_response(struct xen_blkif_ring *ring, u64 id,
unsigned short op, int st);
#define foreach_grant_safe(pos, n, rbtree, node) \
for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
(n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
&(pos)->node != NULL; \
(pos) = container_of(n, typeof(*(pos)), node), \
(n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
/*
* We don't need locking around the persistent grant helpers
* because blkback uses a single thread for each backend, so we
* can be sure that these functions will never be called recursively.
*
* The only exception to that is put_persistent_grant, that can be called
* from interrupt context (by xen_blkbk_unmap), so we have to use atomic
* bit operations to modify the flags of a persistent grant and to count
* the number of used grants.
*/
static int add_persistent_gnt(struct xen_blkif_ring *ring,
struct persistent_gnt *persistent_gnt)
{
struct rb_node **new = NULL, *parent = NULL;
struct persistent_gnt *this;
struct xen_blkif *blkif = ring->blkif;
if (ring->persistent_gnt_c >= max_pgrants) {
if (!blkif->vbd.overflow_max_grants)
blkif->vbd.overflow_max_grants = 1;
return -EBUSY;
}
/* Figure out where to put new node */
new = &ring->persistent_gnts.rb_node;
while (*new) {
this = container_of(*new, struct persistent_gnt, node);
parent = *new;
if (persistent_gnt->gnt < this->gnt)
new = &((*new)->rb_left);
else if (persistent_gnt->gnt > this->gnt)
new = &((*new)->rb_right);
else {
pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
return -EINVAL;
}
}
persistent_gnt->active = true;
/* Add new node and rebalance tree. */
rb_link_node(&(persistent_gnt->node), parent, new);
rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
ring->persistent_gnt_c++;
atomic_inc(&ring->persistent_gnt_in_use);
return 0;
}
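/*
 * Look up gref in the per-ring red-black tree. Returns NULL when the
 * grant is not cached or is already marked active (the same persistent
 * grant must not be used by two in-flight requests); on success the
 * grant is marked active and the in-use counter is bumped.
 */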
static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
grant_ref_t gref)
{
struct persistent_gnt *data;
struct rb_node *node = NULL;
node = ring->persistent_gnts.rb_node;
while (node) {
data = container_of(node, struct persistent_gnt, node);
if (gref < data->gnt)
node = node->rb_left;
else if (gref > data->gnt)
node = node->rb_right;
else {
if (data->active) {
pr_alert_ratelimited("requesting a grant already in use\n");
return NULL;
}
data->active = true;
atomic_inc(&ring->persistent_gnt_in_use);
return data;
}
}
return NULL;
}
static void put_persistent_gnt(struct xen_blkif_ring *ring,
struct persistent_gnt *persistent_gnt)
{
if (!persistent_gnt->active)
pr_alert_ratelimited("freeing a grant already unused\n");
persistent_gnt->last_used = jiffies;
persistent_gnt->active = false;
atomic_dec(&ring->persistent_gnt_in_use);
}
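/*
 * Unmap and free every cached persistent grant, batching the unmap
 * operations in groups of BLKIF_MAX_SEGMENTS_PER_REQUEST to bound the
 * size of each synchronous unmap call.
 */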
static void free_persistent_gnts(struct xen_blkif_ring *ring)
{
struct rb_root *root = &ring->persistent_gnts;
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct persistent_gnt *persistent_gnt;
struct rb_node *n;
int segs_to_unmap = 0;
struct gntab_unmap_queue_data unmap_data;
if (RB_EMPTY_ROOT(root))
return;
unmap_data.pages = pages;
unmap_data.unmap_ops = unmap;
unmap_data.kunmap_ops = NULL;
foreach_grant_safe(persistent_gnt, n, root, node) {
BUG_ON(persistent_gnt->handle ==
BLKBACK_INVALID_HANDLE);
gnttab_set_unmap_op(&unmap[segs_to_unmap],
(unsigned long) pfn_to_kaddr(page_to_pfn(
persistent_gnt->page)),
GNTMAP_host_map,
persistent_gnt->handle);
pages[segs_to_unmap] = persistent_gnt->page;
if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
!rb_next(&persistent_gnt->node)) {
unmap_data.count = segs_to_unmap;
BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
gnttab_page_cache_put(&ring->free_pages, pages,
segs_to_unmap);
segs_to_unmap = 0;
}
rb_erase(&persistent_gnt->node, root);
kfree(persistent_gnt);
ring->persistent_gnt_c--;
}
BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
BUG_ON(ring->persistent_gnt_c != 0);
}
void xen_blkbk_unmap_purged_grants(struct work_struct *work)
{
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct persistent_gnt *persistent_gnt;
int segs_to_unmap = 0;
struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work);
struct gntab_unmap_queue_data unmap_data;
unmap_data.pages = pages;
unmap_data.unmap_ops = unmap;
unmap_data.kunmap_ops = NULL;
while(!list_empty(&ring->persistent_purge_list)) {
persistent_gnt = list_first_entry(&ring->persistent_purge_list,
struct persistent_gnt,
remove_node);
list_del(&persistent_gnt->remove_node);
gnttab_set_unmap_op(&unmap[segs_to_unmap],
vaddr(persistent_gnt->page),
GNTMAP_host_map,
persistent_gnt->handle);
pages[segs_to_unmap] = persistent_gnt->page;
if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
unmap_data.count = segs_to_unmap;
BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
gnttab_page_cache_put(&ring->free_pages, pages,
segs_to_unmap);
segs_to_unmap = 0;
}
kfree(persistent_gnt);
}
if (segs_to_unmap > 0) {
unmap_data.count = segs_to_unmap;
BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
gnttab_page_cache_put(&ring->free_pages, pages, segs_to_unmap);
}
}
static void purge_persistent_gnt(struct xen_blkif_ring *ring)
{
struct persistent_gnt *persistent_gnt;
struct rb_node *n;
unsigned int num_clean, total;
bool scan_used = false;
struct rb_root *root;
if (work_busy(&ring->persistent_purge_work)) {
pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
goto out;
}
if (ring->persistent_gnt_c < max_pgrants ||
(ring->persistent_gnt_c == max_pgrants &&
!ring->blkif->vbd.overflow_max_grants)) {
num_clean = 0;
} else {
num_clean = (max_pgrants / 100) * LRU_PERCENT_CLEAN;
num_clean = ring->persistent_gnt_c - max_pgrants + num_clean;
num_clean = min(ring->persistent_gnt_c, num_clean);
pr_debug("Going to purge at least %u persistent grants\n",
num_clean);
}
/*
* At this point, we can be sure that there will be no calls
* to get_persistent_gnt (because we are executing this code from
* xen_blkif_schedule); there can only be calls to put_persistent_gnt,
* which means that the number of currently used grants will go down,
* but never up, so we will always be able to remove the requested
* number of grants.
*/
total = 0;
BUG_ON(!list_empty(&ring->persistent_purge_list));
root = &ring->persistent_gnts;
purge_list:
foreach_grant_safe(persistent_gnt, n, root, node) {
BUG_ON(persistent_gnt->handle ==
BLKBACK_INVALID_HANDLE);
if (persistent_gnt->active)
continue;
if (!scan_used && !persistent_gnt_timeout(persistent_gnt))
continue;
if (scan_used && total >= num_clean)
continue;
rb_erase(&persistent_gnt->node, root);
list_add(&persistent_gnt->remove_node,
&ring->persistent_purge_list);
total++;
}
/*
* Check whether we also need to start cleaning
* grants that were used since the last purge in order to reach
* the requested number.
*/
if (!scan_used && total < num_clean) {
pr_debug("Still missing %u purged frames\n", num_clean - total);
scan_used = true;
goto purge_list;
}
if (total) {
ring->persistent_gnt_c -= total;
ring->blkif->vbd.overflow_max_grants = 0;
/* We can defer this work */
schedule_work(&ring->persistent_purge_work);
pr_debug("Purged %u/%u\n", num_clean, total);
}
out:
return;
}
/*
* Retrieve from the 'pending_reqs' a free pending_req structure to be used.
*/
static struct pending_req *alloc_req(struct xen_blkif_ring *ring)
{
struct pending_req *req = NULL;
unsigned long flags;
spin_lock_irqsave(&ring->pending_free_lock, flags);
if (!list_empty(&ring->pending_free)) {
req = list_entry(ring->pending_free.next, struct pending_req,
free_list);
list_del(&req->free_list);
}
spin_unlock_irqrestore(&ring->pending_free_lock, flags);
return req;
}
/*
* Return the 'pending_req' structure back to the freepool. We also
* wake up the thread if it was waiting for a free page.
*/
static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
{
unsigned long flags;
int was_empty;
spin_lock_irqsave(&ring->pending_free_lock, flags);
was_empty = list_empty(&ring->pending_free);
list_add(&req->free_list, &ring->pending_free);
spin_unlock_irqrestore(&ring->pending_free_lock, flags);
if (was_empty)
wake_up(&ring->pending_free_wq);
}
/*
* Routines for managing virtual block devices (vbds).
*/
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
enum req_op operation)
{
struct xen_vbd *vbd = &blkif->vbd;
int rc = -EACCES;
if ((operation != REQ_OP_READ) && vbd->readonly)
goto out;
if (likely(req->nr_sects)) {
blkif_sector_t end = req->sector_number + req->nr_sects;
if (unlikely(end < req->sector_number))
goto out;
if (unlikely(end > vbd_sz(vbd)))
goto out;
}
req->dev = vbd->pdevice;
req->bdev = vbd->bdev;
rc = 0;
out:
return rc;
}
static void xen_vbd_resize(struct xen_blkif *blkif)
{
struct xen_vbd *vbd = &blkif->vbd;
struct xenbus_transaction xbt;
int err;
struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
unsigned long long new_size = vbd_sz(vbd);
pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
pr_info("VBD Resize: new size %llu\n", new_size);
vbd->size = new_size;
again:
err = xenbus_transaction_start(&xbt);
if (err) {
pr_warn("Error starting transaction\n");
return;
}
err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
(unsigned long long)vbd_sz(vbd));
if (err) {
pr_warn("Error writing new size\n");
goto abort;
}
/*
* Write the current state; we will use this to synchronize
* the front-end. If the current state is "connected" the
* front-end will get the new size information online.
*/
err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
if (err) {
pr_warn("Error writing the state\n");
goto abort;
}
err = xenbus_transaction_end(xbt, 0);
if (err == -EAGAIN)
goto again;
if (err)
pr_warn("Error ending transaction\n");
return;
abort:
xenbus_transaction_end(xbt, 1);
}
/*
* Notification from the guest OS.
*/
static void blkif_notify_work(struct xen_blkif_ring *ring)
{
ring->waiting_reqs = 1;
wake_up(&ring->wq);
}
irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
blkif_notify_work(dev_id);
return IRQ_HANDLED;
}
/*
* SCHEDULER FUNCTIONS
*/
static void print_stats(struct xen_blkif_ring *ring)
{
pr_info("(%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
" | ds %4llu | pg: %4u/%4d\n",
current->comm, ring->st_oo_req,
ring->st_rd_req, ring->st_wr_req,
ring->st_f_req, ring->st_ds_req,
ring->persistent_gnt_c, max_pgrants);
ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
ring->st_rd_req = 0;
ring->st_wr_req = 0;
ring->st_oo_req = 0;
ring->st_ds_req = 0;
}
int xen_blkif_schedule(void *arg)
{
struct xen_blkif_ring *ring = arg;
struct xen_blkif *blkif = ring->blkif;
struct xen_vbd *vbd = &blkif->vbd;
unsigned long timeout;
int ret;
bool do_eoi;
unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
set_freezable();
while (!kthread_should_stop()) {
if (try_to_freeze())
continue;
if (unlikely(vbd->size != vbd_sz(vbd)))
xen_vbd_resize(blkif);
timeout = msecs_to_jiffies(LRU_INTERVAL);
timeout = wait_event_interruptible_timeout(
ring->wq,
ring->waiting_reqs || kthread_should_stop(),
timeout);
if (timeout == 0)
goto purge_gnt_list;
timeout = wait_event_interruptible_timeout(
ring->pending_free_wq,
!list_empty(&ring->pending_free) ||
kthread_should_stop(),
timeout);
if (timeout == 0)
goto purge_gnt_list;
do_eoi = ring->waiting_reqs;
ring->waiting_reqs = 0;
smp_mb(); /* clear flag *before* checking for work */
ret = do_block_io_op(ring, &eoi_flags);
if (ret > 0)
ring->waiting_reqs = 1;
if (ret == -EACCES)
wait_event_interruptible(ring->shutdown_wq,
kthread_should_stop());
if (do_eoi && !ring->waiting_reqs) {
xen_irq_lateeoi(ring->irq, eoi_flags);
eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
}
purge_gnt_list:
if (blkif->vbd.feature_gnt_persistent &&
time_after(jiffies, ring->next_lru)) {
purge_persistent_gnt(ring);
ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
}
/* Shrink the free pages pool if it is too large. */
if (time_before(jiffies, blkif->buffer_squeeze_end))
gnttab_page_cache_shrink(&ring->free_pages, 0);
else
gnttab_page_cache_shrink(&ring->free_pages,
max_buffer_pages);
if (log_stats && time_after(jiffies, ring->st_print))
print_stats(ring);
}
/* Drain pending purge work */
flush_work(&ring->persistent_purge_work);
if (log_stats)
print_stats(ring);
ring->xenblkd = NULL;
return 0;
}
/*
* Remove persistent grants and empty the pool of free pages
*/
void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
{
/* Free all persistent grant pages */
free_persistent_gnts(ring);
/* Since we are shutting down remove all pages from the buffer */
gnttab_page_cache_shrink(&ring->free_pages, 0 /* All */);
}
static unsigned int xen_blkbk_unmap_prepare(
struct xen_blkif_ring *ring,
struct grant_page **pages,
unsigned int num,
struct gnttab_unmap_grant_ref *unmap_ops,
struct page **unmap_pages)
{
unsigned int i, invcount = 0;
for (i = 0; i < num; i++) {
if (pages[i]->persistent_gnt != NULL) {
put_persistent_gnt(ring, pages[i]->persistent_gnt);
continue;
}
if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
continue;
unmap_pages[invcount] = pages[i]->page;
gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
GNTMAP_host_map, pages[i]->handle);
pages[i]->handle = BLKBACK_INVALID_HANDLE;
invcount++;
}
return invcount;
}
static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
{
struct pending_req *pending_req = (struct pending_req *)(data->data);
struct xen_blkif_ring *ring = pending_req->ring;
struct xen_blkif *blkif = ring->blkif;
/* BUG_ON used to reproduce existing behaviour,
but is this the best way to deal with this? */
BUG_ON(result);
gnttab_page_cache_put(&ring->free_pages, data->pages, data->count);
make_response(ring, pending_req->id,
pending_req->operation, pending_req->status);
free_req(ring, pending_req);
/*
* Make sure the request is freed before releasing blkif,
* or there could be a race between free_req and the
* cleanup done in xen_blkif_free during shutdown.
*
* NB: The fact that we might try to wake up pending_free_wq
* before drain_complete (in case there's a drain going on) is
* not a problem with our current implementation, because we
* can be sure there's no thread waiting on pending_free_wq
* while a drain is in progress, but it has to be taken into
* account if the current model is changed.
*/
if (atomic_dec_and_test(&ring->inflight) && atomic_read(&blkif->drain)) {
complete(&blkif->drain_complete);
}
xen_blkif_put(blkif);
}
static void xen_blkbk_unmap_and_respond(struct pending_req *req)
{
struct gntab_unmap_queue_data* work = &req->gnttab_unmap_data;
struct xen_blkif_ring *ring = req->ring;
struct grant_page **pages = req->segments;
unsigned int invcount;
invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
req->unmap, req->unmap_pages);
work->data = req;
work->done = xen_blkbk_unmap_and_respond_callback;
work->unmap_ops = req->unmap;
work->kunmap_ops = NULL;
work->pages = req->unmap_pages;
work->count = invcount;
gnttab_unmap_refs_async(&req->gnttab_unmap_data);
}
/*
* Unmap the grant references.
*
* This could accumulate ops up to the batch size to reduce the number
* of hypercalls, but since this is only used in error paths there's
* no real need.
*/
static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
struct grant_page *pages[],
int num)
{
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
unsigned int invcount = 0;
int ret;
while (num) {
unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);
invcount = xen_blkbk_unmap_prepare(ring, pages, batch,
unmap, unmap_pages);
if (invcount) {
ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
BUG_ON(ret);
gnttab_page_cache_put(&ring->free_pages, unmap_pages,
invcount);
}
pages += batch;
num -= batch;
}
}
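/*
 * Map the grant references for a request, in batches of
 * BLKIF_MAX_SEGMENTS_PER_REQUEST. Grants already cached persistently
 * are reused directly; newly mapped grants are added to the persistent
 * tree when there is room, and otherwise treated as ordinary
 * non-persistent grants. Returns nonzero if any grant failed to map.
 */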
static int xen_blkbk_map(struct xen_blkif_ring *ring,
struct grant_page *pages[],
int num, bool ro)
{
struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct persistent_gnt *persistent_gnt = NULL;
phys_addr_t addr = 0;
int i, seg_idx, new_map_idx;
int segs_to_map = 0;
int ret = 0;
int last_map = 0, map_until = 0;
int use_persistent_gnts;
struct xen_blkif *blkif = ring->blkif;
use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);
/*
* Fill out preq.nr_sects with the proper number of sectors, and set up
* map[..] with the PFN of each page in our domain along with the
* corresponding grant reference.
*/
again:
for (i = map_until; i < num; i++) {
uint32_t flags;
if (use_persistent_gnts) {
persistent_gnt = get_persistent_gnt(
ring,
pages[i]->gref);
}
if (persistent_gnt) {
/*
* We are using persistent grants and
* the grant is already mapped
*/
pages[i]->page = persistent_gnt->page;
pages[i]->persistent_gnt = persistent_gnt;
} else {
if (gnttab_page_cache_get(&ring->free_pages,
&pages[i]->page)) {
gnttab_page_cache_put(&ring->free_pages,
pages_to_gnt,
segs_to_map);
ret = -ENOMEM;
goto out;
}
addr = vaddr(pages[i]->page);
pages_to_gnt[segs_to_map] = pages[i]->page;
pages[i]->persistent_gnt = NULL;
flags = GNTMAP_host_map;
if (!use_persistent_gnts && ro)
flags |= GNTMAP_readonly;
gnttab_set_map_op(&map[segs_to_map++], addr,
flags, pages[i]->gref,
blkif->domid);
}
map_until = i + 1;
if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
break;
}
if (segs_to_map)
ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
/*
* Now swizzle the MFN in our domain with the MFN from the other domain
* so that when we access vaddr(pending_req,i) it has the contents of
* the page from the other domain.
*/
for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
if (!pages[seg_idx]->persistent_gnt) {
/* This is a newly mapped grant */
BUG_ON(new_map_idx >= segs_to_map);
if (unlikely(map[new_map_idx].status != 0)) {
pr_debug("invalid buffer -- could not remap it\n");
gnttab_page_cache_put(&ring->free_pages,
&pages[seg_idx]->page, 1);
pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
ret |= !ret;
goto next;
}
pages[seg_idx]->handle = map[new_map_idx].handle;
} else {
continue;
}
if (use_persistent_gnts &&
ring->persistent_gnt_c < max_pgrants) {
/*
* We are using persistent grants; the grant is
* not yet mapped, but we might have room for it.
*/
persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
GFP_KERNEL);
if (!persistent_gnt) {
/*
* If we don't have enough memory to
* allocate the persistent_gnt struct,
* map this grant non-persistently.
*/
goto next;
}
persistent_gnt->gnt = map[new_map_idx].ref;
persistent_gnt->handle = map[new_map_idx].handle;
persistent_gnt->page = pages[seg_idx]->page;
if (add_persistent_gnt(ring,
persistent_gnt)) {
kfree(persistent_gnt);
persistent_gnt = NULL;
goto next;
}
pages[seg_idx]->persistent_gnt = persistent_gnt;
pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
persistent_gnt->gnt, ring->persistent_gnt_c,
max_pgrants);
goto next;
}
if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
blkif->vbd.overflow_max_grants = 1;
pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
blkif->domid, blkif->vbd.handle);
}
/*
* We could not map this grant persistently, so use it as
* a non-persistent grant.
*/
next:
new_map_idx++;
}
segs_to_map = 0;
last_map = map_until;
if (!ret && map_until != num)
goto again;
out:
for (i = last_map; i < num; i++) {
/* Don't zap current batch's valid persistent grants. */
if (i >= map_until)
pages[i]->persistent_gnt = NULL;
pages[i]->handle = BLKBACK_INVALID_HANDLE;
}
return ret;
}
static int xen_blkbk_map_seg(struct pending_req *pending_req)
{
int rc;
rc = xen_blkbk_map(pending_req->ring, pending_req->segments,
pending_req->nr_segs,
(pending_req->operation != BLKIF_OP_READ));
return rc;
}
static int xen_blkbk_parse_indirect(struct blkif_request *req,
struct pending_req *pending_req,
struct seg_buf seg[],
struct phys_req *preq)
{
struct grant_page **pages = pending_req->indirect_pages;
struct xen_blkif_ring *ring = pending_req->ring;
int indirect_grefs, rc, n, nseg, i;
struct blkif_request_segment *segments = NULL;
nseg = pending_req->nr_segs;
indirect_grefs = INDIRECT_PAGES(nseg);
BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
for (i = 0; i < indirect_grefs; i++)
pages[i]->gref = req->u.indirect.indirect_grefs[i];
rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
if (rc)
goto unmap;
for (n = 0; n < nseg; n++) {
uint8_t first_sect, last_sect;
if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
/* Map indirect segments */
if (segments)
kunmap_atomic(segments);
segments = kmap_atomic(pages[n / SEGS_PER_INDIRECT_FRAME]->page);
}
i = n % SEGS_PER_INDIRECT_FRAME;
pending_req->segments[n]->gref = segments[i].gref;
first_sect = READ_ONCE(segments[i].first_sect);
last_sect = READ_ONCE(segments[i].last_sect);
if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
rc = -EINVAL;
goto unmap;
}
seg[n].nsec = last_sect - first_sect + 1;
seg[n].offset = first_sect << 9;
preq->nr_sects += seg[n].nsec;
}
unmap:
if (segments)
kunmap_atomic(segments);
xen_blkbk_unmap(ring, pages, indirect_grefs);
return rc;
}
static int dispatch_discard_io(struct xen_blkif_ring *ring,
struct blkif_request *req)
{
int err = 0;
int status = BLKIF_RSP_OKAY;
struct xen_blkif *blkif = ring->blkif;
struct block_device *bdev = blkif->vbd.bdev;
struct phys_req preq;
xen_blkif_get(blkif);
preq.sector_number = req->u.discard.sector_number;
preq.nr_sects = req->u.discard.nr_sectors;
err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE);
if (err) {
pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
preq.sector_number,
preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
goto fail_response;
}
ring->st_ds_req++;
if (blkif->vbd.discard_secure &&
(req->u.discard.flag & BLKIF_DISCARD_SECURE))
err = blkdev_issue_secure_erase(bdev,
req->u.discard.sector_number,
req->u.discard.nr_sectors, GFP_KERNEL);
else
err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
req->u.discard.nr_sectors, GFP_KERNEL);
fail_response:
if (err == -EOPNOTSUPP) {
pr_debug("discard op failed, not supported\n");
status = BLKIF_RSP_EOPNOTSUPP;
} else if (err)
status = BLKIF_RSP_ERROR;
make_response(ring, req->u.discard.id, req->operation, status);
xen_blkif_put(blkif);
return err;
}
static int dispatch_other_io(struct xen_blkif_ring *ring,
struct blkif_request *req,
struct pending_req *pending_req)
{
free_req(ring, pending_req);
make_response(ring, req->u.other.id, req->operation,
BLKIF_RSP_EOPNOTSUPP);
return -EIO;
}
static void xen_blk_drain_io(struct xen_blkif_ring *ring)
{
struct xen_blkif *blkif = ring->blkif;
atomic_set(&blkif->drain, 1);
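/*
 * Poll roughly once per second until all in-flight bios on this
 * ring complete; the unmap/response path signals drain_complete
 * when 'inflight' drops to zero.
 */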
do {
if (atomic_read(&ring->inflight) == 0)
break;
wait_for_completion_interruptible_timeout(
&blkif->drain_complete, HZ);
if (!atomic_read(&blkif->drain))
break;
} while (!kthread_should_stop());
atomic_set(&blkif->drain, 0);
}
static void __end_block_io_op(struct pending_req *pending_req,
blk_status_t error)
{
/* An error fails the entire request. */
if (pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE &&
error == BLK_STS_NOTSUPP) {
pr_debug("flush diskcache op failed, not supported\n");
xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
pending_req->status = BLKIF_RSP_EOPNOTSUPP;
} else if (pending_req->operation == BLKIF_OP_WRITE_BARRIER &&
error == BLK_STS_NOTSUPP) {
pr_debug("write barrier op failed, not supported\n");
xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
pending_req->status = BLKIF_RSP_EOPNOTSUPP;
} else if (error) {
pr_debug("Buffer not up-to-date at end of operation,"
" error=%d\n", error);
pending_req->status = BLKIF_RSP_ERROR;
}
/*
* If all of the bio's have completed it is time to unmap
* the grant references associated with 'request' and provide
* the proper response on the ring.
*/
if (atomic_dec_and_test(&pending_req->pendcnt))
xen_blkbk_unmap_and_respond(pending_req);
}
/*
* bio callback.
*/
static void end_block_io_op(struct bio *bio)
{
__end_block_io_op(bio->bi_private, bio->bi_status);
bio_put(bio);
}
static void blkif_get_x86_32_req(struct blkif_request *dst,
const struct blkif_x86_32_request *src)
{
unsigned int i, n;
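/*
 * The source request lives in the shared ring, which the frontend
 * can rewrite concurrently, so snapshot multi-use fields with
 * READ_ONCE() before validating them.
 */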
dst->operation = READ_ONCE(src->operation);
switch (dst->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
case BLKIF_OP_FLUSH_DISKCACHE:
dst->u.rw.nr_segments = READ_ONCE(src->u.rw.nr_segments);
dst->u.rw.handle = src->u.rw.handle;
dst->u.rw.id = src->u.rw.id;
dst->u.rw.sector_number = src->u.rw.sector_number;
n = min_t(unsigned int, BLKIF_MAX_SEGMENTS_PER_REQUEST,
dst->u.rw.nr_segments);
for (i = 0; i < n; i++)
dst->u.rw.seg[i] = src->u.rw.seg[i];
break;
case BLKIF_OP_DISCARD:
dst->u.discard.flag = src->u.discard.flag;
dst->u.discard.id = src->u.discard.id;
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
case BLKIF_OP_INDIRECT:
dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
dst->u.indirect.nr_segments =
READ_ONCE(src->u.indirect.nr_segments);
dst->u.indirect.handle = src->u.indirect.handle;
dst->u.indirect.id = src->u.indirect.id;
dst->u.indirect.sector_number = src->u.indirect.sector_number;
n = min(MAX_INDIRECT_PAGES,
INDIRECT_PAGES(dst->u.indirect.nr_segments));
for (i = 0; i < n; i++)
dst->u.indirect.indirect_grefs[i] =
src->u.indirect.indirect_grefs[i];
break;
default:
/*
* Don't know how to translate this op. Only get the
* ID so failure can be reported to the frontend.
*/
dst->u.other.id = src->u.other.id;
break;
}
}
static void blkif_get_x86_64_req(struct blkif_request *dst,
const struct blkif_x86_64_request *src)
{
unsigned int i, n;
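/* Same shared-ring snapshotting rules as blkif_get_x86_32_req() above. */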
dst->operation = READ_ONCE(src->operation);
switch (dst->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
case BLKIF_OP_FLUSH_DISKCACHE:
dst->u.rw.nr_segments = READ_ONCE(src->u.rw.nr_segments);
dst->u.rw.handle = src->u.rw.handle;
dst->u.rw.id = src->u.rw.id;
dst->u.rw.sector_number = src->u.rw.sector_number;
n = min_t(unsigned int, BLKIF_MAX_SEGMENTS_PER_REQUEST,
dst->u.rw.nr_segments);
for (i = 0; i < n; i++)
dst->u.rw.seg[i] = src->u.rw.seg[i];
break;
case BLKIF_OP_DISCARD:
dst->u.discard.flag = src->u.discard.flag;
dst->u.discard.id = src->u.discard.id;
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
case BLKIF_OP_INDIRECT:
dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
dst->u.indirect.nr_segments =
READ_ONCE(src->u.indirect.nr_segments);
dst->u.indirect.handle = src->u.indirect.handle;
dst->u.indirect.id = src->u.indirect.id;
dst->u.indirect.sector_number = src->u.indirect.sector_number;
n = min(MAX_INDIRECT_PAGES,
INDIRECT_PAGES(dst->u.indirect.nr_segments));
for (i = 0; i < n; i++)
dst->u.indirect.indirect_grefs[i] =
src->u.indirect.indirect_grefs[i];
break;
default:
/*
* Don't know how to translate this op. Only get the
* ID so failure can be reported to the frontend.
*/
dst->u.other.id = src->u.other.id;
break;
}
}
/*
* Function to copy the 'struct blkif_request' from the ring buffer
* (which has the sectors we want, the number of them, grant references,
* etc.), and transmute it to the block API to hand it over to the proper
* block disk.
*/
static int
__do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
{
union blkif_back_rings *blk_rings = &ring->blk_rings;
struct blkif_request req;
struct pending_req *pending_req;
RING_IDX rc, rp;
int more_to_do = 0;
rc = blk_rings->common.req_cons;
rp = blk_rings->common.sring->req_prod;
rmb(); /* Ensure we see queued requests up to 'rp'. */
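/*
 * Guard against an untrusted frontend publishing a producer index
 * more than a ring size ahead of our consumer.
 */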
if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
rc = blk_rings->common.rsp_prod_pvt;
pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
rp, rc, rp - rc, ring->blkif->vbd.pdevice);
return -EACCES;
}
while (rc != rp) {
if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
break;
/* We've seen a request, so clear spurious eoi flag. */
*eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;
if (kthread_should_stop()) {
more_to_do = 1;
break;
}
pending_req = alloc_req(ring);
if (!pending_req) {
ring->st_oo_req++;
more_to_do = 1;
break;
}
switch (ring->blkif->blk_protocol) {
case BLKIF_PROTOCOL_NATIVE:
memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
break;
case BLKIF_PROTOCOL_X86_32:
blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
break;
case BLKIF_PROTOCOL_X86_64:
blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
break;
default:
BUG();
}
blk_rings->common.req_cons = ++rc; /* before make_response() */
/* Apply all sanity checks to /private copy/ of request. */
barrier();
switch (req.operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
case BLKIF_OP_FLUSH_DISKCACHE:
case BLKIF_OP_INDIRECT:
if (dispatch_rw_block_io(ring, &req, pending_req))
goto done;
break;
case BLKIF_OP_DISCARD:
free_req(ring, pending_req);
if (dispatch_discard_io(ring, &req))
goto done;
break;
default:
if (dispatch_other_io(ring, &req, pending_req))
goto done;
break;
}
/* Yield point for this unbounded loop. */
cond_resched();
}
done:
return more_to_do;
}
static int
do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
{
union blkif_back_rings *blk_rings = &ring->blk_rings;
int more_to_do;
do {
more_to_do = __do_block_io_op(ring, eoi_flags);
if (more_to_do)
break;
RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
} while (more_to_do);
return more_to_do;
}
/*
* Transmutation of the 'struct blkif_request' to a proper 'struct bio'
* and call the 'submit_bio' to pass it to the underlying storage.
*/
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
struct blkif_request *req,
struct pending_req *pending_req)
{
struct phys_req preq;
struct seg_buf *seg = pending_req->seg;
unsigned int nseg;
struct bio *bio = NULL;
struct bio **biolist = pending_req->biolist;
int i, nbio = 0;
enum req_op operation;
blk_opf_t operation_flags = 0;
struct blk_plug plug;
bool drain = false;
struct grant_page **pages = pending_req->segments;
unsigned short req_operation;
req_operation = req->operation == BLKIF_OP_INDIRECT ?
req->u.indirect.indirect_op : req->operation;
if ((req->operation == BLKIF_OP_INDIRECT) &&
(req_operation != BLKIF_OP_READ) &&
(req_operation != BLKIF_OP_WRITE)) {
pr_debug("Invalid indirect operation (%u)\n", req_operation);
goto fail_response;
}
switch (req_operation) {
case BLKIF_OP_READ:
ring->st_rd_req++;
operation = REQ_OP_READ;
break;
case BLKIF_OP_WRITE:
ring->st_wr_req++;
operation = REQ_OP_WRITE;
operation_flags = REQ_SYNC | REQ_IDLE;
break;
case BLKIF_OP_WRITE_BARRIER:
drain = true;
fallthrough;
case BLKIF_OP_FLUSH_DISKCACHE:
ring->st_f_req++;
operation = REQ_OP_WRITE;
operation_flags = REQ_PREFLUSH;
break;
default:
operation = 0; /* make gcc happy */
goto fail_response;
}
/* Check that the number of segments is sane. */
nseg = req->operation == BLKIF_OP_INDIRECT ?
req->u.indirect.nr_segments : req->u.rw.nr_segments;
if (unlikely(nseg == 0 && operation_flags != REQ_PREFLUSH) ||
unlikely((req->operation != BLKIF_OP_INDIRECT) &&
(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
unlikely((req->operation == BLKIF_OP_INDIRECT) &&
(nseg > MAX_INDIRECT_SEGMENTS))) {
pr_debug("Bad number of segments in request (%d)\n", nseg);
/* Haven't submitted any bio's yet. */
goto fail_response;
}
preq.nr_sects = 0;
pending_req->ring = ring;
pending_req->id = req->u.rw.id;
pending_req->operation = req_operation;
pending_req->status = BLKIF_RSP_OKAY;
pending_req->nr_segs = nseg;
if (req->operation != BLKIF_OP_INDIRECT) {
preq.dev = req->u.rw.handle;
preq.sector_number = req->u.rw.sector_number;
for (i = 0; i < nseg; i++) {
pages[i]->gref = req->u.rw.seg[i].gref;
seg[i].nsec = req->u.rw.seg[i].last_sect -
req->u.rw.seg[i].first_sect + 1;
seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
(req->u.rw.seg[i].last_sect <
req->u.rw.seg[i].first_sect))
goto fail_response;
preq.nr_sects += seg[i].nsec;
}
} else {
preq.dev = req->u.indirect.handle;
preq.sector_number = req->u.indirect.sector_number;
if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
goto fail_response;
}
if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
operation == REQ_OP_READ ? "read" : "write",
preq.sector_number,
preq.sector_number + preq.nr_sects,
ring->blkif->vbd.pdevice);
goto fail_response;
}
/*
* This check _MUST_ be done after xen_vbd_translate as the preq.bdev
* is set there.
*/
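/*
 * Both the starting sector and every segment length must be a
 * multiple of the device's logical block size (expressed in
 * 512-byte sector units), otherwise the I/O would be misaligned.
 */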
for (i = 0; i < nseg; i++) {
if (((int)preq.sector_number|(int)seg[i].nsec) &
((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
pr_debug("Misaligned I/O request from domain %d\n",
ring->blkif->domid);
goto fail_response;
}
}
/*
 * Wait on all outstanding I/Os; once they have completed,
 * issue the flush.
 */
if (drain)
xen_blk_drain_io(pending_req->ring);
/*
* If we have failed at this point, we need to undo the M2P override,
* set gnttab_set_unmap_op on all of the grant references and perform
* the hypercall to unmap the grants - that is all done in
* xen_blkbk_unmap.
*/
if (xen_blkbk_map_seg(pending_req))
goto fail_flush;
/*
* This corresponding xen_blkif_put is done in __end_block_io_op, or
* below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
*/
xen_blkif_get(ring->blkif);
atomic_inc(&ring->inflight);
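/*
 * Pack consecutive segments into the current bio until
 * bio_add_page() refuses, then start a new bio at the sector we
 * have advanced to.
 */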
for (i = 0; i < nseg; i++) {
while ((bio == NULL) ||
(bio_add_page(bio,
pages[i]->page,
seg[i].nsec << 9,
seg[i].offset) == 0)) {
bio = bio_alloc(preq.bdev, bio_max_segs(nseg - i),
operation | operation_flags,
GFP_KERNEL);
biolist[nbio++] = bio;
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
bio->bi_iter.bi_sector = preq.sector_number;
}
preq.sector_number += seg[i].nsec;
}
/* This will be hit if the operation was a flush or discard. */
if (!bio) {
BUG_ON(operation_flags != REQ_PREFLUSH);
bio = bio_alloc(preq.bdev, 0, operation | operation_flags,
GFP_KERNEL);
biolist[nbio++] = bio;
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
}
atomic_set(&pending_req->pendcnt, nbio);
blk_start_plug(&plug);
for (i = 0; i < nbio; i++)
submit_bio(biolist[i]);
/* Let the I/Os go.. */
blk_finish_plug(&plug);
if (operation == REQ_OP_READ)
ring->st_rd_sect += preq.nr_sects;
else if (operation == REQ_OP_WRITE)
ring->st_wr_sect += preq.nr_sects;
return 0;
fail_flush:
xen_blkbk_unmap(ring, pending_req->segments,
pending_req->nr_segs);
fail_response:
/* Haven't submitted any bio's yet. */
make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
free_req(ring, pending_req);
msleep(1); /* back off a bit */
return -EIO;
}
/*
* Put a response on the ring on how the operation fared.
*/
static void make_response(struct xen_blkif_ring *ring, u64 id,
unsigned short op, int st)
{
struct blkif_response *resp;
unsigned long flags;
union blkif_back_rings *blk_rings;
int notify;
spin_lock_irqsave(&ring->blk_ring_lock, flags);
blk_rings = &ring->blk_rings;
/* Place on the response ring for the relevant domain. */
switch (ring->blkif->blk_protocol) {
case BLKIF_PROTOCOL_NATIVE:
resp = RING_GET_RESPONSE(&blk_rings->native,
blk_rings->native.rsp_prod_pvt);
break;
case BLKIF_PROTOCOL_X86_32:
resp = RING_GET_RESPONSE(&blk_rings->x86_32,
blk_rings->x86_32.rsp_prod_pvt);
break;
case BLKIF_PROTOCOL_X86_64:
resp = RING_GET_RESPONSE(&blk_rings->x86_64,
blk_rings->x86_64.rsp_prod_pvt);
break;
default:
BUG();
}
resp->id = id;
resp->operation = op;
resp->status = st;
blk_rings->common.rsp_prod_pvt++;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
if (notify)
notify_remote_via_irq(ring->irq);
}
static int __init xen_blkif_init(void)
{
int rc = 0;
if (!xen_domain())
return -ENODEV;
if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
}
if (xenblk_max_queues == 0)
xenblk_max_queues = num_online_cpus();
rc = xen_blkif_interface_init();
if (rc)
goto failed_init;
rc = xen_blkif_xenbus_init();
if (rc)
goto failed_interface;
return 0;
failed_interface:
xen_blkif_interface_fini();
failed_init:
return rc;
}
module_init(xen_blkif_init);
static void __exit xen_blkif_fini(void)
{
xen_blkif_xenbus_fini();
xen_blkif_interface_fini();
}
module_exit(xen_blkif_fini);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");
| linux-master | drivers/block/xen-blkback/blkback.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* RDMA Network Block Driver
*
* Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
* Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
* Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
*/
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/parser.h>
#include <linux/module.h>
#include <linux/in6.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <rdma/ib.h>
#include <rdma/rdma_cm.h>
#include "rnbd-clt.h"
static struct device *rnbd_dev;
static const struct class rnbd_dev_class = {
.name = "rnbd-client",
};
static struct kobject *rnbd_devs_kobj;
enum {
RNBD_OPT_ERR = 0,
RNBD_OPT_DEST_PORT = 1 << 0,
RNBD_OPT_PATH = 1 << 1,
RNBD_OPT_DEV_PATH = 1 << 2,
RNBD_OPT_ACCESS_MODE = 1 << 3,
RNBD_OPT_SESSNAME = 1 << 6,
RNBD_OPT_NR_POLL_QUEUES = 1 << 7,
};
static const unsigned int rnbd_opt_mandatory[] = {
RNBD_OPT_DEV_PATH,
RNBD_OPT_SESSNAME,
};
static const match_table_t rnbd_opt_tokens = {
{RNBD_OPT_PATH, "path=%s" },
{RNBD_OPT_DEV_PATH, "device_path=%s" },
{RNBD_OPT_DEST_PORT, "dest_port=%d" },
{RNBD_OPT_ACCESS_MODE, "access_mode=%s" },
{RNBD_OPT_SESSNAME, "sessname=%s" },
{RNBD_OPT_NR_POLL_QUEUES, "nr_poll_queues=%d" },
{RNBD_OPT_ERR, NULL },
};
struct rnbd_map_options {
char *sessname;
struct rtrs_addr *paths;
size_t *path_cnt;
char *pathname;
u16 *dest_port;
enum rnbd_access_mode *access_mode;
u32 *nr_poll_queues;
};
static int rnbd_clt_parse_map_options(const char *buf, size_t max_path_cnt,
struct rnbd_map_options *opt)
{
char *options, *sep_opt;
char *p;
substring_t args[MAX_OPT_ARGS];
int opt_mask = 0;
int token;
int ret = -EINVAL;
int nr_poll_queues = 0;
int dest_port = 0;
int p_cnt = 0;
int i;
options = kstrdup(buf, GFP_KERNEL);
if (!options)
return -ENOMEM;
sep_opt = strstrip(options);
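/*
 * The input is a space-separated key=value list, e.g. (values
 * illustrative):
 *   sessname=foo path=ip:192.168.122.1 device_path=/dev/nullb0
 */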
while ((p = strsep(&sep_opt, " ")) != NULL) {
if (!*p)
continue;
token = match_token(p, rnbd_opt_tokens, args);
opt_mask |= token;
switch (token) {
case RNBD_OPT_SESSNAME:
p = match_strdup(args);
if (!p) {
ret = -ENOMEM;
goto out;
}
if (strlen(p) > NAME_MAX) {
pr_err("map_device: sessname too long\n");
ret = -EINVAL;
kfree(p);
goto out;
}
strscpy(opt->sessname, p, NAME_MAX);
kfree(p);
break;
case RNBD_OPT_PATH:
if (p_cnt >= max_path_cnt) {
pr_err("map_device: too many (> %zu) paths provided\n",
max_path_cnt);
ret = -ENOMEM;
goto out;
}
p = match_strdup(args);
if (!p) {
ret = -ENOMEM;
goto out;
}
ret = rtrs_addr_to_sockaddr(p, strlen(p),
*opt->dest_port,
&opt->paths[p_cnt]);
if (ret) {
pr_err("Can't parse path %s: %d\n", p, ret);
kfree(p);
goto out;
}
p_cnt++;
kfree(p);
break;
case RNBD_OPT_DEV_PATH:
p = match_strdup(args);
if (!p) {
ret = -ENOMEM;
goto out;
}
if (strlen(p) > NAME_MAX) {
pr_err("map_device: Device path too long\n");
ret = -EINVAL;
kfree(p);
goto out;
}
strscpy(opt->pathname, p, NAME_MAX);
kfree(p);
break;
case RNBD_OPT_DEST_PORT:
if (match_int(args, &dest_port) || dest_port < 0 ||
dest_port > 65535) {
pr_err("bad destination port number parameter '%d'\n",
dest_port);
ret = -EINVAL;
goto out;
}
*opt->dest_port = dest_port;
break;
case RNBD_OPT_ACCESS_MODE:
p = match_strdup(args);
if (!p) {
ret = -ENOMEM;
goto out;
}
if (!strcmp(p, "ro")) {
*opt->access_mode = RNBD_ACCESS_RO;
} else if (!strcmp(p, "rw")) {
*opt->access_mode = RNBD_ACCESS_RW;
} else if (!strcmp(p, "migration")) {
*opt->access_mode = RNBD_ACCESS_MIGRATION;
} else {
pr_err("map_device: Invalid access_mode: '%s'\n",
p);
ret = -EINVAL;
kfree(p);
goto out;
}
kfree(p);
break;
case RNBD_OPT_NR_POLL_QUEUES:
if (match_int(args, &nr_poll_queues) || nr_poll_queues < -1 ||
nr_poll_queues > (int)nr_cpu_ids) {
pr_err("bad nr_poll_queues parameter '%d'\n",
nr_poll_queues);
ret = -EINVAL;
goto out;
}
if (nr_poll_queues == -1)
nr_poll_queues = nr_cpu_ids;
*opt->nr_poll_queues = nr_poll_queues;
break;
default:
pr_err("map_device: Unknown parameter or missing value '%s'\n",
p);
ret = -EINVAL;
goto out;
}
}
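/*
 * opt_mask accumulated one bit per parsed token; verify that every
 * mandatory option was supplied.
 */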
for (i = 0; i < ARRAY_SIZE(rnbd_opt_mandatory); i++) {
if ((opt_mask & rnbd_opt_mandatory[i])) {
ret = 0;
} else {
pr_err("map_device: Parameters missing\n");
ret = -EINVAL;
break;
}
}
out:
*opt->path_cnt = p_cnt;
kfree(options);
return ret;
}
static ssize_t state_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
struct rnbd_clt_dev *dev;
dev = container_of(kobj, struct rnbd_clt_dev, kobj);
switch (dev->dev_state) {
case DEV_STATE_INIT:
return sysfs_emit(page, "init\n");
case DEV_STATE_MAPPED:
/* TODO fix cli tool before changing to proper state */
return sysfs_emit(page, "open\n");
case DEV_STATE_MAPPED_DISCONNECTED:
/* TODO fix cli tool before changing to proper state */
return sysfs_emit(page, "closed\n");
case DEV_STATE_UNMAPPED:
return sysfs_emit(page, "unmapped\n");
default:
return sysfs_emit(page, "unknown\n");
}
}
static struct kobj_attribute rnbd_clt_state_attr = __ATTR_RO(state);
static ssize_t nr_poll_queues_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
struct rnbd_clt_dev *dev;
dev = container_of(kobj, struct rnbd_clt_dev, kobj);
return sysfs_emit(page, "%d\n", dev->nr_poll_queues);
}
static struct kobj_attribute rnbd_clt_nr_poll_queues =
__ATTR_RO(nr_poll_queues);
static ssize_t mapping_path_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
struct rnbd_clt_dev *dev;
dev = container_of(kobj, struct rnbd_clt_dev, kobj);
return sysfs_emit(page, "%s\n", dev->pathname);
}
static struct kobj_attribute rnbd_clt_mapping_path_attr =
__ATTR_RO(mapping_path);
static ssize_t access_mode_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
struct rnbd_clt_dev *dev;
dev = container_of(kobj, struct rnbd_clt_dev, kobj);
return sysfs_emit(page, "%s\n", rnbd_access_modes[dev->access_mode].str);
}
static struct kobj_attribute rnbd_clt_access_mode =
__ATTR_RO(access_mode);
static ssize_t rnbd_clt_unmap_dev_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sysfs_emit(page, "Usage: echo <normal|force> > %s\n",
attr->attr.name);
}
static ssize_t rnbd_clt_unmap_dev_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct rnbd_clt_dev *dev;
char *opt, *options;
bool force;
int err;
opt = kstrdup(buf, GFP_KERNEL);
if (!opt)
return -ENOMEM;
options = strstrip(opt);
dev = container_of(kobj, struct rnbd_clt_dev, kobj);
if (sysfs_streq(options, "normal")) {
force = false;
} else if (sysfs_streq(options, "force")) {
force = true;
} else {
rnbd_clt_err(dev,
"unmap_device: Invalid value: %s\n",
options);
err = -EINVAL;
goto out;
}
rnbd_clt_info(dev, "Unmapping device, option: %s.\n",
force ? "force" : "normal");
/*
* We take explicit module reference only for one reason: do not
* race with lockless rnbd_destroy_sessions().
*/
if (!try_module_get(THIS_MODULE)) {
err = -ENODEV;
goto out;
}
err = rnbd_clt_unmap_device(dev, force, &attr->attr);
if (err) {
if (err != -EALREADY)
rnbd_clt_err(dev, "unmap_device: %d\n", err);
goto module_put;
}
/*
* Here device can be vanished!
*/
err = count;
module_put:
module_put(THIS_MODULE);
out:
kfree(opt);
return err;
}
static struct kobj_attribute rnbd_clt_unmap_device_attr =
__ATTR(unmap_device, 0644, rnbd_clt_unmap_dev_show,
rnbd_clt_unmap_dev_store);
static ssize_t rnbd_clt_resize_dev_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
return sysfs_emit(page, "Usage: echo <new size in sectors> > %s\n",
attr->attr.name);
}
static ssize_t rnbd_clt_resize_dev_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
int ret;
unsigned long sectors;
struct rnbd_clt_dev *dev;
dev = container_of(kobj, struct rnbd_clt_dev, kobj);
ret = kstrtoul(buf, 0, &sectors);
if (ret)
return ret;
ret = rnbd_clt_resize_disk(dev, sectors);
if (ret)
return ret;
return count;
}
static struct kobj_attribute rnbd_clt_resize_dev_attr =
__ATTR(resize, 0644, rnbd_clt_resize_dev_show,
rnbd_clt_resize_dev_store);
static ssize_t rnbd_clt_remap_dev_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sysfs_emit(page, "Usage: echo <1> > %s\n", attr->attr.name);
}
static ssize_t rnbd_clt_remap_dev_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct rnbd_clt_dev *dev;
char *opt, *options;
int err;
opt = kstrdup(buf, GFP_KERNEL);
if (!opt)
return -ENOMEM;
options = strstrip(opt);
dev = container_of(kobj, struct rnbd_clt_dev, kobj);
if (!sysfs_streq(options, "1")) {
rnbd_clt_err(dev,
"remap_device: Invalid value: %s\n",
options);
err = -EINVAL;
goto out;
}
err = rnbd_clt_remap_device(dev);
if (likely(!err))
err = count;
out:
kfree(opt);
return err;
}
static struct kobj_attribute rnbd_clt_remap_device_attr =
__ATTR(remap_device, 0644, rnbd_clt_remap_dev_show,
rnbd_clt_remap_dev_store);
static ssize_t session_show(struct kobject *kobj, struct kobj_attribute *attr,
char *page)
{
struct rnbd_clt_dev *dev;
dev = container_of(kobj, struct rnbd_clt_dev, kobj);
return sysfs_emit(page, "%s\n", dev->sess->sessname);
}
static struct kobj_attribute rnbd_clt_session_attr =
__ATTR_RO(session);
static struct attribute *rnbd_dev_attrs[] = {
&rnbd_clt_unmap_device_attr.attr,
&rnbd_clt_resize_dev_attr.attr,
&rnbd_clt_remap_device_attr.attr,
&rnbd_clt_mapping_path_attr.attr,
&rnbd_clt_state_attr.attr,
&rnbd_clt_session_attr.attr,
&rnbd_clt_access_mode.attr,
&rnbd_clt_nr_poll_queues.attr,
NULL,
};
ATTRIBUTE_GROUPS(rnbd_dev);
void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
{
/*
* The module-unload path rnbd_client_exit() races with manual
* unmapping of the last device via sysfs,
* i.e. rnbd_clt_unmap_dev_store(), which triggers a sysfs warning
* because the sysfs link has already been removed.
*/
if (dev->blk_symlink_name) {
if (try_module_get(THIS_MODULE)) {
sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
module_put(THIS_MODULE);
}
/* It should be freed always. */
kfree(dev->blk_symlink_name);
dev->blk_symlink_name = NULL;
}
}
static struct kobj_type rnbd_dev_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = rnbd_dev_groups,
};
static int rnbd_clt_add_dev_kobj(struct rnbd_clt_dev *dev)
{
int ret;
struct kobject *gd_kobj = &disk_to_dev(dev->gd)->kobj;
ret = kobject_init_and_add(&dev->kobj, &rnbd_dev_ktype, gd_kobj, "%s",
"rnbd");
if (ret) {
rnbd_clt_err(dev, "Failed to create device sysfs dir, err: %d\n",
ret);
kobject_put(&dev->kobj);
}
kobject_uevent(gd_kobj, KOBJ_ONLINE);
return ret;
}
static ssize_t rnbd_clt_map_device_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
return sysfs_emit(page,
"Usage: echo \"[dest_port=server port number] sessname=<name of the rtrs session> path=<[srcaddr@]dstaddr> [path=<[srcaddr@]dstaddr>] device_path=<full path on remote side> [access_mode=<ro|rw|migration>] [nr_poll_queues=<number of queues>]\" > %s\n\naddr ::= [ ip:<ipv4> | ip:<ipv6> | gid:<gid> ]\n",
attr->attr.name);
}
static int rnbd_clt_get_path_name(struct rnbd_clt_dev *dev, char *buf,
size_t len)
{
int ret;
char pathname[NAME_MAX], *s;
strscpy(pathname, dev->pathname, sizeof(pathname));
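/*
 * sysfs entry names must not contain '/'; follow the block layer's
 * convention of replacing them with '!'.
 */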
while ((s = strchr(pathname, '/')))
s[0] = '!';
ret = snprintf(buf, len, "%s@%s", pathname, dev->sess->sessname);
if (ret >= len)
return -ENAMETOOLONG;
return 0;
}
static int rnbd_clt_add_dev_symlink(struct rnbd_clt_dev *dev)
{
struct kobject *gd_kobj = &disk_to_dev(dev->gd)->kobj;
int ret, len;
len = strlen(dev->pathname) + strlen(dev->sess->sessname) + 2;
dev->blk_symlink_name = kzalloc(len, GFP_KERNEL);
if (!dev->blk_symlink_name) {
rnbd_clt_err(dev, "Failed to allocate memory for blk_symlink_name\n");
return -ENOMEM;
}
ret = rnbd_clt_get_path_name(dev, dev->blk_symlink_name,
len);
if (ret) {
rnbd_clt_err(dev, "Failed to get /sys/block symlink path, err: %d\n",
ret);
goto out_err;
}
ret = sysfs_create_link(rnbd_devs_kobj, gd_kobj,
dev->blk_symlink_name);
if (ret) {
rnbd_clt_err(dev, "Creating /sys/block symlink failed, err: %d\n",
ret);
goto out_err;
}
return 0;
out_err:
kfree(dev->blk_symlink_name);
dev->blk_symlink_name = NULL;
return ret;
}
static ssize_t rnbd_clt_map_device_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct rnbd_clt_dev *dev;
struct rnbd_map_options opt;
int ret;
char pathname[NAME_MAX];
char sessname[NAME_MAX];
enum rnbd_access_mode access_mode = RNBD_ACCESS_RW;
u16 port_nr = RTRS_PORT;
u32 nr_poll_queues = 0;
struct sockaddr_storage *addrs;
struct rtrs_addr paths[6];
size_t path_cnt;
opt.sessname = sessname;
opt.paths = paths;
opt.path_cnt = &path_cnt;
opt.pathname = pathname;
opt.dest_port = &port_nr;
opt.access_mode = &access_mode;
opt.nr_poll_queues = &nr_poll_queues;
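/* Each rtrs_addr needs a src and a dst sockaddr, hence two per path. */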
addrs = kcalloc(ARRAY_SIZE(paths) * 2, sizeof(*addrs), GFP_KERNEL);
if (!addrs)
return -ENOMEM;
for (path_cnt = 0; path_cnt < ARRAY_SIZE(paths); path_cnt++) {
paths[path_cnt].src = &addrs[path_cnt * 2];
paths[path_cnt].dst = &addrs[path_cnt * 2 + 1];
}
ret = rnbd_clt_parse_map_options(buf, ARRAY_SIZE(paths), &opt);
if (ret)
goto out;
pr_info("Mapping device %s on session %s, (access_mode: %s, nr_poll_queues: %d)\n",
pathname, sessname,
rnbd_access_modes[access_mode].str,
nr_poll_queues);
dev = rnbd_clt_map_device(sessname, paths, path_cnt, port_nr, pathname,
access_mode, nr_poll_queues);
if (IS_ERR(dev)) {
ret = PTR_ERR(dev);
goto out;
}
ret = rnbd_clt_add_dev_kobj(dev);
if (ret)
goto unmap_dev;
ret = rnbd_clt_add_dev_symlink(dev);
if (ret)
goto unmap_dev;
kfree(addrs);
return count;
unmap_dev:
rnbd_clt_unmap_device(dev, true, NULL);
out:
kfree(addrs);
return ret;
}
static struct kobj_attribute rnbd_clt_map_device_attr =
__ATTR(map_device, 0644,
rnbd_clt_map_device_show, rnbd_clt_map_device_store);
static struct attribute *default_attrs[] = {
&rnbd_clt_map_device_attr.attr,
NULL,
};
static struct attribute_group default_attr_group = {
.attrs = default_attrs,
};
static const struct attribute_group *default_attr_groups[] = {
&default_attr_group,
NULL,
};
int rnbd_clt_create_sysfs_files(void)
{
int err;
err = class_register(&rnbd_dev_class);
if (err)
return err;
rnbd_dev = device_create_with_groups(&rnbd_dev_class, NULL,
MKDEV(0, 0), NULL,
default_attr_groups, "ctl");
if (IS_ERR(rnbd_dev)) {
err = PTR_ERR(rnbd_dev);
goto cls_destroy;
}
rnbd_devs_kobj = kobject_create_and_add("devices", &rnbd_dev->kobj);
if (!rnbd_devs_kobj) {
err = -ENOMEM;
goto dev_destroy;
}
return 0;
dev_destroy:
device_destroy(&rnbd_dev_class, MKDEV(0, 0));
cls_destroy:
class_unregister(&rnbd_dev_class);
return err;
}
void rnbd_clt_destroy_sysfs_files(void)
{
sysfs_remove_group(&rnbd_dev->kobj, &default_attr_group);
kobject_del(rnbd_devs_kobj);
kobject_put(rnbd_devs_kobj);
device_destroy(&rnbd_dev_class, MKDEV(0, 0));
class_unregister(&rnbd_dev_class);
}
| linux-master | drivers/block/rnbd/rnbd-clt-sysfs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* RDMA Network Block Driver
*
* Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
* Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
* Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
*/
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
#include <linux/module.h>
#include <linux/blkdev.h>
#include "rnbd-srv.h"
#include "rnbd-srv-trace.h"
MODULE_DESCRIPTION("RDMA Network Block Device Server");
MODULE_LICENSE("GPL");
static u16 port_nr = RTRS_PORT;
module_param_named(port_nr, port_nr, ushort, 0444);
MODULE_PARM_DESC(port_nr,
"The port number the server is listening on (default: "
__stringify(RTRS_PORT)")");
#define DEFAULT_DEV_SEARCH_PATH "/"
static char dev_search_path[PATH_MAX] = DEFAULT_DEV_SEARCH_PATH;
static int dev_search_path_set(const char *val, const struct kernel_param *kp)
{
const char *p = strrchr(val, '\n') ? : val + strlen(val);
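/*
 * p marks the last newline (typically the trailing one appended by
 * echo(1)), or the end of the string; only the characters before it
 * are copied into dev_search_path.
 */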
if (strlen(val) >= sizeof(dev_search_path))
return -EINVAL;
snprintf(dev_search_path, sizeof(dev_search_path), "%.*s",
(int)(p - val), val);
pr_info("dev_search_path changed to '%s'\n", dev_search_path);
return 0;
}
static struct kparam_string dev_search_path_kparam_str = {
.maxlen = sizeof(dev_search_path),
.string = dev_search_path
};
static const struct kernel_param_ops dev_search_path_ops = {
.set = dev_search_path_set,
.get = param_get_string,
};
module_param_cb(dev_search_path, &dev_search_path_ops,
&dev_search_path_kparam_str, 0444);
MODULE_PARM_DESC(dev_search_path,
"Sets the dev_search_path. When a device is mapped this path is prepended to the device path from the map device operation. If %SESSNAME% is specified in a path, then device will be searched in a session namespace. (default: "
DEFAULT_DEV_SEARCH_PATH ")");
static DEFINE_MUTEX(sess_lock);
static DEFINE_SPINLOCK(dev_lock);
static LIST_HEAD(sess_list);
static LIST_HEAD(dev_list);
struct rnbd_io_private {
struct rtrs_srv_op *id;
struct rnbd_srv_sess_dev *sess_dev;
};
static void rnbd_sess_dev_release(struct kref *kref)
{
struct rnbd_srv_sess_dev *sess_dev;
sess_dev = container_of(kref, struct rnbd_srv_sess_dev, kref);
complete(sess_dev->destroy_comp);
}
static inline void rnbd_put_sess_dev(struct rnbd_srv_sess_dev *sess_dev)
{
kref_put(&sess_dev->kref, rnbd_sess_dev_release);
}
static struct rnbd_srv_sess_dev *
rnbd_get_sess_dev(int dev_id, struct rnbd_srv_session *srv_sess)
{
struct rnbd_srv_sess_dev *sess_dev;
int ret = 0;
rcu_read_lock();
sess_dev = xa_load(&srv_sess->index_idr, dev_id);
if (sess_dev)
ret = kref_get_unless_zero(&sess_dev->kref);
rcu_read_unlock();
if (!ret)
return ERR_PTR(-ENXIO);
return sess_dev;
}
static void rnbd_dev_bi_end_io(struct bio *bio)
{
struct rnbd_io_private *rnbd_priv = bio->bi_private;
struct rnbd_srv_sess_dev *sess_dev = rnbd_priv->sess_dev;
rnbd_put_sess_dev(sess_dev);
rtrs_srv_resp_rdma(rnbd_priv->id, blk_status_to_errno(bio->bi_status));
kfree(rnbd_priv);
bio_put(bio);
}
static int process_rdma(struct rnbd_srv_session *srv_sess,
struct rtrs_srv_op *id, void *data, u32 datalen,
const void *usr, size_t usrlen)
{
const struct rnbd_msg_io *msg = usr;
struct rnbd_io_private *priv;
struct rnbd_srv_sess_dev *sess_dev;
u32 dev_id;
int err;
struct bio *bio;
short prio;
trace_process_rdma(srv_sess, msg, id, datalen, usrlen);
priv = kmalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
dev_id = le32_to_cpu(msg->device_id);
sess_dev = rnbd_get_sess_dev(dev_id, srv_sess);
if (IS_ERR(sess_dev)) {
pr_err_ratelimited("Got I/O request on session %s for unknown device id %d\n",
srv_sess->sessname, dev_id);
err = -ENOTCONN;
goto err;
}
priv->sess_dev = sess_dev;
priv->id = id;
bio = bio_alloc(sess_dev->bdev, 1,
rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL);
if (bio_add_page(bio, virt_to_page(data), datalen,
offset_in_page(data)) != datalen) {
rnbd_srv_err(sess_dev, "Failed to map data to bio\n");
err = -EINVAL;
goto bio_put;
}
bio->bi_end_io = rnbd_dev_bi_end_io;
bio->bi_private = priv;
bio->bi_iter.bi_sector = le64_to_cpu(msg->sector);
bio->bi_iter.bi_size = le32_to_cpu(msg->bi_size);
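/*
 * Older clients (protocol < RNBD_PROTO_VER_MAJOR) and short
 * messages carry no prio field; default those to 0.
 */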
prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
usrlen < sizeof(*msg) ? 0 : le16_to_cpu(msg->prio);
bio_set_prio(bio, prio);
submit_bio(bio);
return 0;
bio_put:
bio_put(bio);
rnbd_put_sess_dev(sess_dev);
err:
kfree(priv);
return err;
}
static void destroy_device(struct kref *kref)
{
struct rnbd_srv_dev *dev = container_of(kref, struct rnbd_srv_dev, kref);
WARN_ONCE(!list_empty(&dev->sess_dev_list),
"Device %s is being destroyed but still in use!\n",
dev->name);
spin_lock(&dev_lock);
list_del(&dev->list);
spin_unlock(&dev_lock);
mutex_destroy(&dev->lock);
if (dev->dev_kobj.state_in_sysfs)
/*
* Destroy kobj only if it was really created.
*/
rnbd_srv_destroy_dev_sysfs(dev);
else
kfree(dev);
}
static void rnbd_put_srv_dev(struct rnbd_srv_dev *dev)
{
kref_put(&dev->kref, destroy_device);
}
void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev, bool keep_id)
{
DECLARE_COMPLETION_ONSTACK(dc);
if (keep_id)
/*
 * Free the resources for the id, but don't allow the id
 * itself to be re-used, because the client is still using it.
 */
xa_cmpxchg(&sess_dev->sess->index_idr, sess_dev->device_id,
sess_dev, NULL, 0);
else
xa_erase(&sess_dev->sess->index_idr, sess_dev->device_id);
synchronize_rcu();
sess_dev->destroy_comp = &dc;
rnbd_put_sess_dev(sess_dev);
wait_for_completion(&dc); /* wait for inflights to drop to zero */
blkdev_put(sess_dev->bdev, NULL);
mutex_lock(&sess_dev->dev->lock);
list_del(&sess_dev->dev_list);
if (!sess_dev->readonly)
sess_dev->dev->open_write_cnt--;
mutex_unlock(&sess_dev->dev->lock);
rnbd_put_srv_dev(sess_dev->dev);
rnbd_srv_info(sess_dev, "Device closed\n");
kfree(sess_dev);
}
static void destroy_sess(struct rnbd_srv_session *srv_sess)
{
struct rnbd_srv_sess_dev *sess_dev;
unsigned long index;
if (xa_empty(&srv_sess->index_idr))
goto out;
trace_destroy_sess(srv_sess);
mutex_lock(&srv_sess->lock);
xa_for_each(&srv_sess->index_idr, index, sess_dev)
rnbd_srv_destroy_dev_session_sysfs(sess_dev);
mutex_unlock(&srv_sess->lock);
out:
xa_destroy(&srv_sess->index_idr);
pr_info("RTRS Session %s disconnected\n", srv_sess->sessname);
mutex_lock(&sess_lock);
list_del(&srv_sess->list);
mutex_unlock(&sess_lock);
mutex_destroy(&srv_sess->lock);
kfree(srv_sess);
}
static int create_sess(struct rtrs_srv_sess *rtrs)
{
struct rnbd_srv_session *srv_sess;
char pathname[NAME_MAX];
int err;
err = rtrs_srv_get_path_name(rtrs, pathname, sizeof(pathname));
if (err) {
pr_err("rtrs_srv_get_path_name(%s): %d\n", pathname, err);
return err;
}
srv_sess = kzalloc(sizeof(*srv_sess), GFP_KERNEL);
if (!srv_sess)
return -ENOMEM;
srv_sess->queue_depth = rtrs_srv_get_queue_depth(rtrs);
xa_init_flags(&srv_sess->index_idr, XA_FLAGS_ALLOC);
mutex_init(&srv_sess->lock);
mutex_lock(&sess_lock);
list_add(&srv_sess->list, &sess_list);
mutex_unlock(&sess_lock);
srv_sess->rtrs = rtrs;
strscpy(srv_sess->sessname, pathname, sizeof(srv_sess->sessname));
rtrs_srv_set_sess_priv(rtrs, srv_sess);
trace_create_sess(srv_sess);
return 0;
}
static int rnbd_srv_link_ev(struct rtrs_srv_sess *rtrs,
enum rtrs_srv_link_ev ev, void *priv)
{
struct rnbd_srv_session *srv_sess = priv;
switch (ev) {
case RTRS_SRV_LINK_EV_CONNECTED:
return create_sess(rtrs);
case RTRS_SRV_LINK_EV_DISCONNECTED:
if (WARN_ON_ONCE(!srv_sess))
return -EINVAL;
destroy_sess(srv_sess);
return 0;
default:
pr_warn("Received unknown RTRS session event %d from session %s\n",
ev, srv_sess->sessname);
return -EINVAL;
}
}
void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev,
struct kobj_attribute *attr)
{
struct rnbd_srv_session *sess = sess_dev->sess;
/* It is already started to close by client's close message. */
if (!mutex_trylock(&sess->lock))
return;
sess_dev->keep_id = true;
/* first remove sysfs itself to avoid deadlock */
sysfs_remove_file_self(&sess_dev->kobj, &attr->attr);
rnbd_srv_destroy_dev_session_sysfs(sess_dev);
mutex_unlock(&sess->lock);
}
static void process_msg_close(struct rnbd_srv_session *srv_sess,
void *data, size_t datalen, const void *usr,
size_t usrlen)
{
const struct rnbd_msg_close *close_msg = usr;
struct rnbd_srv_sess_dev *sess_dev;
trace_process_msg_close(srv_sess, close_msg);
sess_dev = rnbd_get_sess_dev(le32_to_cpu(close_msg->device_id),
srv_sess);
if (IS_ERR(sess_dev))
return;
rnbd_put_sess_dev(sess_dev);
mutex_lock(&srv_sess->lock);
rnbd_srv_destroy_dev_session_sysfs(sess_dev);
mutex_unlock(&srv_sess->lock);
}
static int process_msg_open(struct rnbd_srv_session *srv_sess,
const void *msg, size_t len,
void *data, size_t datalen);
static void process_msg_sess_info(struct rnbd_srv_session *srv_sess,
const void *msg, size_t len,
void *data, size_t datalen);
static int rnbd_srv_rdma_ev(void *priv, struct rtrs_srv_op *id,
void *data, size_t datalen,
const void *usr, size_t usrlen)
{
struct rnbd_srv_session *srv_sess = priv;
const struct rnbd_msg_hdr *hdr = usr;
int ret = 0;
u16 type;
if (WARN_ON_ONCE(!srv_sess))
return -ENODEV;
type = le16_to_cpu(hdr->type);
switch (type) {
case RNBD_MSG_IO:
return process_rdma(srv_sess, id, data, datalen, usr, usrlen);
case RNBD_MSG_CLOSE:
process_msg_close(srv_sess, data, datalen, usr, usrlen);
break;
case RNBD_MSG_OPEN:
ret = process_msg_open(srv_sess, usr, usrlen, data, datalen);
break;
case RNBD_MSG_SESS_INFO:
process_msg_sess_info(srv_sess, usr, usrlen, data, datalen);
break;
default:
pr_warn("Received unexpected message type %d from session %s\n",
type, srv_sess->sessname);
return -EINVAL;
}
/*
* Since ret is passed to rtrs to handle the failure case, we
* just return 0 at the end otherwise callers in rtrs would call
* send_io_resp_imm again to print redundant err message.
*/
rtrs_srv_resp_rdma(id, ret);
return 0;
}
static struct rnbd_srv_sess_dev
*rnbd_sess_dev_alloc(struct rnbd_srv_session *srv_sess)
{
struct rnbd_srv_sess_dev *sess_dev;
int error;
sess_dev = kzalloc(sizeof(*sess_dev), GFP_KERNEL);
if (!sess_dev)
return ERR_PTR(-ENOMEM);
error = xa_alloc(&srv_sess->index_idr, &sess_dev->device_id, sess_dev,
xa_limit_32b, GFP_NOWAIT);
if (error < 0) {
pr_warn("Allocating idr failed, err: %d\n", error);
kfree(sess_dev);
return ERR_PTR(error);
}
return sess_dev;
}
static struct rnbd_srv_dev *rnbd_srv_init_srv_dev(struct block_device *bdev)
{
struct rnbd_srv_dev *dev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return ERR_PTR(-ENOMEM);
snprintf(dev->name, sizeof(dev->name), "%pg", bdev);
kref_init(&dev->kref);
INIT_LIST_HEAD(&dev->sess_dev_list);
mutex_init(&dev->lock);
return dev;
}
static struct rnbd_srv_dev *
rnbd_srv_find_or_add_srv_dev(struct rnbd_srv_dev *new_dev)
{
struct rnbd_srv_dev *dev;
spin_lock(&dev_lock);
list_for_each_entry(dev, &dev_list, list) {
if (!strncmp(dev->name, new_dev->name, sizeof(dev->name))) {
if (!kref_get_unless_zero(&dev->kref))
/*
* We lost the race, device is almost dead.
* Continue traversing to find a valid one.
*/
continue;
spin_unlock(&dev_lock);
return dev;
}
}
list_add(&new_dev->list, &dev_list);
spin_unlock(&dev_lock);
return new_dev;
}
static int rnbd_srv_check_update_open_perm(struct rnbd_srv_dev *srv_dev,
struct rnbd_srv_session *srv_sess,
enum rnbd_access_mode access_mode)
{
int ret = 0;
mutex_lock(&srv_dev->lock);
switch (access_mode) {
case RNBD_ACCESS_RO:
break;
case RNBD_ACCESS_RW:
if (srv_dev->open_write_cnt == 0) {
srv_dev->open_write_cnt++;
} else {
pr_err("Mapping device '%s' for session %s with RW permissions failed. Device already opened as 'RW' by %d client(s), access mode %s.\n",
srv_dev->name, srv_sess->sessname,
srv_dev->open_write_cnt,
rnbd_access_modes[access_mode].str);
ret = -EPERM;
}
break;
case RNBD_ACCESS_MIGRATION:
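/*
 * Migration mode tolerates a second writer, so the source and
 * destination of a live migration can hold the device at once.
 */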
if (srv_dev->open_write_cnt < 2) {
srv_dev->open_write_cnt++;
} else {
pr_err("Mapping device '%s' for session %s with migration permissions failed. Device already opened as 'RW' by %d client(s), access mode %s.\n",
srv_dev->name, srv_sess->sessname,
srv_dev->open_write_cnt,
rnbd_access_modes[access_mode].str);
ret = -EPERM;
}
break;
default:
pr_err("Received mapping request for device '%s' on session %s with invalid access mode: %d\n",
srv_dev->name, srv_sess->sessname, access_mode);
ret = -EINVAL;
}
mutex_unlock(&srv_dev->lock);
return ret;
}
static struct rnbd_srv_dev *
rnbd_srv_get_or_create_srv_dev(struct block_device *bdev,
struct rnbd_srv_session *srv_sess,
enum rnbd_access_mode access_mode)
{
int ret;
struct rnbd_srv_dev *new_dev, *dev;
new_dev = rnbd_srv_init_srv_dev(bdev);
if (IS_ERR(new_dev))
return new_dev;
dev = rnbd_srv_find_or_add_srv_dev(new_dev);
if (dev != new_dev)
kfree(new_dev);
ret = rnbd_srv_check_update_open_perm(dev, srv_sess, access_mode);
if (ret) {
rnbd_put_srv_dev(dev);
return ERR_PTR(ret);
}
return dev;
}
static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
struct rnbd_srv_sess_dev *sess_dev)
{
struct block_device *bdev = sess_dev->bdev;
rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP);
rsp->device_id = cpu_to_le32(sess_dev->device_id);
rsp->nsectors = cpu_to_le64(bdev_nr_sectors(bdev));
rsp->logical_block_size = cpu_to_le16(bdev_logical_block_size(bdev));
rsp->physical_block_size = cpu_to_le16(bdev_physical_block_size(bdev));
rsp->max_segments = cpu_to_le16(bdev_max_segments(bdev));
rsp->max_hw_sectors =
cpu_to_le32(queue_max_hw_sectors(bdev_get_queue(bdev)));
rsp->max_write_same_sectors = 0;
rsp->max_discard_sectors = cpu_to_le32(bdev_max_discard_sectors(bdev));
rsp->discard_granularity = cpu_to_le32(bdev_discard_granularity(bdev));
rsp->discard_alignment = cpu_to_le32(bdev_discard_alignment(bdev));
rsp->secure_discard = cpu_to_le16(bdev_max_secure_erase_sectors(bdev));
rsp->cache_policy = 0;
if (bdev_write_cache(bdev))
rsp->cache_policy |= RNBD_WRITEBACK;
if (bdev_fua(bdev))
rsp->cache_policy |= RNBD_FUA;
}
static struct rnbd_srv_sess_dev *
rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess,
const struct rnbd_msg_open *open_msg,
struct block_device *bdev, bool readonly,
struct rnbd_srv_dev *srv_dev)
{
struct rnbd_srv_sess_dev *sdev = rnbd_sess_dev_alloc(srv_sess);
if (IS_ERR(sdev))
return sdev;
kref_init(&sdev->kref);
strscpy(sdev->pathname, open_msg->dev_name, sizeof(sdev->pathname));
sdev->bdev = bdev;
sdev->sess = srv_sess;
sdev->dev = srv_dev;
sdev->readonly = readonly;
sdev->access_mode = open_msg->access_mode;
return sdev;
}
static char *rnbd_srv_get_full_path(struct rnbd_srv_session *srv_sess,
const char *dev_name)
{
char *full_path;
char *a, *b;
full_path = kmalloc(PATH_MAX, GFP_KERNEL);
if (!full_path)
return ERR_PTR(-ENOMEM);
/*
* Replace %SESSNAME% with a real session name in order to
* create device namespace.
*/
a = strnstr(dev_search_path, "%SESSNAME%", sizeof(dev_search_path));
if (a) {
int len = a - dev_search_path;
len = snprintf(full_path, PATH_MAX, "%.*s/%s/%s", len,
dev_search_path, srv_sess->sessname, dev_name);
if (len >= PATH_MAX) {
pr_err("Too long path: %s, %s, %s\n",
dev_search_path, srv_sess->sessname, dev_name);
kfree(full_path);
return ERR_PTR(-EINVAL);
}
} else {
snprintf(full_path, PATH_MAX, "%s/%s",
dev_search_path, dev_name);
}
/* eliminate duplicated slashes */
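/* e.g. "//srv///dev//sda" collapses to "/srv/dev/sda" */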
a = strchr(full_path, '/');
b = a;
while (*b != '\0') {
if (*b == '/' && *a == '/') {
b++;
} else {
a++;
*a = *b;
b++;
}
}
a++;
*a = '\0';
return full_path;
}
static void process_msg_sess_info(struct rnbd_srv_session *srv_sess,
const void *msg, size_t len,
void *data, size_t datalen)
{
const struct rnbd_msg_sess_info *sess_info_msg = msg;
struct rnbd_msg_sess_info_rsp *rsp = data;
srv_sess->ver = min_t(u8, sess_info_msg->ver, RNBD_PROTO_VER_MAJOR);
trace_process_msg_sess_info(srv_sess, sess_info_msg);
rsp->hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO_RSP);
rsp->ver = srv_sess->ver;
}
/**
* find_srv_sess_dev() - find a device already opened under this name
* @srv_sess: the session to search.
* @dev_name: string containing the name of the device.
*
* Return: the struct rnbd_srv_sess_dev if srv_sess has already opened
* dev_name, NULL if the session didn't open the device yet.
*/
static struct rnbd_srv_sess_dev *
find_srv_sess_dev(struct rnbd_srv_session *srv_sess, const char *dev_name)
{
struct rnbd_srv_sess_dev *sess_dev;
unsigned long index;
if (xa_empty(&srv_sess->index_idr))
return NULL;
xa_for_each(&srv_sess->index_idr, index, sess_dev)
if (!strcmp(sess_dev->pathname, dev_name))
return sess_dev;
return NULL;
}
static int process_msg_open(struct rnbd_srv_session *srv_sess,
const void *msg, size_t len,
void *data, size_t datalen)
{
int ret;
struct rnbd_srv_dev *srv_dev;
struct rnbd_srv_sess_dev *srv_sess_dev;
const struct rnbd_msg_open *open_msg = msg;
struct block_device *bdev;
blk_mode_t open_flags = BLK_OPEN_READ;
char *full_path;
struct rnbd_msg_open_rsp *rsp = data;
trace_process_msg_open(srv_sess, open_msg);
if (open_msg->access_mode != RNBD_ACCESS_RO)
open_flags |= BLK_OPEN_WRITE;
mutex_lock(&srv_sess->lock);
srv_sess_dev = find_srv_sess_dev(srv_sess, open_msg->dev_name);
if (srv_sess_dev)
goto fill_response;
if ((strlen(dev_search_path) + strlen(open_msg->dev_name))
>= PATH_MAX) {
pr_err("Opening device for session %s failed, device path too long. '%s/%s' is longer than PATH_MAX (%d)\n",
srv_sess->sessname, dev_search_path, open_msg->dev_name,
PATH_MAX);
ret = -EINVAL;
goto reject;
}
if (strstr(open_msg->dev_name, "..")) {
pr_err("Opening device for session %s failed, device path %s contains relative path ..\n",
srv_sess->sessname, open_msg->dev_name);
ret = -EINVAL;
goto reject;
}
full_path = rnbd_srv_get_full_path(srv_sess, open_msg->dev_name);
if (IS_ERR(full_path)) {
ret = PTR_ERR(full_path);
pr_err("Opening device '%s' for client %s failed, failed to get device full path, err: %d\n",
open_msg->dev_name, srv_sess->sessname, ret);
goto reject;
}
bdev = blkdev_get_by_path(full_path, open_flags, NULL, NULL);
if (IS_ERR(bdev)) {
ret = PTR_ERR(bdev);
pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %d\n",
full_path, srv_sess->sessname, ret);
goto free_path;
}
srv_dev = rnbd_srv_get_or_create_srv_dev(bdev, srv_sess,
open_msg->access_mode);
if (IS_ERR(srv_dev)) {
pr_err("Opening device '%s' on session %s failed, creating srv_dev failed, err: %ld\n",
full_path, srv_sess->sessname, PTR_ERR(srv_dev));
ret = PTR_ERR(srv_dev);
goto blkdev_put;
}
srv_sess_dev = rnbd_srv_create_set_sess_dev(srv_sess, open_msg, bdev,
open_msg->access_mode == RNBD_ACCESS_RO,
srv_dev);
if (IS_ERR(srv_sess_dev)) {
pr_err("Opening device '%s' on session %s failed, creating sess_dev failed, err: %ld\n",
full_path, srv_sess->sessname, PTR_ERR(srv_sess_dev));
ret = PTR_ERR(srv_sess_dev);
goto srv_dev_put;
}
/*
 * Create the srv_dev sysfs files if they haven't been created yet.
 * Creation is delayed so the sysfs files never appear before we are
 * sure the device can be opened.
 */
mutex_lock(&srv_dev->lock);
if (!srv_dev->dev_kobj.state_in_sysfs) {
ret = rnbd_srv_create_dev_sysfs(srv_dev, bdev);
if (ret) {
mutex_unlock(&srv_dev->lock);
rnbd_srv_err(srv_sess_dev,
"Opening device failed, failed to create device sysfs files, err: %d\n",
ret);
goto free_srv_sess_dev;
}
}
ret = rnbd_srv_create_dev_session_sysfs(srv_sess_dev);
if (ret) {
mutex_unlock(&srv_dev->lock);
rnbd_srv_err(srv_sess_dev,
"Opening device failed, failed to create dev client sysfs files, err: %d\n",
ret);
goto free_srv_sess_dev;
}
list_add(&srv_sess_dev->dev_list, &srv_dev->sess_dev_list);
mutex_unlock(&srv_dev->lock);
rnbd_srv_info(srv_sess_dev, "Opened device '%s'\n", srv_dev->name);
kfree(full_path);
fill_response:
rnbd_srv_fill_msg_open_rsp(rsp, srv_sess_dev);
mutex_unlock(&srv_sess->lock);
return 0;
free_srv_sess_dev:
xa_erase(&srv_sess->index_idr, srv_sess_dev->device_id);
synchronize_rcu();
kfree(srv_sess_dev);
srv_dev_put:
if (open_msg->access_mode != RNBD_ACCESS_RO) {
mutex_lock(&srv_dev->lock);
srv_dev->open_write_cnt--;
mutex_unlock(&srv_dev->lock);
}
rnbd_put_srv_dev(srv_dev);
blkdev_put:
blkdev_put(bdev, NULL);
free_path:
kfree(full_path);
reject:
mutex_unlock(&srv_sess->lock);
return ret;
}
static struct rtrs_srv_ctx *rtrs_ctx;
static struct rtrs_srv_ops rtrs_ops;
static int __init rnbd_srv_init_module(void)
{
int err = 0;
BUILD_BUG_ON(sizeof(struct rnbd_msg_hdr) != 4);
BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info) != 36);
BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info_rsp) != 36);
BUILD_BUG_ON(sizeof(struct rnbd_msg_open) != 264);
BUILD_BUG_ON(sizeof(struct rnbd_msg_close) != 8);
BUILD_BUG_ON(sizeof(struct rnbd_msg_open_rsp) != 56);
rtrs_ops = (struct rtrs_srv_ops) {
.rdma_ev = rnbd_srv_rdma_ev,
.link_ev = rnbd_srv_link_ev,
};
rtrs_ctx = rtrs_srv_open(&rtrs_ops, port_nr);
if (IS_ERR(rtrs_ctx)) {
pr_err("rtrs_srv_open(), err: %d\n", err);
return PTR_ERR(rtrs_ctx);
}
err = rnbd_srv_create_sysfs_files();
if (err) {
pr_err("rnbd_srv_create_sysfs_files(), err: %d\n", err);
rtrs_srv_close(rtrs_ctx);
}
return err;
}
static void __exit rnbd_srv_cleanup_module(void)
{
rtrs_srv_close(rtrs_ctx);
WARN_ON(!list_empty(&sess_list));
rnbd_srv_destroy_sysfs_files();
}
module_init(rnbd_srv_init_module);
module_exit(rnbd_srv_cleanup_module);
| linux-master | drivers/block/rnbd/rnbd-srv.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* RDMA Network Block Driver
*
* Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
* Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
* Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
*/
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include "rnbd-srv.h"
static struct device *rnbd_dev;
static const struct class rnbd_dev_class = {
.name = "rnbd-server",
};
static struct kobject *rnbd_devs_kobj;
static void rnbd_srv_dev_release(struct kobject *kobj)
{
struct rnbd_srv_dev *dev;
dev = container_of(kobj, struct rnbd_srv_dev, dev_kobj);
kfree(dev);
}
static struct kobj_type dev_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.release = rnbd_srv_dev_release
};
int rnbd_srv_create_dev_sysfs(struct rnbd_srv_dev *dev,
struct block_device *bdev)
{
struct kobject *bdev_kobj;
int ret;
ret = kobject_init_and_add(&dev->dev_kobj, &dev_ktype,
rnbd_devs_kobj, "%pg", bdev);
if (ret) {
kobject_put(&dev->dev_kobj);
return ret;
}
dev->dev_sessions_kobj = kobject_create_and_add("sessions",
&dev->dev_kobj);
if (!dev->dev_sessions_kobj) {
ret = -ENOMEM;
goto free_dev_kobj;
}
bdev_kobj = &disk_to_dev(bdev->bd_disk)->kobj;
ret = sysfs_create_link(&dev->dev_kobj, bdev_kobj, "block_dev");
if (ret)
goto put_sess_kobj;
return 0;
put_sess_kobj:
kobject_put(dev->dev_sessions_kobj);
free_dev_kobj:
kobject_del(&dev->dev_kobj);
kobject_put(&dev->dev_kobj);
return ret;
}
void rnbd_srv_destroy_dev_sysfs(struct rnbd_srv_dev *dev)
{
sysfs_remove_link(&dev->dev_kobj, "block_dev");
kobject_del(dev->dev_sessions_kobj);
kobject_put(dev->dev_sessions_kobj);
kobject_del(&dev->dev_kobj);
kobject_put(&dev->dev_kobj);
}
static ssize_t read_only_show(struct kobject *kobj, struct kobj_attribute *attr,
char *page)
{
struct rnbd_srv_sess_dev *sess_dev;
sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
return sysfs_emit(page, "%d\n", sess_dev->readonly);
}
static struct kobj_attribute rnbd_srv_dev_session_ro_attr =
__ATTR_RO(read_only);
static ssize_t access_mode_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
struct rnbd_srv_sess_dev *sess_dev;
sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
return sysfs_emit(page, "%s\n",
rnbd_access_modes[sess_dev->access_mode].str);
}
static struct kobj_attribute rnbd_srv_dev_session_access_mode_attr =
__ATTR_RO(access_mode);
static ssize_t mapping_path_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
struct rnbd_srv_sess_dev *sess_dev;
sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
return sysfs_emit(page, "%s\n", sess_dev->pathname);
}
static struct kobj_attribute rnbd_srv_dev_session_mapping_path_attr =
__ATTR_RO(mapping_path);
static ssize_t rnbd_srv_dev_session_force_close_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sysfs_emit(page, "Usage: echo 1 > %s\n",
attr->attr.name);
}
static ssize_t rnbd_srv_dev_session_force_close_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct rnbd_srv_sess_dev *sess_dev;
sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
if (!sysfs_streq(buf, "1")) {
rnbd_srv_err(sess_dev, "%s: invalid value: '%s'\n",
attr->attr.name, buf);
return -EINVAL;
}
rnbd_srv_info(sess_dev, "force close requested\n");
rnbd_srv_sess_dev_force_close(sess_dev, attr);
return count;
}
static struct kobj_attribute rnbd_srv_dev_session_force_close_attr =
__ATTR(force_close, 0644,
rnbd_srv_dev_session_force_close_show,
rnbd_srv_dev_session_force_close_store);
static struct attribute *rnbd_srv_default_dev_sessions_attrs[] = {
&rnbd_srv_dev_session_access_mode_attr.attr,
&rnbd_srv_dev_session_ro_attr.attr,
&rnbd_srv_dev_session_mapping_path_attr.attr,
&rnbd_srv_dev_session_force_close_attr.attr,
NULL,
};
static struct attribute_group rnbd_srv_default_dev_session_attr_group = {
.attrs = rnbd_srv_default_dev_sessions_attrs,
};
void rnbd_srv_destroy_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev)
{
sysfs_remove_group(&sess_dev->kobj,
&rnbd_srv_default_dev_session_attr_group);
kobject_del(&sess_dev->kobj);
kobject_put(&sess_dev->kobj);
}
static void rnbd_srv_sess_dev_release(struct kobject *kobj)
{
struct rnbd_srv_sess_dev *sess_dev;
sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
rnbd_destroy_sess_dev(sess_dev, sess_dev->keep_id);
}
static struct kobj_type rnbd_srv_sess_dev_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.release = rnbd_srv_sess_dev_release,
};
int rnbd_srv_create_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev)
{
int ret;
ret = kobject_init_and_add(&sess_dev->kobj, &rnbd_srv_sess_dev_ktype,
sess_dev->dev->dev_sessions_kobj, "%s",
sess_dev->sess->sessname);
if (ret) {
kobject_put(&sess_dev->kobj);
return ret;
}
ret = sysfs_create_group(&sess_dev->kobj,
&rnbd_srv_default_dev_session_attr_group);
if (ret) {
kobject_del(&sess_dev->kobj);
kobject_put(&sess_dev->kobj);
}
return ret;
}
int rnbd_srv_create_sysfs_files(void)
{
int err;
err = class_register(&rnbd_dev_class);
if (err)
return err;
rnbd_dev = device_create(&rnbd_dev_class, NULL,
MKDEV(0, 0), NULL, "ctl");
if (IS_ERR(rnbd_dev)) {
err = PTR_ERR(rnbd_dev);
goto cls_destroy;
}
rnbd_devs_kobj = kobject_create_and_add("devices", &rnbd_dev->kobj);
if (!rnbd_devs_kobj) {
err = -ENOMEM;
goto dev_destroy;
}
return 0;
dev_destroy:
device_destroy(&rnbd_dev_class, MKDEV(0, 0));
cls_destroy:
class_unregister(&rnbd_dev_class);
return err;
}
void rnbd_srv_destroy_sysfs_files(void)
{
kobject_del(rnbd_devs_kobj);
kobject_put(rnbd_devs_kobj);
device_destroy(&rnbd_dev_class, MKDEV(0, 0));
class_unregister(&rnbd_dev_class);
}
| linux-master | drivers/block/rnbd/rnbd-srv-sysfs.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* RDMA Network Block Driver
*
* Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
*/
#include "rtrs.h"
#include "rtrs-srv.h"
#include "rnbd-srv.h"
#include "rnbd-proto.h"
/*
* We include this last to have the helpers above available for the trace
* event implementations.
*/
#define CREATE_TRACE_POINTS
#include "rnbd-srv-trace.h"
| linux-master | drivers/block/rnbd/rnbd-srv-trace.c |