// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_file.c
*
* This file contains the Storage Engine <-> FILEIO transport specific functions
*
* (c) Copyright 2005-2013 Datera, Inc.
*
* Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include "target_core_file.h"
static inline struct fd_dev *FD_DEV(struct se_device *dev)
{
return container_of(dev, struct fd_dev, dev);
}
static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
struct fd_host *fd_host;
fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
if (!fd_host) {
pr_err("Unable to allocate memory for struct fd_host\n");
return -ENOMEM;
}
fd_host->fd_host_id = host_id;
hba->hba_ptr = fd_host;
pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
TARGET_CORE_VERSION);
pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
hba->hba_id, fd_host->fd_host_id);
return 0;
}
static void fd_detach_hba(struct se_hba *hba)
{
struct fd_host *fd_host = hba->hba_ptr;
pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
" Target Core\n", hba->hba_id, fd_host->fd_host_id);
kfree(fd_host);
hba->hba_ptr = NULL;
}
static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
{
struct fd_dev *fd_dev;
struct fd_host *fd_host = hba->hba_ptr;
fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
if (!fd_dev) {
pr_err("Unable to allocate memory for struct fd_dev\n");
return NULL;
}
fd_dev->fd_host = fd_host;
pr_debug("FILEIO: Allocated fd_dev for %p\n", name);
return &fd_dev->dev;
}
static bool fd_configure_unmap(struct se_device *dev)
{
struct file *file = FD_DEV(dev)->fd_file;
struct inode *inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode))
return target_configure_unmap_from_queue(&dev->dev_attrib,
I_BDEV(inode));
/* Limit UNMAP emulation to 8k Number of LBAs (NoLB) */
dev->dev_attrib.max_unmap_lba_count = 0x2000;
/* Currently hardcoded to 1 in Linux/SCSI code. */
dev->dev_attrib.max_unmap_block_desc_count = 1;
dev->dev_attrib.unmap_granularity = 1;
dev->dev_attrib.unmap_granularity_alignment = 0;
return true;
}
static int fd_configure_device(struct se_device *dev)
{
struct fd_dev *fd_dev = FD_DEV(dev);
struct fd_host *fd_host = dev->se_hba->hba_ptr;
struct file *file;
struct inode *inode = NULL;
int flags, ret = -EINVAL;
if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
pr_err("Missing fd_dev_name=\n");
return -EINVAL;
}
/*
* Use O_DSYNC by default instead of O_SYNC to forgo syncing
* of pure timestamp updates.
*/
flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
/*
* Optionally allow fd_buffered_io=1 to be enabled for people
* who want to use the fs buffer cache as a WriteCache mechanism.
*
* This means that in the event of a hard failure, there is a risk
* of silent data-loss if the SCSI client has *not* performed a
* forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
* to write-out the entire device cache.
*/
if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
flags &= ~O_DSYNC;
}
file = filp_open(fd_dev->fd_dev_name, flags, 0600);
if (IS_ERR(file)) {
pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name);
ret = PTR_ERR(file);
goto fail;
}
fd_dev->fd_file = file;
/*
* If using a block backend with this struct file, we extract
* fd_dev->fd_[block,dev]_size from struct block_device.
*
* Otherwise, we use the passed fd_size= from configfs
*/
inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) {
struct block_device *bdev = I_BDEV(inode);
unsigned long long dev_size;
fd_dev->fd_block_size = bdev_logical_block_size(bdev);
/*
* Determine the number of bytes from i_size_read() minus
* one (1) logical sector from underlying struct block_device
*/
dev_size = (i_size_read(file->f_mapping->host) -
fd_dev->fd_block_size);
pr_debug("FILEIO: Using size: %llu bytes from struct"
" block_device blocks: %llu logical_block_size: %d\n",
dev_size, div_u64(dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
/*
* Enable WRITE SAME emulation for the block-device-backed case; use 0xFFFF, as
* the smaller WRITE_SAME(10) only has a two-byte block count.
*/
dev->dev_attrib.max_write_same_len = 0xFFFF;
if (bdev_nonrot(bdev))
dev->dev_attrib.is_nonrot = 1;
} else {
if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
pr_err("FILEIO: Missing fd_dev_size="
" parameter, and no backing struct"
" block_device\n");
goto fail;
}
fd_dev->fd_block_size = FD_BLOCKSIZE;
/*
* Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB)
* based upon struct iovec limit for vfs_writev()
*/
dev->dev_attrib.max_write_same_len = 0x1000;
}
dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
" with FDBD_HAS_BUFFERED_IO_WCE\n");
dev->dev_attrib.emulate_write_cache = 1;
}
fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
fd_dev->fd_queue_depth = dev->queue_depth;
pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
fd_dev->fd_dev_name, fd_dev->fd_dev_size);
return 0;
fail:
if (fd_dev->fd_file) {
filp_close(fd_dev->fd_file, NULL);
fd_dev->fd_file = NULL;
}
return ret;
}
static void fd_dev_call_rcu(struct rcu_head *p)
{
struct se_device *dev = container_of(p, struct se_device, rcu_head);
struct fd_dev *fd_dev = FD_DEV(dev);
kfree(fd_dev);
}
static void fd_free_device(struct se_device *dev)
{
call_rcu(&dev->rcu_head, fd_dev_call_rcu);
}
static void fd_destroy_device(struct se_device *dev)
{
struct fd_dev *fd_dev = FD_DEV(dev);
if (fd_dev->fd_file) {
filp_close(fd_dev->fd_file, NULL);
fd_dev->fd_file = NULL;
}
}
struct target_core_file_cmd {
unsigned long len;
struct se_cmd *cmd;
struct kiocb iocb;
struct bio_vec bvecs[];
};
static void cmd_rw_aio_complete(struct kiocb *iocb, long ret)
{
struct target_core_file_cmd *cmd;
cmd = container_of(iocb, struct target_core_file_cmd, iocb);
if (ret != cmd->len)
target_complete_cmd(cmd->cmd, SAM_STAT_CHECK_CONDITION);
else
target_complete_cmd(cmd->cmd, SAM_STAT_GOOD);
kfree(cmd);
}
static sense_reason_t
fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
enum dma_data_direction data_direction)
{
int is_write = !(data_direction == DMA_FROM_DEVICE);
struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = FD_DEV(dev);
struct file *file = fd_dev->fd_file;
struct target_core_file_cmd *aio_cmd;
struct iov_iter iter;
struct scatterlist *sg;
ssize_t len = 0;
int ret = 0, i;
aio_cmd = kmalloc(struct_size(aio_cmd, bvecs, sgl_nents), GFP_KERNEL);
if (!aio_cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
for_each_sg(sgl, sg, sgl_nents, i) {
bvec_set_page(&aio_cmd->bvecs[i], sg_page(sg), sg->length,
sg->offset);
len += sg->length;
}
iov_iter_bvec(&iter, is_write, aio_cmd->bvecs, sgl_nents, len);
aio_cmd->cmd = cmd;
aio_cmd->len = len;
aio_cmd->iocb.ki_pos = cmd->t_task_lba * dev->dev_attrib.block_size;
aio_cmd->iocb.ki_filp = file;
aio_cmd->iocb.ki_complete = cmd_rw_aio_complete;
aio_cmd->iocb.ki_flags = IOCB_DIRECT;
if (is_write && (cmd->se_cmd_flags & SCF_FUA))
aio_cmd->iocb.ki_flags |= IOCB_DSYNC;
if (is_write)
ret = call_write_iter(file, &aio_cmd->iocb, &iter);
else
ret = call_read_iter(file, &aio_cmd->iocb, &iter);
if (ret != -EIOCBQUEUED)
cmd_rw_aio_complete(&aio_cmd->iocb, ret);
return 0;
}
static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
u32 block_size, struct scatterlist *sgl,
u32 sgl_nents, u32 data_length, int is_write)
{
struct scatterlist *sg;
struct iov_iter iter;
struct bio_vec *bvec;
ssize_t len = 0;
loff_t pos = (cmd->t_task_lba * block_size);
int ret = 0, i;
bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
if (!bvec) {
pr_err("Unable to allocate fd_do_readv iov[]\n");
return -ENOMEM;
}
for_each_sg(sgl, sg, sgl_nents, i) {
bvec_set_page(&bvec[i], sg_page(sg), sg->length, sg->offset);
len += sg->length;
}
iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len);
if (is_write)
ret = vfs_iter_write(fd, &iter, &pos, 0);
else
ret = vfs_iter_read(fd, &iter, &pos, 0);
if (is_write) {
if (ret < 0 || ret != data_length) {
pr_err("%s() write returned %d\n", __func__, ret);
if (ret >= 0)
ret = -EINVAL;
}
} else {
/*
* Return zeros and GOOD status even if the READ did not return
* the expected virt_size for struct file w/o a backing struct
* block_device.
*/
if (S_ISBLK(file_inode(fd)->i_mode)) {
if (ret < 0 || ret != data_length) {
pr_err("%s() returned %d, expecting %u for "
"S_ISBLK\n", __func__, ret,
data_length);
if (ret >= 0)
ret = -EINVAL;
}
} else {
if (ret < 0) {
pr_err("%s() returned %d for non S_ISBLK\n",
__func__, ret);
} else if (ret != data_length) {
/*
* Short read case:
* Probably someone truncated the file under us.
* We must explicitly zero the sg-pages to avoid
* exposing uninitialized pages to userspace.
*/
if (ret < data_length)
ret += iov_iter_zero(data_length - ret, &iter);
else
ret = -EINVAL;
}
}
}
kfree(bvec);
return ret;
}
static sense_reason_t
fd_execute_sync_cache(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = FD_DEV(dev);
int immed = (cmd->t_task_cdb[1] & 0x2);
loff_t start, end;
int ret;
/*
* If the Immediate bit is set, queue up the GOOD response
* for this SYNCHRONIZE_CACHE op
*/
if (immed)
target_complete_cmd(cmd, SAM_STAT_GOOD);
/*
* Determine if we will be flushing the entire device.
*/
if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
start = 0;
end = LLONG_MAX;
} else {
start = cmd->t_task_lba * dev->dev_attrib.block_size;
if (cmd->data_length)
end = start + cmd->data_length - 1;
else
end = LLONG_MAX;
}
ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
if (ret != 0)
pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
if (immed)
return 0;
if (ret)
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
else
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
static sense_reason_t
fd_execute_write_same(struct se_cmd *cmd)
{
struct se_device *se_dev = cmd->se_dev;
struct fd_dev *fd_dev = FD_DEV(se_dev);
loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
sector_t nolb = sbc_get_write_same_sectors(cmd);
struct iov_iter iter;
struct bio_vec *bvec;
unsigned int len = 0, i;
ssize_t ret;
if (cmd->prot_op) {
pr_err("WRITE_SAME: Protection information with FILEIO"
" backends not supported\n");
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
if (!cmd->t_data_nents)
return TCM_INVALID_CDB_FIELD;
if (cmd->t_data_nents > 1 ||
cmd->t_data_sg[0].length != cmd->se_dev->dev_attrib.block_size) {
pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
" block_size: %u\n",
cmd->t_data_nents,
cmd->t_data_sg[0].length,
cmd->se_dev->dev_attrib.block_size);
return TCM_INVALID_CDB_FIELD;
}
bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL);
if (!bvec)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
for (i = 0; i < nolb; i++) {
bvec_set_page(&bvec[i], sg_page(&cmd->t_data_sg[0]),
cmd->t_data_sg[0].length,
cmd->t_data_sg[0].offset);
len += se_dev->dev_attrib.block_size;
}
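/*
 * Every bio_vec above references the same single-block source page, so the
 * single vfs_iter_write() below replicates that block nolb times on disk.
 */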
iov_iter_bvec(&iter, ITER_SOURCE, bvec, nolb, len);
ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0);
kfree(bvec);
if (ret < 0 || ret != len) {
pr_err("vfs_iter_write() returned %zd for write same\n", ret);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
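/*
 * Fill the protection file with 0xff PI tuples for nolb blocks starting at
 * lba. Both the file offset and the total length scale by se_dev->prot_length
 * (sizeof(struct t10_pi_tuple) per logical block, as set in
 * target_alloc_device()).
 */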
static int
fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
void *buf, size_t bufsize)
{
struct fd_dev *fd_dev = FD_DEV(se_dev);
struct file *prot_fd = fd_dev->fd_prot_file;
sector_t prot_length, prot;
loff_t pos = lba * se_dev->prot_length;
if (!prot_fd) {
pr_err("Unable to locate fd_dev->fd_prot_file\n");
return -ENODEV;
}
prot_length = nolb * se_dev->prot_length;
memset(buf, 0xff, bufsize);
for (prot = 0; prot < prot_length;) {
sector_t len = min_t(sector_t, bufsize, prot_length - prot);
ssize_t ret = kernel_write(prot_fd, buf, len, &pos);
if (ret != len) {
pr_err("vfs_write to prot file failed: %zd\n", ret);
return ret < 0 ? ret : -ENODEV;
}
prot += ret;
}
return 0;
}
static int
fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
void *buf;
int rc;
buf = (void *)__get_free_page(GFP_KERNEL);
if (!buf) {
pr_err("Unable to allocate FILEIO prot buf\n");
return -ENOMEM;
}
rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);
free_page((unsigned long)buf);
return rc;
}
static sense_reason_t
fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
struct file *file = FD_DEV(cmd->se_dev)->fd_file;
struct inode *inode = file->f_mapping->host;
int ret;
if (!nolb) {
return 0;
}
if (cmd->se_dev->dev_attrib.pi_prot_type) {
ret = fd_do_prot_unmap(cmd, lba, nolb);
if (ret)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
if (S_ISBLK(inode->i_mode)) {
/* The backend is block device, use discard */
struct block_device *bdev = I_BDEV(inode);
struct se_device *dev = cmd->se_dev;
ret = blkdev_issue_discard(bdev,
target_to_linux_sector(dev, lba),
target_to_linux_sector(dev, nolb),
GFP_KERNEL);
if (ret < 0) {
pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
ret);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
} else {
/* The backend is normal file, use fallocate */
struct se_device *se_dev = cmd->se_dev;
loff_t pos = lba * se_dev->dev_attrib.block_size;
unsigned int len = nolb * se_dev->dev_attrib.block_size;
int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
if (!file->f_op->fallocate)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
ret = file->f_op->fallocate(file, mode, pos, len);
if (ret < 0) {
pr_warn("FILEIO: fallocate() failed: %d\n", ret);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
}
return 0;
}
static sense_reason_t
fd_execute_rw_buffered(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
enum dma_data_direction data_direction)
{
struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = FD_DEV(dev);
struct file *file = fd_dev->fd_file;
struct file *pfile = fd_dev->fd_prot_file;
sense_reason_t rc;
int ret = 0;
/*
* Call vectorized fileio functions to map struct scatterlist
* physical memory addresses to struct iovec virtual memory.
*/
if (data_direction == DMA_FROM_DEVICE) {
if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
ret = fd_do_rw(cmd, pfile, dev->prot_length,
cmd->t_prot_sg, cmd->t_prot_nents,
cmd->prot_length, 0);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
sgl, sgl_nents, cmd->data_length, 0);
if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type &&
dev->dev_attrib.pi_prot_verify) {
u32 sectors = cmd->data_length >>
ilog2(dev->dev_attrib.block_size);
rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
0, cmd->t_prot_sg, 0);
if (rc)
return rc;
}
} else {
if (cmd->prot_type && dev->dev_attrib.pi_prot_type &&
dev->dev_attrib.pi_prot_verify) {
u32 sectors = cmd->data_length >>
ilog2(dev->dev_attrib.block_size);
rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
0, cmd->t_prot_sg, 0);
if (rc)
return rc;
}
ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
sgl, sgl_nents, cmd->data_length, 1);
/*
* Perform an implicit vfs_fsync_range() for fd_do_rw() write ops
* for SCSI WRITEs with Forced Unit Access (FUA) set.
* Allow this to happen independent of WCE=0 setting.
*/
if (ret > 0 && (cmd->se_cmd_flags & SCF_FUA)) {
loff_t start = cmd->t_task_lba *
dev->dev_attrib.block_size;
loff_t end;
if (cmd->data_length)
end = start + cmd->data_length - 1;
else
end = LLONG_MAX;
vfs_fsync_range(fd_dev->fd_file, start, end, 1);
}
if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
ret = fd_do_rw(cmd, pfile, dev->prot_length,
cmd->t_prot_sg, cmd->t_prot_nents,
cmd->prot_length, 1);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
}
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
static sense_reason_t
fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
enum dma_data_direction data_direction)
{
struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = FD_DEV(dev);
/*
* We are currently limited by the number of iovecs (2048) per
* single vfs_[writev,readv] call.
*/
if (cmd->data_length > FD_MAX_BYTES) {
pr_err("FILEIO: Not able to process I/O of %u bytes due to"
"FD_MAX_BYTES: %u iovec count limitation\n",
cmd->data_length, FD_MAX_BYTES);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
if (fd_dev->fbd_flags & FDBD_HAS_ASYNC_IO)
return fd_execute_rw_aio(cmd, sgl, sgl_nents, data_direction);
return fd_execute_rw_buffered(cmd, sgl, sgl_nents, data_direction);
}
enum {
Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io,
Opt_fd_async_io, Opt_err
};
static match_table_t tokens = {
{Opt_fd_dev_name, "fd_dev_name=%s"},
{Opt_fd_dev_size, "fd_dev_size=%s"},
{Opt_fd_buffered_io, "fd_buffered_io=%d"},
{Opt_fd_async_io, "fd_async_io=%d"},
{Opt_err, NULL}
};
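/*
 * Illustrative example (path and size values are made up): a configfs control
 * write such as "fd_dev_name=/tmp/fileio0.img,fd_dev_size=4194304,fd_buffered_io=1"
 * is split on ',' by fd_set_configfs_dev_params() below and matched against
 * the tokens above.
 */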
static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
const char *page, ssize_t count)
{
struct fd_dev *fd_dev = FD_DEV(dev);
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
return -ENOMEM;
orig = opts;
while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;
token = match_token(ptr, tokens, args);
switch (token) {
case Opt_fd_dev_name:
if (match_strlcpy(fd_dev->fd_dev_name, &args[0],
FD_MAX_DEV_NAME) == 0) {
ret = -EINVAL;
break;
}
pr_debug("FILEIO: Referencing Path: %s\n",
fd_dev->fd_dev_name);
fd_dev->fbd_flags |= FBDF_HAS_PATH;
break;
case Opt_fd_dev_size:
arg_p = match_strdup(&args[0]);
if (!arg_p) {
ret = -ENOMEM;
break;
}
ret = kstrtoull(arg_p, 0, &fd_dev->fd_dev_size);
kfree(arg_p);
if (ret < 0) {
pr_err("kstrtoull() failed for"
" fd_dev_size=\n");
goto out;
}
pr_debug("FILEIO: Referencing Size: %llu"
" bytes\n", fd_dev->fd_dev_size);
fd_dev->fbd_flags |= FBDF_HAS_SIZE;
break;
case Opt_fd_buffered_io:
ret = match_int(args, &arg);
if (ret)
goto out;
if (arg != 1) {
pr_err("bogus fd_buffered_io=%d value\n", arg);
ret = -EINVAL;
goto out;
}
pr_debug("FILEIO: Using buffered I/O"
" operations for struct fd_dev\n");
fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
break;
case Opt_fd_async_io:
ret = match_int(args, &arg);
if (ret)
goto out;
if (arg != 1) {
pr_err("bogus fd_async_io=%d value\n", arg);
ret = -EINVAL;
goto out;
}
pr_debug("FILEIO: Using async I/O"
" operations for struct fd_dev\n");
fd_dev->fbd_flags |= FDBD_HAS_ASYNC_IO;
break;
default:
break;
}
}
out:
kfree(orig);
return (!ret) ? count : ret;
}
static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
{
struct fd_dev *fd_dev = FD_DEV(dev);
ssize_t bl = 0;
bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s Async: %d\n",
fd_dev->fd_dev_name, fd_dev->fd_dev_size,
(fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
"Buffered-WCE" : "O_DSYNC",
!!(fd_dev->fbd_flags & FDBD_HAS_ASYNC_IO));
return bl;
}
static sector_t fd_get_blocks(struct se_device *dev)
{
struct fd_dev *fd_dev = FD_DEV(dev);
struct file *f = fd_dev->fd_file;
struct inode *i = f->f_mapping->host;
unsigned long long dev_size;
/*
* When using a file that references an underlying struct block_device,
* ensure dev_size is always based on the current inode size in order
* to handle underlying block_device resize operations.
*/
if (S_ISBLK(i->i_mode))
dev_size = i_size_read(i);
else
dev_size = fd_dev->fd_dev_size;
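/*
 * Subtracting one block before dividing yields the last addressable LBA
 * (total blocks - 1), which is what ->get_blocks() callers such as the
 * READ CAPACITY emulation expect.
 */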
return div_u64(dev_size - dev->dev_attrib.block_size,
dev->dev_attrib.block_size);
}
static int fd_init_prot(struct se_device *dev)
{
struct fd_dev *fd_dev = FD_DEV(dev);
struct file *prot_file, *file = fd_dev->fd_file;
struct inode *inode;
int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
char buf[FD_MAX_DEV_PROT_NAME];
if (!file) {
pr_err("Unable to locate fd_dev->fd_file\n");
return -ENODEV;
}
inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) {
pr_err("FILEIO Protection emulation only supported on"
" !S_ISBLK\n");
return -ENOSYS;
}
if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE)
flags &= ~O_DSYNC;
snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection",
fd_dev->fd_dev_name);
prot_file = filp_open(buf, flags, 0600);
if (IS_ERR(prot_file)) {
pr_err("filp_open(%s) failed\n", buf);
ret = PTR_ERR(prot_file);
return ret;
}
fd_dev->fd_prot_file = prot_file;
return 0;
}
static int fd_format_prot(struct se_device *dev)
{
unsigned char *buf;
int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
int ret;
if (!dev->dev_attrib.pi_prot_type) {
pr_err("Unable to format_prot while pi_prot_type == 0\n");
return -ENODEV;
}
buf = vzalloc(unit_size);
if (!buf) {
pr_err("Unable to allocate FILEIO prot buf\n");
return -ENOMEM;
}
pr_debug("Using FILEIO prot_length: %llu\n",
(unsigned long long)(dev->transport->get_blocks(dev) + 1) *
dev->prot_length);
ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
buf, unit_size);
vfree(buf);
return ret;
}
static void fd_free_prot(struct se_device *dev)
{
struct fd_dev *fd_dev = FD_DEV(dev);
if (!fd_dev->fd_prot_file)
return;
filp_close(fd_dev->fd_prot_file, NULL);
fd_dev->fd_prot_file = NULL;
}
static struct exec_cmd_ops fd_exec_cmd_ops = {
.execute_rw = fd_execute_rw,
.execute_sync_cache = fd_execute_sync_cache,
.execute_write_same = fd_execute_write_same,
.execute_unmap = fd_execute_unmap,
};
static sense_reason_t
fd_parse_cdb(struct se_cmd *cmd)
{
return sbc_parse_cdb(cmd, &fd_exec_cmd_ops);
}
static const struct target_backend_ops fileio_ops = {
.name = "fileio",
.inquiry_prod = "FILEIO",
.inquiry_rev = FD_VERSION,
.owner = THIS_MODULE,
.attach_hba = fd_attach_hba,
.detach_hba = fd_detach_hba,
.alloc_device = fd_alloc_device,
.configure_device = fd_configure_device,
.destroy_device = fd_destroy_device,
.free_device = fd_free_device,
.configure_unmap = fd_configure_unmap,
.parse_cdb = fd_parse_cdb,
.set_configfs_dev_params = fd_set_configfs_dev_params,
.show_configfs_dev_params = fd_show_configfs_dev_params,
.get_device_type = sbc_get_device_type,
.get_blocks = fd_get_blocks,
.init_prot = fd_init_prot,
.format_prot = fd_format_prot,
.free_prot = fd_free_prot,
.tb_dev_attrib_attrs = sbc_attrib_attrs,
};
static int __init fileio_module_init(void)
{
return transport_backend_register(&fileio_ops);
}
static void __exit fileio_module_exit(void)
{
target_backend_unregister(&fileio_ops);
}
MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("[email protected]");
MODULE_LICENSE("GPL");
module_init(fileio_module_init);
module_exit(fileio_module_exit);
linux-master | drivers/target/target_core_file.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_device.c (based on iscsi_target_device.c)
*
* This file contains the TCM Virtual Device and Disk Transport
* agnostic related functions.
*
* (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);
static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd)
{
struct se_lun *se_lun = NULL;
struct se_session *se_sess = se_cmd->se_sess;
struct se_node_acl *nacl = se_sess->se_node_acl;
struct se_dev_entry *deve;
sense_reason_t ret = TCM_NO_SENSE;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
if (deve) {
atomic_long_inc(&deve->total_cmds);
if (se_cmd->data_direction == DMA_TO_DEVICE)
atomic_long_add(se_cmd->data_length,
&deve->write_bytes);
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
atomic_long_add(se_cmd->data_length,
&deve->read_bytes);
if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
deve->lun_access_ro) {
pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
" Access for 0x%08llx\n",
se_cmd->se_tfo->fabric_name,
se_cmd->orig_fe_lun);
rcu_read_unlock();
return TCM_WRITE_PROTECTED;
}
se_lun = deve->se_lun;
if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
se_lun = NULL;
goto out_unlock;
}
se_cmd->se_lun = se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
se_cmd->lun_ref_active = true;
}
out_unlock:
rcu_read_unlock();
if (!se_lun) {
/*
* Use the se_portal_group->tpg_virt_lun0 to allow for
* REPORT_LUNS, et al to be returned when no active
* MappedLUN=0 exists for this Initiator Port.
*/
if (se_cmd->orig_fe_lun != 0) {
pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
" Access for 0x%08llx from %s\n",
se_cmd->se_tfo->fabric_name,
se_cmd->orig_fe_lun,
nacl->initiatorname);
return TCM_NON_EXISTENT_LUN;
}
/*
* Force WRITE PROTECT for virtual LUN 0
*/
if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
(se_cmd->data_direction != DMA_NONE))
return TCM_WRITE_PROTECTED;
se_lun = se_sess->se_tpg->tpg_virt_lun0;
if (!percpu_ref_tryget_live(&se_lun->lun_ref))
return TCM_NON_EXISTENT_LUN;
se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
se_cmd->lun_ref_active = true;
}
/*
* RCU reference protected by percpu se_lun->lun_ref taken above that
* must drop to zero (including initial reference) before this se_lun
* pointer can be kfree_rcu() by the final se_lun->lun_group put via
* target_core_fabric_configfs.c:target_fabric_port_release
*/
se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
atomic_long_inc(&se_cmd->se_dev->num_cmds);
if (se_cmd->data_direction == DMA_TO_DEVICE)
atomic_long_add(se_cmd->data_length,
&se_cmd->se_dev->write_bytes);
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
atomic_long_add(se_cmd->data_length,
&se_cmd->se_dev->read_bytes);
return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
{
struct se_dev_entry *deve;
struct se_lun *se_lun = NULL;
struct se_session *se_sess = se_cmd->se_sess;
struct se_node_acl *nacl = se_sess->se_node_acl;
struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
unsigned long flags;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
if (deve) {
se_lun = deve->se_lun;
if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
se_lun = NULL;
goto out_unlock;
}
se_cmd->se_lun = se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
se_cmd->lun_ref_active = true;
}
out_unlock:
rcu_read_unlock();
if (!se_lun) {
pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
" Access for 0x%08llx for %s\n",
se_cmd->se_tfo->fabric_name,
se_cmd->orig_fe_lun,
nacl->initiatorname);
return -ENODEV;
}
se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
bool target_lun_is_rdonly(struct se_cmd *cmd)
{
struct se_session *se_sess = cmd->se_sess;
struct se_dev_entry *deve;
bool ret;
rcu_read_lock();
deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
ret = deve && deve->lun_access_ro;
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);
/*
* This function is called from core_scsi3_emulate_pro_register_and_move()
* and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
* when a matching rtpi is found.
*/
struct se_dev_entry *core_get_se_deve_from_rtpi(
struct se_node_acl *nacl,
u16 rtpi)
{
struct se_dev_entry *deve;
struct se_lun *lun;
struct se_portal_group *tpg = nacl->se_tpg;
rcu_read_lock();
hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
lun = deve->se_lun;
if (!lun) {
pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
tpg->se_tpg_tfo->fabric_name);
continue;
}
if (lun->lun_tpg->tpg_rtpi != rtpi)
continue;
kref_get(&deve->pr_kref);
rcu_read_unlock();
return deve;
}
rcu_read_unlock();
return NULL;
}
void core_free_device_list_for_node(
struct se_node_acl *nacl,
struct se_portal_group *tpg)
{
struct se_dev_entry *deve;
mutex_lock(&nacl->lun_entry_mutex);
hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg);
mutex_unlock(&nacl->lun_entry_mutex);
}
void core_update_device_list_access(
u64 mapped_lun,
bool lun_access_ro,
struct se_node_acl *nacl)
{
struct se_dev_entry *deve;
mutex_lock(&nacl->lun_entry_mutex);
deve = target_nacl_find_deve(nacl, mapped_lun);
if (deve)
deve->lun_access_ro = lun_access_ro;
mutex_unlock(&nacl->lun_entry_mutex);
}
/*
* Called with rcu_read_lock or nacl->device_list_lock held.
*/
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
struct se_dev_entry *deve;
hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
if (deve->mapped_lun == mapped_lun)
return deve;
return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);
void target_pr_kref_release(struct kref *kref)
{
struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
pr_kref);
complete(&deve->pr_comp);
}
/*
* Establish UA condition on SCSI device - all LUNs
*/
void target_dev_ua_allocate(struct se_device *dev, u8 asc, u8 ascq)
{
struct se_dev_entry *se_deve;
struct se_lun *lun;
spin_lock(&dev->se_port_lock);
list_for_each_entry(lun, &dev->dev_sep_list, lun_dev_link) {
spin_lock(&lun->lun_deve_lock);
list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
core_scsi3_ua_allocate(se_deve, asc, ascq);
spin_unlock(&lun->lun_deve_lock);
}
spin_unlock(&dev->se_port_lock);
}
static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
bool skip_new)
{
struct se_dev_entry *tmp;
rcu_read_lock();
hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
if (skip_new && tmp == new)
continue;
core_scsi3_ua_allocate(tmp, 0x3F,
ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
}
rcu_read_unlock();
}
int core_enable_device_list_for_node(
struct se_lun *lun,
struct se_lun_acl *lun_acl,
u64 mapped_lun,
bool lun_access_ro,
struct se_node_acl *nacl,
struct se_portal_group *tpg)
{
struct se_dev_entry *orig, *new;
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new) {
pr_err("Unable to allocate se_dev_entry memory\n");
return -ENOMEM;
}
spin_lock_init(&new->ua_lock);
INIT_LIST_HEAD(&new->ua_list);
INIT_LIST_HEAD(&new->lun_link);
new->mapped_lun = mapped_lun;
kref_init(&new->pr_kref);
init_completion(&new->pr_comp);
new->lun_access_ro = lun_access_ro;
new->creation_time = get_jiffies_64();
new->attach_count++;
mutex_lock(&nacl->lun_entry_mutex);
orig = target_nacl_find_deve(nacl, mapped_lun);
if (orig && orig->se_lun) {
struct se_lun *orig_lun = orig->se_lun;
if (orig_lun != lun) {
pr_err("Existing orig->se_lun doesn't match new lun"
" for dynamic -> explicit NodeACL conversion:"
" %s\n", nacl->initiatorname);
mutex_unlock(&nacl->lun_entry_mutex);
kfree(new);
return -EINVAL;
}
if (orig->se_lun_acl != NULL) {
pr_warn_ratelimited("Detected existing explicit"
" se_lun_acl->se_lun_group reference for %s"
" mapped_lun: %llu, failing\n",
nacl->initiatorname, mapped_lun);
mutex_unlock(&nacl->lun_entry_mutex);
kfree(new);
return -EINVAL;
}
new->se_lun = lun;
new->se_lun_acl = lun_acl;
hlist_del_rcu(&orig->link);
hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
mutex_unlock(&nacl->lun_entry_mutex);
spin_lock(&lun->lun_deve_lock);
list_del(&orig->lun_link);
list_add_tail(&new->lun_link, &lun->lun_deve_list);
spin_unlock(&lun->lun_deve_lock);
kref_put(&orig->pr_kref, target_pr_kref_release);
wait_for_completion(&orig->pr_comp);
target_luns_data_has_changed(nacl, new, true);
kfree_rcu(orig, rcu_head);
return 0;
}
new->se_lun = lun;
new->se_lun_acl = lun_acl;
hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
mutex_unlock(&nacl->lun_entry_mutex);
spin_lock(&lun->lun_deve_lock);
list_add_tail(&new->lun_link, &lun->lun_deve_list);
spin_unlock(&lun->lun_deve_lock);
target_luns_data_has_changed(nacl, new, true);
return 0;
}
void core_disable_device_list_for_node(
struct se_lun *lun,
struct se_dev_entry *orig,
struct se_node_acl *nacl,
struct se_portal_group *tpg)
{
/*
* rcu_dereference_raw protected by se_lun->lun_group symlink
* reference to se_device->dev_group.
*/
struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
lockdep_assert_held(&nacl->lun_entry_mutex);
/*
* If the MappedLUN entry is being disabled, the entry in
* lun->lun_deve_list must be removed now before clearing the
* struct se_dev_entry pointers below as logic in
* core_alua_do_transition_tg_pt() depends on these being present.
*
* deve->se_lun_acl will be NULL for demo-mode created LUNs
* that have not been explicitly converted to MappedLUNs ->
* struct se_lun_acl, but we remove deve->lun_link from
* lun->lun_deve_list. This also means that active UAs and
* NodeACL context specific PR metadata for demo-mode
* MappedLUN *deve will be released below..
*/
spin_lock(&lun->lun_deve_lock);
list_del(&orig->lun_link);
spin_unlock(&lun->lun_deve_lock);
/*
* Disable struct se_dev_entry LUN ACL mapping
*/
core_scsi3_ua_release_all(orig);
hlist_del_rcu(&orig->link);
clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
orig->lun_access_ro = false;
orig->creation_time = 0;
orig->attach_count--;
/*
* Before firing off RCU callback, wait for any in process SPEC_I_PT=1
* or REGISTER_AND_MOVE PR operation to complete.
*/
kref_put(&orig->pr_kref, target_pr_kref_release);
wait_for_completion(&orig->pr_comp);
kfree_rcu(orig, rcu_head);
core_scsi3_free_pr_reg_from_nacl(dev, nacl);
target_luns_data_has_changed(nacl, NULL, false);
}
/* core_clear_lun_from_tpg():
*
*
*/
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
struct se_node_acl *nacl;
struct se_dev_entry *deve;
mutex_lock(&tpg->acl_node_mutex);
list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
mutex_lock(&nacl->lun_entry_mutex);
hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
if (lun != deve->se_lun)
continue;
core_disable_device_list_for_node(lun, deve, nacl, tpg);
}
mutex_unlock(&nacl->lun_entry_mutex);
}
mutex_unlock(&tpg->acl_node_mutex);
}
static void se_release_vpd_for_dev(struct se_device *dev)
{
struct t10_vpd *vpd, *vpd_tmp;
spin_lock(&dev->t10_wwn.t10_vpd_lock);
list_for_each_entry_safe(vpd, vpd_tmp,
&dev->t10_wwn.t10_vpd_list, vpd_list) {
list_del(&vpd->vpd_list);
kfree(vpd);
}
spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
u32 aligned_max_sectors;
u32 alignment;
/*
* Limit max_sectors to a PAGE_SIZE aligned value for modern
* transport_allocate_data_tasks() operation.
*/
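/*
 * Worked example (assuming PAGE_SIZE of 4096 and a 512-byte block size):
 * alignment = 8, so a max_sectors of 1023 is rounded down to 1016.
 */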
alignment = max(1ul, PAGE_SIZE / block_size);
aligned_max_sectors = rounddown(max_sectors, alignment);
if (max_sectors != aligned_max_sectors)
pr_info("Rounding down aligned max_sectors from %u to %u\n",
max_sectors, aligned_max_sectors);
return aligned_max_sectors;
}
int core_dev_add_lun(
struct se_portal_group *tpg,
struct se_device *dev,
struct se_lun *lun)
{
int rc;
rc = core_tpg_add_lun(tpg, lun, false, dev);
if (rc < 0)
return rc;
pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
/*
* Update LUN maps for dynamically added initiators when
* generate_node_acl is enabled.
*/
if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
struct se_node_acl *acl;
mutex_lock(&tpg->acl_node_mutex);
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
if (acl->dynamic_node_acl &&
(!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
core_tpg_add_node_to_devs(acl, tpg, lun);
}
}
mutex_unlock(&tpg->acl_node_mutex);
}
return 0;
}
/* core_dev_del_lun():
*
*
*/
void core_dev_del_lun(
struct se_portal_group *tpg,
struct se_lun *lun)
{
pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
" device object\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
tpg->se_tpg_tfo->fabric_name);
core_tpg_remove_lun(tpg, lun);
}
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
struct se_portal_group *tpg,
struct se_node_acl *nacl,
u64 mapped_lun,
int *ret)
{
struct se_lun_acl *lacl;
if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
pr_err("%s InitiatorName exceeds maximum size.\n",
tpg->se_tpg_tfo->fabric_name);
*ret = -EOVERFLOW;
return NULL;
}
lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
if (!lacl) {
pr_err("Unable to allocate memory for struct se_lun_acl.\n");
*ret = -ENOMEM;
return NULL;
}
lacl->mapped_lun = mapped_lun;
lacl->se_lun_nacl = nacl;
return lacl;
}
int core_dev_add_initiator_node_lun_acl(
struct se_portal_group *tpg,
struct se_lun_acl *lacl,
struct se_lun *lun,
bool lun_access_ro)
{
struct se_node_acl *nacl = lacl->se_lun_nacl;
/*
* rcu_dereference_raw protected by se_lun->lun_group symlink
* reference to se_device->dev_group.
*/
struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
if (!nacl)
return -EINVAL;
if (lun->lun_access_ro)
lun_access_ro = true;
lacl->se_lun = lun;
if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
lun_access_ro, nacl, tpg) < 0)
return -EINVAL;
pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
lun_access_ro ? "RO" : "RW",
nacl->initiatorname);
/*
* Check to see if there are any existing persistent reservation APTPL
* pre-registrations that need to be enabled for this LUN ACL..
*/
core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
lacl->mapped_lun);
return 0;
}
int core_dev_del_initiator_node_lun_acl(
struct se_lun *lun,
struct se_lun_acl *lacl)
{
struct se_portal_group *tpg = lun->lun_tpg;
struct se_node_acl *nacl;
struct se_dev_entry *deve;
nacl = lacl->se_lun_nacl;
if (!nacl)
return -EINVAL;
mutex_lock(&nacl->lun_entry_mutex);
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (deve)
core_disable_device_list_for_node(lun, deve, nacl, tpg);
mutex_unlock(&nacl->lun_entry_mutex);
pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
" InitiatorNode: %s Mapped LUN: %llu\n",
tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
nacl->initiatorname, lacl->mapped_lun);
return 0;
}
void core_dev_free_initiator_node_lun_acl(
struct se_portal_group *tpg,
struct se_lun_acl *lacl)
{
pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg),
tpg->se_tpg_tfo->fabric_name,
lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);
kfree(lacl);
}
static void scsi_dump_inquiry(struct se_device *dev)
{
struct t10_wwn *wwn = &dev->t10_wwn;
int device_type = dev->transport->get_device_type(dev);
/*
* Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
*/
pr_debug(" Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
wwn->vendor);
pr_debug(" Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
wwn->model);
pr_debug(" Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
wwn->revision);
pr_debug(" Type: %s ", scsi_device_type(device_type));
}
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
struct se_device *dev;
struct se_lun *xcopy_lun;
int i;
dev = hba->backend->ops->alloc_device(hba, name);
if (!dev)
return NULL;
dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
if (!dev->queues) {
dev->transport->free_device(dev);
return NULL;
}
dev->queue_cnt = nr_cpu_ids;
for (i = 0; i < dev->queue_cnt; i++) {
struct se_device_queue *q;
q = &dev->queues[i];
INIT_LIST_HEAD(&q->state_list);
spin_lock_init(&q->lock);
init_llist_head(&q->sq.cmd_list);
INIT_WORK(&q->sq.work, target_queued_submit_work);
}
dev->se_hba = hba;
dev->transport = hba->backend->ops;
dev->transport_flags = dev->transport->transport_flags_default;
dev->prot_length = sizeof(struct t10_pi_tuple);
dev->hba_index = hba->hba_index;
INIT_LIST_HEAD(&dev->dev_sep_list);
INIT_LIST_HEAD(&dev->dev_tmr_list);
INIT_LIST_HEAD(&dev->delayed_cmd_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
spin_lock_init(&dev->delayed_cmd_lock);
spin_lock_init(&dev->dev_reservation_lock);
spin_lock_init(&dev->se_port_lock);
spin_lock_init(&dev->se_tmr_lock);
spin_lock_init(&dev->qf_cmd_lock);
sema_init(&dev->caw_sem, 1);
INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&dev->t10_pr.registration_list);
INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
spin_lock_init(&dev->t10_pr.registration_lock);
spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
spin_lock_init(&dev->t10_alua.lba_map_lock);
INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
mutex_init(&dev->lun_reset_mutex);
dev->t10_wwn.t10_dev = dev;
/*
* Use OpenFabrics IEEE Company ID: 00 14 05
*/
dev->t10_wwn.company_id = 0x001405;
dev->t10_alua.t10_dev = dev;
dev->dev_attrib.da_dev = dev;
dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
dev->dev_attrib.emulate_dpo = 1;
dev->dev_attrib.emulate_fua_write = 1;
dev->dev_attrib.emulate_fua_read = 1;
dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
dev->dev_attrib.emulate_rsoc = DA_EMULATE_RSOC;
dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
dev->dev_attrib.is_nonrot = DA_IS_NONROT;
dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
dev->dev_attrib.max_unmap_block_desc_count =
DA_MAX_UNMAP_BLOCK_DESC_COUNT;
dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
dev->dev_attrib.unmap_granularity_alignment =
DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
dev->dev_attrib.unmap_zeroes_data =
DA_UNMAP_ZEROES_DATA_DEFAULT;
dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
xcopy_lun = &dev->xcopy_lun;
rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
init_completion(&xcopy_lun->lun_shutdown_comp);
INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
xcopy_lun->lun_tpg = &xcopy_pt_tpg;
/* Preload the default INQUIRY const values */
strscpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
sizeof(dev->t10_wwn.model));
strscpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
sizeof(dev->t10_wwn.revision));
return dev;
}
/*
* Check if the underlying struct block_device supports discard and if yes
* configure the UNMAP parameters.
*/
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
struct block_device *bdev)
{
int block_size = bdev_logical_block_size(bdev);
if (!bdev_max_discard_sectors(bdev))
return false;
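/*
 * bdev_max_discard_sectors() reports 512-byte sectors; shifting right by
 * (ilog2(block_size) - 9) converts that limit into logical-block units.
 */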
attrib->max_unmap_lba_count =
bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9);
/*
* Currently hardcoded to 1 in Linux/SCSI code..
*/
attrib->max_unmap_block_desc_count = 1;
attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size;
attrib->unmap_granularity_alignment =
bdev_discard_alignment(bdev) / block_size;
return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
/*
* Convert from blocksize advertised to the initiator to the 512 byte
* units unconditionally used by the Linux block layer.
*/
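/* For example, with a 4096-byte block size, LBA 10 maps to Linux sector 80 (10 << 3). */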
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
switch (dev->dev_attrib.block_size) {
case 4096:
return lb << 3;
case 2048:
return lb << 2;
case 1024:
return lb << 1;
default:
return lb;
}
}
EXPORT_SYMBOL(target_to_linux_sector);
struct devices_idr_iter {
struct config_item *prev_item;
int (*fn)(struct se_device *dev, void *data);
void *data;
};
static int target_devices_idr_iter(int id, void *p, void *data)
__must_hold(&device_mutex)
{
struct devices_idr_iter *iter = data;
struct se_device *dev = p;
int ret;
config_item_put(iter->prev_item);
iter->prev_item = NULL;
/*
* We add the device early to the idr, so it can be used
* by backend modules during configuration. We do not want
* to allow other callers to access partially setup devices,
* so we skip them here.
*/
if (!target_dev_configured(dev))
return 0;
iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
if (!iter->prev_item)
return 0;
mutex_unlock(&device_mutex);
ret = iter->fn(dev, iter->data);
mutex_lock(&device_mutex);
return ret;
}
/**
* target_for_each_device - iterate over configured devices
* @fn: iterator function
* @data: pointer to data that will be passed to fn
*
* fn must return 0 to continue looping over devices. non-zero will break
* from the loop and return that value to the caller.
*/
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
void *data)
{
struct devices_idr_iter iter = { .fn = fn, .data = data };
int ret;
mutex_lock(&device_mutex);
ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
mutex_unlock(&device_mutex);
config_item_put(iter.prev_item);
return ret;
}
int target_configure_device(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
int ret, id;
if (target_dev_configured(dev)) {
pr_err("se_dev->se_dev_ptr already set for storage"
" object\n");
return -EEXIST;
}
/*
* Add the device to the IDR early so modules like tcmu can use it
* during their configuration.
*/
mutex_lock(&device_mutex);
/*
* Use cyclic to try and avoid collisions with devices
* that were recently removed.
*/
id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
mutex_unlock(&device_mutex);
if (id < 0) {
ret = -ENOMEM;
goto out;
}
dev->dev_index = id;
ret = dev->transport->configure_device(dev);
if (ret)
goto out_free_index;
if (dev->transport->configure_unmap &&
dev->transport->configure_unmap(dev)) {
pr_debug("Discard support available, but disabled by default.\n");
}
/*
* XXX: there is not much point to have two different values here..
*/
dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;
/*
* Align max_hw_sectors down to PAGE_SIZE I/O transfers
*/
dev->dev_attrib.hw_max_sectors =
se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
dev->dev_attrib.hw_block_size);
dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
dev->creation_time = get_jiffies_64();
ret = core_setup_alua(dev);
if (ret)
goto out_destroy_device;
/*
* Setup work_queue for QUEUE_FULL
*/
INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
scsi_dump_inquiry(dev);
spin_lock(&hba->device_lock);
hba->dev_count++;
spin_unlock(&hba->device_lock);
dev->dev_flags |= DF_CONFIGURED;
return 0;
out_destroy_device:
dev->transport->destroy_device(dev);
out_free_index:
mutex_lock(&device_mutex);
idr_remove(&devices_idr, dev->dev_index);
mutex_unlock(&device_mutex);
out:
se_release_vpd_for_dev(dev);
return ret;
}
void target_free_device(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
WARN_ON(!list_empty(&dev->dev_sep_list));
if (target_dev_configured(dev)) {
dev->transport->destroy_device(dev);
mutex_lock(&device_mutex);
idr_remove(&devices_idr, dev->dev_index);
mutex_unlock(&device_mutex);
spin_lock(&hba->device_lock);
hba->dev_count--;
spin_unlock(&hba->device_lock);
}
core_alua_free_lu_gp_mem(dev);
core_alua_set_lba_map(dev, NULL, 0, 0);
core_scsi3_free_all_registrations(dev);
se_release_vpd_for_dev(dev);
if (dev->transport->free_prot)
dev->transport->free_prot(dev);
kfree(dev->queues);
dev->transport->free_device(dev);
}
int core_dev_setup_virtual_lun0(void)
{
struct se_hba *hba;
struct se_device *dev;
char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
int ret;
hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
if (IS_ERR(hba))
return PTR_ERR(hba);
dev = target_alloc_device(hba, "virt_lun0");
if (!dev) {
ret = -ENOMEM;
goto out_free_hba;
}
hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));
ret = target_configure_device(dev);
if (ret)
goto out_free_se_dev;
lun0_hba = hba;
g_lun0_dev = dev;
return 0;
out_free_se_dev:
target_free_device(dev);
out_free_hba:
core_delete_hba(hba);
return ret;
}
void core_dev_release_virtual_lun0(void)
{
struct se_hba *hba = lun0_hba;
if (!hba)
return;
if (g_lun0_dev)
target_free_device(g_lun0_dev);
core_delete_hba(hba);
}
/*
* Common CDB parsing for kernel and user passthrough.
*/
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
unsigned char *cdb = cmd->t_task_cdb;
struct se_device *dev = cmd->se_dev;
unsigned int size;
/*
* For REPORT LUNS we always need to emulate the response; for everything
* else, pass it up.
*/
if (cdb[0] == REPORT_LUNS) {
cmd->execute_cmd = spc_emulate_report_luns;
return TCM_NO_SENSE;
}
/*
* With emulate_pr disabled, all reservation requests should fail,
* regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
*/
if (!dev->dev_attrib.emulate_pr &&
((cdb[0] == PERSISTENT_RESERVE_IN) ||
(cdb[0] == PERSISTENT_RESERVE_OUT) ||
(cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
(cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
/*
* For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
* emulate the response, since tcmu does not have the information
* required to process these commands.
*/
if (!(dev->transport_flags &
TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
if (cdb[0] == PERSISTENT_RESERVE_IN) {
cmd->execute_cmd = target_scsi3_emulate_pr_in;
size = get_unaligned_be16(&cdb[7]);
return target_cmd_size_check(cmd, size);
}
if (cdb[0] == PERSISTENT_RESERVE_OUT) {
cmd->execute_cmd = target_scsi3_emulate_pr_out;
size = get_unaligned_be32(&cdb[5]);
return target_cmd_size_check(cmd, size);
}
if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
cmd->execute_cmd = target_scsi2_reservation_release;
if (cdb[0] == RELEASE_10)
size = get_unaligned_be16(&cdb[7]);
else
size = cmd->data_length;
return target_cmd_size_check(cmd, size);
}
if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
cmd->execute_cmd = target_scsi2_reservation_reserve;
if (cdb[0] == RESERVE_10)
size = get_unaligned_be16(&cdb[7]);
else
size = cmd->data_length;
return target_cmd_size_check(cmd, size);
}
}
/* Set DATA_CDB flag for ops that should have it */
switch (cdb[0]) {
case READ_6:
case READ_10:
case READ_12:
case READ_16:
case WRITE_6:
case WRITE_10:
case WRITE_12:
case WRITE_16:
case WRITE_VERIFY:
case WRITE_VERIFY_12:
case WRITE_VERIFY_16:
case COMPARE_AND_WRITE:
case XDWRITEREAD_10:
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case VARIABLE_LENGTH_CMD:
switch (get_unaligned_be16(&cdb[8])) {
case READ_32:
case WRITE_32:
case WRITE_VERIFY_32:
case XDWRITEREAD_32:
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
}
}
cmd->execute_cmd = exec_cmd;
return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);
linux-master | drivers/target/target_core_device.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_stat.c
*
* Modern ConfigFS group context specific statistics based on original
* target_core_mib.c code
*
* (c) Copyright 2006-2013 Datera, Inc.
*
* Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/utsname.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/configfs.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
#ifndef INITIAL_JIFFIES
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
#endif
#define SCSI_LU_INDEX 1
#define LU_COUNT 1
/*
* SCSI Device Table
*/
static struct se_device *to_stat_dev(struct config_item *item)
{
struct se_dev_stat_grps *sgrps = container_of(to_config_group(item),
struct se_dev_stat_grps, scsi_dev_group);
return container_of(sgrps, struct se_device, dev_stat_grps);
}
static ssize_t target_stat_inst_show(struct config_item *item, char *page)
{
struct se_hba *hba = to_stat_dev(item)->se_hba;
return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
}
static ssize_t target_stat_indx_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", to_stat_dev(item)->dev_index);
}
static ssize_t target_stat_role_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "Target\n");
}
static ssize_t target_stat_ports_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", to_stat_dev(item)->export_count);
}
CONFIGFS_ATTR_RO(target_stat_, inst);
CONFIGFS_ATTR_RO(target_stat_, indx);
CONFIGFS_ATTR_RO(target_stat_, role);
CONFIGFS_ATTR_RO(target_stat_, ports);
static struct configfs_attribute *target_stat_scsi_dev_attrs[] = {
&target_stat_attr_inst,
&target_stat_attr_indx,
&target_stat_attr_role,
&target_stat_attr_ports,
NULL,
};
static const struct config_item_type target_stat_scsi_dev_cit = {
.ct_attrs = target_stat_scsi_dev_attrs,
.ct_owner = THIS_MODULE,
};
/*
* SCSI Target Device Table
*/
static struct se_device *to_stat_tgt_dev(struct config_item *item)
{
struct se_dev_stat_grps *sgrps = container_of(to_config_group(item),
struct se_dev_stat_grps, scsi_tgt_dev_group);
return container_of(sgrps, struct se_device, dev_stat_grps);
}
static ssize_t target_stat_tgt_inst_show(struct config_item *item, char *page)
{
struct se_hba *hba = to_stat_tgt_dev(item)->se_hba;
return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
}
static ssize_t target_stat_tgt_indx_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", to_stat_tgt_dev(item)->dev_index);
}
static ssize_t target_stat_tgt_num_lus_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT);
}
static ssize_t target_stat_tgt_status_show(struct config_item *item,
char *page)
{
if (to_stat_tgt_dev(item)->export_count)
return snprintf(page, PAGE_SIZE, "activated");
else
return snprintf(page, PAGE_SIZE, "deactivated");
}
static ssize_t target_stat_tgt_non_access_lus_show(struct config_item *item,
char *page)
{
int non_accessible_lus;
if (to_stat_tgt_dev(item)->export_count)
non_accessible_lus = 0;
else
non_accessible_lus = 1;
return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus);
}
static ssize_t target_stat_tgt_resets_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&to_stat_tgt_dev(item)->num_resets));
}
static ssize_t target_stat_tgt_aborts_complete_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&to_stat_tgt_dev(item)->aborts_complete));
}
static ssize_t target_stat_tgt_aborts_no_task_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&to_stat_tgt_dev(item)->aborts_no_task));
}
CONFIGFS_ATTR_RO(target_stat_tgt_, inst);
CONFIGFS_ATTR_RO(target_stat_tgt_, indx);
CONFIGFS_ATTR_RO(target_stat_tgt_, num_lus);
CONFIGFS_ATTR_RO(target_stat_tgt_, status);
CONFIGFS_ATTR_RO(target_stat_tgt_, non_access_lus);
CONFIGFS_ATTR_RO(target_stat_tgt_, resets);
CONFIGFS_ATTR_RO(target_stat_tgt_, aborts_complete);
CONFIGFS_ATTR_RO(target_stat_tgt_, aborts_no_task);
static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = {
&target_stat_tgt_attr_inst,
&target_stat_tgt_attr_indx,
&target_stat_tgt_attr_num_lus,
&target_stat_tgt_attr_status,
&target_stat_tgt_attr_non_access_lus,
&target_stat_tgt_attr_resets,
&target_stat_tgt_attr_aborts_complete,
&target_stat_tgt_attr_aborts_no_task,
NULL,
};
static const struct config_item_type target_stat_scsi_tgt_dev_cit = {
.ct_attrs = target_stat_scsi_tgt_dev_attrs,
.ct_owner = THIS_MODULE,
};
/*
* SCSI Logical Unit Table
*/
static struct se_device *to_stat_lu_dev(struct config_item *item)
{
struct se_dev_stat_grps *sgrps = container_of(to_config_group(item),
struct se_dev_stat_grps, scsi_lu_group);
return container_of(sgrps, struct se_device, dev_stat_grps);
}
static ssize_t target_stat_lu_inst_show(struct config_item *item, char *page)
{
struct se_hba *hba = to_stat_lu_dev(item)->se_hba;
return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
}
static ssize_t target_stat_lu_dev_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n",
to_stat_lu_dev(item)->dev_index);
}
static ssize_t target_stat_lu_indx_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX);
}
static ssize_t target_stat_lu_lun_show(struct config_item *item, char *page)
{
/* FIXME: scsiLuDefaultLun */
return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0);
}
static ssize_t target_stat_lu_lu_name_show(struct config_item *item, char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
/* scsiLuWwnName */
return snprintf(page, PAGE_SIZE, "%s\n",
(strlen(dev->t10_wwn.unit_serial)) ?
dev->t10_wwn.unit_serial : "None");
}
static ssize_t target_stat_lu_vend_show(struct config_item *item, char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_VENDOR_LEN)
"s\n", dev->t10_wwn.vendor);
}
static ssize_t target_stat_lu_prod_show(struct config_item *item, char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_MODEL_LEN)
"s\n", dev->t10_wwn.model);
}
static ssize_t target_stat_lu_rev_show(struct config_item *item, char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_REVISION_LEN)
"s\n", dev->t10_wwn.revision);
}
static ssize_t target_stat_lu_dev_type_show(struct config_item *item, char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
/* scsiLuPeripheralType */
return snprintf(page, PAGE_SIZE, "%u\n",
dev->transport->get_device_type(dev));
}
static ssize_t target_stat_lu_status_show(struct config_item *item, char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
/* scsiLuStatus */
return snprintf(page, PAGE_SIZE, "%s\n",
(dev->export_count) ? "available" : "notavailable");
}
static ssize_t target_stat_lu_state_bit_show(struct config_item *item,
char *page)
{
/* scsiLuState */
return snprintf(page, PAGE_SIZE, "exposed\n");
}
static ssize_t target_stat_lu_num_cmds_show(struct config_item *item,
char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
/* scsiLuNumCommands */
return snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&dev->num_cmds));
}
static ssize_t target_stat_lu_read_mbytes_show(struct config_item *item,
char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
/* scsiLuReadMegaBytes */
return snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&dev->read_bytes) >> 20);
}
static ssize_t target_stat_lu_write_mbytes_show(struct config_item *item,
char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
/* scsiLuWrittenMegaBytes */
return snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&dev->write_bytes) >> 20);
}
static ssize_t target_stat_lu_resets_show(struct config_item *item, char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
/* scsiLuInResets */
return snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&dev->num_resets));
}
static ssize_t target_stat_lu_full_stat_show(struct config_item *item,
char *page)
{
/* FIXME: scsiLuOutTaskSetFullStatus */
return snprintf(page, PAGE_SIZE, "%u\n", 0);
}
static ssize_t target_stat_lu_hs_num_cmds_show(struct config_item *item,
char *page)
{
/* FIXME: scsiLuHSInCommands */
return snprintf(page, PAGE_SIZE, "%u\n", 0);
}
static ssize_t target_stat_lu_creation_time_show(struct config_item *item,
char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
/* scsiLuCreationTime: system uptime, in hundredths of a second, at LU creation */
return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time -
INITIAL_JIFFIES) * 100 / HZ));
}
CONFIGFS_ATTR_RO(target_stat_lu_, inst);
CONFIGFS_ATTR_RO(target_stat_lu_, dev);
CONFIGFS_ATTR_RO(target_stat_lu_, indx);
CONFIGFS_ATTR_RO(target_stat_lu_, lun);
CONFIGFS_ATTR_RO(target_stat_lu_, lu_name);
CONFIGFS_ATTR_RO(target_stat_lu_, vend);
CONFIGFS_ATTR_RO(target_stat_lu_, prod);
CONFIGFS_ATTR_RO(target_stat_lu_, rev);
CONFIGFS_ATTR_RO(target_stat_lu_, dev_type);
CONFIGFS_ATTR_RO(target_stat_lu_, status);
CONFIGFS_ATTR_RO(target_stat_lu_, state_bit);
CONFIGFS_ATTR_RO(target_stat_lu_, num_cmds);
CONFIGFS_ATTR_RO(target_stat_lu_, read_mbytes);
CONFIGFS_ATTR_RO(target_stat_lu_, write_mbytes);
CONFIGFS_ATTR_RO(target_stat_lu_, resets);
CONFIGFS_ATTR_RO(target_stat_lu_, full_stat);
CONFIGFS_ATTR_RO(target_stat_lu_, hs_num_cmds);
CONFIGFS_ATTR_RO(target_stat_lu_, creation_time);
static struct configfs_attribute *target_stat_scsi_lu_attrs[] = {
&target_stat_lu_attr_inst,
&target_stat_lu_attr_dev,
&target_stat_lu_attr_indx,
&target_stat_lu_attr_lun,
&target_stat_lu_attr_lu_name,
&target_stat_lu_attr_vend,
&target_stat_lu_attr_prod,
&target_stat_lu_attr_rev,
&target_stat_lu_attr_dev_type,
&target_stat_lu_attr_status,
&target_stat_lu_attr_state_bit,
&target_stat_lu_attr_num_cmds,
&target_stat_lu_attr_read_mbytes,
&target_stat_lu_attr_write_mbytes,
&target_stat_lu_attr_resets,
&target_stat_lu_attr_full_stat,
&target_stat_lu_attr_hs_num_cmds,
&target_stat_lu_attr_creation_time,
NULL,
};
static const struct config_item_type target_stat_scsi_lu_cit = {
.ct_attrs = target_stat_scsi_lu_attrs,
.ct_owner = THIS_MODULE,
};
/*
* Called from target_core_configfs.c:target_core_make_subdev() to set up
* the target statistics groups + configfs CITs located in target_core_stat.c
*/
void target_stat_setup_dev_default_groups(struct se_device *dev)
{
config_group_init_type_name(&dev->dev_stat_grps.scsi_dev_group,
"scsi_dev", &target_stat_scsi_dev_cit);
configfs_add_default_group(&dev->dev_stat_grps.scsi_dev_group,
&dev->dev_stat_grps.stat_group);
config_group_init_type_name(&dev->dev_stat_grps.scsi_tgt_dev_group,
"scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit);
configfs_add_default_group(&dev->dev_stat_grps.scsi_tgt_dev_group,
&dev->dev_stat_grps.stat_group);
config_group_init_type_name(&dev->dev_stat_grps.scsi_lu_group,
"scsi_lu", &target_stat_scsi_lu_cit);
configfs_add_default_group(&dev->dev_stat_grps.scsi_lu_group,
&dev->dev_stat_grps.stat_group);
}
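/*
 * Illustrative sketch (not taken from this file; mount point, HBA and device
 * names are assumptions): once the three default groups above are registered,
 * the per-device statistics typically appear under configfs along these lines:
 *
 *   /sys/kernel/config/target/core/$HBA/$DEV/statistics/scsi_dev/{inst,indx,role,ports}
 *   /sys/kernel/config/target/core/$HBA/$DEV/statistics/scsi_tgt_dev/{inst,indx,num_lus,...}
 *   /sys/kernel/config/target/core/$HBA/$DEV/statistics/scsi_lu/{inst,dev,indx,lun,...}
 *
 * Each attribute file is read-only and returns whatever the corresponding
 * *_show() handler above prints.
 */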
/*
* SCSI Port Table
*/
static struct se_lun *to_stat_port(struct config_item *item)
{
struct se_port_stat_grps *pgrps = container_of(to_config_group(item),
struct se_port_stat_grps, scsi_port_group);
return container_of(pgrps, struct se_lun, port_stat_grps);
}
static ssize_t target_stat_port_inst_show(struct config_item *item, char *page)
{
struct se_lun *lun = to_stat_port(item);
struct se_device *dev;
ssize_t ret = -ENODEV;
rcu_read_lock();
dev = rcu_dereference(lun->lun_se_dev);
if (dev)
ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index);
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_port_dev_show(struct config_item *item, char *page)
{
struct se_lun *lun = to_stat_port(item);
struct se_device *dev;
ssize_t ret = -ENODEV;
rcu_read_lock();
dev = rcu_dereference(lun->lun_se_dev);
if (dev)
ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_port_indx_show(struct config_item *item, char *page)
{
struct se_lun *lun = to_stat_port(item);
struct se_device *dev;
ssize_t ret = -ENODEV;
rcu_read_lock();
dev = rcu_dereference(lun->lun_se_dev);
if (dev)
ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_tpg->tpg_rtpi);
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_port_role_show(struct config_item *item, char *page)
{
struct se_lun *lun = to_stat_port(item);
struct se_device *dev;
ssize_t ret = -ENODEV;
rcu_read_lock();
dev = rcu_dereference(lun->lun_se_dev);
if (dev)
ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index);
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_port_busy_count_show(struct config_item *item,
char *page)
{
struct se_lun *lun = to_stat_port(item);
struct se_device *dev;
ssize_t ret = -ENODEV;
rcu_read_lock();
dev = rcu_dereference(lun->lun_se_dev);
if (dev) {
/* FIXME: scsiPortBusyStatuses */
ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
}
rcu_read_unlock();
return ret;
}
CONFIGFS_ATTR_RO(target_stat_port_, inst);
CONFIGFS_ATTR_RO(target_stat_port_, dev);
CONFIGFS_ATTR_RO(target_stat_port_, indx);
CONFIGFS_ATTR_RO(target_stat_port_, role);
CONFIGFS_ATTR_RO(target_stat_port_, busy_count);
static struct configfs_attribute *target_stat_scsi_port_attrs[] = {
&target_stat_port_attr_inst,
&target_stat_port_attr_dev,
&target_stat_port_attr_indx,
&target_stat_port_attr_role,
&target_stat_port_attr_busy_count,
NULL,
};
static const struct config_item_type target_stat_scsi_port_cit = {
.ct_attrs = target_stat_scsi_port_attrs,
.ct_owner = THIS_MODULE,
};
/*
* SCSI Target Port Table
*/
static struct se_lun *to_stat_tgt_port(struct config_item *item)
{
struct se_port_stat_grps *pgrps = container_of(to_config_group(item),
struct se_port_stat_grps, scsi_tgt_port_group);
return container_of(pgrps, struct se_lun, port_stat_grps);
}
static ssize_t target_stat_tgt_port_inst_show(struct config_item *item,
char *page)
{
struct se_lun *lun = to_stat_tgt_port(item);
struct se_device *dev;
ssize_t ret = -ENODEV;
rcu_read_lock();
dev = rcu_dereference(lun->lun_se_dev);
if (dev)
ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index);
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_tgt_port_dev_show(struct config_item *item,
char *page)
{
struct se_lun *lun = to_stat_tgt_port(item);
struct se_device *dev;
ssize_t ret = -ENODEV;
rcu_read_lock();
dev = rcu_dereference(lun->lun_se_dev);
if (dev)
ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_tgt_port_indx_show(struct config_item *item,
char *page)
{
struct se_lun *lun = to_stat_tgt_port(item);
struct se_device *dev;
ssize_t ret = -ENODEV;
rcu_read_lock();
dev = rcu_dereference(lun->lun_se_dev);
if (dev)
ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_tpg->tpg_rtpi);
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_tgt_port_name_show(struct config_item *item,
char *page)
{
struct se_lun *lun = to_stat_tgt_port(item);
struct se_portal_group *tpg = lun->lun_tpg;
struct se_device *dev;
ssize_t ret = -ENODEV;
rcu_read_lock();
dev = rcu_dereference(lun->lun_se_dev);
if (dev)
ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
tpg->se_tpg_tfo->fabric_name,
lun->lun_tpg->tpg_rtpi);
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_tgt_port_port_index_show(struct config_item *item,
char *page)
{
struct se_lun *lun = to_stat_tgt_port(item);
struct se_portal_group *tpg = lun->lun_tpg;
struct se_device *dev;
ssize_t ret = -ENODEV;
rcu_read_lock();
dev = rcu_dereference(lun->lun_se_dev);
if (dev)
ret = snprintf(page, PAGE_SIZE, "%s%s%d\n",
tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+",
tpg->se_tpg_tfo->tpg_get_tag(tpg));
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_tgt_port_in_cmds_show(struct config_item *item,
char *page)
{
struct se_lun *lun = to_stat_tgt_port(item);
struct se_device *dev;
ssize_t ret = -ENODEV;
rcu_read_lock();
dev = rcu_dereference(lun->lun_se_dev);
if (dev)
ret = snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&lun->lun_stats.cmd_pdus));
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_tgt_port_write_mbytes_show(struct config_item *item,
char *page)
{
struct se_lun *lun = to_stat_tgt_port(item);
struct se_device *dev;
ssize_t ret = -ENODEV;
rcu_read_lock();
dev = rcu_dereference(lun->lun_se_dev);
if (dev)
ret = snprintf(page, PAGE_SIZE, "%u\n",
(u32)(atomic_long_read(&lun->lun_stats.rx_data_octets) >> 20));
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_tgt_port_read_mbytes_show(struct config_item *item,
char *page)
{
struct se_lun *lun = to_stat_tgt_port(item);
struct se_device *dev;
ssize_t ret = -ENODEV;
rcu_read_lock();
dev = rcu_dereference(lun->lun_se_dev);
if (dev)
ret = snprintf(page, PAGE_SIZE, "%u\n",
(u32)(atomic_long_read(&lun->lun_stats.tx_data_octets) >> 20));
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_tgt_port_hs_in_cmds_show(struct config_item *item,
char *page)
{
struct se_lun *lun = to_stat_tgt_port(item);
struct se_device *dev;
ssize_t ret = -ENODEV;
rcu_read_lock();
dev = rcu_dereference(lun->lun_se_dev);
if (dev) {
/* FIXME: scsiTgtPortHsInCommands */
ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
}
rcu_read_unlock();
return ret;
}
CONFIGFS_ATTR_RO(target_stat_tgt_port_, inst);
CONFIGFS_ATTR_RO(target_stat_tgt_port_, dev);
CONFIGFS_ATTR_RO(target_stat_tgt_port_, indx);
CONFIGFS_ATTR_RO(target_stat_tgt_port_, name);
CONFIGFS_ATTR_RO(target_stat_tgt_port_, port_index);
CONFIGFS_ATTR_RO(target_stat_tgt_port_, in_cmds);
CONFIGFS_ATTR_RO(target_stat_tgt_port_, write_mbytes);
CONFIGFS_ATTR_RO(target_stat_tgt_port_, read_mbytes);
CONFIGFS_ATTR_RO(target_stat_tgt_port_, hs_in_cmds);
static struct configfs_attribute *target_stat_scsi_tgt_port_attrs[] = {
&target_stat_tgt_port_attr_inst,
&target_stat_tgt_port_attr_dev,
&target_stat_tgt_port_attr_indx,
&target_stat_tgt_port_attr_name,
&target_stat_tgt_port_attr_port_index,
&target_stat_tgt_port_attr_in_cmds,
&target_stat_tgt_port_attr_write_mbytes,
&target_stat_tgt_port_attr_read_mbytes,
&target_stat_tgt_port_attr_hs_in_cmds,
NULL,
};
static const struct config_item_type target_stat_scsi_tgt_port_cit = {
.ct_attrs = target_stat_scsi_tgt_port_attrs,
.ct_owner = THIS_MODULE,
};
/*
* SCSI Transport Table
*/
static struct se_lun *to_transport_stat(struct config_item *item)
{
struct se_port_stat_grps *pgrps = container_of(to_config_group(item),
struct se_port_stat_grps, scsi_transport_group);
return container_of(pgrps, struct se_lun, port_stat_grps);
}
static ssize_t target_stat_transport_inst_show(struct config_item *item,
char *page)
{
struct se_lun *lun = to_transport_stat(item);
struct se_device *dev;
ssize_t ret = -ENODEV;
rcu_read_lock();
dev = rcu_dereference(lun->lun_se_dev);
if (dev)
ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index);
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_transport_device_show(struct config_item *item,
char *page)
{
struct se_lun *lun = to_transport_stat(item);
struct se_device *dev;
struct se_portal_group *tpg = lun->lun_tpg;
ssize_t ret = -ENODEV;
rcu_read_lock();
dev = rcu_dereference(lun->lun_se_dev);
if (dev) {
/* scsiTransportType */
ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
tpg->se_tpg_tfo->fabric_name);
}
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_transport_indx_show(struct config_item *item,
char *page)
{
struct se_lun *lun = to_transport_stat(item);
struct se_device *dev;
struct se_portal_group *tpg = lun->lun_tpg;
ssize_t ret = -ENODEV;
rcu_read_lock();
dev = rcu_dereference(lun->lun_se_dev);
if (dev)
ret = snprintf(page, PAGE_SIZE, "%u\n",
tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_transport_dev_name_show(struct config_item *item,
char *page)
{
struct se_lun *lun = to_transport_stat(item);
struct se_device *dev;
struct se_portal_group *tpg = lun->lun_tpg;
struct t10_wwn *wwn;
ssize_t ret = -ENODEV;
rcu_read_lock();
dev = rcu_dereference(lun->lun_se_dev);
if (dev) {
wwn = &dev->t10_wwn;
/* scsiTransportDevName */
ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
tpg->se_tpg_tfo->tpg_get_wwn(tpg),
(strlen(wwn->unit_serial)) ? wwn->unit_serial :
wwn->vendor);
}
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_transport_proto_id_show(struct config_item *item,
char *page)
{
struct se_lun *lun = to_transport_stat(item);
struct se_device *dev;
struct se_portal_group *tpg = lun->lun_tpg;
ssize_t ret = -ENODEV;
rcu_read_lock();
dev = rcu_dereference(lun->lun_se_dev);
if (dev)
ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->proto_id);
rcu_read_unlock();
return ret;
}
CONFIGFS_ATTR_RO(target_stat_transport_, inst);
CONFIGFS_ATTR_RO(target_stat_transport_, device);
CONFIGFS_ATTR_RO(target_stat_transport_, indx);
CONFIGFS_ATTR_RO(target_stat_transport_, dev_name);
CONFIGFS_ATTR_RO(target_stat_transport_, proto_id);
static struct configfs_attribute *target_stat_scsi_transport_attrs[] = {
&target_stat_transport_attr_inst,
&target_stat_transport_attr_device,
&target_stat_transport_attr_indx,
&target_stat_transport_attr_dev_name,
&target_stat_transport_attr_proto_id,
NULL,
};
static const struct config_item_type target_stat_scsi_transport_cit = {
.ct_attrs = target_stat_scsi_transport_attrs,
.ct_owner = THIS_MODULE,
};
/*
* Called from target_core_fabric_configfs.c:target_fabric_make_lun() to set up
* the target port statistics groups + configfs CITs located in target_core_stat.c
*/
void target_stat_setup_port_default_groups(struct se_lun *lun)
{
config_group_init_type_name(&lun->port_stat_grps.scsi_port_group,
"scsi_port", &target_stat_scsi_port_cit);
configfs_add_default_group(&lun->port_stat_grps.scsi_port_group,
&lun->port_stat_grps.stat_group);
config_group_init_type_name(&lun->port_stat_grps.scsi_tgt_port_group,
"scsi_tgt_port", &target_stat_scsi_tgt_port_cit);
configfs_add_default_group(&lun->port_stat_grps.scsi_tgt_port_group,
&lun->port_stat_grps.stat_group);
config_group_init_type_name(&lun->port_stat_grps.scsi_transport_group,
"scsi_transport", &target_stat_scsi_transport_cit);
configfs_add_default_group(&lun->port_stat_grps.scsi_transport_group,
&lun->port_stat_grps.stat_group);
}
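/*
 * Illustrative sketch (paths are assumptions, not taken from this file):
 * after target_stat_setup_port_default_groups() runs for a fabric LUN, the
 * per-port statistics typically show up under that LUN's configfs directory:
 *
 *   .../target/$FABRIC/$WWN/tpgt_$TAG/lun/lun_$N/statistics/scsi_port/
 *   .../target/$FABRIC/$WWN/tpgt_$TAG/lun/lun_$N/statistics/scsi_tgt_port/
 *   .../target/$FABRIC/$WWN/tpgt_$TAG/lun/lun_$N/statistics/scsi_transport/
 */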
/*
* SCSI Authorized Initiator Table
*/
static struct se_lun_acl *auth_to_lacl(struct config_item *item)
{
struct se_ml_stat_grps *lgrps = container_of(to_config_group(item),
struct se_ml_stat_grps, scsi_auth_intr_group);
return container_of(lgrps, struct se_lun_acl, ml_stat_grps);
}
static ssize_t target_stat_auth_inst_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
struct se_portal_group *tpg;
ssize_t ret;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (!deve) {
rcu_read_unlock();
return -ENODEV;
}
tpg = nacl->se_tpg;
/* scsiInstIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n",
tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_auth_dev_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (!deve) {
rcu_read_unlock();
return -ENODEV;
}
/* scsiDeviceIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n", deve->se_lun->lun_index);
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_auth_port_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
struct se_portal_group *tpg;
ssize_t ret;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (!deve) {
rcu_read_unlock();
return -ENODEV;
}
tpg = nacl->se_tpg;
/* scsiAuthIntrTgtPortIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_auth_indx_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (!deve) {
rcu_read_unlock();
return -ENODEV;
}
/* scsiAuthIntrIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_auth_dev_or_port_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (!deve) {
rcu_read_unlock();
return -ENODEV;
}
/* scsiAuthIntrDevOrPort */
ret = snprintf(page, PAGE_SIZE, "%u\n", 1);
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_auth_intr_name_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (!deve) {
rcu_read_unlock();
return -ENODEV;
}
/* scsiAuthIntrName */
ret = snprintf(page, PAGE_SIZE, "%s\n", nacl->initiatorname);
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_auth_map_indx_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (!deve) {
rcu_read_unlock();
return -ENODEV;
}
/* FIXME: scsiAuthIntrLunMapIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_auth_att_count_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (!deve) {
rcu_read_unlock();
return -ENODEV;
}
/* scsiAuthIntrAttachedTimes */
ret = snprintf(page, PAGE_SIZE, "%u\n", deve->attach_count);
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_auth_num_cmds_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (!deve) {
rcu_read_unlock();
return -ENODEV;
}
/* scsiAuthIntrOutCommands */
ret = snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&deve->total_cmds));
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_auth_read_mbytes_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (!deve) {
rcu_read_unlock();
return -ENODEV;
}
/* scsiAuthIntrReadMegaBytes */
ret = snprintf(page, PAGE_SIZE, "%u\n",
(u32)(atomic_long_read(&deve->read_bytes) >> 20));
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_auth_write_mbytes_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (!deve) {
rcu_read_unlock();
return -ENODEV;
}
/* scsiAuthIntrWrittenMegaBytes */
ret = snprintf(page, PAGE_SIZE, "%u\n",
(u32)(atomic_long_read(&deve->write_bytes) >> 20));
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_auth_hs_num_cmds_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (!deve) {
rcu_read_unlock();
return -ENODEV;
}
/* FIXME: scsiAuthIntrHSOutCommands */
ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_auth_creation_time_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (!deve) {
rcu_read_unlock();
return -ENODEV;
}
/* scsiAuthIntrLastCreation */
ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)deve->creation_time -
INITIAL_JIFFIES) * 100 / HZ));
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_auth_row_status_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (!deve) {
rcu_read_unlock();
return -ENODEV;
}
/* FIXME: scsiAuthIntrRowStatus */
ret = snprintf(page, PAGE_SIZE, "Ready\n");
rcu_read_unlock();
return ret;
}
CONFIGFS_ATTR_RO(target_stat_auth_, inst);
CONFIGFS_ATTR_RO(target_stat_auth_, dev);
CONFIGFS_ATTR_RO(target_stat_auth_, port);
CONFIGFS_ATTR_RO(target_stat_auth_, indx);
CONFIGFS_ATTR_RO(target_stat_auth_, dev_or_port);
CONFIGFS_ATTR_RO(target_stat_auth_, intr_name);
CONFIGFS_ATTR_RO(target_stat_auth_, map_indx);
CONFIGFS_ATTR_RO(target_stat_auth_, att_count);
CONFIGFS_ATTR_RO(target_stat_auth_, num_cmds);
CONFIGFS_ATTR_RO(target_stat_auth_, read_mbytes);
CONFIGFS_ATTR_RO(target_stat_auth_, write_mbytes);
CONFIGFS_ATTR_RO(target_stat_auth_, hs_num_cmds);
CONFIGFS_ATTR_RO(target_stat_auth_, creation_time);
CONFIGFS_ATTR_RO(target_stat_auth_, row_status);
static struct configfs_attribute *target_stat_scsi_auth_intr_attrs[] = {
&target_stat_auth_attr_inst,
&target_stat_auth_attr_dev,
&target_stat_auth_attr_port,
&target_stat_auth_attr_indx,
&target_stat_auth_attr_dev_or_port,
&target_stat_auth_attr_intr_name,
&target_stat_auth_attr_map_indx,
&target_stat_auth_attr_att_count,
&target_stat_auth_attr_num_cmds,
&target_stat_auth_attr_read_mbytes,
&target_stat_auth_attr_write_mbytes,
&target_stat_auth_attr_hs_num_cmds,
&target_stat_auth_attr_creation_time,
&target_stat_auth_attr_row_status,
NULL,
};
static const struct config_item_type target_stat_scsi_auth_intr_cit = {
.ct_attrs = target_stat_scsi_auth_intr_attrs,
.ct_owner = THIS_MODULE,
};
/*
* SCSI Attached Initiator Port Table
*/
static struct se_lun_acl *iport_to_lacl(struct config_item *item)
{
struct se_ml_stat_grps *lgrps = container_of(to_config_group(item),
struct se_ml_stat_grps, scsi_att_intr_port_group);
return container_of(lgrps, struct se_lun_acl, ml_stat_grps);
}
static ssize_t target_stat_iport_inst_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = iport_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
struct se_portal_group *tpg;
ssize_t ret;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (!deve) {
rcu_read_unlock();
return -ENODEV;
}
tpg = nacl->se_tpg;
/* scsiInstIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n",
tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_iport_dev_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = iport_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (!deve) {
rcu_read_unlock();
return -ENODEV;
}
/* scsiDeviceIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n", deve->se_lun->lun_index);
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_iport_port_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = iport_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
struct se_portal_group *tpg;
ssize_t ret;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (!deve) {
rcu_read_unlock();
return -ENODEV;
}
tpg = nacl->se_tpg;
/* scsiPortIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_iport_indx_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = iport_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_session *se_sess;
struct se_portal_group *tpg;
ssize_t ret;
spin_lock_irq(&nacl->nacl_sess_lock);
se_sess = nacl->nacl_sess;
if (!se_sess) {
spin_unlock_irq(&nacl->nacl_sess_lock);
return -ENODEV;
}
tpg = nacl->se_tpg;
/* scsiAttIntrPortIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n",
tpg->se_tpg_tfo->sess_get_index(se_sess));
spin_unlock_irq(&nacl->nacl_sess_lock);
return ret;
}
static ssize_t target_stat_iport_port_auth_indx_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = iport_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (!deve) {
rcu_read_unlock();
return -ENODEV;
}
/* scsiAttIntrPortAuthIntrIdx */
ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_iport_port_ident_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = iport_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_session *se_sess;
struct se_portal_group *tpg;
ssize_t ret;
unsigned char buf[64];
spin_lock_irq(&nacl->nacl_sess_lock);
se_sess = nacl->nacl_sess;
if (!se_sess) {
spin_unlock_irq(&nacl->nacl_sess_lock);
return -ENODEV;
}
tpg = nacl->se_tpg;
/* scsiAttIntrPortName+scsiAttIntrPortIdentifier */
memset(buf, 0, 64);
if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL)
tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, buf, 64);
ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf);
spin_unlock_irq(&nacl->nacl_sess_lock);
return ret;
}
CONFIGFS_ATTR_RO(target_stat_iport_, inst);
CONFIGFS_ATTR_RO(target_stat_iport_, dev);
CONFIGFS_ATTR_RO(target_stat_iport_, port);
CONFIGFS_ATTR_RO(target_stat_iport_, indx);
CONFIGFS_ATTR_RO(target_stat_iport_, port_auth_indx);
CONFIGFS_ATTR_RO(target_stat_iport_, port_ident);
static struct configfs_attribute *target_stat_scsi_ath_intr_port_attrs[] = {
&target_stat_iport_attr_inst,
&target_stat_iport_attr_dev,
&target_stat_iport_attr_port,
&target_stat_iport_attr_indx,
&target_stat_iport_attr_port_auth_indx,
&target_stat_iport_attr_port_ident,
NULL,
};
static const struct config_item_type target_stat_scsi_att_intr_port_cit = {
.ct_attrs = target_stat_scsi_ath_intr_port_attrs,
.ct_owner = THIS_MODULE,
};
/*
* Called from target_core_fabric_configfs.c:target_fabric_make_mappedlun() to set up
* the target MappedLUN statistics groups + configfs CITs located in target_core_stat.c
*/
void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl)
{
config_group_init_type_name(&lacl->ml_stat_grps.scsi_auth_intr_group,
"scsi_auth_intr", &target_stat_scsi_auth_intr_cit);
configfs_add_default_group(&lacl->ml_stat_grps.scsi_auth_intr_group,
&lacl->ml_stat_grps.stat_group);
config_group_init_type_name(&lacl->ml_stat_grps.scsi_att_intr_port_group,
"scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit);
configfs_add_default_group(&lacl->ml_stat_grps.scsi_att_intr_port_group,
&lacl->ml_stat_grps.stat_group);
}
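/*
 * Illustrative sketch (paths are assumptions): the MappedLUN statistics
 * registered above typically appear under the initiator ACL's mapped LUN
 * directory, e.g.:
 *
 *   .../target/$FABRIC/$WWN/tpgt_$TAG/acls/$INITIATOR/lun_$N/statistics/scsi_auth_intr/
 *   .../target/$FABRIC/$WWN/tpgt_$TAG/acls/$INITIATOR/lun_$N/statistics/scsi_att_intr_port/
 */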
| linux-master | drivers/target/target_core_stat.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_pscsi.c
*
* This file contains the generic target mode <-> Linux SCSI subsystem plugin.
*
* (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include "target_core_alua.h"
#include "target_core_internal.h"
#include "target_core_pscsi.h"
static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
{
return container_of(dev, struct pscsi_dev_virt, dev);
}
static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
static enum rq_end_io_ret pscsi_req_done(struct request *, blk_status_t);
/* pscsi_attach_hba():
*
* pscsi_get_sh() uses scsi_host_lookup() to locate struct Scsi_Host
* from the passed SCSI Host ID.
*/
static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
{
struct pscsi_hba_virt *phv;
phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
if (!phv) {
pr_err("Unable to allocate struct pscsi_hba_virt\n");
return -ENOMEM;
}
phv->phv_host_id = host_id;
phv->phv_mode = PHV_VIRTUAL_HOST_ID;
hba->hba_ptr = phv;
pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
PSCSI_VERSION, TARGET_CORE_VERSION);
pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n",
hba->hba_id);
return 0;
}
static void pscsi_detach_hba(struct se_hba *hba)
{
struct pscsi_hba_virt *phv = hba->hba_ptr;
struct Scsi_Host *scsi_host = phv->phv_lld_host;
if (scsi_host) {
scsi_host_put(scsi_host);
pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from"
" Generic Target Core\n", hba->hba_id,
(scsi_host->hostt->name) ? (scsi_host->hostt->name) :
"Unknown");
} else
pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA"
" from Generic Target Core\n", hba->hba_id);
kfree(phv);
hba->hba_ptr = NULL;
}
static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
{
struct pscsi_hba_virt *phv = hba->hba_ptr;
struct Scsi_Host *sh = phv->phv_lld_host;
/*
* Release the struct Scsi_Host
*/
if (!mode_flag) {
if (!sh)
return 0;
phv->phv_lld_host = NULL;
phv->phv_mode = PHV_VIRTUAL_HOST_ID;
pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
" %s\n", hba->hba_id, (sh->hostt->name) ?
(sh->hostt->name) : "Unknown");
scsi_host_put(sh);
return 0;
}
/*
* Otherwise, locate struct Scsi_Host from the original passed
* pSCSI Host ID and enable for phba mode
*/
sh = scsi_host_lookup(phv->phv_host_id);
if (!sh) {
pr_err("pSCSI: Unable to locate SCSI Host for"
" phv_host_id: %d\n", phv->phv_host_id);
return -EINVAL;
}
phv->phv_lld_host = sh;
phv->phv_mode = PHV_LLD_SCSI_HOST_NO;
pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");
return 1;
}
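/*
 * Usage sketch (assumption, not part of this file): HBA passthrough mode is
 * normally toggled from userspace through the pSCSI HBA's configfs attribute,
 * which ends up calling pscsi_pmode_enable_hba() with mode_flag 1 or 0. The
 * attribute name and path shown here are illustrative only:
 *
 *   echo 1 > /sys/kernel/config/target/core/pscsi_$HOST/hba_mode
 */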
static void pscsi_tape_read_blocksize(struct se_device *dev,
struct scsi_device *sdev)
{
unsigned char cdb[MAX_COMMAND_SIZE], *buf;
int ret;
buf = kzalloc(12, GFP_KERNEL);
if (!buf)
goto out_free;
memset(cdb, 0, MAX_COMMAND_SIZE);
cdb[0] = MODE_SENSE;
cdb[4] = 0x0c; /* 12 bytes */
ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf, 12, HZ, 1, NULL);
if (ret)
goto out_free;
/*
* If MODE_SENSE returns a block length of zero, fall back to the
* default of 1024 (applied at out_free below).
*/
sdev->sector_size = get_unaligned_be24(&buf[9]);
out_free:
if (!sdev->sector_size)
sdev->sector_size = 1024;
kfree(buf);
}
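/*
 * Reference sketch of the 12-byte MODE SENSE(6) reply parsed above (standard
 * SPC layout; these offsets are what get_unaligned_be24(&buf[9]) relies on):
 *
 *   buf[0..3]   4-byte mode parameter header
 *   buf[4..11]  8-byte short LBA block descriptor
 *   buf[9..11]    ...of which the last three bytes hold the block length
 */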
static void
pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn)
{
if (sdev->inquiry_len < INQUIRY_LEN)
return;
/*
* Use sdev->inquiry data from drivers/scsi/scsi_scan.c:scsi_add_lun()
*/
BUILD_BUG_ON(sizeof(wwn->vendor) != INQUIRY_VENDOR_LEN + 1);
snprintf(wwn->vendor, sizeof(wwn->vendor),
"%." __stringify(INQUIRY_VENDOR_LEN) "s", sdev->vendor);
BUILD_BUG_ON(sizeof(wwn->model) != INQUIRY_MODEL_LEN + 1);
snprintf(wwn->model, sizeof(wwn->model),
"%." __stringify(INQUIRY_MODEL_LEN) "s", sdev->model);
BUILD_BUG_ON(sizeof(wwn->revision) != INQUIRY_REVISION_LEN + 1);
snprintf(wwn->revision, sizeof(wwn->revision),
"%." __stringify(INQUIRY_REVISION_LEN) "s", sdev->rev);
}
static int
pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
{
unsigned char cdb[MAX_COMMAND_SIZE], *buf;
int ret;
buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
if (!buf)
return -ENOMEM;
memset(cdb, 0, MAX_COMMAND_SIZE);
cdb[0] = INQUIRY;
cdb[1] = 0x01; /* Query VPD */
cdb[2] = 0x80; /* Unit Serial Number */
put_unaligned_be16(INQUIRY_VPD_SERIAL_LEN, &cdb[3]);
ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf,
INQUIRY_VPD_SERIAL_LEN, HZ, 1, NULL);
if (ret)
goto out_free;
snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);
wwn->t10_dev->dev_flags |= DF_FIRMWARE_VPD_UNIT_SERIAL;
kfree(buf);
return 0;
out_free:
kfree(buf);
return -EPERM;
}
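/*
 * Reference sketch of the INQUIRY VPD page 0x80 (Unit Serial Number) reply
 * consumed above (per SPC; matches the snprintf() copy from &buf[4]):
 *
 *   buf[0]     peripheral qualifier / device type
 *   buf[1]     page code (0x80)
 *   buf[2..3]  page length
 *   buf[4..]   product serial number (ASCII)
 */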
static void
pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
struct t10_wwn *wwn)
{
unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83;
int ident_len, page_len, off = 4, ret;
struct t10_vpd *vpd;
buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
if (!buf)
return;
memset(cdb, 0, MAX_COMMAND_SIZE);
cdb[0] = INQUIRY;
cdb[1] = 0x01; /* Query VPD */
cdb[2] = 0x83; /* Device Identifier */
put_unaligned_be16(INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, &cdb[3]);
ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf,
INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, HZ, 1, NULL);
if (ret)
goto out;
page_len = get_unaligned_be16(&buf[2]);
while (page_len > 0) {
/* Grab a pointer to the Identification descriptor */
page_83 = &buf[off];
ident_len = page_83[3];
if (!ident_len) {
pr_err("page_83[3]: identifier"
" length zero!\n");
break;
}
pr_debug("T10 VPD Identifier Length: %d\n", ident_len);
vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
if (!vpd) {
pr_err("Unable to allocate memory for"
" struct t10_vpd\n");
goto out;
}
INIT_LIST_HEAD(&vpd->vpd_list);
transport_set_vpd_proto_id(vpd, page_83);
transport_set_vpd_assoc(vpd, page_83);
if (transport_set_vpd_ident_type(vpd, page_83) < 0) {
off += (ident_len + 4);
page_len -= (ident_len + 4);
kfree(vpd);
continue;
}
if (transport_set_vpd_ident(vpd, page_83) < 0) {
off += (ident_len + 4);
page_len -= (ident_len + 4);
kfree(vpd);
continue;
}
list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list);
off += (ident_len + 4);
page_len -= (ident_len + 4);
}
out:
kfree(buf);
}
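/*
 * Reference sketch of the INQUIRY VPD page 0x83 designation descriptors
 * walked above; each descriptor occupies (4 + ident_len) bytes, which is why
 * the loop advances off and page_len by ident_len + 4:
 *
 *   page_83[0]   protocol identifier / code set
 *   page_83[1]   PIV / association / designator type
 *   page_83[2]   reserved
 *   page_83[3]   designator length (ident_len)
 *   page_83[4..] the designator itself
 */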
static int pscsi_add_device_to_list(struct se_device *dev,
struct scsi_device *sd)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct request_queue *q = sd->request_queue;
pdv->pdv_sd = sd;
if (!sd->queue_depth) {
sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
pr_err("Set broken SCSI Device %d:%d:%llu"
" queue_depth to %d\n", sd->channel, sd->id,
sd->lun, sd->queue_depth);
}
dev->dev_attrib.hw_block_size =
min_not_zero((int)sd->sector_size, 512);
dev->dev_attrib.hw_max_sectors =
min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
dev->dev_attrib.hw_queue_depth = sd->queue_depth;
/*
* Setup our standard INQUIRY info into se_dev->t10_wwn
*/
pscsi_set_inquiry_info(sd, &dev->t10_wwn);
/*
* Locate VPD WWN Information used for various purposes within
* the Storage Engine.
*/
if (!pscsi_get_inquiry_vpd_serial(sd, &dev->t10_wwn)) {
/*
* If VPD Unit Serial returned GOOD status, try
* VPD Device Identification page (0x83).
*/
pscsi_get_inquiry_vpd_device_ident(sd, &dev->t10_wwn);
}
/*
* For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
*/
if (sd->type == TYPE_TAPE) {
pscsi_tape_read_blocksize(dev, sd);
dev->dev_attrib.hw_block_size = sd->sector_size;
}
return 0;
}
static struct se_device *pscsi_alloc_device(struct se_hba *hba,
const char *name)
{
struct pscsi_dev_virt *pdv;
pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
if (!pdv) {
pr_err("Unable to allocate memory for struct pscsi_dev_virt\n");
return NULL;
}
pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name);
return &pdv->dev;
}
/*
* Called with struct Scsi_Host->host_lock held.
*/
static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
__releases(sh->host_lock)
{
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct Scsi_Host *sh = sd->host;
struct block_device *bd;
int ret;
if (scsi_device_get(sd)) {
pr_err("scsi_device_get() failed for %d:%d:%d:%llu\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
return -EIO;
}
spin_unlock_irq(sh->host_lock);
/*
* Claim exclusive struct block_device access to struct scsi_device
* for TYPE_DISK and TYPE_ZBC using supplied udev_path
*/
bd = blkdev_get_by_path(dev->udev_path, BLK_OPEN_WRITE | BLK_OPEN_READ,
pdv, NULL);
if (IS_ERR(bd)) {
pr_err("pSCSI: blkdev_get_by_path() failed\n");
scsi_device_put(sd);
return PTR_ERR(bd);
}
pdv->pdv_bd = bd;
ret = pscsi_add_device_to_list(dev, sd);
if (ret) {
blkdev_put(pdv->pdv_bd, pdv);
scsi_device_put(sd);
return ret;
}
pr_debug("CORE_PSCSI[%d] - Added TYPE_%s for %d:%d:%d:%llu\n",
phv->phv_host_id, sd->type == TYPE_DISK ? "DISK" : "ZBC",
sh->host_no, sd->channel, sd->id, sd->lun);
return 0;
}
/*
* Called with struct Scsi_Host->host_lock held.
*/
static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
__releases(sh->host_lock)
{
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct Scsi_Host *sh = sd->host;
int ret;
if (scsi_device_get(sd)) {
pr_err("scsi_device_get() failed for %d:%d:%d:%llu\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
return -EIO;
}
spin_unlock_irq(sh->host_lock);
ret = pscsi_add_device_to_list(dev, sd);
if (ret) {
scsi_device_put(sd);
return ret;
}
pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);
return 0;
}
static int pscsi_configure_device(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct scsi_device *sd;
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct Scsi_Host *sh = phv->phv_lld_host;
int legacy_mode_enable = 0;
int ret;
if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
!(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
!(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
pr_err("Missing scsi_channel_id=, scsi_target_id= and"
" scsi_lun_id= parameters\n");
return -EINVAL;
}
/*
* If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
* struct Scsi_Host we will need to bring the TCM/pSCSI object online
*/
if (!sh) {
if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
pr_err("pSCSI: Unable to locate struct"
" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
return -ENODEV;
}
/*
* For the newer PHV_VIRTUAL_HOST_ID struct scsi_device
* reference, we enforce that udev_path has been set
*/
if (!(dev->dev_flags & DF_USING_UDEV_PATH)) {
pr_err("pSCSI: udev_path attribute has not"
" been set before ENABLE=1\n");
return -EINVAL;
}
/*
* If no scsi_host_id= was passed for PHV_VIRTUAL_HOST_ID,
* use the original TCM hba ID to reference Linux/SCSI Host No
* and enable for PHV_LLD_SCSI_HOST_NO mode.
*/
if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
if (hba->dev_count) {
pr_err("pSCSI: Unable to set hba_mode"
" with active devices\n");
return -EEXIST;
}
if (pscsi_pmode_enable_hba(hba, 1) != 1)
return -ENODEV;
legacy_mode_enable = 1;
hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
sh = phv->phv_lld_host;
} else {
sh = scsi_host_lookup(pdv->pdv_host_id);
if (!sh) {
pr_err("pSCSI: Unable to locate"
" pdv_host_id: %d\n", pdv->pdv_host_id);
return -EINVAL;
}
pdv->pdv_lld_host = sh;
}
} else {
if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
pr_err("pSCSI: PHV_VIRTUAL_HOST_ID set while"
" struct Scsi_Host exists\n");
return -EEXIST;
}
}
spin_lock_irq(sh->host_lock);
list_for_each_entry(sd, &sh->__devices, siblings) {
if ((pdv->pdv_channel_id != sd->channel) ||
(pdv->pdv_target_id != sd->id) ||
(pdv->pdv_lun_id != sd->lun))
continue;
/*
* Functions will release the held struct scsi_host->host_lock
* before calling pscsi_add_device_to_list() to register
* struct scsi_device with target_core_mod.
*/
switch (sd->type) {
case TYPE_DISK:
case TYPE_ZBC:
ret = pscsi_create_type_disk(dev, sd);
break;
default:
ret = pscsi_create_type_nondisk(dev, sd);
break;
}
if (ret) {
if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
scsi_host_put(sh);
else if (legacy_mode_enable) {
pscsi_pmode_enable_hba(hba, 0);
hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
}
pdv->pdv_sd = NULL;
return ret;
}
return 0;
}
spin_unlock_irq(sh->host_lock);
pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id);
if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
scsi_host_put(sh);
else if (legacy_mode_enable) {
pscsi_pmode_enable_hba(hba, 0);
hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
}
return -ENODEV;
}
static void pscsi_dev_call_rcu(struct rcu_head *p)
{
struct se_device *dev = container_of(p, struct se_device, rcu_head);
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
kfree(pdv);
}
static void pscsi_free_device(struct se_device *dev)
{
call_rcu(&dev->rcu_head, pscsi_dev_call_rcu);
}
static void pscsi_destroy_device(struct se_device *dev)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct scsi_device *sd = pdv->pdv_sd;
if (sd) {
/*
* Release exclusive pSCSI internal struct block_device claim for
* struct scsi_device with TYPE_DISK or TYPE_ZBC
* from pscsi_create_type_disk()
*/
if ((sd->type == TYPE_DISK || sd->type == TYPE_ZBC) &&
pdv->pdv_bd) {
blkdev_put(pdv->pdv_bd, pdv);
pdv->pdv_bd = NULL;
}
/*
* For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
* to struct Scsi_Host now.
*/
if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
(phv->phv_lld_host != NULL))
scsi_host_put(phv->phv_lld_host);
else if (pdv->pdv_lld_host)
scsi_host_put(pdv->pdv_lld_host);
scsi_device_put(sd);
pdv->pdv_sd = NULL;
}
}
static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
unsigned char *req_sense, int valid_data)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct scsi_device *sd = pdv->pdv_sd;
unsigned char *cdb = cmd->priv;
/*
* Special case for REPORT_LUNs which is emulated and not passed on.
*/
if (!cdb)
return;
/*
* Hack to make sure that Write-Protect modepage is set if R/O mode is
* forced.
*/
if (!cmd->data_length)
goto after_mode_sense;
if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
scsi_status == SAM_STAT_GOOD) {
bool read_only = target_lun_is_rdonly(cmd);
if (read_only) {
unsigned char *buf;
buf = transport_kmap_data_sg(cmd);
if (!buf) {
; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
} else {
if (cdb[0] == MODE_SENSE_10) {
if (!(buf[3] & 0x80))
buf[3] |= 0x80;
} else {
if (!(buf[2] & 0x80))
buf[2] |= 0x80;
}
transport_kunmap_data_sg(cmd);
}
}
}
after_mode_sense:
if (sd->type != TYPE_TAPE || !cmd->data_length)
goto after_mode_select;
/*
* Hack to correctly obtain the initiator-requested blocksize for
* TYPE_TAPE. Since this value depends on the loaded tape media,
* struct scsi_device->sector_size will not contain the correct value
* by default, so update it here so the backend reports the correct
* block size to the storage engine.
*/
if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
scsi_status == SAM_STAT_GOOD) {
unsigned char *buf;
u16 bdl;
u32 blocksize;
buf = sg_virt(&cmd->t_data_sg[0]);
if (!buf) {
pr_err("Unable to get buf for scatterlist\n");
goto after_mode_select;
}
if (cdb[0] == MODE_SELECT)
bdl = buf[3];
else
bdl = get_unaligned_be16(&buf[6]);
if (!bdl)
goto after_mode_select;
if (cdb[0] == MODE_SELECT)
blocksize = get_unaligned_be24(&buf[9]);
else
blocksize = get_unaligned_be24(&buf[13]);
sd->sector_size = blocksize;
}
after_mode_select:
if (scsi_status == SAM_STAT_CHECK_CONDITION) {
transport_copy_sense_to_cmd(cmd, req_sense);
/*
* check for TAPE device reads with
* FM/EOM/ILI set, so that we can get data
* back despite framework assumption that a
* check condition means there is no data
*/
if (sd->type == TYPE_TAPE && valid_data &&
cmd->data_direction == DMA_FROM_DEVICE) {
/*
* Is the sense data valid and in fixed format,
* with FM, EOM, or ILI set?
*/
if (req_sense[0] == 0xf0 && /* valid, fixed format */
req_sense[2] & 0xe0 && /* FM, EOM, or ILI */
(req_sense[2] & 0xf) == 0) { /* key==NO_SENSE */
pr_debug("Tape FM/EOM/ILI status detected. Treat as normal read.\n");
cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
}
}
}
}
enum {
Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id,
Opt_scsi_lun_id, Opt_err
};
static match_table_t tokens = {
{Opt_scsi_host_id, "scsi_host_id=%d"},
{Opt_scsi_channel_id, "scsi_channel_id=%d"},
{Opt_scsi_target_id, "scsi_target_id=%d"},
{Opt_scsi_lun_id, "scsi_lun_id=%d"},
{Opt_err, NULL}
};
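/*
 * Usage sketch (assumption, not part of this file): these tokens are the
 * comma-separated key=value pairs written to the pSCSI device's configfs
 * "control" attribute before the device is enabled, e.g.:
 *
 *   echo "scsi_host_id=2,scsi_channel_id=0,scsi_target_id=1,scsi_lun_id=0" \
 *       > /sys/kernel/config/target/core/pscsi_2/mydev/control
 *
 * Each recognized token sets the matching pdv_* field and PDF_HAS_* flag in
 * pscsi_set_configfs_dev_params() below.
 */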
static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
const char *page, ssize_t count)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
return -ENOMEM;
orig = opts;
while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;
token = match_token(ptr, tokens, args);
switch (token) {
case Opt_scsi_host_id:
if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
pr_err("PSCSI[%d]: Unable to accept"
" scsi_host_id while phv_mode =="
" PHV_LLD_SCSI_HOST_NO\n",
phv->phv_host_id);
ret = -EINVAL;
goto out;
}
ret = match_int(args, &arg);
if (ret)
goto out;
pdv->pdv_host_id = arg;
pr_debug("PSCSI[%d]: Referencing SCSI Host ID:"
" %d\n", phv->phv_host_id, pdv->pdv_host_id);
pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
break;
case Opt_scsi_channel_id:
ret = match_int(args, &arg);
if (ret)
goto out;
pdv->pdv_channel_id = arg;
pr_debug("PSCSI[%d]: Referencing SCSI Channel"
" ID: %d\n", phv->phv_host_id,
pdv->pdv_channel_id);
pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
break;
case Opt_scsi_target_id:
ret = match_int(args, &arg);
if (ret)
goto out;
pdv->pdv_target_id = arg;
pr_debug("PSCSI[%d]: Referencing SCSI Target"
" ID: %d\n", phv->phv_host_id,
pdv->pdv_target_id);
pdv->pdv_flags |= PDF_HAS_TARGET_ID;
break;
case Opt_scsi_lun_id:
ret = match_int(args, &arg);
if (ret)
goto out;
pdv->pdv_lun_id = arg;
pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:"
" %d\n", phv->phv_host_id, pdv->pdv_lun_id);
pdv->pdv_flags |= PDF_HAS_LUN_ID;
break;
default:
break;
}
}
out:
kfree(orig);
return (!ret) ? count : ret;
}
static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
{
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct scsi_device *sd = pdv->pdv_sd;
unsigned char host_id[16];
ssize_t bl;
if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
snprintf(host_id, 16, "%d", pdv->pdv_host_id);
else
snprintf(host_id, 16, "PHBA Mode");
bl = sprintf(b, "SCSI Device Bus Location:"
" Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n",
pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id,
host_id);
if (sd) {
bl += sprintf(b + bl, " Vendor: %."
__stringify(INQUIRY_VENDOR_LEN) "s", sd->vendor);
bl += sprintf(b + bl, " Model: %."
__stringify(INQUIRY_MODEL_LEN) "s", sd->model);
bl += sprintf(b + bl, " Rev: %."
__stringify(INQUIRY_REVISION_LEN) "s\n", sd->rev);
}
return bl;
}
static void pscsi_bi_endio(struct bio *bio)
{
bio_uninit(bio);
kfree(bio);
}
static sense_reason_t
pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct request *req)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct bio *bio = NULL;
struct page *page;
struct scatterlist *sg;
u32 data_len = cmd->data_length, i, len, bytes, off;
int nr_pages = (cmd->data_length + sgl[0].offset +
PAGE_SIZE - 1) >> PAGE_SHIFT;
int nr_vecs = 0, rc;
int rw = (cmd->data_direction == DMA_TO_DEVICE);
BUG_ON(!cmd->data_length);
pr_debug("PSCSI: nr_pages: %d\n", nr_pages);
for_each_sg(sgl, sg, sgl_nents, i) {
page = sg_page(sg);
off = sg->offset;
len = sg->length;
pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i,
page, len, off);
/*
* We only have one page of data in each sg element,
* we can not cross a page boundary.
*/
if (off + len > PAGE_SIZE)
goto fail;
if (len > 0 && data_len > 0) {
bytes = min_t(unsigned int, len, PAGE_SIZE - off);
bytes = min(bytes, data_len);
if (!bio) {
new_bio:
nr_vecs = bio_max_segs(nr_pages);
bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
if (!bio)
goto fail;
bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs,
rw ? REQ_OP_WRITE : REQ_OP_READ);
bio->bi_end_io = pscsi_bi_endio;
pr_debug("PSCSI: Allocated bio: %p,"
" dir: %s nr_vecs: %d\n", bio,
(rw) ? "rw" : "r", nr_vecs);
}
pr_debug("PSCSI: Calling bio_add_pc_page() i: %d"
" bio: %p page: %p len: %d off: %d\n", i, bio,
page, len, off);
rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
bio, page, bytes, off);
pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
bio_segments(bio), nr_vecs);
if (rc != bytes) {
pr_debug("PSCSI: Reached bio->bi_vcnt max:"
" %d i: %d bio: %p, allocating another"
" bio\n", bio->bi_vcnt, i, bio);
rc = blk_rq_append_bio(req, bio);
if (rc) {
pr_err("pSCSI: failed to append bio\n");
goto fail;
}
goto new_bio;
}
data_len -= bytes;
}
}
if (bio) {
rc = blk_rq_append_bio(req, bio);
if (rc) {
pr_err("pSCSI: failed to append bio\n");
goto fail;
}
}
return 0;
fail:
if (bio)
bio_put(bio);
while (req->bio) {
bio = req->bio;
req->bio = bio->bi_next;
bio_put(bio);
}
req->biotail = NULL;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
static sense_reason_t
pscsi_parse_cdb(struct se_cmd *cmd)
{
if (cmd->se_cmd_flags & SCF_BIDI)
return TCM_UNSUPPORTED_SCSI_OPCODE;
return passthrough_parse_cdb(cmd, pscsi_execute_cmd);
}
static sense_reason_t
pscsi_execute_cmd(struct se_cmd *cmd)
{
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct scsi_cmnd *scmd;
struct request *req;
sense_reason_t ret;
req = scsi_alloc_request(pdv->pdv_sd->request_queue,
cmd->data_direction == DMA_TO_DEVICE ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
if (sgl) {
ret = pscsi_map_sg(cmd, sgl, sgl_nents, req);
if (ret)
goto fail_put_request;
}
req->end_io = pscsi_req_done;
req->end_io_data = cmd;
scmd = blk_mq_rq_to_pdu(req);
scmd->cmd_len = scsi_command_size(cmd->t_task_cdb);
if (scmd->cmd_len > sizeof(scmd->cmnd)) {
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto fail_put_request;
}
memcpy(scmd->cmnd, cmd->t_task_cdb, scmd->cmd_len);
if (pdv->pdv_sd->type == TYPE_DISK ||
pdv->pdv_sd->type == TYPE_ZBC)
req->timeout = PS_TIMEOUT_DISK;
else
req->timeout = PS_TIMEOUT_OTHER;
scmd->allowed = PS_RETRY;
cmd->priv = scmd->cmnd;
blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG);
return 0;
fail_put_request:
blk_mq_free_request(req);
return ret;
}
/* pscsi_get_device_type():
*
* Return the SCSI peripheral device type, or TYPE_NO_LUN when no
* struct scsi_device is attached.
*/
static u32 pscsi_get_device_type(struct se_device *dev)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct scsi_device *sd = pdv->pdv_sd;
return (sd) ? sd->type : TYPE_NO_LUN;
}
static sector_t pscsi_get_blocks(struct se_device *dev)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
if (pdv->pdv_bd)
return bdev_nr_sectors(pdv->pdv_bd);
return 0;
}
static enum rq_end_io_ret pscsi_req_done(struct request *req,
blk_status_t status)
{
struct se_cmd *cmd = req->end_io_data;
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
enum sam_status scsi_status = scmd->result & 0xff;
int valid_data = cmd->data_length - scmd->resid_len;
u8 *cdb = cmd->priv;
if (scsi_status != SAM_STAT_GOOD) {
pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
" 0x%02x Result: 0x%08x\n", cmd, cdb[0], scmd->result);
}
pscsi_complete_cmd(cmd, scsi_status, scmd->sense_buffer, valid_data);
switch (host_byte(scmd->result)) {
case DID_OK:
target_complete_cmd_with_length(cmd, scsi_status, valid_data);
break;
default:
pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
" 0x%02x Result: 0x%08x\n", cmd, cdb[0], scmd->result);
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
break;
}
blk_mq_free_request(req);
return RQ_END_IO_NONE;
}
static const struct target_backend_ops pscsi_ops = {
.name = "pscsi",
.owner = THIS_MODULE,
.transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH |
TRANSPORT_FLAG_PASSTHROUGH_ALUA |
TRANSPORT_FLAG_PASSTHROUGH_PGR,
.attach_hba = pscsi_attach_hba,
.detach_hba = pscsi_detach_hba,
.pmode_enable_hba = pscsi_pmode_enable_hba,
.alloc_device = pscsi_alloc_device,
.configure_device = pscsi_configure_device,
.destroy_device = pscsi_destroy_device,
.free_device = pscsi_free_device,
.parse_cdb = pscsi_parse_cdb,
.set_configfs_dev_params = pscsi_set_configfs_dev_params,
.show_configfs_dev_params = pscsi_show_configfs_dev_params,
.get_device_type = pscsi_get_device_type,
.get_blocks = pscsi_get_blocks,
.tb_dev_attrib_attrs = passthrough_attrib_attrs,
};
static int __init pscsi_module_init(void)
{
return transport_backend_register(&pscsi_ops);
}
static void __exit pscsi_module_exit(void)
{
target_backend_unregister(&pscsi_ops);
}
MODULE_DESCRIPTION("TCM PSCSI subsystem plugin");
MODULE_AUTHOR("[email protected]");
MODULE_LICENSE("GPL");
module_init(pscsi_module_init);
module_exit(pscsi_module_exit);
| linux-master | drivers/target/target_core_pscsi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_alua.c
*
 * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
*
* (c) Copyright 2009-2013 Datera, Inc.
*
* Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_ua.h"
static sense_reason_t core_alua_check_transition(int state, int valid,
int *primary, int explicit);
static int core_alua_set_tg_pt_secondary_state(
struct se_lun *lun, int explicit, int offline);
static char *core_alua_dump_state(int state);
static void __target_attach_tg_pt_gp(struct se_lun *lun,
struct t10_alua_tg_pt_gp *tg_pt_gp);
static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;
static DEFINE_SPINLOCK(lu_gps_lock);
static LIST_HEAD(lu_gps_list);
struct t10_alua_lu_gp *default_lu_gp;
/*
* REPORT REFERRALS
*
* See sbc3r35 section 5.23
*/
sense_reason_t
target_emulate_report_referrals(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct t10_alua_lba_map *map;
struct t10_alua_lba_map_member *map_mem;
unsigned char *buf;
u32 rd_len = 0, off;
if (cmd->data_length < 4) {
pr_warn("REPORT REFERRALS allocation length %u too"
" small\n", cmd->data_length);
return TCM_INVALID_CDB_FIELD;
}
buf = transport_kmap_data_sg(cmd);
if (!buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
off = 4;
spin_lock(&dev->t10_alua.lba_map_lock);
if (list_empty(&dev->t10_alua.lba_map_list)) {
spin_unlock(&dev->t10_alua.lba_map_lock);
transport_kunmap_data_sg(cmd);
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
list_for_each_entry(map, &dev->t10_alua.lba_map_list,
lba_map_list) {
int desc_num = off + 3;
int pg_num;
off += 4;
if (cmd->data_length > off)
put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
off += 8;
if (cmd->data_length > off)
put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
off += 8;
rd_len += 20;
pg_num = 0;
list_for_each_entry(map_mem, &map->lba_map_mem_list,
lba_map_mem_list) {
int alua_state = map_mem->lba_map_mem_alua_state;
int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;
if (cmd->data_length > off)
buf[off] = alua_state & 0x0f;
off += 2;
if (cmd->data_length > off)
buf[off] = (alua_pg_id >> 8) & 0xff;
off++;
if (cmd->data_length > off)
buf[off] = (alua_pg_id & 0xff);
off++;
rd_len += 4;
pg_num++;
}
if (cmd->data_length > desc_num)
buf[desc_num] = pg_num;
}
spin_unlock(&dev->t10_alua.lba_map_lock);
/*
 * Set the RETURN DATA LENGTH in the header of the DataIN payload
*/
put_unaligned_be16(rd_len, &buf[2]);
transport_kunmap_data_sg(cmd);
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
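/*
 * Layout of each user data segment referral descriptor built above: four
 * header bytes (the fourth holding the number of target port group
 * descriptors), an 8-byte FIRST USER DATA SEGMENT LBA, an 8-byte LAST USER
 * DATA SEGMENT LBA, followed by one 4-byte target port group descriptor per
 * LBA map member: the ALUA state in the low nibble of byte 0, byte 1
 * reserved, and the target port group id big-endian in bytes 2-3.  The
 * 2-byte RETURN DATA LENGTH written at buf[2] counts these descriptors
 * only, not the 4-byte header.
 */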
/*
* REPORT_TARGET_PORT_GROUPS
*
* See spc4r17 section 6.27
*/
sense_reason_t
target_emulate_report_target_port_groups(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct se_lun *lun;
unsigned char *buf;
u32 rd_len = 0, off;
int ext_hdr = (cmd->t_task_cdb[1] & 0x20);
/*
* Skip over RESERVED area to first Target port group descriptor
* depending on the PARAMETER DATA FORMAT type..
*/
if (ext_hdr != 0)
off = 8;
else
off = 4;
if (cmd->data_length < off) {
pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
" small for %s header\n", cmd->data_length,
(ext_hdr) ? "extended" : "normal");
return TCM_INVALID_CDB_FIELD;
}
buf = transport_kmap_data_sg(cmd);
if (!buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
/* Skip empty port groups */
if (!tg_pt_gp->tg_pt_gp_members)
continue;
/*
* Check if the Target port group and Target port descriptor list
* based on tg_pt_gp_members count will fit into the response payload.
* Otherwise, bump rd_len to let the initiator know we have exceeded
* the allocation length and the response is truncated.
*/
if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
cmd->data_length) {
rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
continue;
}
/*
* PREF: Preferred target port bit, determine if this
* bit should be set for port group.
*/
if (tg_pt_gp->tg_pt_gp_pref)
buf[off] = 0x80;
/*
* Set the ASYMMETRIC ACCESS State
*/
buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
/*
* Set supported ASYMMETRIC ACCESS State bits
*/
buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
/*
* TARGET PORT GROUP
*/
put_unaligned_be16(tg_pt_gp->tg_pt_gp_id, &buf[off]);
off += 2;
off++; /* Skip over Reserved */
/*
* STATUS CODE
*/
buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
/*
* Vendor Specific field
*/
buf[off++] = 0x00;
/*
* TARGET PORT COUNT
*/
buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
rd_len += 8;
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
lun_tg_pt_gp_link) {
/*
* Start Target Port descriptor format
*
* See spc4r17 section 6.2.7 Table 247
*/
off += 2; /* Skip over Obsolete */
/*
* Set RELATIVE TARGET PORT IDENTIFIER
*/
put_unaligned_be16(lun->lun_tpg->tpg_rtpi, &buf[off]);
off += 2;
rd_len += 4;
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
/*
 * Set the RETURN DATA LENGTH in the header of the DataIN payload
*/
put_unaligned_be32(rd_len, &buf[0]);
/*
* Fill in the Extended header parameter data format if requested
*/
if (ext_hdr != 0) {
buf[4] = 0x10;
/*
* Set the implicit transition time (in seconds) for the application
 * client to use as a base for its transition timeout value.
*
 * Use the tg_pt_gp membership of the LUN this CDB was received on to
 * determine this value individually for the ALUA target port group.
*/
rcu_read_lock();
tg_pt_gp = rcu_dereference(cmd->se_lun->lun_tg_pt_gp);
if (tg_pt_gp)
buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
rcu_read_unlock();
}
transport_kunmap_data_sg(cmd);
target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, rd_len + 4);
return 0;
}
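/*
 * Layout of each 8-byte target port group descriptor built above: byte 0 is
 * the PREF bit (0x80) or'ed with the ALUA access state, byte 1 the supported
 * access states bitmap, bytes 2-3 the target port group id (big-endian),
 * byte 4 reserved, byte 5 the access status, byte 6 vendor specific, and
 * byte 7 the target port count; it is followed by one 4-byte target port
 * descriptor per LUN in the group (2 obsolete bytes plus the big-endian
 * RELATIVE TARGET PORT IDENTIFIER).
 */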
/*
* SET_TARGET_PORT_GROUPS for explicit ALUA operation.
*
* See spc4r17 section 6.35
*/
sense_reason_t
target_emulate_set_target_port_groups(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_lun *l_lun = cmd->se_lun;
struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
unsigned char *buf;
unsigned char *ptr;
sense_reason_t rc = TCM_NO_SENSE;
u32 len = 4; /* Skip over RESERVED area in header */
int alua_access_state, primary = 0, valid_states;
u16 tg_pt_id, rtpi;
if (cmd->data_length < 4) {
pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
" small\n", cmd->data_length);
return TCM_INVALID_PARAMETER_LIST;
}
buf = transport_kmap_data_sg(cmd);
if (!buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
* Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
* for the local tg_pt_gp.
*/
rcu_read_lock();
l_tg_pt_gp = rcu_dereference(l_lun->lun_tg_pt_gp);
if (!l_tg_pt_gp) {
rcu_read_unlock();
pr_err("Unable to access l_lun->tg_pt_gp\n");
rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
rcu_read_unlock();
pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICIT_ALUA is disabled\n");
rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
rcu_read_unlock();
ptr = &buf[4]; /* Skip over RESERVED area in header */
while (len < cmd->data_length) {
bool found = false;
alua_access_state = (ptr[0] & 0x0f);
/*
* Check the received ALUA access state, and determine if
* the state is a primary or secondary target port asymmetric
* access state.
*/
rc = core_alua_check_transition(alua_access_state, valid_states,
&primary, 1);
if (rc) {
/*
* If the SET TARGET PORT GROUPS attempts to establish
* an invalid combination of target port asymmetric
* access states or attempts to establish an
* unsupported target port asymmetric access state,
* then the command shall be terminated with CHECK
* CONDITION status, with the sense key set to ILLEGAL
* REQUEST, and the additional sense code set to INVALID
* FIELD IN PARAMETER LIST.
*/
goto out;
}
/*
* If the ASYMMETRIC ACCESS STATE field (see table 267)
* specifies a primary target port asymmetric access state,
* then the TARGET PORT GROUP OR TARGET PORT field specifies
* a primary target port group for which the primary target
* port asymmetric access state shall be changed. If the
* ASYMMETRIC ACCESS STATE field specifies a secondary target
* port asymmetric access state, then the TARGET PORT GROUP OR
* TARGET PORT field specifies the relative target port
* identifier (see 3.1.120) of the target port for which the
* secondary target port asymmetric access state shall be
* changed.
*/
if (primary) {
tg_pt_id = get_unaligned_be16(ptr + 2);
/*
* Locate the matching target port group ID from
* the global tg_pt_gp list
*/
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp,
&dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
continue;
atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
if (!core_alua_do_port_transition(tg_pt_gp,
dev, l_lun, nacl,
alua_access_state, 1))
found = true;
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
break;
}
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
} else {
struct se_lun *lun;
/*
* Extract the RELATIVE TARGET PORT IDENTIFIER to identify
* the Target Port in question for the incoming
* SET_TARGET_PORT_GROUPS op.
*/
rtpi = get_unaligned_be16(ptr + 2);
/*
* Locate the matching relative target port identifier
* for the struct se_device storage object.
*/
spin_lock(&dev->se_port_lock);
list_for_each_entry(lun, &dev->dev_sep_list,
lun_dev_link) {
if (lun->lun_tpg->tpg_rtpi != rtpi)
continue;
// XXX: racy unlock
spin_unlock(&dev->se_port_lock);
if (!core_alua_set_tg_pt_secondary_state(
lun, 1, 1))
found = true;
spin_lock(&dev->se_port_lock);
break;
}
spin_unlock(&dev->se_port_lock);
}
if (!found) {
rc = TCM_INVALID_PARAMETER_LIST;
goto out;
}
ptr += 4;
len += 4;
}
out:
transport_kunmap_data_sg(cmd);
if (!rc)
target_complete_cmd(cmd, SAM_STAT_GOOD);
return rc;
}
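/*
 * Each SET TARGET PORT GROUPS descriptor parsed above is 4 bytes: the low
 * nibble of byte 0 carries the requested ALUA access state, and bytes 2-3
 * carry either the target port group id (for primary states) or the
 * relative target port identifier (for the secondary OFFLINE state), both
 * big-endian.
 */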
static inline void core_alua_state_nonoptimized(
struct se_cmd *cmd,
unsigned char *cdb,
int nonop_delay_msecs)
{
/*
* Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
* later to determine if processing of this cmd needs to be
* temporarily delayed for the Active/NonOptimized primary access state.
*/
cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
cmd->alua_nonop_delay = nonop_delay_msecs;
}
static inline sense_reason_t core_alua_state_lba_dependent(
struct se_cmd *cmd,
u16 tg_pt_gp_id)
{
struct se_device *dev = cmd->se_dev;
u64 segment_size, segment_mult, sectors, lba;
/* Only need to check for cdb actually containing LBAs */
if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
return 0;
spin_lock(&dev->t10_alua.lba_map_lock);
segment_size = dev->t10_alua.lba_map_segment_size;
segment_mult = dev->t10_alua.lba_map_segment_multiplier;
sectors = cmd->data_length / dev->dev_attrib.block_size;
lba = cmd->t_task_lba;
while (lba < cmd->t_task_lba + sectors) {
struct t10_alua_lba_map *cur_map = NULL, *map;
struct t10_alua_lba_map_member *map_mem;
list_for_each_entry(map, &dev->t10_alua.lba_map_list,
lba_map_list) {
u64 start_lba, last_lba;
u64 first_lba = map->lba_map_first_lba;
if (segment_mult) {
u64 tmp = lba;
start_lba = do_div(tmp, segment_size * segment_mult);
last_lba = first_lba + segment_size - 1;
if (start_lba >= first_lba &&
start_lba <= last_lba) {
lba += segment_size;
cur_map = map;
break;
}
} else {
last_lba = map->lba_map_last_lba;
if (lba >= first_lba && lba <= last_lba) {
lba = last_lba + 1;
cur_map = map;
break;
}
}
}
if (!cur_map) {
spin_unlock(&dev->t10_alua.lba_map_lock);
return TCM_ALUA_TG_PT_UNAVAILABLE;
}
list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
lba_map_mem_list) {
if (map_mem->lba_map_mem_alua_pg_id != tg_pt_gp_id)
continue;
switch(map_mem->lba_map_mem_alua_state) {
case ALUA_ACCESS_STATE_STANDBY:
spin_unlock(&dev->t10_alua.lba_map_lock);
return TCM_ALUA_TG_PT_STANDBY;
case ALUA_ACCESS_STATE_UNAVAILABLE:
spin_unlock(&dev->t10_alua.lba_map_lock);
return TCM_ALUA_TG_PT_UNAVAILABLE;
default:
break;
}
}
}
spin_unlock(&dev->t10_alua.lba_map_lock);
return 0;
}
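/*
 * Worked example of the segment arithmetic above, using illustrative
 * values: with lba_map_segment_size = 0x100 and
 * lba_map_segment_multiplier = 4, the mapping repeats every 0x400 LBAs, so
 * a map entry with lba_map_first_lba = 0x200 matches every LBA whose value
 * modulo 0x400 falls in [0x200, 0x2ff] (do_div() returns that remainder as
 * start_lba).  Without a multiplier the entry simply covers
 * [lba_map_first_lba, lba_map_last_lba].
 */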
static inline sense_reason_t core_alua_state_standby(
struct se_cmd *cmd,
unsigned char *cdb)
{
/*
* Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
* spc4r17 section 5.9.2.4.4
*/
switch (cdb[0]) {
case INQUIRY:
case LOG_SELECT:
case LOG_SENSE:
case MODE_SELECT:
case MODE_SENSE:
case REPORT_LUNS:
case RECEIVE_DIAGNOSTIC:
case SEND_DIAGNOSTIC:
case READ_CAPACITY:
return 0;
case SERVICE_ACTION_IN_16:
switch (cdb[1] & 0x1f) {
case SAI_READ_CAPACITY_16:
return 0;
default:
return TCM_ALUA_TG_PT_STANDBY;
}
case MAINTENANCE_IN:
switch (cdb[1] & 0x1f) {
case MI_REPORT_TARGET_PGS:
return 0;
default:
return TCM_ALUA_TG_PT_STANDBY;
}
case MAINTENANCE_OUT:
switch (cdb[1]) {
case MO_SET_TARGET_PGS:
return 0;
default:
return TCM_ALUA_TG_PT_STANDBY;
}
case REQUEST_SENSE:
case PERSISTENT_RESERVE_IN:
case PERSISTENT_RESERVE_OUT:
case READ_BUFFER:
case WRITE_BUFFER:
return 0;
default:
return TCM_ALUA_TG_PT_STANDBY;
}
return 0;
}
static inline sense_reason_t core_alua_state_unavailable(
struct se_cmd *cmd,
unsigned char *cdb)
{
/*
* Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
* spc4r17 section 5.9.2.4.5
*/
switch (cdb[0]) {
case INQUIRY:
case REPORT_LUNS:
return 0;
case MAINTENANCE_IN:
switch (cdb[1] & 0x1f) {
case MI_REPORT_TARGET_PGS:
return 0;
default:
return TCM_ALUA_TG_PT_UNAVAILABLE;
}
case MAINTENANCE_OUT:
switch (cdb[1]) {
case MO_SET_TARGET_PGS:
return 0;
default:
return TCM_ALUA_TG_PT_UNAVAILABLE;
}
case REQUEST_SENSE:
case READ_BUFFER:
case WRITE_BUFFER:
return 0;
default:
return TCM_ALUA_TG_PT_UNAVAILABLE;
}
return 0;
}
static inline sense_reason_t core_alua_state_transition(
struct se_cmd *cmd,
unsigned char *cdb)
{
/*
* Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
* spc4r17 section 5.9.2.5
*/
switch (cdb[0]) {
case INQUIRY:
case REPORT_LUNS:
return 0;
case MAINTENANCE_IN:
switch (cdb[1] & 0x1f) {
case MI_REPORT_TARGET_PGS:
return 0;
default:
return TCM_ALUA_STATE_TRANSITION;
}
case REQUEST_SENSE:
case READ_BUFFER:
case WRITE_BUFFER:
return 0;
default:
return TCM_ALUA_STATE_TRANSITION;
}
return 0;
}
/*
 * Check the ALUA primary and secondary target port access states for the
 * struct se_lun this command was received on.
 *
 * Returns 0 when the command may be processed, or a sense_reason_t such as
 * TCM_ALUA_TG_PT_STANDBY when the current access state does not allow it.
 */
sense_reason_t
target_alua_state_check(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned char *cdb = cmd->t_task_cdb;
struct se_lun *lun = cmd->se_lun;
struct t10_alua_tg_pt_gp *tg_pt_gp;
int out_alua_state, nonop_delay_msecs;
u16 tg_pt_gp_id;
sense_reason_t rc = TCM_NO_SENSE;
if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
return 0;
if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
return 0;
/*
* First, check for a struct se_port specific secondary ALUA target port
* access state: OFFLINE
*/
if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
pr_debug("ALUA: Got secondary offline status for local"
" target port\n");
return TCM_ALUA_OFFLINE;
}
rcu_read_lock();
tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
if (!tg_pt_gp) {
rcu_read_unlock();
return 0;
}
out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
rcu_read_unlock();
/*
* Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
* statement so the compiler knows explicitly to check this case first.
* For the Optimized ALUA access state case, we want to process the
* incoming fabric cmd ASAP..
*/
if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
return 0;
switch (out_alua_state) {
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
break;
case ALUA_ACCESS_STATE_STANDBY:
rc = core_alua_state_standby(cmd, cdb);
break;
case ALUA_ACCESS_STATE_UNAVAILABLE:
rc = core_alua_state_unavailable(cmd, cdb);
break;
case ALUA_ACCESS_STATE_TRANSITION:
rc = core_alua_state_transition(cmd, cdb);
break;
case ALUA_ACCESS_STATE_LBA_DEPENDENT:
rc = core_alua_state_lba_dependent(cmd, tg_pt_gp_id);
break;
/*
* OFFLINE is a secondary ALUA target port group access state, that is
* handled above with struct se_lun->lun_tg_pt_secondary_offline=1
*/
case ALUA_ACCESS_STATE_OFFLINE:
default:
pr_err("Unknown ALUA access state: 0x%02x\n",
out_alua_state);
rc = TCM_INVALID_CDB_FIELD;
}
if (rc && rc != TCM_INVALID_CDB_FIELD) {
pr_debug("[%s]: ALUA TG Port not available, "
"SenseKey: NOT_READY, ASC/rc: 0x04/%d\n",
cmd->se_tfo->fabric_name, rc);
}
return rc;
}
/*
* Check implicit and explicit ALUA state change request.
*/
static sense_reason_t
core_alua_check_transition(int state, int valid, int *primary, int explicit)
{
/*
* OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
* defined as primary target port asymmetric access states.
*/
switch (state) {
case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
if (!(valid & ALUA_AO_SUP))
goto not_supported;
*primary = 1;
break;
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
if (!(valid & ALUA_AN_SUP))
goto not_supported;
*primary = 1;
break;
case ALUA_ACCESS_STATE_STANDBY:
if (!(valid & ALUA_S_SUP))
goto not_supported;
*primary = 1;
break;
case ALUA_ACCESS_STATE_UNAVAILABLE:
if (!(valid & ALUA_U_SUP))
goto not_supported;
*primary = 1;
break;
case ALUA_ACCESS_STATE_LBA_DEPENDENT:
if (!(valid & ALUA_LBD_SUP))
goto not_supported;
*primary = 1;
break;
case ALUA_ACCESS_STATE_OFFLINE:
/*
* OFFLINE state is defined as a secondary target port
* asymmetric access state.
*/
if (!(valid & ALUA_O_SUP))
goto not_supported;
*primary = 0;
break;
case ALUA_ACCESS_STATE_TRANSITION:
if (!(valid & ALUA_T_SUP) || explicit)
/*
* Transitioning is set internally and by tcmu daemon,
* and cannot be selected through a STPG.
*/
goto not_supported;
*primary = 0;
break;
default:
pr_err("Unknown ALUA access state: 0x%02x\n", state);
return TCM_INVALID_PARAMETER_LIST;
}
return 0;
not_supported:
pr_err("ALUA access state %s not supported",
core_alua_dump_state(state));
return TCM_INVALID_PARAMETER_LIST;
}
static char *core_alua_dump_state(int state)
{
switch (state) {
case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
return "Active/Optimized";
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
return "Active/NonOptimized";
case ALUA_ACCESS_STATE_LBA_DEPENDENT:
return "LBA Dependent";
case ALUA_ACCESS_STATE_STANDBY:
return "Standby";
case ALUA_ACCESS_STATE_UNAVAILABLE:
return "Unavailable";
case ALUA_ACCESS_STATE_OFFLINE:
return "Offline";
case ALUA_ACCESS_STATE_TRANSITION:
return "Transitioning";
default:
return "Unknown";
}
return NULL;
}
char *core_alua_dump_status(int status)
{
switch (status) {
case ALUA_STATUS_NONE:
return "None";
case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
return "Altered by Explicit STPG";
case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
return "Altered by Implicit ALUA";
default:
return "Unknown";
}
return NULL;
}
/*
* Used by fabric modules to determine when we need to delay processing
* for the Active/NonOptimized paths..
*/
int core_alua_check_nonop_delay(
struct se_cmd *cmd)
{
if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
return 0;
/*
 * The ALUA Active/NonOptimized access state delay can be disabled
 * via configfs by setting it to zero.
*/
if (!cmd->alua_nonop_delay)
return 0;
/*
* struct se_cmd->alua_nonop_delay gets set by a target port group
* defined interval in core_alua_state_nonoptimized()
*/
msleep_interruptible(cmd->alua_nonop_delay);
return 0;
}
EXPORT_SYMBOL(core_alua_check_nonop_delay);
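/*
 * Illustrative sketch only (not taken from an existing fabric driver): a
 * fabric module that wants to honour the Active/NonOptimized delay would
 * typically call this helper from its submission path after
 * target_alua_state_check() has flagged the command, e.g.:
 *
 *	core_alua_check_nonop_delay(cmd);
 *	example_fabric_queue_cmd(cmd);	// hypothetical fabric-specific helper
 */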
static int core_alua_write_tpg_metadata(
const char *path,
unsigned char *md_buf,
u32 md_buf_len)
{
struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
loff_t pos = 0;
int ret;
if (IS_ERR(file)) {
pr_err("filp_open(%s) for ALUA metadata failed\n", path);
return -ENODEV;
}
ret = kernel_write(file, md_buf, md_buf_len, &pos);
if (ret < 0)
pr_err("Error writing ALUA metadata file: %s\n", path);
fput(file);
return (ret < 0) ? -EIO : 0;
}
static int core_alua_update_tpg_primary_metadata(
struct t10_alua_tg_pt_gp *tg_pt_gp)
{
unsigned char *md_buf;
struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
char *path;
int len, rc;
lockdep_assert_held(&tg_pt_gp->tg_pt_gp_transition_mutex);
md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
if (!md_buf) {
pr_err("Unable to allocate buf for ALUA metadata\n");
return -ENOMEM;
}
len = snprintf(md_buf, ALUA_MD_BUF_LEN,
"tg_pt_gp_id=%hu\n"
"alua_access_state=0x%02x\n"
"alua_access_status=0x%02x\n",
tg_pt_gp->tg_pt_gp_id,
tg_pt_gp->tg_pt_gp_alua_access_state,
tg_pt_gp->tg_pt_gp_alua_access_status);
rc = -ENOMEM;
path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s/%s", db_root,
&wwn->unit_serial[0],
config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
if (path) {
rc = core_alua_write_tpg_metadata(path, md_buf, len);
kfree(path);
}
kfree(md_buf);
return rc;
}
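/*
 * For reference, the file written above lands at
 * <db_root>/alua/tpgs_<t10 unit serial>/<tg_pt_gp config item name> and,
 * given the snprintf() format, looks like (values illustrative only):
 *
 *	tg_pt_gp_id=1
 *	alua_access_state=0x00
 *	alua_access_status=0x01
 */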
static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
{
struct se_dev_entry *se_deve;
struct se_lun *lun;
struct se_lun_acl *lacl;
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
lun_tg_pt_gp_link) {
/*
* After an implicit target port asymmetric access state
* change, a device server shall establish a unit attention
* condition for the initiator port associated with every I_T
* nexus with the additional sense code set to ASYMMETRIC
* ACCESS STATE CHANGED.
*
* After an explicit target port asymmetric access state
* change, a device server shall establish a unit attention
* condition with the additional sense code set to ASYMMETRIC
* ACCESS STATE CHANGED for the initiator port associated with
* every I_T nexus other than the I_T nexus on which the SET
 * TARGET PORT GROUPS command was received.
*/
if (!percpu_ref_tryget_live(&lun->lun_ref))
continue;
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
spin_lock(&lun->lun_deve_lock);
list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
lacl = se_deve->se_lun_acl;
/*
* spc4r37 p.242:
* After an explicit target port asymmetric access
* state change, a device server shall establish a
* unit attention condition with the additional sense
* code set to ASYMMETRIC ACCESS STATE CHANGED for
* the initiator port associated with every I_T nexus
* other than the I_T nexus on which the SET TARGET
* PORT GROUPS command was received.
*/
if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
(tg_pt_gp->tg_pt_gp_alua_lun != NULL) &&
(tg_pt_gp->tg_pt_gp_alua_lun == lun))
continue;
/*
 * The se_deve->se_lun_acl pointer may be NULL for an
 * entry created without explicit Node+MappedLUN ACLs.
*/
if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
(tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl))
continue;
core_scsi3_ua_allocate(se_deve, 0x2A,
ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
}
spin_unlock(&lun->lun_deve_lock);
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
percpu_ref_put(&lun->lun_ref);
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
static int core_alua_do_transition_tg_pt(
struct t10_alua_tg_pt_gp *tg_pt_gp,
int new_state,
int explicit)
{
int prev_state;
mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
/* Nothing to be done here */
if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
return 0;
}
if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
return -EAGAIN;
}
/*
* Save the old primary ALUA access state, and set the current state
* to ALUA_ACCESS_STATE_TRANSITION.
*/
prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
core_alua_queue_state_change_ua(tg_pt_gp);
if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
return 0;
}
/*
* Check for the optional ALUA primary state transition delay
*/
if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
/*
* Set the current primary ALUA access state to the requested new state
*/
tg_pt_gp->tg_pt_gp_alua_access_state = new_state;
/*
* Update the ALUA metadata buf that has been allocated in
* core_alua_do_port_transition(), this metadata will be written
* to struct file.
*
* Note that there is the case where we do not want to update the
* metadata when the saved metadata is being parsed in userspace
* when setting the existing port access state and access status.
*
* Also note that the failure to write out the ALUA metadata to
* struct file does NOT affect the actual ALUA transition.
*/
if (tg_pt_gp->tg_pt_gp_write_metadata) {
core_alua_update_tpg_primary_metadata(tg_pt_gp);
}
pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
" from primary access state %s to %s\n", (explicit) ? "explicit" :
"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id,
core_alua_dump_state(prev_state),
core_alua_dump_state(new_state));
core_alua_queue_state_change_ua(tg_pt_gp);
mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
return 0;
}
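/*
 * Summary of the sequence implemented above: the group is first moved to
 * the TRANSITION state and a unit attention is queued, the optional
 * tg_pt_gp_trans_delay_msecs delay is honoured, the requested state is then
 * installed (with the primary metadata optionally written out), and a
 * second unit attention announces the final state.
 */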
int core_alua_do_port_transition(
struct t10_alua_tg_pt_gp *l_tg_pt_gp,
struct se_device *l_dev,
struct se_lun *l_lun,
struct se_node_acl *l_nacl,
int new_state,
int explicit)
{
struct se_device *dev;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
struct t10_alua_tg_pt_gp *tg_pt_gp;
int primary, valid_states, rc = 0;
if (l_dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
return -ENODEV;
valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
if (core_alua_check_transition(new_state, valid_states, &primary,
explicit) != 0)
return -EINVAL;
local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
lu_gp = local_lu_gp_mem->lu_gp;
atomic_inc(&lu_gp->lu_gp_ref_cnt);
spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
/*
* For storage objects that are members of the 'default_lu_gp',
 * we only do transition on the passed *l_tg_pt_gp, and not
* on all of the matching target port groups IDs in default_lu_gp.
*/
if (!lu_gp->lu_gp_id) {
/*
* core_alua_do_transition_tg_pt() will always return
* success.
*/
l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
new_state, explicit);
atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
return rc;
}
/*
* For all other LU groups aside from 'default_lu_gp', walk all of
* the associated storage objects looking for a matching target port
* group ID from the local target port group.
*/
spin_lock(&lu_gp->lu_gp_lock);
list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
lu_gp_mem_list) {
dev = lu_gp_mem->lu_gp_mem_dev;
atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
spin_unlock(&lu_gp->lu_gp_lock);
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp,
&dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
/*
 * If the target port asymmetric access state is changed
 * for any target port group accessible via a logical unit
 * within a LU group, the asymmetric access state for the
 * same target port group accessible via other logical
 * units in that LU group will also change.
 */
if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
continue;
if (l_tg_pt_gp == tg_pt_gp) {
tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
} else {
tg_pt_gp->tg_pt_gp_alua_lun = NULL;
tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
}
atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
/*
* core_alua_do_transition_tg_pt() will always return
* success.
*/
rc = core_alua_do_transition_tg_pt(tg_pt_gp,
new_state, explicit);
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
if (rc)
break;
}
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
spin_lock(&lu_gp->lu_gp_lock);
atomic_dec_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
}
spin_unlock(&lu_gp->lu_gp_lock);
if (!rc) {
pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
" Group IDs: %hu %s transition to primary state: %s\n",
config_item_name(&lu_gp->lu_gp_group.cg_item),
l_tg_pt_gp->tg_pt_gp_id,
(explicit) ? "explicit" : "implicit",
core_alua_dump_state(new_state));
}
atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
return rc;
}
static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
{
struct se_portal_group *se_tpg = lun->lun_tpg;
unsigned char *md_buf;
char *path;
int len, rc;
mutex_lock(&lun->lun_tg_pt_md_mutex);
md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
if (!md_buf) {
pr_err("Unable to allocate buf for ALUA metadata\n");
rc = -ENOMEM;
goto out_unlock;
}
len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
"alua_tg_pt_status=0x%02x\n",
atomic_read(&lun->lun_tg_pt_secondary_offline),
lun->lun_tg_pt_secondary_stat);
if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
db_root, se_tpg->se_tpg_tfo->fabric_name,
se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
lun->unpacked_lun);
} else {
path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
db_root, se_tpg->se_tpg_tfo->fabric_name,
se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
lun->unpacked_lun);
}
if (!path) {
rc = -ENOMEM;
goto out_free;
}
rc = core_alua_write_tpg_metadata(path, md_buf, len);
kfree(path);
out_free:
kfree(md_buf);
out_unlock:
mutex_unlock(&lun->lun_tg_pt_md_mutex);
return rc;
}
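/*
 * For reference, the secondary metadata written above lands at
 * <db_root>/alua/<fabric_name>/<tpg wwn>+<tpg tag>/lun_<unpacked_lun>
 * (the "+<tpg tag>" part is omitted when the fabric does not implement
 * tpg_get_tag) and, given the snprintf() format, looks like (values
 * illustrative only):
 *
 *	alua_tg_pt_offline=1
 *	alua_tg_pt_status=0x02
 */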
static int core_alua_set_tg_pt_secondary_state(
struct se_lun *lun,
int explicit,
int offline)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
int trans_delay_msecs;
rcu_read_lock();
tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
if (!tg_pt_gp) {
rcu_read_unlock();
pr_err("Unable to complete secondary state"
" transition\n");
return -EINVAL;
}
trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
/*
* Set the secondary ALUA target port access state to OFFLINE
* or release the previously secondary state for struct se_lun
*/
if (offline)
atomic_set(&lun->lun_tg_pt_secondary_offline, 1);
else
atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
lun->lun_tg_pt_secondary_stat = (explicit) ?
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
" to secondary access state: %s\n", (explicit) ? "explicit" :
"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
rcu_read_unlock();
/*
* Do the optional transition delay after we set the secondary
* ALUA access state.
*/
if (trans_delay_msecs != 0)
msleep_interruptible(trans_delay_msecs);
/*
* See if we need to update the ALUA fabric port metadata for
* secondary state and status
*/
if (lun->lun_tg_pt_secondary_write_md)
core_alua_update_tpg_secondary_metadata(lun);
return 0;
}
struct t10_alua_lba_map *
core_alua_allocate_lba_map(struct list_head *list,
u64 first_lba, u64 last_lba)
{
struct t10_alua_lba_map *lba_map;
lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
if (!lba_map) {
pr_err("Unable to allocate struct t10_alua_lba_map\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
lba_map->lba_map_first_lba = first_lba;
lba_map->lba_map_last_lba = last_lba;
list_add_tail(&lba_map->lba_map_list, list);
return lba_map;
}
int
core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
int pg_id, int state)
{
struct t10_alua_lba_map_member *lba_map_mem;
list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
lba_map_mem_list) {
if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
return -EINVAL;
}
}
lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
if (!lba_map_mem) {
pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
return -ENOMEM;
}
lba_map_mem->lba_map_mem_alua_state = state;
lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
list_add_tail(&lba_map_mem->lba_map_mem_list,
&lba_map->lba_map_mem_list);
return 0;
}
void
core_alua_free_lba_map(struct list_head *lba_list)
{
struct t10_alua_lba_map *lba_map, *lba_map_tmp;
struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
lba_map_list) {
list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
&lba_map->lba_map_mem_list,
lba_map_mem_list) {
list_del(&lba_map_mem->lba_map_mem_list);
kmem_cache_free(t10_alua_lba_map_mem_cache,
lba_map_mem);
}
list_del(&lba_map->lba_map_list);
kmem_cache_free(t10_alua_lba_map_cache, lba_map);
}
}
void
core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
int segment_size, int segment_mult)
{
struct list_head old_lba_map_list;
struct t10_alua_tg_pt_gp *tg_pt_gp;
int activate = 0, supported;
INIT_LIST_HEAD(&old_lba_map_list);
spin_lock(&dev->t10_alua.lba_map_lock);
dev->t10_alua.lba_map_segment_size = segment_size;
dev->t10_alua.lba_map_segment_multiplier = segment_mult;
list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
if (lba_map_list) {
list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
activate = 1;
}
spin_unlock(&dev->t10_alua.lba_map_lock);
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
if (activate)
supported |= ALUA_LBD_SUP;
else
supported &= ~ALUA_LBD_SUP;
tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
}
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
core_alua_free_lba_map(&old_lba_map_list);
}
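/*
 * Summary of core_alua_set_lba_map(): the new LBA map list (if any) is
 * swapped in under lba_map_lock together with the segment size and
 * multiplier, ALUA_LBD_SUP is set or cleared in every valid target port
 * group's supported states accordingly, and the previous map list is freed
 * outside the lock.
 */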
struct t10_alua_lu_gp *
core_alua_allocate_lu_gp(const char *name, int def_group)
{
struct t10_alua_lu_gp *lu_gp;
lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
if (!lu_gp) {
pr_err("Unable to allocate struct t10_alua_lu_gp\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&lu_gp->lu_gp_node);
INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
spin_lock_init(&lu_gp->lu_gp_lock);
atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
if (def_group) {
lu_gp->lu_gp_id = alua_lu_gps_counter++;
lu_gp->lu_gp_valid_id = 1;
alua_lu_gps_count++;
}
return lu_gp;
}
int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
{
struct t10_alua_lu_gp *lu_gp_tmp;
u16 lu_gp_id_tmp;
/*
* The lu_gp->lu_gp_id may only be set once..
*/
if (lu_gp->lu_gp_valid_id) {
pr_warn("ALUA LU Group already has a valid ID,"
" ignoring request\n");
return -EINVAL;
}
spin_lock(&lu_gps_lock);
if (alua_lu_gps_count == 0x0000ffff) {
pr_err("Maximum ALUA alua_lu_gps_count:"
" 0x0000ffff reached\n");
spin_unlock(&lu_gps_lock);
kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
return -ENOSPC;
}
again:
lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
alua_lu_gps_counter++;
list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
if (!lu_gp_id)
goto again;
pr_warn("ALUA Logical Unit Group ID: %hu"
" already exists, ignoring request\n",
lu_gp_id);
spin_unlock(&lu_gps_lock);
return -EINVAL;
}
}
lu_gp->lu_gp_id = lu_gp_id_tmp;
lu_gp->lu_gp_valid_id = 1;
list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
alua_lu_gps_count++;
spin_unlock(&lu_gps_lock);
return 0;
}
static struct t10_alua_lu_gp_member *
core_alua_allocate_lu_gp_mem(struct se_device *dev)
{
struct t10_alua_lu_gp_member *lu_gp_mem;
lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
if (!lu_gp_mem) {
pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
lu_gp_mem->lu_gp_mem_dev = dev;
dev->dev_alua_lu_gp_mem = lu_gp_mem;
return lu_gp_mem;
}
void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
{
struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
/*
* Once we have reached this point, config_item_put() has
* already been called from target_core_alua_drop_lu_gp().
*
* Here, we remove the *lu_gp from the global list so that
* no associations can be made while we are releasing
* struct t10_alua_lu_gp.
*/
spin_lock(&lu_gps_lock);
list_del(&lu_gp->lu_gp_node);
alua_lu_gps_count--;
spin_unlock(&lu_gps_lock);
/*
* Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
* in target_core_configfs.c:target_core_store_alua_lu_gp() to be
* released with core_alua_put_lu_gp_from_name()
*/
while (atomic_read(&lu_gp->lu_gp_ref_cnt))
cpu_relax();
/*
* Release reference to struct t10_alua_lu_gp * from all associated
* struct se_device.
*/
spin_lock(&lu_gp->lu_gp_lock);
list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
if (lu_gp_mem->lu_gp_assoc) {
list_del(&lu_gp_mem->lu_gp_mem_list);
lu_gp->lu_gp_members--;
lu_gp_mem->lu_gp_assoc = 0;
}
spin_unlock(&lu_gp->lu_gp_lock);
/*
* lu_gp_mem is associated with a single
* struct se_device->dev_alua_lu_gp_mem, and is released when
* struct se_device is released via core_alua_free_lu_gp_mem().
*
* If the passed lu_gp does NOT match the default_lu_gp, assume
* we want to re-associate a given lu_gp_mem with default_lu_gp.
*/
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
if (lu_gp != default_lu_gp)
__core_alua_attach_lu_gp_mem(lu_gp_mem,
default_lu_gp);
else
lu_gp_mem->lu_gp = NULL;
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
spin_lock(&lu_gp->lu_gp_lock);
}
spin_unlock(&lu_gp->lu_gp_lock);
kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
}
void core_alua_free_lu_gp_mem(struct se_device *dev)
{
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem;
lu_gp_mem = dev->dev_alua_lu_gp_mem;
if (!lu_gp_mem)
return;
while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
cpu_relax();
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
if (lu_gp) {
spin_lock(&lu_gp->lu_gp_lock);
if (lu_gp_mem->lu_gp_assoc) {
list_del(&lu_gp_mem->lu_gp_mem_list);
lu_gp->lu_gp_members--;
lu_gp_mem->lu_gp_assoc = 0;
}
spin_unlock(&lu_gp->lu_gp_lock);
lu_gp_mem->lu_gp = NULL;
}
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
}
struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
{
struct t10_alua_lu_gp *lu_gp;
struct config_item *ci;
spin_lock(&lu_gps_lock);
list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
if (!lu_gp->lu_gp_valid_id)
continue;
ci = &lu_gp->lu_gp_group.cg_item;
if (!strcmp(config_item_name(ci), name)) {
atomic_inc(&lu_gp->lu_gp_ref_cnt);
spin_unlock(&lu_gps_lock);
return lu_gp;
}
}
spin_unlock(&lu_gps_lock);
return NULL;
}
void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
{
spin_lock(&lu_gps_lock);
atomic_dec(&lu_gp->lu_gp_ref_cnt);
spin_unlock(&lu_gps_lock);
}
/*
* Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
*/
void __core_alua_attach_lu_gp_mem(
struct t10_alua_lu_gp_member *lu_gp_mem,
struct t10_alua_lu_gp *lu_gp)
{
spin_lock(&lu_gp->lu_gp_lock);
lu_gp_mem->lu_gp = lu_gp;
lu_gp_mem->lu_gp_assoc = 1;
list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
lu_gp->lu_gp_members++;
spin_unlock(&lu_gp->lu_gp_lock);
}
/*
* Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
*/
void __core_alua_drop_lu_gp_mem(
struct t10_alua_lu_gp_member *lu_gp_mem,
struct t10_alua_lu_gp *lu_gp)
{
spin_lock(&lu_gp->lu_gp_lock);
list_del(&lu_gp_mem->lu_gp_mem_list);
lu_gp_mem->lu_gp = NULL;
lu_gp_mem->lu_gp_assoc = 0;
lu_gp->lu_gp_members--;
spin_unlock(&lu_gp->lu_gp_lock);
}
struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
const char *name, int def_group)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
if (!tg_pt_gp) {
pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
return NULL;
}
INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
tg_pt_gp->tg_pt_gp_dev = dev;
tg_pt_gp->tg_pt_gp_alua_access_state =
ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
/*
* Enable both explicit and implicit ALUA support by default
*/
tg_pt_gp->tg_pt_gp_alua_access_type =
TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
/*
* Set the default Active/NonOptimized Delay in milliseconds
*/
tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;
/*
* Enable all supported states
*/
tg_pt_gp->tg_pt_gp_alua_supported_states =
ALUA_T_SUP | ALUA_O_SUP |
ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
if (def_group) {
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
tg_pt_gp->tg_pt_gp_id =
dev->t10_alua.alua_tg_pt_gps_counter++;
tg_pt_gp->tg_pt_gp_valid_id = 1;
dev->t10_alua.alua_tg_pt_gps_count++;
list_add_tail(&tg_pt_gp->tg_pt_gp_list,
&dev->t10_alua.tg_pt_gps_list);
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
}
return tg_pt_gp;
}
int core_alua_set_tg_pt_gp_id(
struct t10_alua_tg_pt_gp *tg_pt_gp,
u16 tg_pt_gp_id)
{
struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
u16 tg_pt_gp_id_tmp;
/*
* The tg_pt_gp->tg_pt_gp_id may only be set once..
*/
if (tg_pt_gp->tg_pt_gp_valid_id) {
pr_warn("ALUA TG PT Group already has a valid ID,"
" ignoring request\n");
return -EINVAL;
}
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
pr_err("Maximum ALUA alua_tg_pt_gps_count:"
" 0x0000ffff reached\n");
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
return -ENOSPC;
}
again:
tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
dev->t10_alua.alua_tg_pt_gps_counter++;
list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
if (!tg_pt_gp_id)
goto again;
pr_err("ALUA Target Port Group ID: %hu already"
" exists, ignoring request\n", tg_pt_gp_id);
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
return -EINVAL;
}
}
tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
tg_pt_gp->tg_pt_gp_valid_id = 1;
list_add_tail(&tg_pt_gp->tg_pt_gp_list,
&dev->t10_alua.tg_pt_gps_list);
dev->t10_alua.alua_tg_pt_gps_count++;
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
return 0;
}
void core_alua_free_tg_pt_gp(
struct t10_alua_tg_pt_gp *tg_pt_gp)
{
struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
struct se_lun *lun, *next;
/*
* Once we have reached this point, config_item_put() has already
* been called from target_core_alua_drop_tg_pt_gp().
*
* Here we remove *tg_pt_gp from the global list so that
* no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
* can be made while we are releasing struct t10_alua_tg_pt_gp.
*/
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
if (tg_pt_gp->tg_pt_gp_valid_id) {
list_del(&tg_pt_gp->tg_pt_gp_list);
dev->t10_alua.alua_tg_pt_gps_count--;
}
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
/*
 * Allow a struct t10_alua_tg_pt_gp * referenced by
* core_alua_get_tg_pt_gp_by_name() in
* target_core_configfs.c:target_core_store_alua_tg_pt_gp()
* to be released with core_alua_put_tg_pt_gp_from_name().
*/
while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
cpu_relax();
/*
* Release reference to struct t10_alua_tg_pt_gp from all associated
* struct se_port.
*/
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
list_for_each_entry_safe(lun, next,
&tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) {
list_del_init(&lun->lun_tg_pt_gp_link);
tg_pt_gp->tg_pt_gp_members--;
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
/*
* If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
 * assume we want to re-associate the given lun with
* default_tg_pt_gp.
*/
spin_lock(&lun->lun_tg_pt_gp_lock);
if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
__target_attach_tg_pt_gp(lun,
dev->t10_alua.default_tg_pt_gp);
} else
rcu_assign_pointer(lun->lun_tg_pt_gp, NULL);
spin_unlock(&lun->lun_tg_pt_gp_lock);
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
synchronize_rcu();
kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
}
static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
struct se_device *dev, const char *name)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct config_item *ci;
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
if (!strcmp(config_item_name(ci), name)) {
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
return tg_pt_gp;
}
}
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
return NULL;
}
static void core_alua_put_tg_pt_gp_from_name(
struct t10_alua_tg_pt_gp *tg_pt_gp)
{
struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
}
static void __target_attach_tg_pt_gp(struct se_lun *lun,
struct t10_alua_tg_pt_gp *tg_pt_gp)
{
struct se_dev_entry *se_deve;
assert_spin_locked(&lun->lun_tg_pt_gp_lock);
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
rcu_assign_pointer(lun->lun_tg_pt_gp, tg_pt_gp);
list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
tg_pt_gp->tg_pt_gp_members++;
spin_lock(&lun->lun_deve_lock);
list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
core_scsi3_ua_allocate(se_deve, 0x3f,
ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED);
spin_unlock(&lun->lun_deve_lock);
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
void target_attach_tg_pt_gp(struct se_lun *lun,
struct t10_alua_tg_pt_gp *tg_pt_gp)
{
spin_lock(&lun->lun_tg_pt_gp_lock);
__target_attach_tg_pt_gp(lun, tg_pt_gp);
spin_unlock(&lun->lun_tg_pt_gp_lock);
synchronize_rcu();
}
static void __target_detach_tg_pt_gp(struct se_lun *lun,
struct t10_alua_tg_pt_gp *tg_pt_gp)
{
assert_spin_locked(&lun->lun_tg_pt_gp_lock);
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
list_del_init(&lun->lun_tg_pt_gp_link);
tg_pt_gp->tg_pt_gp_members--;
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
void target_detach_tg_pt_gp(struct se_lun *lun)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
spin_lock(&lun->lun_tg_pt_gp_lock);
tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp,
lockdep_is_held(&lun->lun_tg_pt_gp_lock));
if (tg_pt_gp) {
__target_detach_tg_pt_gp(lun, tg_pt_gp);
rcu_assign_pointer(lun->lun_tg_pt_gp, NULL);
}
spin_unlock(&lun->lun_tg_pt_gp_lock);
synchronize_rcu();
}
static void target_swap_tg_pt_gp(struct se_lun *lun,
struct t10_alua_tg_pt_gp *old_tg_pt_gp,
struct t10_alua_tg_pt_gp *new_tg_pt_gp)
{
assert_spin_locked(&lun->lun_tg_pt_gp_lock);
if (old_tg_pt_gp)
__target_detach_tg_pt_gp(lun, old_tg_pt_gp);
__target_attach_tg_pt_gp(lun, new_tg_pt_gp);
}
ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
{
struct config_item *tg_pt_ci;
struct t10_alua_tg_pt_gp *tg_pt_gp;
ssize_t len = 0;
rcu_read_lock();
tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
if (tg_pt_gp) {
tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
" %hu\nTG Port Primary Access State: %s\nTG Port "
"Primary Access Status: %s\nTG Port Secondary Access"
" State: %s\nTG Port Secondary Access Status: %s\n",
config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
core_alua_dump_state(
tg_pt_gp->tg_pt_gp_alua_access_state),
core_alua_dump_status(
tg_pt_gp->tg_pt_gp_alua_access_status),
atomic_read(&lun->lun_tg_pt_secondary_offline) ?
"Offline" : "None",
core_alua_dump_status(lun->lun_tg_pt_secondary_stat));
}
rcu_read_unlock();
return len;
}
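/*
 * Writing a target port group name to this configfs attribute associates
 * the LUN with that group (taking a temporary reference via
 * core_alua_get_tg_pt_gp_by_name()), while writing the literal string
 * "NULL" moves the LUN back to the device's default_tg_pt_gp.
 */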
ssize_t core_alua_store_tg_pt_gp_info(
struct se_lun *lun,
const char *page,
size_t count)
{
struct se_portal_group *tpg = lun->lun_tpg;
/*
* rcu_dereference_raw protected by se_lun->lun_group symlink
* reference to se_device->dev_group.
*/
struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
unsigned char buf[TG_PT_GROUP_NAME_BUF];
int move = 0;
if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
return -ENODEV;
if (count > TG_PT_GROUP_NAME_BUF) {
pr_err("ALUA Target Port Group alias too large!\n");
return -EINVAL;
}
memset(buf, 0, TG_PT_GROUP_NAME_BUF);
memcpy(buf, page, count);
/*
* Any ALUA target port group alias besides "NULL" means we will be
* making a new group association.
*/
if (strcmp(strstrip(buf), "NULL")) {
/*
* core_alua_get_tg_pt_gp_by_name() will increment reference to
* struct t10_alua_tg_pt_gp. This reference is released with
* core_alua_put_tg_pt_gp_from_name() below.
*/
tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
strstrip(buf));
if (!tg_pt_gp_new)
return -ENODEV;
}
spin_lock(&lun->lun_tg_pt_gp_lock);
tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp,
lockdep_is_held(&lun->lun_tg_pt_gp_lock));
if (tg_pt_gp) {
/*
* Clearing an existing tg_pt_gp association, and replacing
* with the default_tg_pt_gp.
*/
if (!tg_pt_gp_new) {
pr_debug("Target_Core_ConfigFS: Moving"
" %s/tpgt_%hu/%s from ALUA Target Port Group:"
" alua/%s, ID: %hu back to"
" default_tg_pt_gp\n",
tpg->se_tpg_tfo->tpg_get_wwn(tpg),
tpg->se_tpg_tfo->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item),
config_item_name(
&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id);
target_swap_tg_pt_gp(lun, tg_pt_gp,
dev->t10_alua.default_tg_pt_gp);
spin_unlock(&lun->lun_tg_pt_gp_lock);
goto sync_rcu;
}
move = 1;
}
target_swap_tg_pt_gp(lun, tg_pt_gp, tg_pt_gp_new);
spin_unlock(&lun->lun_tg_pt_gp_lock);
pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
" Target Port Group: alua/%s, ID: %hu\n", (move) ?
"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
tpg->se_tpg_tfo->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item),
config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
tg_pt_gp_new->tg_pt_gp_id);
core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
sync_rcu:
synchronize_rcu();
return count;
}
ssize_t core_alua_show_access_type(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
return sprintf(page, "Implicit and Explicit\n");
else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
return sprintf(page, "Implicit\n");
else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
return sprintf(page, "Explicit\n");
else
return sprintf(page, "None\n");
}
ssize_t core_alua_store_access_type(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
{
unsigned long tmp;
int ret;
ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract alua_access_type\n");
return ret;
}
if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
pr_err("Illegal value for alua_access_type:"
" %lu\n", tmp);
return -EINVAL;
}
if (tmp == 3)
tg_pt_gp->tg_pt_gp_alua_access_type =
TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
else if (tmp == 2)
tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
else if (tmp == 1)
tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
else
tg_pt_gp->tg_pt_gp_alua_access_type = 0;
return count;
}
ssize_t core_alua_show_nonop_delay_msecs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
}
ssize_t core_alua_store_nonop_delay_msecs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
{
unsigned long tmp;
int ret;
ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract nonop_delay_msecs\n");
return ret;
}
if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
pr_err("Passed nonop_delay_msecs: %lu, exceeds"
" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
ALUA_MAX_NONOP_DELAY_MSECS);
return -EINVAL;
}
tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
return count;
}
ssize_t core_alua_show_trans_delay_msecs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
}
ssize_t core_alua_store_trans_delay_msecs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
{
unsigned long tmp;
int ret;
ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract trans_delay_msecs\n");
return ret;
}
if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
pr_err("Passed trans_delay_msecs: %lu, exceeds"
" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
ALUA_MAX_TRANS_DELAY_MSECS);
return -EINVAL;
}
tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
return count;
}
ssize_t core_alua_show_implicit_trans_secs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
}
ssize_t core_alua_store_implicit_trans_secs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
{
unsigned long tmp;
int ret;
ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract implicit_trans_secs\n");
return ret;
}
if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
pr_err("Passed implicit_trans_secs: %lu, exceeds"
" ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
ALUA_MAX_IMPLICIT_TRANS_SECS);
return -EINVAL;
}
tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;
return count;
}
ssize_t core_alua_show_preferred_bit(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
}
ssize_t core_alua_store_preferred_bit(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
{
unsigned long tmp;
int ret;
ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract preferred ALUA value\n");
return ret;
}
if ((tmp != 0) && (tmp != 1)) {
pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
return -EINVAL;
}
tg_pt_gp->tg_pt_gp_pref = (int)tmp;
return count;
}
ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
{
return sprintf(page, "%d\n",
atomic_read(&lun->lun_tg_pt_secondary_offline));
}
ssize_t core_alua_store_offline_bit(
struct se_lun *lun,
const char *page,
size_t count)
{
/*
* rcu_dereference_raw protected by se_lun->lun_group symlink
* reference to se_device->dev_group.
*/
struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
unsigned long tmp;
int ret;
if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
return -ENODEV;
ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract alua_tg_pt_offline value\n");
return ret;
}
if ((tmp != 0) && (tmp != 1)) {
pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
tmp);
return -EINVAL;
}
ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int)tmp);
if (ret < 0)
return -EINVAL;
return count;
}
ssize_t core_alua_show_secondary_status(
struct se_lun *lun,
char *page)
{
return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat);
}
ssize_t core_alua_store_secondary_status(
struct se_lun *lun,
const char *page,
size_t count)
{
unsigned long tmp;
int ret;
ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract alua_tg_pt_status\n");
return ret;
}
if ((tmp != ALUA_STATUS_NONE) &&
(tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
(tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
pr_err("Illegal value for alua_tg_pt_status: %lu\n",
tmp);
return -EINVAL;
}
lun->lun_tg_pt_secondary_stat = (int)tmp;
return count;
}
ssize_t core_alua_show_secondary_write_metadata(
struct se_lun *lun,
char *page)
{
return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md);
}
ssize_t core_alua_store_secondary_write_metadata(
struct se_lun *lun,
const char *page,
size_t count)
{
unsigned long tmp;
int ret;
ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract alua_tg_pt_write_md\n");
return ret;
}
if ((tmp != 0) && (tmp != 1)) {
pr_err("Illegal value for alua_tg_pt_write_md:"
" %lu\n", tmp);
return -EINVAL;
}
lun->lun_tg_pt_secondary_write_md = (int)tmp;
return count;
}
int core_setup_alua(struct se_device *dev)
{
if (!(dev->transport_flags &
TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
struct t10_alua_lu_gp_member *lu_gp_mem;
/*
* Associate this struct se_device with the default ALUA
* LUN Group.
*/
lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
if (IS_ERR(lu_gp_mem))
return PTR_ERR(lu_gp_mem);
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
__core_alua_attach_lu_gp_mem(lu_gp_mem,
default_lu_gp);
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
pr_debug("%s: Adding to default ALUA LU Group:"
" core/alua/lu_gps/default_lu_gp\n",
dev->transport->name);
}
return 0;
}
| linux-master | drivers/target/target_core_alua.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_xcopy.c
*
* This file contains support for SPC-4 Extended-Copy offload with generic
* TCM backends.
*
* Copyright (c) 2011-2013 Datera, Inc. All rights reserved.
*
* Author:
* Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/configfs.h>
#include <linux/ratelimit.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
#include "target_core_xcopy.h"
static struct workqueue_struct *xcopy_wq = NULL;
static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop);
/**
* target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers
*
* @se_dev: device being considered for match
* @dev_wwn: XCOPY requested NAA dev_wwn
* Return: 1 on match, 0 on no-match
*/
static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev,
const unsigned char *dev_wwn)
{
unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
int rc;
if (!se_dev->dev_attrib.emulate_3pc) {
pr_debug("XCOPY: emulate_3pc disabled on se_dev %p\n", se_dev);
return 0;
}
memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
spc_gen_naa_6h_vendor_specific(se_dev, &tmp_dev_wwn[0]);
rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
if (rc != 0) {
pr_debug("XCOPY: skip non-matching: %*ph\n",
XCOPY_NAA_IEEE_REGEX_LEN, tmp_dev_wwn);
return 0;
}
pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
return 1;
}
static int target_xcopy_locate_se_dev_e4(struct se_session *sess,
const unsigned char *dev_wwn,
struct se_device **_found_dev,
struct percpu_ref **_found_lun_ref)
{
struct se_dev_entry *deve;
struct se_node_acl *nacl;
struct se_lun *this_lun = NULL;
struct se_device *found_dev = NULL;
/* cmd with NULL sess indicates no associated $FABRIC_MOD */
if (!sess)
goto err_out;
pr_debug("XCOPY 0xe4: searching for: %*ph\n",
XCOPY_NAA_IEEE_REGEX_LEN, dev_wwn);
nacl = sess->se_node_acl;
rcu_read_lock();
hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
struct se_device *this_dev;
int rc;
this_lun = deve->se_lun;
this_dev = rcu_dereference_raw(this_lun->lun_se_dev);
rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn);
if (rc) {
if (percpu_ref_tryget_live(&this_lun->lun_ref))
found_dev = this_dev;
break;
}
}
rcu_read_unlock();
if (found_dev == NULL)
goto err_out;
pr_debug("lun_ref held for se_dev: %p se_dev->se_dev_group: %p\n",
found_dev, &found_dev->dev_group);
*_found_dev = found_dev;
*_found_lun_ref = &this_lun->lun_ref;
return 0;
err_out:
pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
return -EINVAL;
}
static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
unsigned char *p, unsigned short cscd_index)
{
unsigned char *desc = p;
unsigned short ript;
u8 desig_len;
/*
* Extract RELATIVE INITIATOR PORT IDENTIFIER
*/
ript = get_unaligned_be16(&desc[2]);
pr_debug("XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n", ript);
/*
* Check for supported code set, association, and designator type
*/
if ((desc[4] & 0x0f) != 0x1) {
pr_err("XCOPY 0xe4: code set of non binary type not supported\n");
return -EINVAL;
}
if ((desc[5] & 0x30) != 0x00) {
pr_err("XCOPY 0xe4: association other than LUN not supported\n");
return -EINVAL;
}
if ((desc[5] & 0x0f) != 0x3) {
pr_err("XCOPY 0xe4: designator type unsupported: 0x%02x\n",
(desc[5] & 0x0f));
return -EINVAL;
}
/*
* Check for matching 16 byte length for NAA IEEE Registered Extended
* Assigned designator
*/
desig_len = desc[7];
if (desig_len != XCOPY_NAA_IEEE_REGEX_LEN) {
pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len);
return -EINVAL;
}
pr_debug("XCOPY 0xe4: desig_len: %d\n", (int)desig_len);
/*
* Check for NAA IEEE Registered Extended Assigned header..
*/
if ((desc[8] & 0xf0) != 0x60) {
pr_err("XCOPY 0xe4: Unsupported DESIGNATOR TYPE: 0x%02x\n",
(desc[8] & 0xf0));
return -EINVAL;
}
if (cscd_index != xop->stdi && cscd_index != xop->dtdi) {
pr_debug("XCOPY 0xe4: ignoring CSCD entry %d - neither src nor "
"dest\n", cscd_index);
return 0;
}
if (cscd_index == xop->stdi) {
memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
/*
* Determine if the source designator matches the local device
*/
if (!memcmp(&xop->local_dev_wwn[0], &xop->src_tid_wwn[0],
XCOPY_NAA_IEEE_REGEX_LEN)) {
xop->op_origin = XCOL_SOURCE_RECV_OP;
xop->src_dev = se_cmd->se_dev;
pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
" received xop\n", xop->src_dev);
}
}
if (cscd_index == xop->dtdi) {
memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
/*
* Determine if the destination designator matches the local
* device. If @cscd_index corresponds to both source (stdi) and
* destination (dtdi), or dtdi comes after stdi, then
* XCOL_DEST_RECV_OP wins.
*/
if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
XCOPY_NAA_IEEE_REGEX_LEN)) {
xop->op_origin = XCOL_DEST_RECV_OP;
xop->dst_dev = se_cmd->se_dev;
pr_debug("XCOPY 0xe4: Set xop->dst_dev: %p from destination"
" received xop\n", xop->dst_dev);
}
}
return 0;
}
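/*
 * Editor's illustration (not part of the original driver): a CSCD
 * descriptor of XCOPY_TARGET_DESC_LEN bytes that passes every check in
 * target_xcopy_parse_tiddesc_e4() could look like the sketch below;
 * all values other than the mandatory ones are arbitrary examples.
 *
 *   byte  0     : 0xe4   - identification descriptor CSCD type code
 *   bytes 2..3  : 0x0000 - RELATIVE INITIATOR PORT IDENTIFIER
 *   byte  4     : 0x01   - CODE SET = 1 (binary)
 *   byte  5     : 0x03   - ASSOCIATION = 00b (LUN),
 *                          DESIGNATOR TYPE = 3 (NAA)
 *   byte  7     : 0x10   - DESIGNATOR LENGTH = 16
 *                          (XCOPY_NAA_IEEE_REGEX_LEN)
 *   bytes 8..23 : NAA IEEE Registered Extended designator; byte 8 must
 *                 carry 0x6 in its high nibble.  These 16 bytes are
 *                 copied into xop->src_tid_wwn or xop->dst_tid_wwn when
 *                 cscd_index equals xop->stdi or xop->dtdi.
 */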
static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
struct xcopy_op *xop, unsigned char *p,
unsigned short tdll, sense_reason_t *sense_ret)
{
struct se_device *local_dev = se_cmd->se_dev;
unsigned char *desc = p;
int offset = tdll % XCOPY_TARGET_DESC_LEN, rc;
unsigned short cscd_index = 0;
unsigned short start = 0;
*sense_ret = TCM_INVALID_PARAMETER_LIST;
if (offset != 0) {
pr_err("XCOPY target descriptor list length is not"
" multiple of %d\n", XCOPY_TARGET_DESC_LEN);
*sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE;
return -EINVAL;
}
if (tdll > RCR_OP_MAX_TARGET_DESC_COUNT * XCOPY_TARGET_DESC_LEN) {
pr_err("XCOPY target descriptor supports a maximum"
" two src/dest descriptors, tdll: %hu too large..\n", tdll);
/* spc4r37 6.4.3.4 CSCD DESCRIPTOR LIST LENGTH field */
*sense_ret = TCM_TOO_MANY_TARGET_DESCS;
return -EINVAL;
}
/*
* Generate an IEEE Registered Extended designator based upon the
* se_device the XCOPY was received upon..
*/
memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
spc_gen_naa_6h_vendor_specific(local_dev, &xop->local_dev_wwn[0]);
while (start < tdll) {
/*
* Check target descriptor identification with 0xE4 type, and
* compare the current index with the CSCD descriptor IDs in
* the segment descriptor. Use VPD 0x83 WWPN matching ..
*/
switch (desc[0]) {
case 0xe4:
rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
&desc[0], cscd_index);
if (rc != 0)
goto out;
start += XCOPY_TARGET_DESC_LEN;
desc += XCOPY_TARGET_DESC_LEN;
cscd_index++;
break;
default:
pr_err("XCOPY unsupported descriptor type code:"
" 0x%02x\n", desc[0]);
*sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE;
goto out;
}
}
switch (xop->op_origin) {
case XCOL_SOURCE_RECV_OP:
rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
xop->dst_tid_wwn,
&xop->dst_dev,
&xop->remote_lun_ref);
break;
case XCOL_DEST_RECV_OP:
rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
xop->src_tid_wwn,
&xop->src_dev,
&xop->remote_lun_ref);
break;
default:
pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
"stdi: %hu dtdi: %hu\n", xop->stdi, xop->dtdi);
rc = -EINVAL;
break;
}
/*
* If a matching IEEE NAA 0x83 descriptor for the requested device
* is not located on this node, return COPY_ABORTED with ASC/ASCQ
* 0x0d/0x02 - COPY_TARGET_DEVICE_NOT_REACHABLE to request the
* initiator to fall back to normal copy method.
*/
if (rc < 0) {
*sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
goto out;
}
pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
xop->src_dev, &xop->src_tid_wwn[0]);
pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
xop->dst_dev, &xop->dst_tid_wwn[0]);
return cscd_index;
out:
return -EINVAL;
}
static int target_xcopy_parse_segdesc_02(struct xcopy_op *xop, unsigned char *p)
{
unsigned char *desc = p;
int dc = (desc[1] & 0x02);
unsigned short desc_len;
desc_len = get_unaligned_be16(&desc[2]);
if (desc_len != 0x18) {
pr_err("XCOPY segment desc 0x02: Illegal desc_len:"
" %hu\n", desc_len);
return -EINVAL;
}
xop->stdi = get_unaligned_be16(&desc[4]);
xop->dtdi = get_unaligned_be16(&desc[6]);
if (xop->stdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX ||
xop->dtdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX) {
pr_err("XCOPY segment desc 0x02: unsupported CSCD ID > 0x%x; stdi: %hu dtdi: %hu\n",
XCOPY_CSCD_DESC_ID_LIST_OFF_MAX, xop->stdi, xop->dtdi);
return -EINVAL;
}
pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
desc_len, xop->stdi, xop->dtdi, dc);
xop->nolb = get_unaligned_be16(&desc[10]);
xop->src_lba = get_unaligned_be64(&desc[12]);
xop->dst_lba = get_unaligned_be64(&desc[20]);
pr_debug("XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n",
xop->nolb, (unsigned long long)xop->src_lba,
(unsigned long long)xop->dst_lba);
return 0;
}
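/*
 * Editor's illustration (not part of the original driver): layout of a
 * block -> block (0x02) segment descriptor as parsed above, with
 * arbitrary example values:
 *
 *   byte  0      : 0x02   - segment descriptor type code
 *   byte  1 bit 1: DC
 *   bytes 2..3   : 0x0018 - DESCRIPTOR LENGTH (bytes after byte 3)
 *   bytes 4..5   : 0x0000 - source CSCD descriptor ID (stdi)
 *   bytes 6..7   : 0x0001 - destination CSCD descriptor ID (dtdi)
 *   bytes 10..11 : 0x0080 - NUMBER OF BLOCKS (nolb = 128)
 *   bytes 12..19 : 0x0000000000001000 - source LBA
 *   bytes 20..27 : 0x0000000000002000 - destination LBA
 *
 * i.e. copy 128 blocks from LBA 0x1000 on CSCD 0 to LBA 0x2000 on
 * CSCD 1; the 4-byte header plus the 0x18 bytes that follow give the
 * 28-byte XCOPY_SEGMENT_DESC_LEN stride used by the caller.
 */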
static int target_xcopy_parse_segment_descriptors(struct xcopy_op *xop,
unsigned char *p, unsigned int sdll,
sense_reason_t *sense_ret)
{
unsigned char *desc = p;
unsigned int start = 0;
int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;
*sense_ret = TCM_INVALID_PARAMETER_LIST;
if (offset != 0) {
pr_err("XCOPY segment descriptor list length is not"
" multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
*sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE;
return -EINVAL;
}
if (sdll > RCR_OP_MAX_SG_DESC_COUNT * XCOPY_SEGMENT_DESC_LEN) {
pr_err("XCOPY supports %u segment descriptor(s), sdll: %u too"
" large..\n", RCR_OP_MAX_SG_DESC_COUNT, sdll);
/* spc4r37 6.4.3.5 SEGMENT DESCRIPTOR LIST LENGTH field */
*sense_ret = TCM_TOO_MANY_SEGMENT_DESCS;
return -EINVAL;
}
while (start < sdll) {
/*
* Check segment descriptor type code for block -> block
*/
switch (desc[0]) {
case 0x02:
rc = target_xcopy_parse_segdesc_02(xop, desc);
if (rc < 0)
goto out;
ret++;
start += XCOPY_SEGMENT_DESC_LEN;
desc += XCOPY_SEGMENT_DESC_LEN;
break;
default:
pr_err("XCOPY unsupported segment descriptor"
"type: 0x%02x\n", desc[0]);
*sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE;
goto out;
}
}
return ret;
out:
return -EINVAL;
}
/*
* Start xcopy_pt ops
*/
struct xcopy_pt_cmd {
struct se_cmd se_cmd;
struct completion xpt_passthrough_sem;
unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
};
struct se_portal_group xcopy_pt_tpg;
static struct se_session xcopy_pt_sess;
static struct se_node_acl xcopy_pt_nacl;
static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
{
return 0;
}
static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
{
if (xop->op_origin == XCOL_SOURCE_RECV_OP)
pr_debug("putting dst lun_ref for %p\n", xop->dst_dev);
else
pr_debug("putting src lun_ref for %p\n", xop->src_dev);
percpu_ref_put(xop->remote_lun_ref);
}
static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
{
struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
struct xcopy_pt_cmd, se_cmd);
/* xpt_cmd is on the stack, nothing to free here */
pr_debug("xpt_cmd done: %p\n", xpt_cmd);
}
static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd)
{
struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
struct xcopy_pt_cmd, se_cmd);
complete(&xpt_cmd->xpt_passthrough_sem);
return 0;
}
static int xcopy_pt_write_pending(struct se_cmd *se_cmd)
{
return 0;
}
static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd)
{
return 0;
}
static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
{
return 0;
}
static const struct target_core_fabric_ops xcopy_pt_tfo = {
.fabric_name = "xcopy-pt",
.get_cmd_state = xcopy_pt_get_cmd_state,
.release_cmd = xcopy_pt_release_cmd,
.check_stop_free = xcopy_pt_check_stop_free,
.write_pending = xcopy_pt_write_pending,
.queue_data_in = xcopy_pt_queue_data_in,
.queue_status = xcopy_pt_queue_status,
};
/*
* End xcopy_pt_ops
*/
int target_xcopy_setup_pt(void)
{
xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
if (!xcopy_wq) {
pr_err("Unable to allocate xcopy_wq\n");
return -ENOMEM;
}
memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);
xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo;
memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl));
INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
transport_init_session(&xcopy_pt_sess);
xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
xcopy_pt_sess.se_tpg = &xcopy_pt_tpg;
xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;
return 0;
}
void target_xcopy_release_pt(void)
{
if (xcopy_wq)
destroy_workqueue(xcopy_wq);
}
/**
* target_xcopy_setup_pt_cmd - set up a pass-through command
* @xpt_cmd: Data structure to initialize.
* @xop: Describes the XCOPY operation received from an initiator.
* @se_dev: Backend device to associate with @xpt_cmd if
* @remote_port == true.
* @cdb: SCSI CDB to be copied into @xpt_cmd.
* @remote_port: If false, use the LUN through which the XCOPY command has
* been received. If true, use @se_dev->xcopy_lun.
*
* Set up a SCSI command (READ or WRITE) that will be used to execute an
* XCOPY command.
*/
static int target_xcopy_setup_pt_cmd(
struct xcopy_pt_cmd *xpt_cmd,
struct xcopy_op *xop,
struct se_device *se_dev,
unsigned char *cdb,
bool remote_port)
{
struct se_cmd *cmd = &xpt_cmd->se_cmd;
/*
* Setup LUN+port to honor reservations based upon xop->op_origin for
* X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
*/
if (remote_port) {
cmd->se_lun = &se_dev->xcopy_lun;
cmd->se_dev = se_dev;
} else {
cmd->se_lun = xop->xop_se_cmd->se_lun;
cmd->se_dev = xop->xop_se_cmd->se_dev;
}
cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
if (target_cmd_init_cdb(cmd, cdb, GFP_KERNEL))
return -EINVAL;
cmd->tag = 0;
if (target_cmd_parse_cdb(cmd))
return -EINVAL;
if (transport_generic_map_mem_to_cmd(cmd, xop->xop_data_sg,
xop->xop_data_nents, NULL, 0))
return -EINVAL;
pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
" %u\n", cmd->t_data_sg, cmd->t_data_nents);
return 0;
}
static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
{
struct se_cmd *se_cmd = &xpt_cmd->se_cmd;
sense_reason_t sense_rc;
sense_rc = transport_generic_new_cmd(se_cmd);
if (sense_rc)
return -EINVAL;
if (se_cmd->data_direction == DMA_TO_DEVICE)
target_execute_cmd(se_cmd);
wait_for_completion_interruptible(&xpt_cmd->xpt_passthrough_sem);
pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
se_cmd->scsi_status);
return (se_cmd->scsi_status) ? -EINVAL : 0;
}
static int target_xcopy_read_source(
struct se_cmd *ec_cmd,
struct xcopy_op *xop,
struct se_device *src_dev,
sector_t src_lba,
u32 src_bytes)
{
struct xcopy_pt_cmd xpt_cmd;
struct se_cmd *se_cmd = &xpt_cmd.se_cmd;
u32 transfer_length_block = src_bytes / src_dev->dev_attrib.block_size;
int rc;
unsigned char cdb[16];
bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);
memset(&xpt_cmd, 0, sizeof(xpt_cmd));
init_completion(&xpt_cmd.xpt_passthrough_sem);
memset(&cdb[0], 0, 16);
cdb[0] = READ_16;
put_unaligned_be64(src_lba, &cdb[2]);
put_unaligned_be32(transfer_length_block, &cdb[10]);
pr_debug("XCOPY: Built READ_16: LBA: %llu Blocks: %u Length: %u\n",
(unsigned long long)src_lba, transfer_length_block, src_bytes);
__target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, src_bytes,
DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0,
NULL);
rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, src_dev, &cdb[0],
remote_port);
if (rc < 0) {
ec_cmd->scsi_status = se_cmd->scsi_status;
goto out;
}
pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ"
" memory\n", xop->xop_data_sg, xop->xop_data_nents);
rc = target_xcopy_issue_pt_cmd(&xpt_cmd);
if (rc < 0)
ec_cmd->scsi_status = se_cmd->scsi_status;
out:
transport_generic_free_cmd(se_cmd, 0);
return rc;
}
static int target_xcopy_write_destination(
struct se_cmd *ec_cmd,
struct xcopy_op *xop,
struct se_device *dst_dev,
sector_t dst_lba,
u32 dst_bytes)
{
struct xcopy_pt_cmd xpt_cmd;
struct se_cmd *se_cmd = &xpt_cmd.se_cmd;
u32 transfer_length_block = dst_bytes / dst_dev->dev_attrib.block_size;
int rc;
unsigned char cdb[16];
bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);
memset(&xpt_cmd, 0, sizeof(xpt_cmd));
init_completion(&xpt_cmd.xpt_passthrough_sem);
memset(&cdb[0], 0, 16);
cdb[0] = WRITE_16;
put_unaligned_be64(dst_lba, &cdb[2]);
put_unaligned_be32(transfer_length_block, &cdb[10]);
pr_debug("XCOPY: Built WRITE_16: LBA: %llu Blocks: %u Length: %u\n",
(unsigned long long)dst_lba, transfer_length_block, dst_bytes);
__target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, dst_bytes,
DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0,
NULL);
rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, dst_dev, &cdb[0],
remote_port);
if (rc < 0) {
ec_cmd->scsi_status = se_cmd->scsi_status;
goto out;
}
rc = target_xcopy_issue_pt_cmd(&xpt_cmd);
if (rc < 0)
ec_cmd->scsi_status = se_cmd->scsi_status;
out:
transport_generic_free_cmd(se_cmd, 0);
return rc;
}
static void target_xcopy_do_work(struct work_struct *work)
{
struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
struct se_cmd *ec_cmd = xop->xop_se_cmd;
struct se_device *src_dev, *dst_dev;
sector_t src_lba, dst_lba, end_lba;
unsigned long long max_bytes, max_bytes_src, max_bytes_dst, max_blocks;
int rc = 0;
unsigned short nolb;
unsigned int copied_bytes = 0;
sense_reason_t sense_rc;
sense_rc = target_parse_xcopy_cmd(xop);
if (sense_rc != TCM_NO_SENSE)
goto err_free;
if (WARN_ON_ONCE(!xop->src_dev) || WARN_ON_ONCE(!xop->dst_dev)) {
sense_rc = TCM_INVALID_PARAMETER_LIST;
goto err_free;
}
src_dev = xop->src_dev;
dst_dev = xop->dst_dev;
src_lba = xop->src_lba;
dst_lba = xop->dst_lba;
nolb = xop->nolb;
end_lba = src_lba + nolb;
/*
* Break up XCOPY I/O into hw_max_sectors * hw_block_size sized
* I/O based on the smallest max_bytes between src_dev + dst_dev
*/
max_bytes_src = (unsigned long long) src_dev->dev_attrib.hw_max_sectors *
src_dev->dev_attrib.hw_block_size;
max_bytes_dst = (unsigned long long) dst_dev->dev_attrib.hw_max_sectors *
dst_dev->dev_attrib.hw_block_size;
max_bytes = min_t(u64, max_bytes_src, max_bytes_dst);
max_bytes = min_t(u64, max_bytes, XCOPY_MAX_BYTES);
/*
* Using shift instead of the division because otherwise GCC
* generates __udivdi3 that is missing on i386
*/
max_blocks = max_bytes >> ilog2(src_dev->dev_attrib.block_size);
pr_debug("%s: nolb: %u, max_blocks: %llu end_lba: %llu\n", __func__,
nolb, max_blocks, (unsigned long long)end_lba);
pr_debug("%s: Starting src_lba: %llu, dst_lba: %llu\n", __func__,
(unsigned long long)src_lba, (unsigned long long)dst_lba);
while (nolb) {
u32 cur_bytes = min_t(u64, max_bytes, nolb * src_dev->dev_attrib.block_size);
unsigned short cur_nolb = cur_bytes / src_dev->dev_attrib.block_size;
if (cur_bytes != xop->xop_data_bytes) {
/*
* (Re)allocate a buffer large enough to hold the XCOPY
* I/O size, which can be reused each read / write loop.
*/
target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
rc = target_alloc_sgl(&xop->xop_data_sg,
&xop->xop_data_nents,
cur_bytes,
false, false);
if (rc < 0)
goto out;
xop->xop_data_bytes = cur_bytes;
}
pr_debug("%s: Calling read src_dev: %p src_lba: %llu, cur_nolb: %hu\n",
__func__, src_dev, (unsigned long long)src_lba, cur_nolb);
rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_bytes);
if (rc < 0)
goto out;
src_lba += cur_bytes / src_dev->dev_attrib.block_size;
pr_debug("%s: Incremented READ src_lba to %llu\n", __func__,
(unsigned long long)src_lba);
pr_debug("%s: Calling write dst_dev: %p dst_lba: %llu, cur_nolb: %u\n",
__func__, dst_dev, (unsigned long long)dst_lba, cur_nolb);
rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
dst_lba, cur_bytes);
if (rc < 0)
goto out;
dst_lba += cur_bytes / dst_dev->dev_attrib.block_size;
pr_debug("%s: Incremented WRITE dst_lba to %llu\n", __func__,
(unsigned long long)dst_lba);
copied_bytes += cur_bytes;
nolb -= cur_bytes / src_dev->dev_attrib.block_size;
}
xcopy_pt_undepend_remotedev(xop);
target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
kfree(xop);
pr_debug("%s: Final src_lba: %llu, dst_lba: %llu\n", __func__,
(unsigned long long)src_lba, (unsigned long long)dst_lba);
pr_debug("%s: Blocks copied: %u, Bytes Copied: %u\n", __func__,
copied_bytes / dst_dev->dev_attrib.block_size, copied_bytes);
pr_debug("%s: Setting X-COPY GOOD status -> sending response\n", __func__);
target_complete_cmd(ec_cmd, SAM_STAT_GOOD);
return;
out:
/*
* The XCOPY command was aborted after some data was transferred.
* Terminate command with CHECK CONDITION status, with the sense key
* set to COPY ABORTED.
*/
sense_rc = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
xcopy_pt_undepend_remotedev(xop);
target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
err_free:
kfree(xop);
pr_warn_ratelimited("%s: rc: %d, sense: %u, XCOPY operation failed\n",
__func__, rc, sense_rc);
target_complete_cmd_with_sense(ec_cmd, SAM_STAT_CHECK_CONDITION, sense_rc);
}
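/*
 * Editor's illustration (not part of the original driver): a worked
 * chunking example for the loop above, assuming both backends report
 * hw_max_sectors = 1024 and hw_block_size = 512, a logical block_size
 * of 512, and an XCOPY_MAX_BYTES of at least 512 KiB:
 *
 *   max_bytes  = min(1024 * 512, 1024 * 512, XCOPY_MAX_BYTES) = 512 KiB
 *   max_blocks = 524288 >> 9 = 1024
 *
 * For nolb = 2560 blocks the loop runs three times:
 *   1) cur_bytes = min(512 KiB, 2560 * 512) = 512 KiB -> 1024 blocks
 *   2) cur_bytes = 512 KiB                            -> 1024 blocks
 *   3) cur_bytes = min(512 KiB,  512 * 512) = 256 KiB ->  512 blocks
 * The SGL is reallocated before iteration 3 because cur_bytes shrank,
 * and each iteration issues one READ_16 / WRITE_16 pair.
 */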
/*
* Returns TCM_NO_SENSE upon success or a sense code != TCM_NO_SENSE if parsing
* fails.
*/
static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop)
{
struct se_cmd *se_cmd = xop->xop_se_cmd;
unsigned char *p = NULL, *seg_desc;
unsigned int list_id, list_id_usage, sdll, inline_dl;
sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
int rc;
unsigned short tdll;
p = transport_kmap_data_sg(se_cmd);
if (!p) {
pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
return TCM_OUT_OF_RESOURCES;
}
list_id = p[0];
list_id_usage = (p[1] & 0x18) >> 3;
/*
* Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
*/
tdll = get_unaligned_be16(&p[2]);
sdll = get_unaligned_be32(&p[8]);
if (tdll + sdll > RCR_OP_MAX_DESC_LIST_LEN) {
pr_err("XCOPY descriptor list length %u exceeds maximum %u\n",
tdll + sdll, RCR_OP_MAX_DESC_LIST_LEN);
ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
goto out;
}
inline_dl = get_unaligned_be32(&p[12]);
if (inline_dl != 0) {
pr_err("XCOPY with non zero inline data length\n");
goto out;
}
if (se_cmd->data_length < (XCOPY_HDR_LEN + tdll + sdll + inline_dl)) {
pr_err("XCOPY parameter truncation: data length %u too small "
"for tdll: %hu sdll: %u inline_dl: %u\n",
se_cmd->data_length, tdll, sdll, inline_dl);
ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
goto out;
}
pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
tdll, sdll, inline_dl);
/*
* skip over the target descriptors until segment descriptors
* have been passed - CSCD ids are needed to determine src and dest.
*/
seg_desc = &p[16] + tdll;
rc = target_xcopy_parse_segment_descriptors(xop, seg_desc, sdll, &ret);
if (rc <= 0)
goto out;
pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
rc * XCOPY_SEGMENT_DESC_LEN);
rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
if (rc <= 0)
goto out;
if (xop->src_dev->dev_attrib.block_size !=
xop->dst_dev->dev_attrib.block_size) {
pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev"
" block_size: %u currently unsupported\n",
xop->src_dev->dev_attrib.block_size,
xop->dst_dev->dev_attrib.block_size);
xcopy_pt_undepend_remotedev(xop);
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out;
}
pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
rc * XCOPY_TARGET_DESC_LEN);
transport_kunmap_data_sg(se_cmd);
return TCM_NO_SENSE;
out:
if (p)
transport_kunmap_data_sg(se_cmd);
return ret;
}
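/*
 * Editor's illustration (not part of the original driver): the
 * EXTENDED COPY (LID1) parameter list header consumed above, with a
 * worked size example:
 *
 *   byte  0          : LIST IDENTIFIER
 *   byte  1 bits 4..3: LIST ID USAGE
 *   bytes 2..3       : TARGET DESCRIPTOR LIST LENGTH (tdll)
 *   bytes 8..11      : SEGMENT DESCRIPTOR LIST LENGTH (sdll)
 *   bytes 12..15     : INLINE DATA LENGTH (must be zero here)
 *   bytes 16..       : target descriptors, then segment descriptors
 *                      starting at byte 16 + tdll
 *
 * Assuming the usual 32-byte CSCD descriptors (XCOPY_TARGET_DESC_LEN),
 * one source plus one destination CSCD and a single 28-byte block ->
 * block segment give tdll = 64 and sdll = 28, so se_cmd->data_length
 * must be at least 16 + 64 + 28 = 108 bytes.
 */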
sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
{
struct se_device *dev = se_cmd->se_dev;
struct xcopy_op *xop;
unsigned int sa;
if (!dev->dev_attrib.emulate_3pc) {
pr_err("EXTENDED_COPY operation explicitly disabled\n");
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
sa = se_cmd->t_task_cdb[1] & 0x1f;
if (sa != 0x00) {
pr_err("EXTENDED_COPY(LID4) not supported\n");
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
if (se_cmd->data_length == 0) {
target_complete_cmd(se_cmd, SAM_STAT_GOOD);
return TCM_NO_SENSE;
}
if (se_cmd->data_length < XCOPY_HDR_LEN) {
pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n",
se_cmd->data_length, XCOPY_HDR_LEN);
return TCM_PARAMETER_LIST_LENGTH_ERROR;
}
xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
if (!xop)
goto err;
xop->xop_se_cmd = se_cmd;
INIT_WORK(&xop->xop_work, target_xcopy_do_work);
if (WARN_ON_ONCE(!queue_work(xcopy_wq, &xop->xop_work)))
goto free;
return TCM_NO_SENSE;
free:
kfree(xop);
err:
return TCM_OUT_OF_RESOURCES;
}
static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
{
unsigned char *p;
p = transport_kmap_data_sg(se_cmd);
if (!p) {
pr_err("transport_kmap_data_sg failed in"
" target_rcr_operating_parameters\n");
return TCM_OUT_OF_RESOURCES;
}
if (se_cmd->data_length < 54) {
pr_err("Receive Copy Results Op Parameters length"
" too small: %u\n", se_cmd->data_length);
transport_kunmap_data_sg(se_cmd);
return TCM_INVALID_CDB_FIELD;
}
/*
* Set SNLID=1 (Supports no List ID)
*/
p[4] = 0x1;
/*
* MAXIMUM TARGET DESCRIPTOR COUNT
*/
put_unaligned_be16(RCR_OP_MAX_TARGET_DESC_COUNT, &p[8]);
/*
* MAXIMUM SEGMENT DESCRIPTOR COUNT
*/
put_unaligned_be16(RCR_OP_MAX_SG_DESC_COUNT, &p[10]);
/*
* MAXIMUM DESCRIPTOR LIST LENGTH
*/
put_unaligned_be32(RCR_OP_MAX_DESC_LIST_LEN, &p[12]);
/*
* MAXIMUM SEGMENT LENGTH
*/
put_unaligned_be32(RCR_OP_MAX_SEGMENT_LEN, &p[16]);
/*
* MAXIMUM INLINE DATA LENGTH for SA 0x04 (NOT SUPPORTED)
*/
put_unaligned_be32(0x0, &p[20]);
/*
* HELD DATA LIMIT
*/
put_unaligned_be32(0x0, &p[24]);
/*
* MAXIMUM STREAM DEVICE TRANSFER SIZE
*/
put_unaligned_be32(0x0, &p[28]);
/*
* TOTAL CONCURRENT COPIES
*/
put_unaligned_be16(RCR_OP_TOTAL_CONCURR_COPIES, &p[34]);
/*
* MAXIMUM CONCURRENT COPIES
*/
p[36] = RCR_OP_MAX_CONCURR_COPIES;
/*
* DATA SEGMENT GRANULARITY (log 2)
*/
p[37] = RCR_OP_DATA_SEG_GRAN_LOG2;
/*
* INLINE DATA GRANULARITY (log 2)
*/
p[38] = RCR_OP_INLINE_DATA_GRAN_LOG2;
/*
* HELD DATA GRANULARITY
*/
p[39] = RCR_OP_HELD_DATA_GRAN_LOG2;
/*
* IMPLEMENTED DESCRIPTOR LIST LENGTH
*/
p[43] = 0x2;
/*
* List of implemented descriptor type codes (ordered)
*/
p[44] = 0x02; /* Copy Block to Block device */
p[45] = 0xe4; /* Identification descriptor target descriptor */
/*
* AVAILABLE DATA (n-3)
*/
put_unaligned_be32(42, &p[0]);
transport_kunmap_data_sg(se_cmd);
target_complete_cmd(se_cmd, SAM_STAT_GOOD);
return TCM_NO_SENSE;
}
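/*
 * Editor's note (illustrative, not part of the original driver): the
 * AVAILABLE DATA value of 42 written above follows from the layout
 * built in this function: the last parameter byte used is the 0xe4
 * descriptor type code at offset 45, and AVAILABLE DATA is defined as
 * (n - 3), so 45 - 3 = 42.
 */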
sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd)
{
unsigned char *cdb = &se_cmd->t_task_cdb[0];
int sa = (cdb[1] & 0x1f), list_id = cdb[2];
struct se_device *dev = se_cmd->se_dev;
sense_reason_t rc = TCM_NO_SENSE;
if (!dev->dev_attrib.emulate_3pc) {
pr_debug("Third-party copy operations explicitly disabled\n");
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
pr_debug("Entering target_do_receive_copy_results: SA: 0x%02x, List ID:"
" 0x%02x, AL: %u\n", sa, list_id, se_cmd->data_length);
if (list_id != 0) {
pr_err("Receive Copy Results with non zero list identifier"
" not supported\n");
return TCM_INVALID_CDB_FIELD;
}
switch (sa) {
case RCR_SA_OPERATING_PARAMETERS:
rc = target_rcr_operating_parameters(se_cmd);
break;
case RCR_SA_COPY_STATUS:
case RCR_SA_RECEIVE_DATA:
case RCR_SA_FAILED_SEGMENT_DETAILS:
default:
pr_err("Unsupported SA for receive copy results: 0x%02x\n", sa);
return TCM_INVALID_CDB_FIELD;
}
return rc;
}
| linux-master | drivers/target/target_core_xcopy.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_fabric_lib.c
*
* This file contains generic high level protocol identifier and PR
* handlers for TCM fabric modules
*
* (c) Copyright 2010-2013 Datera, Inc.
*
* Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
/*
* See SPC4, section 7.5 "Protocol specific parameters" for details
* on the formats implemented in this file.
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <asm/unaligned.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
#include "target_core_pr.h"
static int sas_get_pr_transport_id(
struct se_node_acl *nacl,
int *format_code,
unsigned char *buf)
{
int ret;
/* Skip over the 'naa.' prefix */
ret = hex2bin(&buf[4], &nacl->initiatorname[4], 8);
if (ret) {
pr_debug("%s: invalid hex string\n", __func__);
return ret;
}
return 24;
}
static int fc_get_pr_transport_id(
struct se_node_acl *se_nacl,
int *format_code,
unsigned char *buf)
{
unsigned char *ptr;
int i, ret;
u32 off = 8;
/*
* We convert the ASCII formatted N Port name into a binary
* encoded TransportID.
*/
ptr = &se_nacl->initiatorname[0];
for (i = 0; i < 23; ) {
if (!strncmp(&ptr[i], ":", 1)) {
i++;
continue;
}
ret = hex2bin(&buf[off++], &ptr[i], 1);
if (ret < 0) {
pr_debug("%s: invalid hex string\n", __func__);
return ret;
}
i += 2;
}
/*
* The FC TransportID has a hardcoded length of 24 bytes
*/
return 24;
}
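/*
 * Editor's illustration (not part of the original driver): the loop
 * above walks the 23-character ASCII N_Port name, skipping the seven
 * ':' separators and hex2bin()-ing each digit pair.  For a
 * hypothetical initiatorname "21:00:00:e0:8b:01:02:03" it stores
 * 0x21 0x00 0x00 0xe0 0x8b 0x01 0x02 0x03 at buf[8]..buf[15]; the rest
 * of the fixed 24-byte FCP TransportID (header and reserved bytes) is
 * left to the caller.
 */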
static int sbp_get_pr_transport_id(
struct se_node_acl *nacl,
int *format_code,
unsigned char *buf)
{
int ret;
ret = hex2bin(&buf[8], nacl->initiatorname, 8);
if (ret) {
pr_debug("%s: invalid hex string\n", __func__);
return ret;
}
return 24;
}
static int srp_get_pr_transport_id(
struct se_node_acl *nacl,
int *format_code,
unsigned char *buf)
{
const char *p;
unsigned len, count, leading_zero_bytes;
int rc;
p = nacl->initiatorname;
if (strncasecmp(p, "0x", 2) == 0)
p += 2;
len = strlen(p);
if (len % 2)
return -EINVAL;
count = min(len / 2, 16U);
leading_zero_bytes = 16 - count;
memset(buf + 8, 0, leading_zero_bytes);
rc = hex2bin(buf + 8 + leading_zero_bytes, p, count);
if (rc < 0) {
pr_debug("hex2bin failed for %s: %d\n", p, rc);
return rc;
}
return 24;
}
static int iscsi_get_pr_transport_id(
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code,
unsigned char *buf)
{
u32 off = 4, padding = 0;
int isid_len;
u16 len = 0;
spin_lock_irq(&se_nacl->nacl_sess_lock);
/*
* Only null terminate the last field.
*
* From spc4r37 section 7.6.4.6: TransportID for initiator ports using
* SCSI over iSCSI.
*
* Table 507 TPID=0 Initiator device TransportID
*
* The null-terminated, null-padded (see 4.3.2) ISCSI NAME field shall
* contain the iSCSI name of an iSCSI initiator node (see RFC 7143).
* The first ISCSI NAME field byte containing an ASCII null character
* terminates the ISCSI NAME field without regard for the specified
* length of the iSCSI TransportID or the contents of the ADDITIONAL
* LENGTH field.
*/
len = sprintf(&buf[off], "%s", se_nacl->initiatorname);
off += len;
if ((*format_code == 1) && (pr_reg->isid_present_at_reg)) {
/*
* Set FORMAT CODE 01b for iSCSI Initiator port TransportID
* format.
*/
buf[0] |= 0x40;
/*
* From spc4r37 Section 7.6.4.6
*
* Table 508 TPID=1 Initiator port TransportID.
*
* The ISCSI NAME field shall not be null-terminated
* (see 4.3.2) and shall not be padded.
*
* The SEPARATOR field shall contain the five ASCII
* characters ",i,0x".
*
* The null-terminated, null-padded ISCSI INITIATOR SESSION ID
* field shall contain the iSCSI initiator session identifier
* (see RFC 3720) in the form of ASCII characters that are the
* hexadecimal digits converted from the binary iSCSI initiator
* session identifier value. The first ISCSI INITIATOR SESSION
* ID field byte containing an ASCII null character terminates
* the ISCSI INITIATOR SESSION ID field without regard for the
* specified length of the iSCSI TransportID or the contents
* of the ADDITIONAL LENGTH field.
*/
buf[off++] = 0x2c; /* ASCII Character: "," */
buf[off++] = 0x69; /* ASCII Character: "i" */
buf[off++] = 0x2c; /* ASCII Character: "," */
buf[off++] = 0x30; /* ASCII Character: "0" */
buf[off++] = 0x78; /* ASCII Character: "x" */
len += 5;
isid_len = sprintf(buf + off, "%s", pr_reg->pr_reg_isid);
off += isid_len;
len += isid_len;
}
buf[off] = '\0';
len += 1;
spin_unlock_irq(&se_nacl->nacl_sess_lock);
/*
* The ADDITIONAL LENGTH field specifies the number of bytes that follow
* in the TransportID. The additional length shall be at least 20 and
* shall be a multiple of four.
*/
padding = ((-len) & 3);
if (padding != 0)
len += padding;
put_unaligned_be16(len, &buf[2]);
/*
* Increment value for total payload + header length for
* full status descriptor
*/
len += 4;
return len;
}
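/*
 * Editor's illustration (not part of the original driver), using a
 * hypothetical initiatorname "iqn.1994-05.com.example:host1" (29
 * characters) and ISID "1234567890ab" with format code 01b:
 *
 *   buf[0] |= 0x40                          (format code 01b)
 *   buf[4..] = name + ",i,0x" + ISID + \0   (29 + 5 + 12 + 1 = 47)
 *   padding  = (-47) & 3 = 1  ->  ADDITIONAL LENGTH (buf[2..3]) = 48
 *   return value = 48 + 4 header bytes = 52
 */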
static int iscsi_get_pr_transport_id_len(
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code)
{
u32 len = 0, padding = 0;
spin_lock_irq(&se_nacl->nacl_sess_lock);
len = strlen(se_nacl->initiatorname);
/*
* Add extra byte for NULL terminator
*/
len++;
/*
* If there is ISID present with the registration, use format code:
* 01b: iSCSI Initiator port TransportID format
*
* If there is not an active iSCSI session, use format code:
* 00b: iSCSI Initiator device TransportID format
*/
if (pr_reg->isid_present_at_reg) {
len += 5; /* For ",i,0x" ASCII separator */
len += strlen(pr_reg->pr_reg_isid);
*format_code = 1;
} else
*format_code = 0;
spin_unlock_irq(&se_nacl->nacl_sess_lock);
/*
* The ADDITIONAL LENGTH field specifies the number of bytes that follow
* in the TransportID. The additional length shall be at least 20 and
* shall be a multiple of four.
*/
padding = ((-len) & 3);
if (padding != 0)
len += padding;
/*
* Increment value for total payload + header length for
* full status descriptor
*/
len += 4;
return len;
}
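/*
 * Editor's note (illustrative, not part of the original driver): the
 * "(-len) & 3" idiom above equals (4 - len % 4) % 4, i.e. the number
 * of pad bytes needed to round len up to a multiple of four.  For
 * example, a 21-character initiatorname plus its NULL terminator gives
 * len = 22, padding = (-22) & 3 = 2, padded length 24.
 */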
static char *iscsi_parse_pr_out_transport_id(
struct se_portal_group *se_tpg,
char *buf,
u32 *out_tid_len,
char **port_nexus_ptr)
{
char *p;
int i;
u8 format_code = (buf[0] & 0xc0);
/*
* Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6:
*
* TransportID for initiator ports using SCSI over iSCSI,
* from Table 388 -- iSCSI TransportID formats.
*
* 00b Initiator port is identified using the world wide unique
* SCSI device name of the iSCSI initiator
* device containing the initiator port (see table 389).
* 01b Initiator port is identified using the world wide unique
*     initiator port identifier (see table 390).
* 10b to 11b Reserved
*/
if ((format_code != 0x00) && (format_code != 0x40)) {
pr_err("Illegal format code: 0x%02x for iSCSI"
" Initiator Transport ID\n", format_code);
return NULL;
}
/*
* If the caller wants the TransportID Length, we set that value for the
* entire iSCSI Transport ID now.
*/
if (out_tid_len) {
/* The shift works thanks to integer promotion rules */
*out_tid_len = get_unaligned_be16(&buf[2]);
/* Add four bytes for iSCSI Transport ID header */
*out_tid_len += 4;
}
/*
* Check for ',i,0x' separator between iSCSI Name and iSCSI Initiator
* Session ID as defined in Table 390 - iSCSI initiator port TransportID
* format.
*/
if (format_code == 0x40) {
p = strstr(&buf[4], ",i,0x");
if (!p) {
pr_err("Unable to locate \",i,0x\" separator"
" for Initiator port identifier: %s\n",
&buf[4]);
return NULL;
}
*p = '\0'; /* Terminate iSCSI Name */
p += 5; /* Skip over ",i,0x" separator */
*port_nexus_ptr = p;
/*
* Go ahead and do the lower case conversion of the received
* 12 ASCII characters representing the ISID in the TransportID
* for comparison against the running iSCSI session's ISID from
* iscsi_target.c:lio_sess_get_initiator_sid()
*/
for (i = 0; i < 12; i++) {
/*
* The first ISCSI INITIATOR SESSION ID field byte
* containing an ASCII null character terminates the
* ISCSI INITIATOR SESSION ID field without regard for
* the specified length of the iSCSI TransportID or the
* contents of the ADDITIONAL LENGTH field.
*/
if (*p == '\0')
break;
if (isdigit(*p)) {
p++;
continue;
}
*p = tolower(*p);
p++;
}
} else
*port_nexus_ptr = NULL;
return &buf[4];
}
int target_get_pr_transport_id_len(struct se_node_acl *nacl,
struct t10_pr_registration *pr_reg, int *format_code)
{
switch (nacl->se_tpg->proto_id) {
case SCSI_PROTOCOL_FCP:
case SCSI_PROTOCOL_SBP:
case SCSI_PROTOCOL_SRP:
case SCSI_PROTOCOL_SAS:
break;
case SCSI_PROTOCOL_ISCSI:
return iscsi_get_pr_transport_id_len(nacl, pr_reg, format_code);
default:
pr_err("Unknown proto_id: 0x%02x\n", nacl->se_tpg->proto_id);
return -EINVAL;
}
/*
* Most transports use a fixed length 24 byte identifier.
*/
*format_code = 0;
return 24;
}
int target_get_pr_transport_id(struct se_node_acl *nacl,
struct t10_pr_registration *pr_reg, int *format_code,
unsigned char *buf)
{
switch (nacl->se_tpg->proto_id) {
case SCSI_PROTOCOL_SAS:
return sas_get_pr_transport_id(nacl, format_code, buf);
case SCSI_PROTOCOL_SBP:
return sbp_get_pr_transport_id(nacl, format_code, buf);
case SCSI_PROTOCOL_SRP:
return srp_get_pr_transport_id(nacl, format_code, buf);
case SCSI_PROTOCOL_FCP:
return fc_get_pr_transport_id(nacl, format_code, buf);
case SCSI_PROTOCOL_ISCSI:
return iscsi_get_pr_transport_id(nacl, pr_reg, format_code,
buf);
default:
pr_err("Unknown proto_id: 0x%02x\n", nacl->se_tpg->proto_id);
return -EINVAL;
}
}
const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg,
char *buf, u32 *out_tid_len, char **port_nexus_ptr)
{
u32 offset;
switch (tpg->proto_id) {
case SCSI_PROTOCOL_SAS:
/*
* Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID
* for initiator ports using SCSI over SAS Serial SCSI Protocol.
*/
offset = 4;
break;
case SCSI_PROTOCOL_SBP:
case SCSI_PROTOCOL_SRP:
case SCSI_PROTOCOL_FCP:
offset = 8;
break;
case SCSI_PROTOCOL_ISCSI:
return iscsi_parse_pr_out_transport_id(tpg, buf, out_tid_len,
port_nexus_ptr);
default:
pr_err("Unknown proto_id: 0x%02x\n", tpg->proto_id);
return NULL;
}
*port_nexus_ptr = NULL;
*out_tid_len = 24;
return buf + offset;
}
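/*
 * Editor's note (illustrative, not part of the original driver): for
 * the fixed-size TransportIDs handled above, the identifier itself
 * starts at byte 4 for SAS and at byte 8 for SBP, SRP and FCP, the
 * length reported via *out_tid_len is always 24 bytes, and there is no
 * port nexus (ISID-style) component, hence *port_nexus_ptr is set to
 * NULL.  Only iSCSI requires the variable-length parsing done in
 * iscsi_parse_pr_out_transport_id().
 */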
| linux-master | drivers/target/target_core_fabric_lib.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_tmr.c
*
* This file contains SPC-3 task management infrastructure
*
* (c) Copyright 2009-2013 Datera, Inc.
*
* Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/export.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
int core_tmr_alloc_req(
struct se_cmd *se_cmd,
void *fabric_tmr_ptr,
u8 function,
gfp_t gfp_flags)
{
struct se_tmr_req *tmr;
tmr = kzalloc(sizeof(struct se_tmr_req), gfp_flags);
if (!tmr) {
pr_err("Unable to allocate struct se_tmr_req\n");
return -ENOMEM;
}
se_cmd->se_cmd_flags |= SCF_SCSI_TMR_CDB;
se_cmd->se_tmr_req = tmr;
tmr->task_cmd = se_cmd;
tmr->fabric_tmr_ptr = fabric_tmr_ptr;
tmr->function = function;
INIT_LIST_HEAD(&tmr->tmr_list);
return 0;
}
EXPORT_SYMBOL(core_tmr_alloc_req);
void core_tmr_release_req(struct se_tmr_req *tmr)
{
kfree(tmr);
}
static int target_check_cdb_and_preempt(struct list_head *list,
struct se_cmd *cmd)
{
struct t10_pr_registration *reg;
if (!list)
return 0;
list_for_each_entry(reg, list, pr_reg_abort_list) {
if (reg->pr_res_key == cmd->pr_res_key)
return 0;
}
return 1;
}
static bool __target_check_io_state(struct se_cmd *se_cmd,
struct se_session *tmr_sess, bool tas)
{
struct se_session *sess = se_cmd->se_sess;
lockdep_assert_held(&sess->sess_cmd_lock);
/*
* If command already reached CMD_T_COMPLETE state within
* target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
* this se_cmd has been passed to fabric driver and will
* not be aborted.
*
* Otherwise, obtain a local se_cmd->cmd_kref now for TMR
* ABORT_TASK + LUN_RESET CMD_T_ABORTED processing, provided
* se_cmd->cmd_kref is still non-zero.
*/
spin_lock(&se_cmd->t_state_lock);
if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
pr_debug("Attempted to abort io tag: %llu already complete or"
" fabric stop, skipping\n", se_cmd->tag);
spin_unlock(&se_cmd->t_state_lock);
return false;
}
se_cmd->transport_state |= CMD_T_ABORTED;
if ((tmr_sess != se_cmd->se_sess) && tas)
se_cmd->transport_state |= CMD_T_TAS;
spin_unlock(&se_cmd->t_state_lock);
return kref_get_unless_zero(&se_cmd->cmd_kref);
}
void core_tmr_abort_task(
struct se_device *dev,
struct se_tmr_req *tmr,
struct se_session *se_sess)
{
LIST_HEAD(aborted_list);
struct se_cmd *se_cmd, *next;
unsigned long flags;
bool rc;
u64 ref_tag;
int i;
for (i = 0; i < dev->queue_cnt; i++) {
flush_work(&dev->queues[i].sq.work);
spin_lock_irqsave(&dev->queues[i].lock, flags);
list_for_each_entry_safe(se_cmd, next, &dev->queues[i].state_list,
state_list) {
if (se_sess != se_cmd->se_sess)
continue;
/*
* skip task management functions, including
* tmr->task_cmd
*/
if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
continue;
ref_tag = se_cmd->tag;
if (tmr->ref_task_tag != ref_tag)
continue;
pr_err("ABORT_TASK: Found referenced %s task_tag: %llu\n",
se_cmd->se_tfo->fabric_name, ref_tag);
spin_lock(&se_sess->sess_cmd_lock);
rc = __target_check_io_state(se_cmd, se_sess, 0);
spin_unlock(&se_sess->sess_cmd_lock);
if (!rc)
continue;
list_move_tail(&se_cmd->state_list, &aborted_list);
se_cmd->state_active = false;
spin_unlock_irqrestore(&dev->queues[i].lock, flags);
if (dev->transport->tmr_notify)
dev->transport->tmr_notify(dev, TMR_ABORT_TASK,
&aborted_list);
list_del_init(&se_cmd->state_list);
target_put_cmd_and_wait(se_cmd);
pr_err("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for ref_tag: %llu\n",
ref_tag);
tmr->response = TMR_FUNCTION_COMPLETE;
atomic_long_inc(&dev->aborts_complete);
return;
}
spin_unlock_irqrestore(&dev->queues[i].lock, flags);
}
if (dev->transport->tmr_notify)
dev->transport->tmr_notify(dev, TMR_ABORT_TASK, &aborted_list);
printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %lld\n",
tmr->ref_task_tag);
tmr->response = TMR_TASK_DOES_NOT_EXIST;
atomic_long_inc(&dev->aborts_no_task);
}
static void core_tmr_drain_tmr_list(
struct se_device *dev,
struct se_tmr_req *tmr,
struct list_head *preempt_and_abort_list)
{
LIST_HEAD(drain_tmr_list);
struct se_session *sess;
struct se_tmr_req *tmr_p, *tmr_pp;
struct se_cmd *cmd;
unsigned long flags;
bool rc;
/*
* Release all pending and outgoing TMRs aside from the received
* LUN_RESET tmr..
*/
spin_lock_irqsave(&dev->se_tmr_lock, flags);
list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
if (tmr_p == tmr)
continue;
cmd = tmr_p->task_cmd;
if (!cmd) {
pr_err("Unable to locate struct se_cmd for TMR\n");
continue;
}
/*
* We only execute one LUN_RESET at a time so we can't wait
* on them below.
*/
if (tmr_p->function == TMR_LUN_RESET)
continue;
/*
* If this function was called with a valid pr_res_key
* parameter (e.g. for the PROUT PREEMPT_AND_ABORT service action),
* skip TMRs whose registration key does not match.
*/
if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
continue;
sess = cmd->se_sess;
if (WARN_ON_ONCE(!sess))
continue;
spin_lock(&sess->sess_cmd_lock);
rc = __target_check_io_state(cmd, sess, 0);
spin_unlock(&sess->sess_cmd_lock);
if (!rc) {
printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
continue;
}
list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
tmr_p->tmr_dev = NULL;
}
spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
list_for_each_entry_safe(tmr_p, tmr_pp, &drain_tmr_list, tmr_list) {
list_del_init(&tmr_p->tmr_list);
cmd = tmr_p->task_cmd;
pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
" Response: 0x%02x, t_state: %d\n",
(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
tmr_p->function, tmr_p->response, cmd->t_state);
target_put_cmd_and_wait(cmd);
}
}
/**
* core_tmr_drain_state_list() - abort SCSI commands associated with a device
*
* @dev: Device for which to abort outstanding SCSI commands.
* @prout_cmd: Pointer to the SCSI PREEMPT AND ABORT if this function is called
* to realize the PREEMPT AND ABORT functionality.
* @tmr_sess: Session through which the LUN RESET has been received.
* @tas: Task Aborted Status (TAS) bit from the SCSI control mode page.
* A quote from SPC-4, paragraph "7.5.10 Control mode page":
* "A task aborted status (TAS) bit set to zero specifies that
* aborted commands shall be terminated by the device server
* without any response to the application client. A TAS bit set
* to one specifies that commands aborted by the actions of an I_T
* nexus other than the I_T nexus on which the command was
* received shall be completed with TASK ABORTED status."
* @preempt_and_abort_list: For the PREEMPT AND ABORT functionality, a list
* with registrations that will be preempted.
*/
static void core_tmr_drain_state_list(
struct se_device *dev,
struct se_cmd *prout_cmd,
struct se_session *tmr_sess,
bool tas,
struct list_head *preempt_and_abort_list)
{
LIST_HEAD(drain_task_list);
struct se_session *sess;
struct se_cmd *cmd, *next;
unsigned long flags;
int rc, i;
/*
* Complete outstanding commands with TASK_ABORTED SAM status.
*
* This is following sam4r17, section 5.6 Aborting commands, Table 38
* for TMR LUN_RESET:
*
* a) "Yes" indicates that each command that is aborted on an I_T nexus
* other than the one that caused the SCSI device condition is
* completed with TASK ABORTED status, if the TAS bit is set to one in
* the Control mode page (see SPC-4). "No" indicates that no status is
* returned for aborted commands.
*
* d) If the logical unit reset is caused by a particular I_T nexus
* (e.g., by a LOGICAL UNIT RESET task management function), then "yes"
* (TASK_ABORTED status) applies.
*
* Otherwise (e.g., if triggered by a hard reset), "no"
* (no TASK_ABORTED SAM status) applies.
*
* Note that this seems to be independent of TAS (Task Aborted Status)
* in the Control Mode Page.
*/
for (i = 0; i < dev->queue_cnt; i++) {
flush_work(&dev->queues[i].sq.work);
spin_lock_irqsave(&dev->queues[i].lock, flags);
list_for_each_entry_safe(cmd, next, &dev->queues[i].state_list,
state_list) {
/*
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
if (target_check_cdb_and_preempt(preempt_and_abort_list,
cmd))
continue;
/*
* Not aborting PROUT PREEMPT_AND_ABORT CDB..
*/
if (prout_cmd == cmd)
continue;
sess = cmd->se_sess;
if (WARN_ON_ONCE(!sess))
continue;
spin_lock(&sess->sess_cmd_lock);
rc = __target_check_io_state(cmd, tmr_sess, tas);
spin_unlock(&sess->sess_cmd_lock);
if (!rc)
continue;
list_move_tail(&cmd->state_list, &drain_task_list);
cmd->state_active = false;
}
spin_unlock_irqrestore(&dev->queues[i].lock, flags);
}
if (dev->transport->tmr_notify)
dev->transport->tmr_notify(dev, preempt_and_abort_list ?
TMR_LUN_RESET_PRO : TMR_LUN_RESET,
&drain_task_list);
while (!list_empty(&drain_task_list)) {
cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
list_del_init(&cmd->state_list);
target_show_cmd("LUN_RESET: ", cmd);
pr_debug("LUN_RESET: ITT[0x%08llx] - %s pr_res_key: 0x%016Lx\n",
cmd->tag, (preempt_and_abort_list) ? "preempt" : "",
cmd->pr_res_key);
target_put_cmd_and_wait(cmd);
}
}
int core_tmr_lun_reset(
struct se_device *dev,
struct se_tmr_req *tmr,
struct list_head *preempt_and_abort_list,
struct se_cmd *prout_cmd)
{
struct se_node_acl *tmr_nacl = NULL;
struct se_portal_group *tmr_tpg = NULL;
struct se_session *tmr_sess = NULL;
bool tas;
/*
* TASK_ABORTED status bit, this is configurable via ConfigFS
* struct se_device attributes. spc4r17 section 7.4.6 Control mode page
*
* A task aborted status (TAS) bit set to zero specifies that aborted
* tasks shall be terminated by the device server without any response
* to the application client. A TAS bit set to one specifies that tasks
* aborted by the actions of an I_T nexus other than the I_T nexus on
* which the command was received shall be completed with TASK ABORTED
* status (see SAM-4).
*/
tas = dev->dev_attrib.emulate_tas;
/*
* Determine if this se_tmr is coming from a $FABRIC_MOD
* or struct se_device passthrough..
*/
if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
tmr_sess = tmr->task_cmd->se_sess;
tmr_nacl = tmr_sess->se_node_acl;
tmr_tpg = tmr_sess->se_tpg;
if (tmr_nacl && tmr_tpg) {
pr_debug("LUN_RESET: TMR caller fabric: %s"
" initiator port %s\n",
tmr_tpg->se_tpg_tfo->fabric_name,
tmr_nacl->initiatorname);
}
}
/*
* We only allow one reset or preempt and abort to execute at a time
* to prevent one call from claiming all the cmds causing a second
* call from returning while cmds it should have waited on are still
* running.
*/
mutex_lock(&dev->lun_reset_mutex);
pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
dev->transport->name, tas);
core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
preempt_and_abort_list);
mutex_unlock(&dev->lun_reset_mutex);
/*
* Clear any legacy SPC-2 reservation when called during
* LOGICAL UNIT RESET
*/
if (!preempt_and_abort_list &&
(dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)) {
spin_lock(&dev->dev_reservation_lock);
dev->reservation_holder = NULL;
dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
spin_unlock(&dev->dev_reservation_lock);
pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
}
atomic_long_inc(&dev->num_resets);
pr_debug("LUN_RESET: %s for [%s] Complete\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
dev->transport->name);
return 0;
}
| linux-master | drivers/target/target_core_tmr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SCSI Block Commands (SBC) parsing and emulation.
*
* (c) Copyright 2002-2013 Datera, Inc.
*
* Nicholas A. Bellinger <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
#include "target_core_ua.h"
#include "target_core_alua.h"
static sense_reason_t
sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char, u32, bool);
static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd);
static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned char *cdb = cmd->t_task_cdb;
unsigned long long blocks_long = dev->transport->get_blocks(dev);
unsigned char *rbuf;
unsigned char buf[8];
u32 blocks;
/*
* SBC-2 says:
* If the PMI bit is set to zero and the LOGICAL BLOCK
* ADDRESS field is not set to zero, the device server shall
* terminate the command with CHECK CONDITION status with
* the sense key set to ILLEGAL REQUEST and the additional
* sense code set to INVALID FIELD IN CDB.
*
* In SBC-3, these fields are obsolete, but some SCSI
* compliance tests actually check this, so we might as well
* follow SBC-2.
*/
if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
return TCM_INVALID_CDB_FIELD;
if (blocks_long >= 0x00000000ffffffff)
blocks = 0xffffffff;
else
blocks = (u32)blocks_long;
put_unaligned_be32(blocks, &buf[0]);
put_unaligned_be32(dev->dev_attrib.block_size, &buf[4]);
rbuf = transport_kmap_data_sg(cmd);
if (rbuf) {
memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
transport_kunmap_data_sg(cmd);
}
target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 8);
return 0;
}
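/*
 * Editor's illustration (not part of the original driver): the +1
 * arithmetic used elsewhere in this file implies that get_blocks()
 * returns the last addressable LBA.  For an 8 TiB backstore with
 * 512-byte blocks that LBA is 2^34 - 1, which exceeds 0xffffffff, so
 * READ CAPACITY (10) reports 0xffffffff and the initiator is expected
 * to retry with READ CAPACITY (16) to learn the real capacity.
 */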
static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
int pi_prot_type = dev->dev_attrib.pi_prot_type;
unsigned char *rbuf;
unsigned char buf[32];
unsigned long long blocks = dev->transport->get_blocks(dev);
memset(buf, 0, sizeof(buf));
put_unaligned_be64(blocks, &buf[0]);
put_unaligned_be32(dev->dev_attrib.block_size, &buf[8]);
/*
* Set P_TYPE and PROT_EN bits for DIF support
*/
if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
/*
* Only override a device's pi_prot_type if no T10-PI is
* available, and sess_prot_type has been explicitly enabled.
*/
if (!pi_prot_type)
pi_prot_type = sess->sess_prot_type;
if (pi_prot_type)
buf[12] = (pi_prot_type - 1) << 1 | 0x1;
}
if (dev->transport->get_lbppbe)
buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
if (dev->transport->get_alignment_offset_lbas) {
u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
put_unaligned_be16(lalba, &buf[14]);
}
/*
* Set Thin Provisioning Enable bit following sbc3r22 in section
* READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
*/
if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) {
buf[14] |= 0x80;
/*
* LBPRZ signifies that zeroes will be read back from an LBA after
* an UNMAP or WRITE SAME w/ unmap bit (sbc3r36 5.16.2)
*/
if (dev->dev_attrib.unmap_zeroes_data)
buf[14] |= 0x40;
}
rbuf = transport_kmap_data_sg(cmd);
if (rbuf) {
memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
transport_kunmap_data_sg(cmd);
}
target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 32);
return 0;
}
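/*
 * Editor's note (illustrative, not part of the original driver): with
 * the encoding above, buf[12] = (pi_prot_type - 1) << 1 | 0x1 yields
 * 0x01 for Type 1 (P_TYPE = 000b, PROT_EN = 1), 0x03 for Type 2 and
 * 0x05 for Type 3, while pi_prot_type == 0 leaves the byte zero
 * (PROT_EN = 0).  In byte 14, bit 7 (0x80) is LBPME and bit 6 (0x40)
 * is LBPRZ, matching the comments above.
 */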
static sense_reason_t
sbc_emulate_startstop(struct se_cmd *cmd)
{
unsigned char *cdb = cmd->t_task_cdb;
/*
* See sbc3r36 section 5.25
* Immediate bit should be set since there is nothing to complete
* POWER CONDITION MODIFIER 0h
*/
if (!(cdb[1] & 1) || cdb[2] || cdb[3])
return TCM_INVALID_CDB_FIELD;
/*
* See sbc3r36 section 5.25
* POWER CONDITION 0h START_VALID - process START and LOEJ
*/
if (cdb[4] >> 4 & 0xf)
return TCM_INVALID_CDB_FIELD;
/*
* See sbc3r36 section 5.25
* LOEJ 0h - nothing to load or unload
* START 1h - we are ready
*/
if (!(cdb[4] & 1) || (cdb[4] & 2) || (cdb[4] & 4))
return TCM_INVALID_CDB_FIELD;
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
u32 num_blocks;
if (cmd->t_task_cdb[0] == WRITE_SAME)
num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
/*
* Use the explicit range when non zero is supplied, otherwise calculate
* the remaining range based on ->get_blocks() - starting LBA.
*/
if (num_blocks)
return num_blocks;
return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);
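/*
 * Editor's note (illustrative, not part of the original driver): if
 * get_blocks() (the last addressable LBA) is 999 and the CDB carries
 * LBA 900 with NUMBER OF LOGICAL BLOCKS set to 0, the helper above
 * returns 999 - 900 + 1 = 100 blocks, i.e. write-same through the end
 * of the device; any non-zero NUMBER OF LOGICAL BLOCKS is returned
 * unchanged.
 */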
static sense_reason_t
sbc_execute_write_same_unmap(struct se_cmd *cmd)
{
struct exec_cmd_ops *ops = cmd->protocol_data;
sector_t nolb = sbc_get_write_same_sectors(cmd);
sense_reason_t ret;
if (nolb) {
ret = ops->execute_unmap(cmd, cmd->t_task_lba, nolb);
if (ret)
return ret;
}
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
return cmd->se_dev->dev_attrib.block_size * sectors;
}
static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
/*
* Use 8-bit sector value. SBC-3 says:
*
* A TRANSFER LENGTH field set to zero specifies that 256
* logical blocks shall be written. Any other value
* specifies the number of logical blocks that shall be
* written.
*/
return cdb[4] ? : 256;
}
static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
return get_unaligned_be16(&cdb[7]);
}
static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
return get_unaligned_be32(&cdb[6]);
}
static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
return get_unaligned_be32(&cdb[10]);
}
/*
* Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
*/
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
return get_unaligned_be32(&cdb[28]);
}
static inline u32 transport_lba_21(unsigned char *cdb)
{
return get_unaligned_be24(&cdb[1]) & 0x1fffff;
}
static inline u32 transport_lba_32(unsigned char *cdb)
{
return get_unaligned_be32(&cdb[2]);
}
static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
return get_unaligned_be64(&cdb[2]);
}
static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags,
struct exec_cmd_ops *ops)
{
struct se_device *dev = cmd->se_dev;
sector_t end_lba = dev->transport->get_blocks(dev) + 1;
unsigned int sectors = sbc_get_write_same_sectors(cmd);
sense_reason_t ret;
if ((flags & 0x04) || (flags & 0x02)) {
pr_err("WRITE_SAME PBDATA and LBDATA"
" bits not supported for Block Discard"
" Emulation\n");
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
sectors, cmd->se_dev->dev_attrib.max_write_same_len);
return TCM_INVALID_CDB_FIELD;
}
/*
* Sanity check for LBA wrap and request past end of device.
*/
if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
((cmd->t_task_lba + sectors) > end_lba)) {
pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
(unsigned long long)end_lba, cmd->t_task_lba, sectors);
return TCM_ADDRESS_OUT_OF_RANGE;
}
/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
if (flags & 0x10) {
pr_warn("WRITE SAME with ANCHOR not supported\n");
return TCM_INVALID_CDB_FIELD;
}
if (flags & 0x01) {
pr_warn("WRITE SAME with NDOB not supported\n");
return TCM_INVALID_CDB_FIELD;
}
/*
* Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
* translated into block discard requests within backend code.
*/
if (flags & 0x08) {
if (!ops->execute_unmap)
return TCM_UNSUPPORTED_SCSI_OPCODE;
if (!dev->dev_attrib.emulate_tpws) {
pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device"
" has emulate_tpws disabled\n");
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
cmd->execute_cmd = sbc_execute_write_same_unmap;
return 0;
}
if (!ops->execute_write_same)
return TCM_UNSUPPORTED_SCSI_OPCODE;
ret = sbc_check_prot(dev, cmd, flags >> 5, sectors, true);
if (ret)
return ret;
cmd->execute_cmd = ops->execute_write_same;
return 0;
}
static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
struct exec_cmd_ops *ops = cmd->protocol_data;
return ops->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
cmd->data_direction);
}
static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
int *post_ret)
{
struct se_device *dev = cmd->se_dev;
sense_reason_t ret = TCM_NO_SENSE;
spin_lock_irq(&cmd->t_state_lock);
if (success) {
*post_ret = 1;
if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
spin_unlock_irq(&cmd->t_state_lock);
/*
* Unlock ->caw_sem originally obtained during sbc_compare_and_write()
* before the original READ I/O submission.
*/
up(&dev->caw_sem);
return ret;
}
/*
* compare @cmp_len bytes of @read_sgl with @cmp_sgl. On miscompare, fill
* @miscmp_off and return TCM_MISCOMPARE_VERIFY.
*/
static sense_reason_t
compare_and_write_do_cmp(struct scatterlist *read_sgl, unsigned int read_nents,
struct scatterlist *cmp_sgl, unsigned int cmp_nents,
unsigned int cmp_len, unsigned int *miscmp_off)
{
unsigned char *buf = NULL;
struct scatterlist *sg;
sense_reason_t ret;
unsigned int offset;
size_t rc;
int sg_cnt;
buf = kzalloc(cmp_len, GFP_KERNEL);
if (!buf) {
ret = TCM_OUT_OF_RESOURCES;
goto out;
}
rc = sg_copy_to_buffer(cmp_sgl, cmp_nents, buf, cmp_len);
if (!rc) {
pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
ret = TCM_OUT_OF_RESOURCES;
goto out;
}
/*
* Compare SCSI READ payload against verify payload
*/
offset = 0;
ret = TCM_NO_SENSE;
for_each_sg(read_sgl, sg, read_nents, sg_cnt) {
unsigned int len = min(sg->length, cmp_len);
unsigned char *addr = kmap_atomic(sg_page(sg));
if (memcmp(addr, buf + offset, len)) {
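			/*
			 * Walk the mismatching region byte-by-byte to find
			 * the first differing byte, so its offset can be
			 * reported in the sense data INFORMATION field.
			 */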
unsigned int i;
for (i = 0; i < len && addr[i] == buf[offset + i]; i++)
;
*miscmp_off = offset + i;
pr_warn("Detected MISCOMPARE at offset %u\n",
*miscmp_off);
ret = TCM_MISCOMPARE_VERIFY;
}
kunmap_atomic(addr);
if (ret != TCM_NO_SENSE)
goto out;
offset += len;
cmp_len -= len;
if (!cmp_len)
break;
}
pr_debug("COMPARE AND WRITE read data matches compare data\n");
out:
kfree(buf);
return ret;
}
static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
int *post_ret)
{
struct se_device *dev = cmd->se_dev;
struct sg_table write_tbl = { };
struct scatterlist *write_sg;
struct sg_mapping_iter m;
unsigned int len;
unsigned int block_size = dev->dev_attrib.block_size;
unsigned int compare_len = (cmd->t_task_nolb * block_size);
unsigned int miscmp_off = 0;
sense_reason_t ret = TCM_NO_SENSE;
int i;
if (!success) {
/*
* Handle early failure in transport_generic_request_failure(),
* which will not have taken ->caw_sem yet..
*/
if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
return TCM_NO_SENSE;
/*
* The command has been stopped or aborted so
* we don't have to perform the write operation.
*/
WARN_ON(!(cmd->transport_state &
(CMD_T_ABORTED | CMD_T_STOP)));
goto out;
}
/*
* Handle special case for zero-length COMPARE_AND_WRITE
*/
if (!cmd->data_length)
goto out;
/*
* Immediately exit + release dev->caw_sem if command has already
* been failed with a non-zero SCSI status.
*/
if (cmd->scsi_status) {
pr_debug("compare_and_write_callback: non zero scsi_status:"
" 0x%02x\n", cmd->scsi_status);
*post_ret = 1;
if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out;
}
ret = compare_and_write_do_cmp(cmd->t_bidi_data_sg,
cmd->t_bidi_data_nents,
cmd->t_data_sg,
cmd->t_data_nents,
compare_len,
&miscmp_off);
if (ret == TCM_MISCOMPARE_VERIFY) {
/*
* SBC-4 r15: 5.3 COMPARE AND WRITE command
* In the sense data (see 4.18 and SPC-5) the offset from the
* start of the Data-Out Buffer to the first byte of data that
* was not equal shall be reported in the INFORMATION field.
*/
cmd->sense_info = miscmp_off;
goto out;
} else if (ret)
goto out;
if (sg_alloc_table(&write_tbl, cmd->t_data_nents, GFP_KERNEL) < 0) {
pr_err("Unable to allocate compare_and_write sg\n");
ret = TCM_OUT_OF_RESOURCES;
goto out;
}
write_sg = write_tbl.sgl;
i = 0;
len = compare_len;
sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
/*
* Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
*/
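	/*
	 * The COMPARE AND WRITE Data-Out buffer carries the compare payload
	 * followed by the write payload.  Build a new SGL that references
	 * only the write payload: with block_size < PAGE_SIZE both halves
	 * share a page, so only the offset shifts by one block; otherwise
	 * advance to the next SG element.
	 */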
while (len) {
sg_miter_next(&m);
if (block_size < PAGE_SIZE) {
sg_set_page(&write_sg[i], m.page, block_size,
m.piter.sg->offset + block_size);
} else {
sg_miter_next(&m);
sg_set_page(&write_sg[i], m.page, block_size,
m.piter.sg->offset);
}
len -= block_size;
i++;
}
sg_miter_stop(&m);
/*
* Save the original SGL + nents values before updating to new
* assignments, to be released in transport_free_pages() ->
* transport_reset_sgl_orig()
*/
cmd->t_data_sg_orig = cmd->t_data_sg;
cmd->t_data_sg = write_sg;
cmd->t_data_nents_orig = cmd->t_data_nents;
cmd->t_data_nents = 1;
cmd->sam_task_attr = TCM_HEAD_TAG;
cmd->transport_complete_callback = compare_and_write_post;
/*
* Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
* for submitting the adjusted SGL to write instance user-data.
*/
cmd->execute_cmd = sbc_execute_rw;
spin_lock_irq(&cmd->t_state_lock);
cmd->t_state = TRANSPORT_PROCESSING;
cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
__target_execute_cmd(cmd, false);
return ret;
out:
/*
* In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
* sbc_compare_and_write() before the original READ I/O submission.
*/
up(&dev->caw_sem);
sg_free_table(&write_tbl);
return ret;
}
static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
struct exec_cmd_ops *ops = cmd->protocol_data;
struct se_device *dev = cmd->se_dev;
sense_reason_t ret;
int rc;
/*
* Submit the READ first for COMPARE_AND_WRITE to perform the
	 * comparison using SGLs at cmd->t_bidi_data_sg.
*/
rc = down_interruptible(&dev->caw_sem);
if (rc != 0) {
cmd->transport_complete_callback = NULL;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
* Reset cmd->data_length to individual block_size in order to not
* confuse backend drivers that depend on this value matching the
* size of the I/O being submitted.
*/
cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;
ret = ops->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
DMA_FROM_DEVICE);
if (ret) {
cmd->transport_complete_callback = NULL;
up(&dev->caw_sem);
return ret;
}
/*
* Unlock of dev->caw_sem to occur in compare_and_write_callback()
* upon MISCOMPARE, or in compare_and_write_done() upon completion
* of WRITE instance user-data.
*/
return TCM_NO_SENSE;
}
static int
sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_type,
bool is_write, struct se_cmd *cmd)
{
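	/*
	 * The protect argument is the 3-bit RDPROTECT/WRPROTECT field from
	 * the CDB; each value selects which protection information checks
	 * (guard and/or reference tag) the target performs, and together
	 * with fabric_prot it determines the DIF insert/strip/pass
	 * operation for the backend.
	 */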
if (is_write) {
cmd->prot_op = fabric_prot ? TARGET_PROT_DOUT_STRIP :
protect ? TARGET_PROT_DOUT_PASS :
TARGET_PROT_DOUT_INSERT;
switch (protect) {
case 0x0:
case 0x3:
cmd->prot_checks = 0;
break;
case 0x1:
case 0x5:
cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
if (prot_type == TARGET_DIF_TYPE1_PROT)
cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
break;
case 0x2:
if (prot_type == TARGET_DIF_TYPE1_PROT)
cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
break;
case 0x4:
cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
break;
default:
pr_err("Unsupported protect field %d\n", protect);
return -EINVAL;
}
} else {
cmd->prot_op = fabric_prot ? TARGET_PROT_DIN_INSERT :
protect ? TARGET_PROT_DIN_PASS :
TARGET_PROT_DIN_STRIP;
switch (protect) {
case 0x0:
case 0x1:
case 0x5:
cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
if (prot_type == TARGET_DIF_TYPE1_PROT)
cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
break;
case 0x2:
if (prot_type == TARGET_DIF_TYPE1_PROT)
cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
break;
case 0x3:
cmd->prot_checks = 0;
break;
case 0x4:
cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
break;
default:
pr_err("Unsupported protect field %d\n", protect);
return -EINVAL;
}
}
return 0;
}
static sense_reason_t
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect,
u32 sectors, bool is_write)
{
int sp_ops = cmd->se_sess->sup_prot_ops;
int pi_prot_type = dev->dev_attrib.pi_prot_type;
bool fabric_prot = false;
if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
if (unlikely(protect &&
!dev->dev_attrib.pi_prot_type && !cmd->se_sess->sess_prot_type)) {
pr_err("CDB contains protect bit, but device + fabric does"
" not advertise PROTECT=1 feature bit\n");
return TCM_INVALID_CDB_FIELD;
}
if (cmd->prot_pto)
return TCM_NO_SENSE;
}
switch (dev->dev_attrib.pi_prot_type) {
case TARGET_DIF_TYPE3_PROT:
cmd->reftag_seed = 0xffffffff;
break;
case TARGET_DIF_TYPE2_PROT:
if (protect)
return TCM_INVALID_CDB_FIELD;
cmd->reftag_seed = cmd->t_task_lba;
break;
case TARGET_DIF_TYPE1_PROT:
cmd->reftag_seed = cmd->t_task_lba;
break;
case TARGET_DIF_TYPE0_PROT:
/*
* See if the fabric supports T10-PI, and the session has been
* configured to allow export PROTECT=1 feature bit with backend
* devices that don't support T10-PI.
*/
fabric_prot = is_write ?
!!(sp_ops & (TARGET_PROT_DOUT_PASS | TARGET_PROT_DOUT_STRIP)) :
!!(sp_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DIN_INSERT));
if (fabric_prot && cmd->se_sess->sess_prot_type) {
pi_prot_type = cmd->se_sess->sess_prot_type;
break;
}
if (!protect)
return TCM_NO_SENSE;
fallthrough;
default:
pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
"PROTECT: 0x%02x\n", cmd->t_task_cdb[0], protect);
return TCM_INVALID_CDB_FIELD;
}
if (sbc_set_prot_op_checks(protect, fabric_prot, pi_prot_type, is_write, cmd))
return TCM_INVALID_CDB_FIELD;
cmd->prot_type = pi_prot_type;
cmd->prot_length = dev->prot_length * sectors;
/**
* In case protection information exists over the wire
* we modify command data length to describe pure data.
* The actual transfer length is data length + protection
* length
**/
if (protect)
cmd->data_length = sectors * dev->dev_attrib.block_size;
pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
"prot_op=%d prot_checks=%d\n",
__func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
cmd->prot_op, cmd->prot_checks);
return TCM_NO_SENSE;
}
static int
sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
{
if (cdb[1] & 0x10) {
/* see explanation in spc_emulate_modesense */
if (!target_check_fua(dev)) {
pr_err("Got CDB: 0x%02x with DPO bit set, but device"
" does not advertise support for DPO\n", cdb[0]);
return -EINVAL;
}
}
if (cdb[1] & 0x8) {
if (!target_check_fua(dev)) {
pr_err("Got CDB: 0x%02x with FUA bit set, but device"
" does not advertise support for FUA write\n",
cdb[0]);
return -EINVAL;
}
cmd->se_cmd_flags |= SCF_FUA;
}
return 0;
}
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops)
{
struct se_device *dev = cmd->se_dev;
unsigned char *cdb = cmd->t_task_cdb;
unsigned int size;
u32 sectors = 0;
sense_reason_t ret;
cmd->protocol_data = ops;
switch (cdb[0]) {
case READ_6:
sectors = transport_get_sectors_6(cdb);
cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_cmd = sbc_execute_rw;
break;
case READ_10:
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
if (ret)
return ret;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_cmd = sbc_execute_rw;
break;
case READ_12:
sectors = transport_get_sectors_12(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
if (ret)
return ret;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_cmd = sbc_execute_rw;
break;
case READ_16:
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
if (ret)
return ret;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_cmd = sbc_execute_rw;
break;
case WRITE_6:
sectors = transport_get_sectors_6(cdb);
cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_cmd = sbc_execute_rw;
break;
case WRITE_10:
case WRITE_VERIFY:
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
if (ret)
return ret;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_cmd = sbc_execute_rw;
break;
case WRITE_12:
sectors = transport_get_sectors_12(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
if (ret)
return ret;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_cmd = sbc_execute_rw;
break;
case WRITE_16:
case WRITE_VERIFY_16:
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
if (ret)
return ret;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_cmd = sbc_execute_rw;
break;
case VARIABLE_LENGTH_CMD:
{
u16 service_action = get_unaligned_be16(&cdb[8]);
switch (service_action) {
case WRITE_SAME_32:
sectors = transport_get_sectors_32(cdb);
if (!sectors) {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
" supported\n");
return TCM_INVALID_CDB_FIELD;
}
size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
ret = sbc_setup_write_same(cmd, cdb[10], ops);
if (ret)
return ret;
break;
default:
pr_err("VARIABLE_LENGTH_CMD service action"
" 0x%04x not supported\n", service_action);
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
break;
}
case COMPARE_AND_WRITE:
if (!dev->dev_attrib.emulate_caw) {
pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject COMPARE_AND_WRITE\n",
dev->se_hba->backend->ops->name,
config_item_name(&dev->dev_group.cg_item),
dev->t10_wwn.unit_serial);
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
sectors = cdb[13];
/*
* Currently enforce COMPARE_AND_WRITE for a single sector
*/
if (sectors > 1) {
pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
" than 1\n", sectors);
return TCM_INVALID_CDB_FIELD;
}
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
/*
* Double size because we have two buffers, note that
* zero is not an error..
*/
size = 2 * sbc_get_size(cmd, sectors);
cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
cmd->t_task_nolb = sectors;
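		/*
		 * sbc_compare_and_write() submits only the READ portion; the
		 * comparison and, on a match, the WRITE of the second half of
		 * the payload are driven from compare_and_write_callback() at
		 * READ completion.
		 */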
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
cmd->execute_cmd = sbc_compare_and_write;
cmd->transport_complete_callback = compare_and_write_callback;
break;
case READ_CAPACITY:
size = READ_CAP_LEN;
cmd->execute_cmd = sbc_emulate_readcapacity;
break;
case SERVICE_ACTION_IN_16:
switch (cmd->t_task_cdb[1] & 0x1f) {
case SAI_READ_CAPACITY_16:
cmd->execute_cmd = sbc_emulate_readcapacity_16;
break;
case SAI_REPORT_REFERRALS:
cmd->execute_cmd = target_emulate_report_referrals;
break;
default:
pr_err("Unsupported SA: 0x%02x\n",
cmd->t_task_cdb[1] & 0x1f);
return TCM_INVALID_CDB_FIELD;
}
size = get_unaligned_be32(&cdb[10]);
break;
case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
if (cdb[0] == SYNCHRONIZE_CACHE) {
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
} else {
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
}
if (ops->execute_sync_cache) {
cmd->execute_cmd = ops->execute_sync_cache;
goto check_lba;
}
size = 0;
cmd->execute_cmd = sbc_emulate_noop;
break;
case UNMAP:
if (!ops->execute_unmap)
return TCM_UNSUPPORTED_SCSI_OPCODE;
if (!dev->dev_attrib.emulate_tpu) {
pr_err("Got UNMAP, but backend device has"
" emulate_tpu disabled\n");
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
size = get_unaligned_be16(&cdb[7]);
cmd->execute_cmd = sbc_execute_unmap;
break;
case WRITE_SAME_16:
sectors = transport_get_sectors_16(cdb);
if (!sectors) {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
return TCM_INVALID_CDB_FIELD;
}
size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
ret = sbc_setup_write_same(cmd, cdb[1], ops);
if (ret)
return ret;
break;
case WRITE_SAME:
sectors = transport_get_sectors_10(cdb);
if (!sectors) {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
return TCM_INVALID_CDB_FIELD;
}
size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
/*
* Follow sbcr26 with WRITE_SAME (10) and check for the existence
* of byte 1 bit 3 UNMAP instead of original reserved field
*/
ret = sbc_setup_write_same(cmd, cdb[1], ops);
if (ret)
return ret;
break;
case VERIFY:
case VERIFY_16:
size = 0;
if (cdb[0] == VERIFY) {
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
} else {
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
}
cmd->execute_cmd = sbc_emulate_noop;
goto check_lba;
case REZERO_UNIT:
case SEEK_6:
case SEEK_10:
/*
* There are still clients out there which use these old SCSI-2
* commands. This mainly happens when running VMs with legacy
* guest systems, connected via SCSI command pass-through to
* iSCSI targets. Make them happy and return status GOOD.
*/
size = 0;
cmd->execute_cmd = sbc_emulate_noop;
break;
case START_STOP:
size = 0;
cmd->execute_cmd = sbc_emulate_startstop;
break;
default:
ret = spc_parse_cdb(cmd, &size);
if (ret)
return ret;
}
/* reject any command that we don't have a handler for */
if (!cmd->execute_cmd)
return TCM_UNSUPPORTED_SCSI_OPCODE;
if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
unsigned long long end_lba;
check_lba:
end_lba = dev->transport->get_blocks(dev) + 1;
if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
((cmd->t_task_lba + sectors) > end_lba)) {
pr_err("cmd exceeds last lba %llu "
"(lba %llu, sectors %u)\n",
end_lba, cmd->t_task_lba, sectors);
return TCM_ADDRESS_OUT_OF_RANGE;
}
if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
size = sbc_get_size(cmd, sectors);
}
return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);
u32 sbc_get_device_type(struct se_device *dev)
{
return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);
static sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd)
{
struct exec_cmd_ops *ops = cmd->protocol_data;
struct se_device *dev = cmd->se_dev;
unsigned char *buf, *ptr = NULL;
sector_t lba;
int size;
u32 range;
sense_reason_t ret = 0;
int dl, bd_dl;
/* We never set ANC_SUP */
if (cmd->t_task_cdb[1])
return TCM_INVALID_CDB_FIELD;
if (cmd->data_length == 0) {
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
if (cmd->data_length < 8) {
pr_warn("UNMAP parameter list length %u too small\n",
cmd->data_length);
return TCM_PARAMETER_LIST_LENGTH_ERROR;
}
buf = transport_kmap_data_sg(cmd);
if (!buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
dl = get_unaligned_be16(&buf[0]);
bd_dl = get_unaligned_be16(&buf[2]);
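	/*
	 * Bytes 0-1 of the parameter list header hold the UNMAP data length
	 * and bytes 2-3 the block descriptor data length; the 8-byte header
	 * is followed by 16-byte block descriptors.
	 */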
size = cmd->data_length - 8;
if (bd_dl > size)
pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
cmd->data_length, bd_dl);
else
size = bd_dl;
if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
ret = TCM_INVALID_PARAMETER_LIST;
goto err;
}
/* First UNMAP block descriptor starts at 8 byte offset */
ptr = &buf[8];
pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
while (size >= 16) {
lba = get_unaligned_be64(&ptr[0]);
range = get_unaligned_be32(&ptr[8]);
pr_debug("UNMAP: Using lba: %llu and range: %u\n",
(unsigned long long)lba, range);
if (range > dev->dev_attrib.max_unmap_lba_count) {
ret = TCM_INVALID_PARAMETER_LIST;
goto err;
}
if (lba + range > dev->transport->get_blocks(dev) + 1) {
ret = TCM_ADDRESS_OUT_OF_RANGE;
goto err;
}
if (range) {
ret = ops->execute_unmap(cmd, lba, range);
if (ret)
goto err;
}
ptr += 16;
size -= 16;
}
err:
transport_kunmap_data_sg(cmd);
if (!ret)
target_complete_cmd(cmd, SAM_STAT_GOOD);
return ret;
}
void
sbc_dif_generate(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct t10_pi_tuple *sdt;
struct scatterlist *dsg = cmd->t_data_sg, *psg;
sector_t sector = cmd->t_task_lba;
void *daddr, *paddr;
int i, j, offset = 0;
unsigned int block_size = dev->dev_attrib.block_size;
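	/*
	 * For each logical block, generate a T10 PI tuple over the data
	 * scatterlist: a CRC16 guard tag, an application tag of zero and,
	 * for Type 1, a reference tag taken from the low 32 bits of the
	 * LBA.  Blocks that straddle SG elements continue the CRC into the
	 * next element.
	 */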
for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
paddr = kmap_atomic(sg_page(psg)) + psg->offset;
daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
for (j = 0; j < psg->length;
j += sizeof(*sdt)) {
__u16 crc;
unsigned int avail;
if (offset >= dsg->length) {
offset -= dsg->length;
kunmap_atomic(daddr - dsg->offset);
dsg = sg_next(dsg);
if (!dsg) {
kunmap_atomic(paddr - psg->offset);
return;
}
daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
}
sdt = paddr + j;
avail = min(block_size, dsg->length - offset);
crc = crc_t10dif(daddr + offset, avail);
if (avail < block_size) {
kunmap_atomic(daddr - dsg->offset);
dsg = sg_next(dsg);
if (!dsg) {
kunmap_atomic(paddr - psg->offset);
return;
}
daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
offset = block_size - avail;
crc = crc_t10dif_update(crc, daddr, offset);
} else {
offset += block_size;
}
sdt->guard_tag = cpu_to_be16(crc);
if (cmd->prot_type == TARGET_DIF_TYPE1_PROT)
sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
sdt->app_tag = 0;
pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x"
" app_tag: 0x%04x ref_tag: %u\n",
(cmd->data_direction == DMA_TO_DEVICE) ?
"WRITE" : "READ", (unsigned long long)sector,
sdt->guard_tag, sdt->app_tag,
be32_to_cpu(sdt->ref_tag));
sector++;
}
kunmap_atomic(daddr - dsg->offset);
kunmap_atomic(paddr - psg->offset);
}
}
static sense_reason_t
sbc_dif_v1_verify(struct se_cmd *cmd, struct t10_pi_tuple *sdt,
__u16 crc, sector_t sector, unsigned int ei_lba)
{
__be16 csum;
if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
goto check_ref;
csum = cpu_to_be16(crc);
if (sdt->guard_tag != csum) {
pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
" csum 0x%04x\n", (unsigned long long)sector,
be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
}
check_ref:
if (!(cmd->prot_checks & TARGET_DIF_CHECK_REFTAG))
return 0;
if (cmd->prot_type == TARGET_DIF_TYPE1_PROT &&
be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
" sector MSB: 0x%08x\n", (unsigned long long)sector,
be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
}
if (cmd->prot_type == TARGET_DIF_TYPE2_PROT &&
be32_to_cpu(sdt->ref_tag) != ei_lba) {
pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
" ei_lba: 0x%08x\n", (unsigned long long)sector,
be32_to_cpu(sdt->ref_tag), ei_lba);
return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
}
return 0;
}
void sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
struct scatterlist *sg, int sg_off)
{
struct se_device *dev = cmd->se_dev;
struct scatterlist *psg;
void *paddr, *addr;
unsigned int i, len, left;
unsigned int offset = sg_off;
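	/*
	 * Copy protection information between cmd->t_prot_sg and the
	 * caller-provided scatterlist: on read, from @sg into
	 * cmd->t_prot_sg; on write, from cmd->t_prot_sg into @sg.
	 */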
if (!sg)
return;
left = sectors * dev->prot_length;
for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
unsigned int psg_len, copied = 0;
paddr = kmap_atomic(sg_page(psg)) + psg->offset;
psg_len = min(left, psg->length);
while (psg_len) {
len = min(psg_len, sg->length - offset);
addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;
if (read)
memcpy(paddr + copied, addr, len);
else
memcpy(addr, paddr + copied, len);
left -= len;
offset += len;
copied += len;
psg_len -= len;
kunmap_atomic(addr - sg->offset - offset);
if (offset >= sg->length) {
sg = sg_next(sg);
offset = 0;
}
}
kunmap_atomic(paddr - psg->offset);
}
}
EXPORT_SYMBOL(sbc_dif_copy_prot);
sense_reason_t
sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors,
unsigned int ei_lba, struct scatterlist *psg, int psg_off)
{
struct se_device *dev = cmd->se_dev;
struct t10_pi_tuple *sdt;
struct scatterlist *dsg = cmd->t_data_sg;
sector_t sector = start;
void *daddr, *paddr;
int i;
sense_reason_t rc;
int dsg_off = 0;
unsigned int block_size = dev->dev_attrib.block_size;
for (; psg && sector < start + sectors; psg = sg_next(psg)) {
paddr = kmap_atomic(sg_page(psg)) + psg->offset;
daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
for (i = psg_off; i < psg->length &&
sector < start + sectors;
i += sizeof(*sdt)) {
__u16 crc;
unsigned int avail;
if (dsg_off >= dsg->length) {
dsg_off -= dsg->length;
kunmap_atomic(daddr - dsg->offset);
dsg = sg_next(dsg);
if (!dsg) {
kunmap_atomic(paddr - psg->offset);
return 0;
}
daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
}
sdt = paddr + i;
pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
" app_tag: 0x%04x ref_tag: %u\n",
(unsigned long long)sector, sdt->guard_tag,
sdt->app_tag, be32_to_cpu(sdt->ref_tag));
if (sdt->app_tag == T10_PI_APP_ESCAPE) {
dsg_off += block_size;
goto next;
}
avail = min(block_size, dsg->length - dsg_off);
crc = crc_t10dif(daddr + dsg_off, avail);
if (avail < block_size) {
kunmap_atomic(daddr - dsg->offset);
dsg = sg_next(dsg);
if (!dsg) {
kunmap_atomic(paddr - psg->offset);
return 0;
}
daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
dsg_off = block_size - avail;
crc = crc_t10dif_update(crc, daddr, dsg_off);
} else {
dsg_off += block_size;
}
rc = sbc_dif_v1_verify(cmd, sdt, crc, sector, ei_lba);
if (rc) {
kunmap_atomic(daddr - dsg->offset);
kunmap_atomic(paddr - psg->offset);
cmd->sense_info = sector;
return rc;
}
next:
sector++;
ei_lba++;
}
psg_off = 0;
kunmap_atomic(daddr - dsg->offset);
kunmap_atomic(paddr - psg->offset);
}
return 0;
}
EXPORT_SYMBOL(sbc_dif_verify);
|
linux-master
|
drivers/target/target_core_sbc.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_ua.c
*
* This file contains logic for SPC-3 Unit Attention emulation
*
* (c) Copyright 2009-2013 Datera, Inc.
*
* Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
sense_reason_t
target_scsi3_ua_check(struct se_cmd *cmd)
{
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
if (!sess)
return 0;
nacl = sess->se_node_acl;
if (!nacl)
return 0;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
if (!deve) {
rcu_read_unlock();
return 0;
}
if (list_empty_careful(&deve->ua_list)) {
rcu_read_unlock();
return 0;
}
rcu_read_unlock();
/*
* From sam4r14, section 5.14 Unit attention condition:
*
* a) if an INQUIRY command enters the enabled command state, the
* device server shall process the INQUIRY command and shall neither
* report nor clear any unit attention condition;
* b) if a REPORT LUNS command enters the enabled command state, the
* device server shall process the REPORT LUNS command and shall not
* report any unit attention condition;
* e) if a REQUEST SENSE command enters the enabled command state while
* a unit attention condition exists for the SCSI initiator port
* associated with the I_T nexus on which the REQUEST SENSE command
* was received, then the device server shall process the command
* and either:
*/
switch (cmd->t_task_cdb[0]) {
case INQUIRY:
case REPORT_LUNS:
case REQUEST_SENSE:
return 0;
default:
return TCM_CHECK_CONDITION_UNIT_ATTENTION;
}
}
int core_scsi3_ua_allocate(
struct se_dev_entry *deve,
u8 asc,
u8 ascq)
{
struct se_ua *ua, *ua_p, *ua_tmp;
ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
if (!ua) {
pr_err("Unable to allocate struct se_ua\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&ua->ua_nacl_list);
ua->ua_asc = asc;
ua->ua_ascq = ascq;
spin_lock(&deve->ua_lock);
list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
/*
* Do not report the same UNIT ATTENTION twice..
*/
if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
spin_unlock(&deve->ua_lock);
kmem_cache_free(se_ua_cache, ua);
return 0;
}
/*
* Attach the highest priority Unit Attention to
* the head of the list following sam4r14,
* Section 5.14 Unit Attention Condition:
*
* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED highest
* POWER ON OCCURRED or
* DEVICE INTERNAL RESET
* SCSI BUS RESET OCCURRED or
* MICROCODE HAS BEEN CHANGED or
* protocol specific
* BUS DEVICE RESET FUNCTION OCCURRED
* I_T NEXUS LOSS OCCURRED
* COMMANDS CLEARED BY POWER LOSS NOTIFICATION
* all others Lowest
*
* Each of the ASCQ codes listed above are defined in
* the 29h ASC family, see spc4r17 Table D.1
*/
if (ua_p->ua_asc == 0x29) {
if ((asc == 0x29) && (ascq > ua_p->ua_ascq))
list_add(&ua->ua_nacl_list,
&deve->ua_list);
else
list_add_tail(&ua->ua_nacl_list,
&deve->ua_list);
} else if (ua_p->ua_asc == 0x2a) {
/*
* Incoming Family 29h ASCQ codes will override
			 * Family 2Ah ASCQ codes for Unit Attention condition.
*/
if ((asc == 0x29) || (ascq > ua_p->ua_asc))
list_add(&ua->ua_nacl_list,
&deve->ua_list);
else
list_add_tail(&ua->ua_nacl_list,
&deve->ua_list);
} else
list_add_tail(&ua->ua_nacl_list,
&deve->ua_list);
spin_unlock(&deve->ua_lock);
return 0;
}
list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
spin_unlock(&deve->ua_lock);
pr_debug("Allocated UNIT ATTENTION, mapped LUN: %llu, ASC:"
" 0x%02x, ASCQ: 0x%02x\n", deve->mapped_lun,
asc, ascq);
return 0;
}
void target_ua_allocate_lun(struct se_node_acl *nacl,
u32 unpacked_lun, u8 asc, u8 ascq)
{
struct se_dev_entry *deve;
if (!nacl)
return;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, unpacked_lun);
if (!deve) {
rcu_read_unlock();
return;
}
core_scsi3_ua_allocate(deve, asc, ascq);
rcu_read_unlock();
}
void core_scsi3_ua_release_all(
struct se_dev_entry *deve)
{
struct se_ua *ua, *ua_p;
spin_lock(&deve->ua_lock);
list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
}
spin_unlock(&deve->ua_lock);
}
/*
* Dequeue a unit attention from the unit attention list. This function
* returns true if the dequeuing succeeded and if *@key, *@asc and *@ascq have
* been set.
*/
bool core_scsi3_ua_for_check_condition(struct se_cmd *cmd, u8 *key, u8 *asc,
u8 *ascq)
{
struct se_device *dev = cmd->se_dev;
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
struct se_ua *ua = NULL, *ua_p;
int head = 1;
bool dev_ua_intlck_clear = (dev->dev_attrib.emulate_ua_intlck_ctrl
== TARGET_UA_INTLCK_CTRL_CLEAR);
if (WARN_ON_ONCE(!sess))
return false;
nacl = sess->se_node_acl;
if (WARN_ON_ONCE(!nacl))
return false;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
if (!deve) {
rcu_read_unlock();
*key = ILLEGAL_REQUEST;
*asc = 0x25; /* LOGICAL UNIT NOT SUPPORTED */
*ascq = 0;
return true;
}
*key = UNIT_ATTENTION;
/*
* The highest priority Unit Attentions are placed at the head of the
* struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION +
* sense data for the received CDB.
*/
spin_lock(&deve->ua_lock);
list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
/*
* For ua_intlck_ctrl code not equal to 00b, only report the
* highest priority UNIT_ATTENTION and ASC/ASCQ without
* clearing it.
*/
if (!dev_ua_intlck_clear) {
*asc = ua->ua_asc;
*ascq = ua->ua_ascq;
break;
}
/*
* Otherwise for the default 00b, release the UNIT ATTENTION
* condition. Return the ASC/ASCQ of the highest priority UA
* (head of the list) in the outgoing CHECK_CONDITION + sense.
*/
if (head) {
*asc = ua->ua_asc;
*ascq = ua->ua_ascq;
head = 0;
}
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
}
spin_unlock(&deve->ua_lock);
rcu_read_unlock();
pr_debug("[%s]: %s UNIT ATTENTION condition with"
" INTLCK_CTRL: %d, mapped LUN: %llu, got CDB: 0x%02x"
" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
nacl->se_tpg->se_tpg_tfo->fabric_name,
dev_ua_intlck_clear ? "Releasing" : "Reporting",
dev->dev_attrib.emulate_ua_intlck_ctrl,
cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
return head == 0;
}
int core_scsi3_ua_clear_for_request_sense(
struct se_cmd *cmd,
u8 *asc,
u8 *ascq)
{
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
struct se_ua *ua = NULL, *ua_p;
int head = 1;
if (!sess)
return -EINVAL;
nacl = sess->se_node_acl;
if (!nacl)
return -EINVAL;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
if (!deve) {
rcu_read_unlock();
return -EINVAL;
}
if (list_empty_careful(&deve->ua_list)) {
rcu_read_unlock();
return -EPERM;
}
/*
* The highest priority Unit Attentions are placed at the head of the
* struct se_dev_entry->ua_list. The First (and hence highest priority)
* ASC/ASCQ will be returned in REQUEST_SENSE payload data for the
* matching struct se_lun.
*
* Once the returning ASC/ASCQ values are set, we go ahead and
* release all of the Unit Attention conditions for the associated
* struct se_lun.
*/
spin_lock(&deve->ua_lock);
list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
if (head) {
*asc = ua->ua_asc;
*ascq = ua->ua_ascq;
head = 0;
}
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
}
spin_unlock(&deve->ua_lock);
rcu_read_unlock();
pr_debug("[%s]: Released UNIT ATTENTION condition, mapped"
" LUN: %llu, got REQUEST_SENSE reported ASC: 0x%02x,"
" ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->fabric_name,
cmd->orig_fe_lun, *asc, *ascq);
return (head) ? -EPERM : 0;
}
|
linux-master
|
drivers/target/target_core_ua.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_tpg.c
*
* This file contains generic Target Portal Group related functions.
*
* (c) Copyright 2002-2013 Datera, Inc.
*
* Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
extern struct se_device *g_lun0_dev;
static DEFINE_XARRAY_ALLOC(tpg_xa);
/* __core_tpg_get_initiator_node_acl():
*
* mutex_lock(&tpg->acl_node_mutex); must be held when calling
*/
struct se_node_acl *__core_tpg_get_initiator_node_acl(
struct se_portal_group *tpg,
const char *initiatorname)
{
struct se_node_acl *acl;
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
if (!strcmp(acl->initiatorname, initiatorname))
return acl;
}
return NULL;
}
/* core_tpg_get_initiator_node_acl():
*
*
*/
struct se_node_acl *core_tpg_get_initiator_node_acl(
struct se_portal_group *tpg,
unsigned char *initiatorname)
{
struct se_node_acl *acl;
/*
* Obtain se_node_acl->acl_kref using fabric driver provided
* initiatorname[] during node acl endpoint lookup driven by
* new se_session login.
*
* The reference is held until se_session shutdown -> release
* occurs via fabric driver invoked transport_deregister_session()
* or transport_free_session() code.
*/
mutex_lock(&tpg->acl_node_mutex);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
if (acl) {
if (!kref_get_unless_zero(&acl->acl_kref))
acl = NULL;
}
mutex_unlock(&tpg->acl_node_mutex);
return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
void core_allocate_nexus_loss_ua(
struct se_node_acl *nacl)
{
struct se_dev_entry *deve;
if (!nacl)
return;
rcu_read_lock();
hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
core_scsi3_ua_allocate(deve, 0x29,
ASCQ_29H_NEXUS_LOSS_OCCURRED);
rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);
/* core_tpg_add_node_to_devs():
*
*
*/
void core_tpg_add_node_to_devs(
struct se_node_acl *acl,
struct se_portal_group *tpg,
struct se_lun *lun_orig)
{
bool lun_access_ro = true;
struct se_lun *lun;
struct se_device *dev;
mutex_lock(&tpg->tpg_lun_mutex);
hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
if (lun_orig && lun != lun_orig)
continue;
dev = rcu_dereference_check(lun->lun_se_dev,
lockdep_is_held(&tpg->tpg_lun_mutex));
/*
* By default in LIO-Target $FABRIC_MOD,
* demo_mode_write_protect is ON, or READ_ONLY;
*/
if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
lun_access_ro = false;
} else {
/*
* Allow only optical drives to issue R/W in default RO
* demo mode.
*/
if (dev->transport->get_device_type(dev) == TYPE_DISK)
lun_access_ro = true;
else
lun_access_ro = false;
}
pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
" access for LUN in Demo Mode\n",
tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
lun_access_ro ? "READ-ONLY" : "READ-WRITE");
core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
lun_access_ro, acl, tpg);
/*
* Check to see if there are any existing persistent reservation
* APTPL pre-registrations that need to be enabled for this dynamic
* LUN ACL now..
*/
core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
lun->unpacked_lun);
}
mutex_unlock(&tpg->tpg_lun_mutex);
}
static void
target_set_nacl_queue_depth(struct se_portal_group *tpg,
struct se_node_acl *acl, u32 queue_depth)
{
acl->queue_depth = queue_depth;
if (!acl->queue_depth) {
pr_warn("Queue depth for %s Initiator Node: %s is 0,"
"defaulting to 1.\n", tpg->se_tpg_tfo->fabric_name,
acl->initiatorname);
acl->queue_depth = 1;
}
}
static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
const unsigned char *initiatorname)
{
struct se_node_acl *acl;
u32 queue_depth;
acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
GFP_KERNEL);
if (!acl)
return NULL;
INIT_LIST_HEAD(&acl->acl_list);
INIT_LIST_HEAD(&acl->acl_sess_list);
INIT_HLIST_HEAD(&acl->lun_entry_hlist);
kref_init(&acl->acl_kref);
init_completion(&acl->acl_free_comp);
spin_lock_init(&acl->nacl_sess_lock);
mutex_init(&acl->lun_entry_mutex);
atomic_set(&acl->acl_pr_ref_count, 0);
if (tpg->se_tpg_tfo->tpg_get_default_depth)
queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
else
queue_depth = 1;
target_set_nacl_queue_depth(tpg, acl, queue_depth);
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
acl->se_tpg = tpg;
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
tpg->se_tpg_tfo->set_default_node_attributes(acl);
return acl;
}
static void target_add_node_acl(struct se_node_acl *acl)
{
struct se_portal_group *tpg = acl->se_tpg;
mutex_lock(&tpg->acl_node_mutex);
list_add_tail(&acl->acl_list, &tpg->acl_node_list);
mutex_unlock(&tpg->acl_node_mutex);
pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
" Initiator Node: %s\n",
tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg),
acl->dynamic_node_acl ? "DYNAMIC" : "",
acl->queue_depth,
tpg->se_tpg_tfo->fabric_name,
acl->initiatorname);
}
bool target_tpg_has_node_acl(struct se_portal_group *tpg,
const char *initiatorname)
{
struct se_node_acl *acl;
bool found = false;
mutex_lock(&tpg->acl_node_mutex);
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
if (!strcmp(acl->initiatorname, initiatorname)) {
found = true;
break;
}
}
mutex_unlock(&tpg->acl_node_mutex);
return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);
struct se_node_acl *core_tpg_check_initiator_node_acl(
struct se_portal_group *tpg,
unsigned char *initiatorname)
{
struct se_node_acl *acl;
acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
if (acl)
return acl;
if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
return NULL;
acl = target_alloc_node_acl(tpg, initiatorname);
if (!acl)
return NULL;
/*
* When allocating a dynamically generated node_acl, go ahead
* and take the extra kref now before returning to the fabric
* driver caller.
*
* Note this reference will be released at session shutdown
* time within transport_free_session() code.
*/
kref_get(&acl->acl_kref);
acl->dynamic_node_acl = 1;
/*
* Here we only create demo-mode MappedLUNs from the active
* TPG LUNs if the fabric is not explicitly asking for
* tpg_check_demo_mode_login_only() == 1.
*/
if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
(tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
core_tpg_add_node_to_devs(acl, tpg, NULL);
target_add_node_acl(acl);
return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
while (atomic_read(&nacl->acl_pr_ref_count) != 0)
cpu_relax();
}
struct se_node_acl *core_tpg_add_initiator_node_acl(
struct se_portal_group *tpg,
const char *initiatorname)
{
struct se_node_acl *acl;
mutex_lock(&tpg->acl_node_mutex);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
if (acl) {
if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0;
pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
" for %s\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
mutex_unlock(&tpg->acl_node_mutex);
return acl;
}
pr_err("ACL entry for %s Initiator"
" Node %s already exists for TPG %u, ignoring"
" request.\n", tpg->se_tpg_tfo->fabric_name,
initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
mutex_unlock(&tpg->acl_node_mutex);
return ERR_PTR(-EEXIST);
}
mutex_unlock(&tpg->acl_node_mutex);
acl = target_alloc_node_acl(tpg, initiatorname);
if (!acl)
return ERR_PTR(-ENOMEM);
target_add_node_acl(acl);
return acl;
}
static void target_shutdown_sessions(struct se_node_acl *acl)
{
struct se_session *sess;
unsigned long flags;
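	/*
	 * The fabric close_session() callback is invoked without
	 * nacl_sess_lock held; since the list may change while the lock is
	 * dropped, the walk restarts from the head after each callback.
	 */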
restart:
spin_lock_irqsave(&acl->nacl_sess_lock, flags);
list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
if (sess->cmd_cnt && atomic_read(&sess->cmd_cnt->stopped))
continue;
list_del_init(&sess->sess_acl_list);
spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
if (acl->se_tpg->se_tpg_tfo->close_session)
acl->se_tpg->se_tpg_tfo->close_session(sess);
goto restart;
}
spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
}
void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
struct se_portal_group *tpg = acl->se_tpg;
mutex_lock(&tpg->acl_node_mutex);
if (acl->dynamic_node_acl)
acl->dynamic_node_acl = 0;
list_del_init(&acl->acl_list);
mutex_unlock(&tpg->acl_node_mutex);
target_shutdown_sessions(acl);
target_put_nacl(acl);
/*
* Wait for last target_put_nacl() to complete in target_complete_nacl()
* for active fabric session transport_deregister_session() callbacks.
*/
wait_for_completion(&acl->acl_free_comp);
core_tpg_wait_for_nacl_pr_ref(acl);
core_free_device_list_for_node(acl, tpg);
pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
" Initiator Node: %s\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
tpg->se_tpg_tfo->fabric_name, acl->initiatorname);
kfree(acl);
}
/* core_tpg_set_initiator_node_queue_depth():
*
*
*/
int core_tpg_set_initiator_node_queue_depth(
struct se_node_acl *acl,
u32 queue_depth)
{
struct se_portal_group *tpg = acl->se_tpg;
/*
* Allow the setting of se_node_acl queue_depth to be idempotent,
* and not force a session shutdown event if the value is not
* changing.
*/
if (acl->queue_depth == queue_depth)
return 0;
/*
	 * User has requested to change the queue depth for an Initiator Node.
* Change the value in the Node's struct se_node_acl, and call
* target_set_nacl_queue_depth() to set the new queue depth.
*/
target_set_nacl_queue_depth(tpg, acl, queue_depth);
/*
* Shutdown all pending sessions to force session reinstatement.
*/
target_shutdown_sessions(acl);
pr_debug("Successfully changed queue depth to: %d for Initiator"
" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
acl->initiatorname, tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg));
return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
/* core_tpg_set_initiator_node_tag():
*
* Initiator nodeacl tags are not used internally, but may be used by
* userspace to emulate aliases or groups.
* Returns length of newly-set tag or -EINVAL.
*/
int core_tpg_set_initiator_node_tag(
struct se_portal_group *tpg,
struct se_node_acl *acl,
const char *new_tag)
{
if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
return -EINVAL;
if (!strncmp("NULL", new_tag, 4)) {
acl->acl_tag[0] = '\0';
return 0;
}
return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
complete(&lun->lun_shutdown_comp);
}
static int target_tpg_register_rtpi(struct se_portal_group *se_tpg)
{
u32 val;
int ret;
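	/*
	 * A user-assigned RTPI must be unique, so the xarray insert fails
	 * if the value is already taken; otherwise auto-allocate an unused
	 * value in the 1..USHRT_MAX range.
	 */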
if (se_tpg->rtpi_manual) {
ret = xa_insert(&tpg_xa, se_tpg->tpg_rtpi, se_tpg, GFP_KERNEL);
if (ret) {
pr_info("%s_TPG[%hu] - Can not set RTPI %#x, it is already busy",
se_tpg->se_tpg_tfo->fabric_name,
se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
se_tpg->tpg_rtpi);
return -EINVAL;
}
} else {
ret = xa_alloc(&tpg_xa, &val, se_tpg,
XA_LIMIT(1, USHRT_MAX), GFP_KERNEL);
if (!ret)
se_tpg->tpg_rtpi = val;
}
return ret;
}
static void target_tpg_deregister_rtpi(struct se_portal_group *se_tpg)
{
if (se_tpg->tpg_rtpi && se_tpg->enabled)
xa_erase(&tpg_xa, se_tpg->tpg_rtpi);
}
int target_tpg_enable(struct se_portal_group *se_tpg)
{
int ret;
ret = target_tpg_register_rtpi(se_tpg);
if (ret)
return ret;
ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, true);
if (ret) {
target_tpg_deregister_rtpi(se_tpg);
return ret;
}
se_tpg->enabled = true;
return 0;
}
int target_tpg_disable(struct se_portal_group *se_tpg)
{
int ret;
target_tpg_deregister_rtpi(se_tpg);
ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, false);
if (!ret)
se_tpg->enabled = false;
return ret;
}
/* Does not change se_wwn->priv. */
int core_tpg_register(
struct se_wwn *se_wwn,
struct se_portal_group *se_tpg,
int proto_id)
{
int ret;
if (!se_tpg)
return -EINVAL;
/*
* For the typical case where core_tpg_register() is called by a
* fabric driver from target_core_fabric_ops->fabric_make_tpg()
* configfs context, use the original tf_ops pointer already saved
* by target-core in target_fabric_make_wwn().
*
* Otherwise, for special cases like iscsi-target discovery TPGs
* the caller is responsible for setting ->se_tpg_tfo ahead of
* calling core_tpg_register().
*/
if (se_wwn)
se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;
if (!se_tpg->se_tpg_tfo) {
pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
return -EINVAL;
}
INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
se_tpg->proto_id = proto_id;
se_tpg->se_tpg_wwn = se_wwn;
atomic_set(&se_tpg->tpg_pr_ref_count, 0);
INIT_LIST_HEAD(&se_tpg->acl_node_list);
INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
spin_lock_init(&se_tpg->session_lock);
mutex_init(&se_tpg->tpg_lun_mutex);
mutex_init(&se_tpg->acl_node_mutex);
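	/*
	 * For fabric TPGs (proto_id >= 0), export the shared g_lun0_dev as
	 * a virtual LUN 0 on this TPG.
	 */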
if (se_tpg->proto_id >= 0) {
se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
if (IS_ERR(se_tpg->tpg_virt_lun0))
return PTR_ERR(se_tpg->tpg_virt_lun0);
ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
true, g_lun0_dev);
if (ret < 0) {
kfree(se_tpg->tpg_virt_lun0);
return ret;
}
}
pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
"Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name,
se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
return 0;
}
EXPORT_SYMBOL(core_tpg_register);
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
struct se_node_acl *nacl, *nacl_tmp;
LIST_HEAD(node_list);
pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
"Proto: %d, Portal Tag: %u\n", tfo->fabric_name,
tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));
while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
cpu_relax();
mutex_lock(&se_tpg->acl_node_mutex);
list_splice_init(&se_tpg->acl_node_list, &node_list);
mutex_unlock(&se_tpg->acl_node_mutex);
/*
* Release any remaining demo-mode generated se_node_acl that have
* not been released because of TFO->tpg_check_demo_mode_cache() == 1
* in transport_deregister_session().
*/
list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
list_del_init(&nacl->acl_list);
core_tpg_wait_for_nacl_pr_ref(nacl);
core_free_device_list_for_node(nacl, se_tpg);
kfree(nacl);
}
if (se_tpg->proto_id >= 0) {
core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
}
target_tpg_deregister_rtpi(se_tpg);
return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
struct se_lun *core_tpg_alloc_lun(
struct se_portal_group *tpg,
u64 unpacked_lun)
{
struct se_lun *lun;
lun = kzalloc(sizeof(*lun), GFP_KERNEL);
if (!lun) {
pr_err("Unable to allocate se_lun memory\n");
return ERR_PTR(-ENOMEM);
}
lun->unpacked_lun = unpacked_lun;
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_shutdown_comp);
INIT_LIST_HEAD(&lun->lun_deve_list);
INIT_LIST_HEAD(&lun->lun_dev_link);
atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
spin_lock_init(&lun->lun_deve_lock);
mutex_init(&lun->lun_tg_pt_md_mutex);
INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
spin_lock_init(&lun->lun_tg_pt_gp_lock);
lun->lun_tpg = tpg;
return lun;
}
int core_tpg_add_lun(
struct se_portal_group *tpg,
struct se_lun *lun,
bool lun_access_ro,
struct se_device *dev)
{
int ret;
ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
GFP_KERNEL);
if (ret < 0)
goto out;
if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);
mutex_lock(&tpg->tpg_lun_mutex);
spin_lock(&dev->se_port_lock);
lun->lun_index = dev->dev_index;
rcu_assign_pointer(lun->lun_se_dev, dev);
dev->export_count++;
list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
spin_unlock(&dev->se_port_lock);
if (dev->dev_flags & DF_READ_ONLY)
lun->lun_access_ro = true;
else
lun->lun_access_ro = lun_access_ro;
if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
mutex_unlock(&tpg->tpg_lun_mutex);
return 0;
out:
return ret;
}
void core_tpg_remove_lun(
struct se_portal_group *tpg,
struct se_lun *lun)
{
/*
* rcu_dereference_raw protected by se_lun->lun_group symlink
* reference to se_device->dev_group.
*/
struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
lun->lun_shutdown = true;
core_clear_lun_from_tpg(lun, tpg);
/*
* Wait for any active I/O references to percpu se_lun->lun_ref to
* be released. Also, se_lun->lun_ref is now used by PR and ALUA
* logic when referencing a remote target port during ALL_TGT_PT=1
* and generating UNIT_ATTENTIONs for ALUA access state transition.
*/
transport_clear_lun_ref(lun);
mutex_lock(&tpg->tpg_lun_mutex);
if (lun->lun_se_dev) {
target_detach_tg_pt_gp(lun);
spin_lock(&dev->se_port_lock);
list_del(&lun->lun_dev_link);
dev->export_count--;
rcu_assign_pointer(lun->lun_se_dev, NULL);
spin_unlock(&dev->se_port_lock);
}
if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
hlist_del_rcu(&lun->link);
lun->lun_shutdown = false;
mutex_unlock(&tpg->tpg_lun_mutex);
percpu_ref_exit(&lun->lun_ref);
}
|
linux-master
|
drivers/target/target_core_tpg.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_transport.c
*
* This file contains the Generic Target Engine Core.
*
* (c) Copyright 2002-2013 Datera, Inc.
*
* Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
#define CREATE_TRACE_POINTS
#include <trace/events/target.h>
static struct workqueue_struct *target_completion_wq;
static struct workqueue_struct *target_submission_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;
static void transport_complete_task_attr(struct se_cmd *cmd);
static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
struct se_device *dev, int err, bool write_pending);
static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
{
se_sess_cache = kmem_cache_create("se_sess_cache",
sizeof(struct se_session), __alignof__(struct se_session),
0, NULL);
if (!se_sess_cache) {
pr_err("kmem_cache_create() for struct se_session"
" failed\n");
goto out;
}
se_ua_cache = kmem_cache_create("se_ua_cache",
sizeof(struct se_ua), __alignof__(struct se_ua),
0, NULL);
if (!se_ua_cache) {
pr_err("kmem_cache_create() for struct se_ua failed\n");
goto out_free_sess_cache;
}
t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
sizeof(struct t10_pr_registration),
__alignof__(struct t10_pr_registration), 0, NULL);
if (!t10_pr_reg_cache) {
pr_err("kmem_cache_create() for struct t10_pr_registration"
" failed\n");
goto out_free_ua_cache;
}
t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
0, NULL);
if (!t10_alua_lu_gp_cache) {
pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
" failed\n");
goto out_free_pr_reg_cache;
}
t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
sizeof(struct t10_alua_lu_gp_member),
__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
if (!t10_alua_lu_gp_mem_cache) {
pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
"cache failed\n");
goto out_free_lu_gp_cache;
}
t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
sizeof(struct t10_alua_tg_pt_gp),
__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
if (!t10_alua_tg_pt_gp_cache) {
pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
"cache failed\n");
goto out_free_lu_gp_mem_cache;
}
t10_alua_lba_map_cache = kmem_cache_create(
"t10_alua_lba_map_cache",
sizeof(struct t10_alua_lba_map),
__alignof__(struct t10_alua_lba_map), 0, NULL);
if (!t10_alua_lba_map_cache) {
pr_err("kmem_cache_create() for t10_alua_lba_map_"
"cache failed\n");
goto out_free_tg_pt_gp_cache;
}
t10_alua_lba_map_mem_cache = kmem_cache_create(
"t10_alua_lba_map_mem_cache",
sizeof(struct t10_alua_lba_map_member),
__alignof__(struct t10_alua_lba_map_member), 0, NULL);
if (!t10_alua_lba_map_mem_cache) {
pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
"cache failed\n");
goto out_free_lba_map_cache;
}
target_completion_wq = alloc_workqueue("target_completion",
WQ_MEM_RECLAIM, 0);
if (!target_completion_wq)
goto out_free_lba_map_mem_cache;
target_submission_wq = alloc_workqueue("target_submission",
WQ_MEM_RECLAIM, 0);
if (!target_submission_wq)
goto out_free_completion_wq;
return 0;
out_free_completion_wq:
destroy_workqueue(target_completion_wq);
out_free_lba_map_mem_cache:
kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_cache:
kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
kmem_cache_destroy(se_sess_cache);
out:
return -ENOMEM;
}
void release_se_kmem_caches(void)
{
destroy_workqueue(target_submission_wq);
destroy_workqueue(target_completion_wq);
kmem_cache_destroy(se_sess_cache);
kmem_cache_destroy(se_ua_cache);
kmem_cache_destroy(t10_pr_reg_cache);
kmem_cache_destroy(t10_alua_lu_gp_cache);
kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
kmem_cache_destroy(t10_alua_lba_map_cache);
kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}
/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
/*
* Allocate a new row index for the entry type specified
*/
u32 scsi_get_new_index(scsi_index_t type)
{
u32 new_index;
BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
spin_lock(&scsi_mib_index_lock);
new_index = ++scsi_mib_index[type];
spin_unlock(&scsi_mib_index_lock);
return new_index;
}
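/*
 * Illustrative example: statistics code that needs a unique row index for
 * a newly created session instance would typically call
 *
 *	u32 index = scsi_get_new_index(SCSI_INST_INDEX);
 *
 * where SCSI_INST_INDEX is one of the scsi_index_t enumerators; the
 * particular index type shown here is only an example.
 */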
void transport_subsystem_check_init(void)
{
int ret;
static int sub_api_initialized;
if (sub_api_initialized)
return;
ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock");
if (ret != 0)
pr_err("Unable to load target_core_iblock\n");
ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file");
if (ret != 0)
pr_err("Unable to load target_core_file\n");
ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi");
if (ret != 0)
pr_err("Unable to load target_core_pscsi\n");
ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user");
if (ret != 0)
pr_err("Unable to load target_core_user\n");
sub_api_initialized = 1;
}
static void target_release_cmd_refcnt(struct percpu_ref *ref)
{
struct target_cmd_counter *cmd_cnt = container_of(ref,
typeof(*cmd_cnt),
refcnt);
wake_up(&cmd_cnt->refcnt_wq);
}
struct target_cmd_counter *target_alloc_cmd_counter(void)
{
struct target_cmd_counter *cmd_cnt;
int rc;
cmd_cnt = kzalloc(sizeof(*cmd_cnt), GFP_KERNEL);
if (!cmd_cnt)
return NULL;
init_completion(&cmd_cnt->stop_done);
init_waitqueue_head(&cmd_cnt->refcnt_wq);
atomic_set(&cmd_cnt->stopped, 0);
rc = percpu_ref_init(&cmd_cnt->refcnt, target_release_cmd_refcnt, 0,
GFP_KERNEL);
if (rc)
goto free_cmd_cnt;
return cmd_cnt;
free_cmd_cnt:
kfree(cmd_cnt);
return NULL;
}
EXPORT_SYMBOL_GPL(target_alloc_cmd_counter);
void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt)
{
/*
* Drivers like loop do not call target_stop_session during session
* shutdown so we have to drop the ref taken at init time here.
*/
if (!atomic_read(&cmd_cnt->stopped))
percpu_ref_put(&cmd_cnt->refcnt);
percpu_ref_exit(&cmd_cnt->refcnt);
kfree(cmd_cnt);
}
EXPORT_SYMBOL_GPL(target_free_cmd_counter);
/**
* transport_init_session - initialize a session object
* @se_sess: Session object pointer.
*
* The caller must have zero-initialized @se_sess before calling this function.
*/
void transport_init_session(struct se_session *se_sess)
{
INIT_LIST_HEAD(&se_sess->sess_list);
INIT_LIST_HEAD(&se_sess->sess_acl_list);
spin_lock_init(&se_sess->sess_cmd_lock);
}
EXPORT_SYMBOL(transport_init_session);
/**
* transport_alloc_session - allocate a session object and initialize it
* @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
*/
struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
{
struct se_session *se_sess;
se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
if (!se_sess) {
pr_err("Unable to allocate struct se_session from"
" se_sess_cache\n");
return ERR_PTR(-ENOMEM);
}
transport_init_session(se_sess);
se_sess->sup_prot_ops = sup_prot_ops;
return se_sess;
}
EXPORT_SYMBOL(transport_alloc_session);
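/*
 * Illustrative example: a fabric driver that does not pre-allocate tags
 * can allocate a bare session and propagate failure like
 *
 *	struct se_session *sess;
 *
 *	sess = transport_alloc_session(TARGET_PROT_NORMAL);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 *
 * where TARGET_PROT_NORMAL signals that no T10-PI modes are supported.
 */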
/**
* transport_alloc_session_tags - allocate target driver private data
* @se_sess: Session pointer.
* @tag_num: Maximum number of in-flight commands between initiator and target.
* @tag_size: Size in bytes of the private data a target driver associates with
* each command.
*/
int transport_alloc_session_tags(struct se_session *se_sess,
unsigned int tag_num, unsigned int tag_size)
{
int rc;
se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!se_sess->sess_cmd_map) {
pr_err("Unable to allocate se_sess->sess_cmd_map\n");
return -ENOMEM;
}
rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
false, GFP_KERNEL, NUMA_NO_NODE);
if (rc < 0) {
pr_err("Unable to init se_sess->sess_tag_pool,"
" tag_num: %u\n", tag_num);
kvfree(se_sess->sess_cmd_map);
se_sess->sess_cmd_map = NULL;
return -ENOMEM;
}
return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);
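/*
 * Illustrative example: a fabric driver sizing the per-command private
 * area for 128 in-flight commands (struct my_fabric_cmd is a hypothetical
 * driver-local type) would do
 *
 *	rc = transport_alloc_session_tags(se_sess, 128,
 *					  sizeof(struct my_fabric_cmd));
 *	if (rc < 0)
 *		goto free_sess;
 *
 * and then carve individual commands out of se_sess->sess_cmd_map using
 * tags taken from se_sess->sess_tag_pool.
 */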
/**
* transport_init_session_tags - allocate a session and target driver private data
* @tag_num: Maximum number of in-flight commands between initiator and target.
* @tag_size: Size in bytes of the private data a target driver associates with
* each command.
* @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
*/
static struct se_session *
transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
enum target_prot_op sup_prot_ops)
{
struct se_session *se_sess;
int rc;
if (tag_num != 0 && !tag_size) {
pr_err("init_session_tags called with percpu-ida tag_num:"
" %u, but zero tag_size\n", tag_num);
return ERR_PTR(-EINVAL);
}
if (!tag_num && tag_size) {
pr_err("init_session_tags called with percpu-ida tag_size:"
" %u, but zero tag_num\n", tag_size);
return ERR_PTR(-EINVAL);
}
se_sess = transport_alloc_session(sup_prot_ops);
if (IS_ERR(se_sess))
return se_sess;
rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
if (rc < 0) {
transport_free_session(se_sess);
return ERR_PTR(-ENOMEM);
}
return se_sess;
}
/*
* Called with spin_lock_irqsave() held on struct se_portal_group->session_lock.
*/
void __transport_register_session(
struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl,
struct se_session *se_sess,
void *fabric_sess_ptr)
{
const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
unsigned char buf[PR_REG_ISID_LEN];
unsigned long flags;
se_sess->se_tpg = se_tpg;
se_sess->fabric_sess_ptr = fabric_sess_ptr;
/*
* Used by struct se_node_acl's under ConfigFS to locate active se_session.
*
* Only set for struct se_session's that will actually be moving I/O.
* eg: *NOT* discovery sessions.
*/
if (se_nacl) {
/*
*
* Determine if fabric allows for T10-PI feature bits exposed to
* initiators for device backends with !dev->dev_attrib.pi_prot_type.
*
* If so, then always save prot_type on a per se_node_acl node
* basis and re-instate the previous sess_prot_type to avoid
* disabling PI from below any previously initiator side
* registered LUNs.
*/
if (se_nacl->saved_prot_type)
se_sess->sess_prot_type = se_nacl->saved_prot_type;
else if (tfo->tpg_check_prot_fabric_only)
se_sess->sess_prot_type = se_nacl->saved_prot_type =
tfo->tpg_check_prot_fabric_only(se_tpg);
/*
* If the fabric module supports an ISID based TransportID,
* save this value in binary from the fabric I_T Nexus now.
*/
if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
memset(&buf[0], 0, PR_REG_ISID_LEN);
se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
&buf[0], PR_REG_ISID_LEN);
se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
}
spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
/*
* The se_nacl->nacl_sess pointer will be set to the
* last active I_T Nexus for each struct se_node_acl.
*/
se_nacl->nacl_sess = se_sess;
list_add_tail(&se_sess->sess_acl_list,
&se_nacl->acl_sess_list);
spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
}
list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);
void transport_register_session(
struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl,
struct se_session *se_sess,
void *fabric_sess_ptr)
{
unsigned long flags;
spin_lock_irqsave(&se_tpg->session_lock, flags);
__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);
struct se_session *
target_setup_session(struct se_portal_group *tpg,
unsigned int tag_num, unsigned int tag_size,
enum target_prot_op prot_op,
const char *initiatorname, void *private,
int (*callback)(struct se_portal_group *,
struct se_session *, void *))
{
struct target_cmd_counter *cmd_cnt;
struct se_session *sess;
int rc;
cmd_cnt = target_alloc_cmd_counter();
if (!cmd_cnt)
return ERR_PTR(-ENOMEM);
/*
* If the fabric driver is using percpu-ida based pre allocation
* of I/O descriptor tags, go ahead and perform that setup now..
*/
if (tag_num != 0)
sess = transport_init_session_tags(tag_num, tag_size, prot_op);
else
sess = transport_alloc_session(prot_op);
if (IS_ERR(sess)) {
rc = PTR_ERR(sess);
goto free_cnt;
}
sess->cmd_cnt = cmd_cnt;
sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
(unsigned char *)initiatorname);
if (!sess->se_node_acl) {
rc = -EACCES;
goto free_sess;
}
/*
* Go ahead and perform any remaining fabric setup that is
* required before transport_register_session().
*/
if (callback != NULL) {
rc = callback(tpg, sess, private);
if (rc)
goto free_sess;
}
transport_register_session(tpg, sess->se_node_acl, sess, private);
return sess;
free_sess:
transport_free_session(sess);
return ERR_PTR(rc);
free_cnt:
target_free_cmd_counter(cmd_cnt);
return ERR_PTR(rc);
}
EXPORT_SYMBOL(target_setup_session);
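/*
 * Illustrative example: a fabric driver creating an I_T nexus after
 * authenticating an initiator (initiator_wwn and my_conn are hypothetical
 * driver-local values) might call
 *
 *	sess = target_setup_session(tpg, 128, sizeof(struct my_fabric_cmd),
 *				    TARGET_PROT_NORMAL, initiator_wwn,
 *				    my_conn, NULL);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 *
 * Passing tag_num == 0 skips the tag pool setup, and a non-NULL callback
 * runs after ACL lookup but before the session is registered.
 */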
ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
struct se_session *se_sess;
ssize_t len = 0;
spin_lock_bh(&se_tpg->session_lock);
list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
if (!se_sess->se_node_acl)
continue;
if (!se_sess->se_node_acl->dynamic_node_acl)
continue;
if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
break;
len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
se_sess->se_node_acl->initiatorname);
len += 1; /* Include NULL terminator */
}
spin_unlock_bh(&se_tpg->session_lock);
return len;
}
EXPORT_SYMBOL(target_show_dynamic_sessions);
static void target_complete_nacl(struct kref *kref)
{
struct se_node_acl *nacl = container_of(kref,
struct se_node_acl, acl_kref);
struct se_portal_group *se_tpg = nacl->se_tpg;
if (!nacl->dynamic_stop) {
complete(&nacl->acl_free_comp);
return;
}
mutex_lock(&se_tpg->acl_node_mutex);
list_del_init(&nacl->acl_list);
mutex_unlock(&se_tpg->acl_node_mutex);
core_tpg_wait_for_nacl_pr_ref(nacl);
core_free_device_list_for_node(nacl, se_tpg);
kfree(nacl);
}
void target_put_nacl(struct se_node_acl *nacl)
{
kref_put(&nacl->acl_kref, target_complete_nacl);
}
EXPORT_SYMBOL(target_put_nacl);
void transport_deregister_session_configfs(struct se_session *se_sess)
{
struct se_node_acl *se_nacl;
unsigned long flags;
/*
* Used by struct se_node_acl's under ConfigFS to locate active struct se_session
*/
se_nacl = se_sess->se_node_acl;
if (se_nacl) {
spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
if (!list_empty(&se_sess->sess_acl_list))
list_del_init(&se_sess->sess_acl_list);
/*
* If the session list is empty, then clear the pointer.
* Otherwise, set the struct se_session pointer from the tail
* element of the per struct se_node_acl active session list.
*/
if (list_empty(&se_nacl->acl_sess_list))
se_nacl->nacl_sess = NULL;
else {
se_nacl->nacl_sess = container_of(
se_nacl->acl_sess_list.prev,
struct se_session, sess_acl_list);
}
spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);
void transport_free_session(struct se_session *se_sess)
{
struct se_node_acl *se_nacl = se_sess->se_node_acl;
/*
* Drop the se_node_acl->nacl_kref obtained from within
* core_tpg_get_initiator_node_acl().
*/
if (se_nacl) {
struct se_portal_group *se_tpg = se_nacl->se_tpg;
const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
unsigned long flags;
se_sess->se_node_acl = NULL;
/*
* Also determine if we need to drop the extra ->cmd_kref if
* it had been previously dynamically generated, and
* the endpoint is not caching dynamic ACLs.
*/
mutex_lock(&se_tpg->acl_node_mutex);
if (se_nacl->dynamic_node_acl &&
!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
if (list_empty(&se_nacl->acl_sess_list))
se_nacl->dynamic_stop = true;
spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
if (se_nacl->dynamic_stop)
list_del_init(&se_nacl->acl_list);
}
mutex_unlock(&se_tpg->acl_node_mutex);
if (se_nacl->dynamic_stop)
target_put_nacl(se_nacl);
target_put_nacl(se_nacl);
}
if (se_sess->sess_cmd_map) {
sbitmap_queue_free(&se_sess->sess_tag_pool);
kvfree(se_sess->sess_cmd_map);
}
if (se_sess->cmd_cnt)
target_free_cmd_counter(se_sess->cmd_cnt);
kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);
static int target_release_res(struct se_device *dev, void *data)
{
struct se_session *sess = data;
if (dev->reservation_holder == sess)
target_release_reservation(dev);
return 0;
}
void transport_deregister_session(struct se_session *se_sess)
{
struct se_portal_group *se_tpg = se_sess->se_tpg;
unsigned long flags;
if (!se_tpg) {
transport_free_session(se_sess);
return;
}
spin_lock_irqsave(&se_tpg->session_lock, flags);
list_del(&se_sess->sess_list);
se_sess->se_tpg = NULL;
se_sess->fabric_sess_ptr = NULL;
spin_unlock_irqrestore(&se_tpg->session_lock, flags);
/*
* Since the session is being removed, release SPC-2
* reservations held by the session that is disappearing.
*/
target_for_each_device(target_release_res, se_sess);
pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
se_tpg->se_tpg_tfo->fabric_name);
/*
* If last kref is dropping now for an explicit NodeACL, awake sleeping
* ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
* removal context from within transport_free_session() code.
*
* For dynamic ACL, target_put_nacl() uses target_complete_nacl()
* to release all remaining generate_node_acl=1 created ACL resources.
*/
transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);
void target_remove_session(struct se_session *se_sess)
{
transport_deregister_session_configfs(se_sess);
transport_deregister_session(se_sess);
}
EXPORT_SYMBOL(target_remove_session);
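/*
 * Illustrative example: a session created with target_setup_session() is
 * normally torn down from the fabric's close-session path with
 *
 *	target_remove_session(sess);
 *
 * which drops the configfs linkage first and then deregisters the session.
 */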
static void target_remove_from_state_list(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned long flags;
if (!dev)
return;
spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
if (cmd->state_active) {
list_del(&cmd->state_list);
cmd->state_active = false;
}
spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
}
static void target_remove_from_tmr_list(struct se_cmd *cmd)
{
struct se_device *dev = NULL;
unsigned long flags;
if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
dev = cmd->se_tmr_req->tmr_dev;
if (dev) {
spin_lock_irqsave(&dev->se_tmr_lock, flags);
if (cmd->se_tmr_req->tmr_dev)
list_del_init(&cmd->se_tmr_req->tmr_list);
spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
}
}
/*
* This function is called by the target core after the target core has
* finished processing a SCSI command or SCSI TMF. Both the regular command
* processing code and the code for aborting commands can call this
* function. CMD_T_STOP is set if and only if another thread is waiting
* inside transport_wait_for_tasks() for t_transport_stop_comp.
*/
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
unsigned long flags;
spin_lock_irqsave(&cmd->t_state_lock, flags);
/*
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
*/
if (cmd->transport_state & CMD_T_STOP) {
pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
__func__, __LINE__, cmd->tag);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete_all(&cmd->t_transport_stop_comp);
return 1;
}
cmd->transport_state &= ~CMD_T_ACTIVE;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
/*
* Some fabric modules like tcm_loop can release their internally
* allocated I/O reference and struct se_cmd now.
*
* Fabric modules are expected to return '1' here if the se_cmd being
* passed is released at this point, or zero if not being released.
*/
return cmd->se_tfo->check_stop_free(cmd);
}
static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
struct se_lun *lun = cmd->se_lun;
if (!lun)
return;
target_remove_from_state_list(cmd);
target_remove_from_tmr_list(cmd);
if (cmpxchg(&cmd->lun_ref_active, true, false))
percpu_ref_put(&lun->lun_ref);
/*
* Clear struct se_cmd->se_lun before the handoff to FE.
*/
cmd->se_lun = NULL;
}
static void target_complete_failure_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
transport_generic_request_failure(cmd, cmd->sense_reason);
}
/*
* Used when asking transport to copy Sense Data from the underlying
* Linux/SCSI struct scsi_cmnd
*/
static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
WARN_ON(!cmd->se_lun);
if (!dev)
return NULL;
if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
return NULL;
cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
return cmd->sense_buffer;
}
void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
{
unsigned char *cmd_sense_buf;
unsigned long flags;
spin_lock_irqsave(&cmd->t_state_lock, flags);
cmd_sense_buf = transport_get_sense_buffer(cmd);
if (!cmd_sense_buf) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
EXPORT_SYMBOL(transport_copy_sense_to_cmd);
static void target_handle_abort(struct se_cmd *cmd)
{
bool tas = cmd->transport_state & CMD_T_TAS;
bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF;
int ret;
pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas);
if (tas) {
if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
cmd->t_task_cdb[0], cmd->tag);
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_status(cmd);
if (ret) {
transport_handle_queue_full(cmd, cmd->se_dev,
ret, false);
return;
}
} else {
cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
cmd->se_tfo->queue_tm_rsp(cmd);
}
} else {
/*
* Allow the fabric driver to unmap any resources before
* releasing the descriptor via TFO->release_cmd().
*/
cmd->se_tfo->aborted_task(cmd);
if (ack_kref)
WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0);
/*
* To do: establish a unit attention condition on the I_T
* nexus associated with cmd. See also the paragraph "Aborting
* commands" in SAM.
*/
}
WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
}
static void target_abort_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
target_handle_abort(cmd);
}
static bool target_cmd_interrupted(struct se_cmd *cmd)
{
int post_ret;
if (cmd->transport_state & CMD_T_ABORTED) {
if (cmd->transport_complete_callback)
cmd->transport_complete_callback(cmd, false, &post_ret);
INIT_WORK(&cmd->work, target_abort_work);
queue_work(target_completion_wq, &cmd->work);
return true;
} else if (cmd->transport_state & CMD_T_STOP) {
if (cmd->transport_complete_callback)
cmd->transport_complete_callback(cmd, false, &post_ret);
complete_all(&cmd->t_transport_stop_comp);
return true;
}
return false;
}
/* May be called from interrupt context so must not sleep. */
void target_complete_cmd_with_sense(struct se_cmd *cmd, u8 scsi_status,
sense_reason_t sense_reason)
{
struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn;
int success, cpu;
unsigned long flags;
if (target_cmd_interrupted(cmd))
return;
cmd->scsi_status = scsi_status;
cmd->sense_reason = sense_reason;
spin_lock_irqsave(&cmd->t_state_lock, flags);
switch (cmd->scsi_status) {
case SAM_STAT_CHECK_CONDITION:
if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
success = 1;
else
success = 0;
break;
default:
success = 1;
break;
}
cmd->t_state = TRANSPORT_COMPLETE;
cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
INIT_WORK(&cmd->work, success ? target_complete_ok_work :
target_complete_failure_work);
if (!wwn || wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
cpu = cmd->cpuid;
else
cpu = wwn->cmd_compl_affinity;
queue_work_on(cpu, target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd_with_sense);
void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
target_complete_cmd_with_sense(cmd, scsi_status, scsi_status ?
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE :
TCM_NO_SENSE);
}
EXPORT_SYMBOL(target_complete_cmd);
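/*
 * Illustrative example: a backend driver completes a successful I/O with
 *
 *	target_complete_cmd(cmd, SAM_STAT_GOOD);
 *
 * and reports a failure with explicit sense via, for instance,
 *
 *	target_complete_cmd_with_sense(cmd, SAM_STAT_CHECK_CONDITION,
 *				       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
 */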
void target_set_cmd_data_length(struct se_cmd *cmd, int length)
{
if (length < cmd->data_length) {
if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
cmd->residual_count += cmd->data_length - length;
} else {
cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
cmd->residual_count = cmd->data_length - length;
}
cmd->data_length = length;
}
}
EXPORT_SYMBOL(target_set_cmd_data_length);
void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
if (scsi_status == SAM_STAT_GOOD ||
cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) {
target_set_cmd_data_length(cmd, length);
}
target_complete_cmd(cmd, scsi_status);
}
EXPORT_SYMBOL(target_complete_cmd_with_length);
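/*
 * Illustrative example: emulation code that produced fewer bytes than the
 * initiator requested, e.g. a 36-byte standard INQUIRY response, reports
 * the shorter length so the residual is accounted for:
 *
 *	target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 36);
 */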
static void target_add_to_state_list(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned long flags;
spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
if (!cmd->state_active) {
list_add_tail(&cmd->state_list,
&dev->queues[cmd->cpuid].state_list);
cmd->state_active = true;
}
spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
}
/*
* Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
*/
static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);
void target_qf_do_work(struct work_struct *work)
{
struct se_device *dev = container_of(work, struct se_device,
qf_work_queue);
LIST_HEAD(qf_cmd_list);
struct se_cmd *cmd, *cmd_tmp;
spin_lock_irq(&dev->qf_cmd_lock);
list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
spin_unlock_irq(&dev->qf_cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
list_del(&cmd->se_qf_node);
atomic_dec_mb(&dev->dev_qf_count);
pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
" context: %s\n", cmd->se_tfo->fabric_name, cmd,
(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
: "UNKNOWN");
if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
transport_write_pending_qf(cmd);
else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
transport_complete_qf(cmd);
}
}
unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
switch (cmd->data_direction) {
case DMA_NONE:
return "NONE";
case DMA_FROM_DEVICE:
return "READ";
case DMA_TO_DEVICE:
return "WRITE";
case DMA_BIDIRECTIONAL:
return "BIDI";
default:
break;
}
return "UNKNOWN";
}
void transport_dump_dev_state(
struct se_device *dev,
char *b,
int *bl)
{
*bl += sprintf(b + *bl, "Status: ");
if (dev->export_count)
*bl += sprintf(b + *bl, "ACTIVATED");
else
*bl += sprintf(b + *bl, "DEACTIVATED");
*bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
*bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
dev->dev_attrib.block_size,
dev->dev_attrib.hw_max_sectors);
*bl += sprintf(b + *bl, " ");
}
void transport_dump_vpd_proto_id(
struct t10_vpd *vpd,
unsigned char *p_buf,
int p_buf_len)
{
unsigned char buf[VPD_TMP_BUF_SIZE];
int len;
memset(buf, 0, VPD_TMP_BUF_SIZE);
len = sprintf(buf, "T10 VPD Protocol Identifier: ");
switch (vpd->protocol_identifier) {
case 0x00:
sprintf(buf+len, "Fibre Channel\n");
break;
case 0x10:
sprintf(buf+len, "Parallel SCSI\n");
break;
case 0x20:
sprintf(buf+len, "SSA\n");
break;
case 0x30:
sprintf(buf+len, "IEEE 1394\n");
break;
case 0x40:
sprintf(buf+len, "SCSI Remote Direct Memory Access"
" Protocol\n");
break;
case 0x50:
sprintf(buf+len, "Internet SCSI (iSCSI)\n");
break;
case 0x60:
sprintf(buf+len, "SAS Serial SCSI Protocol\n");
break;
case 0x70:
sprintf(buf+len, "Automation/Drive Interface Transport"
" Protocol\n");
break;
case 0x80:
sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
break;
default:
sprintf(buf+len, "Unknown 0x%02x\n",
vpd->protocol_identifier);
break;
}
if (p_buf)
strncpy(p_buf, buf, p_buf_len);
else
pr_debug("%s", buf);
}
void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
/*
* Check if the Protocol Identifier Valid (PIV) bit is set..
*
* from spc3r23.pdf section 7.5.1
*/
if (page_83[1] & 0x80) {
vpd->protocol_identifier = (page_83[0] & 0xf0);
vpd->protocol_identifier_set = 1;
transport_dump_vpd_proto_id(vpd, NULL, 0);
}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);
int transport_dump_vpd_assoc(
struct t10_vpd *vpd,
unsigned char *p_buf,
int p_buf_len)
{
unsigned char buf[VPD_TMP_BUF_SIZE];
int ret = 0;
int len;
memset(buf, 0, VPD_TMP_BUF_SIZE);
len = sprintf(buf, "T10 VPD Identifier Association: ");
switch (vpd->association) {
case 0x00:
sprintf(buf+len, "addressed logical unit\n");
break;
case 0x10:
sprintf(buf+len, "target port\n");
break;
case 0x20:
sprintf(buf+len, "SCSI target device\n");
break;
default:
sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
ret = -EINVAL;
break;
}
if (p_buf)
strncpy(p_buf, buf, p_buf_len);
else
pr_debug("%s", buf);
return ret;
}
int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
/*
* The VPD identification association..
*
* from spc3r23.pdf Section 7.6.3.1 Table 297
*/
vpd->association = (page_83[1] & 0x30);
return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);
int transport_dump_vpd_ident_type(
struct t10_vpd *vpd,
unsigned char *p_buf,
int p_buf_len)
{
unsigned char buf[VPD_TMP_BUF_SIZE];
int ret = 0;
int len;
memset(buf, 0, VPD_TMP_BUF_SIZE);
len = sprintf(buf, "T10 VPD Identifier Type: ");
switch (vpd->device_identifier_type) {
case 0x00:
sprintf(buf+len, "Vendor specific\n");
break;
case 0x01:
sprintf(buf+len, "T10 Vendor ID based\n");
break;
case 0x02:
sprintf(buf+len, "EUI-64 based\n");
break;
case 0x03:
sprintf(buf+len, "NAA\n");
break;
case 0x04:
sprintf(buf+len, "Relative target port identifier\n");
break;
case 0x08:
sprintf(buf+len, "SCSI name string\n");
break;
default:
sprintf(buf+len, "Unsupported: 0x%02x\n",
vpd->device_identifier_type);
ret = -EINVAL;
break;
}
if (p_buf) {
if (p_buf_len < strlen(buf)+1)
return -EINVAL;
strncpy(p_buf, buf, p_buf_len);
} else {
pr_debug("%s", buf);
}
return ret;
}
int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
/*
* The VPD identifier type..
*
* from spc3r23.pdf Section 7.6.3.1 Table 298
*/
vpd->device_identifier_type = (page_83[1] & 0x0f);
return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);
int transport_dump_vpd_ident(
struct t10_vpd *vpd,
unsigned char *p_buf,
int p_buf_len)
{
unsigned char buf[VPD_TMP_BUF_SIZE];
int ret = 0;
memset(buf, 0, VPD_TMP_BUF_SIZE);
switch (vpd->device_identifier_code_set) {
case 0x01: /* Binary */
snprintf(buf, sizeof(buf),
"T10 VPD Binary Device Identifier: %s\n",
&vpd->device_identifier[0]);
break;
case 0x02: /* ASCII */
snprintf(buf, sizeof(buf),
"T10 VPD ASCII Device Identifier: %s\n",
&vpd->device_identifier[0]);
break;
case 0x03: /* UTF-8 */
snprintf(buf, sizeof(buf),
"T10 VPD UTF-8 Device Identifier: %s\n",
&vpd->device_identifier[0]);
break;
default:
sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
" 0x%02x", vpd->device_identifier_code_set);
ret = -EINVAL;
break;
}
if (p_buf)
strncpy(p_buf, buf, p_buf_len);
else
pr_debug("%s", buf);
return ret;
}
int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
static const char hex_str[] = "0123456789abcdef";
int j = 0, i = 4; /* offset to start of the identifier */
/*
* The VPD Code Set (encoding)
*
* from spc3r23.pdf Section 7.6.3.1 Table 296
*/
vpd->device_identifier_code_set = (page_83[0] & 0x0f);
switch (vpd->device_identifier_code_set) {
case 0x01: /* Binary */
vpd->device_identifier[j++] =
hex_str[vpd->device_identifier_type];
while (i < (4 + page_83[3])) {
vpd->device_identifier[j++] =
hex_str[(page_83[i] & 0xf0) >> 4];
vpd->device_identifier[j++] =
hex_str[page_83[i] & 0x0f];
i++;
}
break;
case 0x02: /* ASCII */
case 0x03: /* UTF-8 */
while (i < (4 + page_83[3]))
vpd->device_identifier[j++] = page_83[i++];
break;
default:
break;
}
return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
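/*
 * Illustrative example: code walking an INQUIRY EVPD 0x83 designation
 * descriptor (with page_83 pointing at the descriptor) typically fills a
 * struct t10_vpd in this order:
 *
 *	transport_set_vpd_proto_id(vpd, page_83);
 *	transport_set_vpd_assoc(vpd, page_83);
 *	transport_set_vpd_ident_type(vpd, page_83);
 *	transport_set_vpd_ident(vpd, page_83);
 */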
static sense_reason_t
target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
unsigned int size)
{
u32 mtl;
if (!cmd->se_tfo->max_data_sg_nents)
return TCM_NO_SENSE;
/*
* Check if se_cmd->data_length exceeds the fabric enforced maximum
* transfer length (max_data_sg_nents * PAGE_SIZE). If so, set
* SCF_UNDERFLOW_BIT + residual_count and reduce the original
* cmd->data_length to the maximum length based on single PAGE_SIZE
* entry scatter-lists.
*/
mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
if (cmd->data_length > mtl) {
/*
* If an existing CDB overflow is present, calculate new residual
* based on CDB size minus fabric maximum transfer length.
*
* If an existing CDB underflow is present, calculate new residual
* based on original cmd->data_length minus fabric maximum transfer
* length.
*
* Otherwise, set the underflow residual based on cmd->data_length
* minus fabric maximum transfer length.
*/
if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
cmd->residual_count = (size - mtl);
} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
u32 orig_dl = size + cmd->residual_count;
cmd->residual_count = (orig_dl - mtl);
} else {
cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
cmd->residual_count = (cmd->data_length - mtl);
}
cmd->data_length = mtl;
/*
* Reset sbc_check_prot() calculated protection payload
* length based upon the new smaller MTL.
*/
if (cmd->prot_length) {
u32 sectors = (mtl / dev->dev_attrib.block_size);
cmd->prot_length = dev->prot_length * sectors;
}
}
return TCM_NO_SENSE;
}
/**
* target_cmd_size_check - Check whether there will be a residual.
* @cmd: SCSI command.
* @size: Data buffer size derived from CDB. The data buffer size provided by
* the SCSI transport driver is available in @cmd->data_length.
*
* Compare the data buffer size from the CDB with the data buffer limit from the transport
* header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary.
*
* Note: target drivers set @cmd->data_length by calling __target_init_cmd().
*
* Return: TCM_NO_SENSE
*/
sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
struct se_device *dev = cmd->se_dev;
if (cmd->unknown_data_length) {
cmd->data_length = size;
} else if (size != cmd->data_length) {
pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
" %u does not match SCSI CDB Length: %u for SAM Opcode:"
" 0x%02x\n", cmd->se_tfo->fabric_name,
cmd->data_length, size, cmd->t_task_cdb[0]);
/*
* For READ command for the overflow case keep the existing
* fabric provided ->data_length. Otherwise for the underflow
* case, reset ->data_length to the smaller SCSI expected data
* transfer length.
*/
if (size > cmd->data_length) {
cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
cmd->residual_count = (size - cmd->data_length);
} else {
cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
cmd->residual_count = (cmd->data_length - size);
/*
* Do not truncate ->data_length for WRITE command to
* dump all payload
*/
if (cmd->data_direction == DMA_FROM_DEVICE) {
cmd->data_length = size;
}
}
if (cmd->data_direction == DMA_TO_DEVICE) {
if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
pr_err_ratelimited("Rejecting underflow/overflow"
" for WRITE data CDB\n");
return TCM_INVALID_FIELD_IN_COMMAND_IU;
}
/*
* Some fabric drivers like iscsi-target still expect to
* always reject overflow writes. Reject this case until
* full fabric driver level support for overflow writes
* is introduced tree-wide.
*/
if (size > cmd->data_length) {
pr_err_ratelimited("Rejecting overflow for"
" WRITE control CDB\n");
return TCM_INVALID_CDB_FIELD;
}
}
}
return target_check_max_data_sg_nents(cmd, dev, size);
}
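/*
 * Worked example: if the fabric provided cmd->data_length == 4096 bytes
 * but the CDB only asks for size == 2048, the code above sets
 * SCF_UNDERFLOW_BIT, residual_count = 2048 and, for a DMA_FROM_DEVICE
 * command, shrinks cmd->data_length to 2048. In the opposite case
 * (size > cmd->data_length) SCF_OVERFLOW_BIT is set and the fabric
 * provided data_length is kept.
 */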
/*
* Used by fabric modules containing a local struct se_cmd within their
* fabric dependent per I/O descriptor.
*
* Preserves the value of @cmd->tag.
*/
void __target_init_cmd(struct se_cmd *cmd,
const struct target_core_fabric_ops *tfo,
struct se_session *se_sess, u32 data_length,
int data_direction, int task_attr,
unsigned char *sense_buffer, u64 unpacked_lun,
struct target_cmd_counter *cmd_cnt)
{
INIT_LIST_HEAD(&cmd->se_delayed_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
INIT_LIST_HEAD(&cmd->state_list);
init_completion(&cmd->t_transport_stop_comp);
cmd->free_compl = NULL;
cmd->abrt_compl = NULL;
spin_lock_init(&cmd->t_state_lock);
INIT_WORK(&cmd->work, NULL);
kref_init(&cmd->cmd_kref);
cmd->t_task_cdb = &cmd->__t_task_cdb[0];
cmd->se_tfo = tfo;
cmd->se_sess = se_sess;
cmd->data_length = data_length;
cmd->data_direction = data_direction;
cmd->sam_task_attr = task_attr;
cmd->sense_buffer = sense_buffer;
cmd->orig_fe_lun = unpacked_lun;
cmd->cmd_cnt = cmd_cnt;
if (!(cmd->se_cmd_flags & SCF_USE_CPUID))
cmd->cpuid = raw_smp_processor_id();
cmd->state_active = false;
}
EXPORT_SYMBOL(__target_init_cmd);
static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
/*
* Check if SAM Task Attribute emulation is enabled for this
* struct se_device storage object
*/
if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return 0;
if (cmd->sam_task_attr == TCM_ACA_TAG) {
pr_debug("SAM Task Attribute ACA"
" emulation is not supported\n");
return TCM_INVALID_CDB_FIELD;
}
return 0;
}
sense_reason_t
target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb, gfp_t gfp)
{
sense_reason_t ret;
/*
* Ensure that the received CDB is less than the max (252 + 8) bytes
* for VARIABLE_LENGTH_CMD
*/
if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
pr_err("Received SCSI CDB with command_size: %d that"
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
ret = TCM_INVALID_CDB_FIELD;
goto err;
}
/*
* If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
* allocate the additional extended CDB buffer now.. Otherwise
* setup the pointer from __t_task_cdb to t_task_cdb.
*/
if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp);
if (!cmd->t_task_cdb) {
pr_err("Unable to allocate cmd->t_task_cdb"
" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
scsi_command_size(cdb),
(unsigned long)sizeof(cmd->__t_task_cdb));
ret = TCM_OUT_OF_RESOURCES;
goto err;
}
}
/*
* Copy the original CDB into cmd->t_task_cdb.
*/
memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
trace_target_sequencer_start(cmd);
return 0;
err:
/*
* Copy the CDB here to allow trace_target_cmd_complete() to
* print the cdb to the trace buffers.
*/
memcpy(cmd->t_task_cdb, cdb, min(scsi_command_size(cdb),
(unsigned int)TCM_MAX_COMMAND_SIZE));
return ret;
}
EXPORT_SYMBOL(target_cmd_init_cdb);
sense_reason_t
target_cmd_parse_cdb(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
sense_reason_t ret;
ret = dev->transport->parse_cdb(cmd);
if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
pr_debug_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
cmd->se_tfo->fabric_name,
cmd->se_sess->se_node_acl->initiatorname,
cmd->t_task_cdb[0]);
if (ret)
return ret;
ret = transport_check_alloc_task_attr(cmd);
if (ret)
return ret;
cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
return 0;
}
EXPORT_SYMBOL(target_cmd_parse_cdb);
/*
* Used by fabric module frontends to queue tasks directly.
* May only be used from process context.
*/
int transport_handle_cdb_direct(
struct se_cmd *cmd)
{
sense_reason_t ret;
might_sleep();
if (!cmd->se_lun) {
dump_stack();
pr_err("cmd->se_lun is NULL\n");
return -EINVAL;
}
/*
* Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
* outstanding descriptors are handled correctly during shutdown via
* transport_wait_for_tasks()
*
* Also, we don't take cmd->t_state_lock here as we only expect
* this to be called for initial descriptor submission.
*/
cmd->t_state = TRANSPORT_NEW_CMD;
cmd->transport_state |= CMD_T_ACTIVE;
/*
* transport_generic_new_cmd() is already handling QUEUE_FULL,
* so follow TRANSPORT_NEW_CMD processing thread context usage
* and call transport_generic_request_failure() if necessary..
*/
ret = transport_generic_new_cmd(cmd);
if (ret)
transport_generic_request_failure(cmd, ret);
return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
sense_reason_t
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
if (!sgl || !sgl_count)
return 0;
/*
* Reject SCSI data overflow with map_mem_to_cmd() as incoming
* scatterlists already have been set to follow what the fabric
* passes for the original expected data transfer length.
*/
if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
pr_warn("Rejecting SCSI DATA overflow for fabric using"
" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
return TCM_INVALID_CDB_FIELD;
}
cmd->t_data_sg = sgl;
cmd->t_data_nents = sgl_count;
cmd->t_bidi_data_sg = sgl_bidi;
cmd->t_bidi_data_nents = sgl_bidi_count;
cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
return 0;
}
/**
* target_init_cmd - initialize se_cmd
* @se_cmd: command descriptor to init
* @se_sess: associated se_sess for endpoint
* @sense: pointer to SCSI sense buffer
* @unpacked_lun: unpacked LUN to reference for struct se_lun
* @data_length: fabric expected data transfer length
* @task_attr: SAM task attribute
* @data_dir: DMA data direction
* @flags: flags for command submission from target_sc_flags_table
*
* Task tags are supported if the caller has set @se_cmd->tag.
*
* Returns:
* - less than zero to signal active I/O shutdown failure.
* - zero on success.
*
* If the fabric driver calls target_stop_session, then it must check the
* return code and handle failures. This will never fail for other drivers,
* and the return code can be ignored.
*/
int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
unsigned char *sense, u64 unpacked_lun,
u32 data_length, int task_attr, int data_dir, int flags)
{
struct se_portal_group *se_tpg;
se_tpg = se_sess->se_tpg;
BUG_ON(!se_tpg);
BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
if (flags & TARGET_SCF_USE_CPUID)
se_cmd->se_cmd_flags |= SCF_USE_CPUID;
/*
* Signal bidirectional data payloads to target-core
*/
if (flags & TARGET_SCF_BIDI_OP)
se_cmd->se_cmd_flags |= SCF_BIDI;
if (flags & TARGET_SCF_UNKNOWN_SIZE)
se_cmd->unknown_data_length = 1;
/*
* Initialize se_cmd for target operation. From this point
* exceptions are handled by sending exception status via
* target_core_fabric_ops->queue_status() callback
*/
__target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length,
data_dir, task_attr, sense, unpacked_lun,
se_sess->cmd_cnt);
/*
* Obtain struct se_cmd->cmd_kref reference. A second kref_get here is
* necessary for fabrics using TARGET_SCF_ACK_KREF that expect a second
* kref_put() to happen during fabric packet acknowledgement.
*/
return target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
}
EXPORT_SYMBOL_GPL(target_init_cmd);
/**
* target_submit_prep - prepare cmd for submission
* @se_cmd: command descriptor to prep
* @cdb: pointer to SCSI CDB
* @sgl: struct scatterlist memory for unidirectional mapping
* @sgl_count: scatterlist count for unidirectional mapping
* @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
* @sgl_bidi_count: scatterlist count for bidirectional READ mapping
* @sgl_prot: struct scatterlist memory protection information
* @sgl_prot_count: scatterlist count for protection information
* @gfp: gfp allocation type
*
* Returns:
* - less than zero to signal failure.
* - zero on success.
*
* If failure is returned, LIO will call the caller's queue_status to complete
* the cmd.
*/
int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb,
struct scatterlist *sgl, u32 sgl_count,
struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
struct scatterlist *sgl_prot, u32 sgl_prot_count,
gfp_t gfp)
{
sense_reason_t rc;
rc = target_cmd_init_cdb(se_cmd, cdb, gfp);
if (rc)
goto send_cc_direct;
/*
* Locate se_lun pointer and attach it to struct se_cmd
*/
rc = transport_lookup_cmd_lun(se_cmd);
if (rc)
goto send_cc_direct;
rc = target_cmd_parse_cdb(se_cmd);
if (rc != 0)
goto generic_fail;
/*
* Save pointers for SGLs containing protection information,
* if present.
*/
if (sgl_prot_count) {
se_cmd->t_prot_sg = sgl_prot;
se_cmd->t_prot_nents = sgl_prot_count;
se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
}
/*
* When a non zero sgl_count has been passed perform SGL passthrough
* mapping for pre-allocated fabric memory instead of having target
* core perform an internal SGL allocation..
*/
if (sgl_count != 0) {
BUG_ON(!sgl);
rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
sgl_bidi, sgl_bidi_count);
if (rc != 0)
goto generic_fail;
}
return 0;
send_cc_direct:
transport_send_check_condition_and_sense(se_cmd, rc, 0);
target_put_sess_cmd(se_cmd);
return -EIO;
generic_fail:
transport_generic_request_failure(se_cmd, rc);
return -EIO;
}
EXPORT_SYMBOL_GPL(target_submit_prep);
/**
* target_submit - perform final initialization and submit cmd to LIO core
* @se_cmd: command descriptor to submit
*
* target_submit_prep must have been called on the cmd, and this must be
* called from process context.
*/
void target_submit(struct se_cmd *se_cmd)
{
struct scatterlist *sgl = se_cmd->t_data_sg;
unsigned char *buf = NULL;
might_sleep();
if (se_cmd->t_data_nents != 0) {
BUG_ON(!sgl);
/*
* A work-around for tcm_loop, as some userspace code via
* scsi-generic does not memset its associated read buffers,
* so go ahead and do that here for non-data CDBs. Also
* note that this is currently guaranteed to be a single SGL
* for this case by target core in target_setup_cmd_from_cdb()
* -> transport_generic_cmd_sequencer().
*/
if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
se_cmd->data_direction == DMA_FROM_DEVICE) {
if (sgl)
buf = kmap(sg_page(sgl)) + sgl->offset;
if (buf) {
memset(buf, 0, sgl->length);
kunmap(sg_page(sgl));
}
}
}
/*
* Check if we need to delay processing because of ALUA
* Active/NonOptimized primary access state..
*/
core_alua_check_nonop_delay(se_cmd);
transport_handle_cdb_direct(se_cmd);
}
EXPORT_SYMBOL_GPL(target_submit);
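/*
 * Illustrative example: the split submission interface is used roughly as
 * follows, with hypothetical driver-local io-> fields and the fabric
 * supplying its own scatterlists:
 *
 *	rc = target_init_cmd(se_cmd, se_sess, io->sense_buf, io->lun,
 *			     io->data_len, TCM_SIMPLE_TAG, io->dma_dir,
 *			     TARGET_SCF_ACK_KREF);
 *	if (rc)
 *		return;
 *	if (target_submit_prep(se_cmd, io->cdb, io->sgl, io->sgl_count,
 *			       NULL, 0, NULL, 0, GFP_KERNEL))
 *		return;
 *	target_submit(se_cmd);
 *
 * A non-zero return from target_init_cmd() means the session is being
 * shut down; a non-zero return from target_submit_prep() means status has
 * already been queued back to the initiator.
 */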
/**
* target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
*
* @se_cmd: command descriptor to submit
* @se_sess: associated se_sess for endpoint
* @cdb: pointer to SCSI CDB
* @sense: pointer to SCSI sense buffer
* @unpacked_lun: unpacked LUN to reference for struct se_lun
* @data_length: fabric expected data transfer length
* @task_attr: SAM task attribute
* @data_dir: DMA data direction
* @flags: flags for command submission from target_sc_flags_table
*
* Task tags are supported if the caller has set @se_cmd->tag.
*
* This may only be called from process context, and also currently
* assumes internal allocation of fabric payload buffer by target-core.
*
* It also assumes internal target core SGL memory allocation.
*
* This function must only be used by drivers that do their own
* sync during shutdown and does not use target_stop_session. If there
* is a failure this function will call into the fabric driver's
* queue_status with a CHECK_CONDITION.
*/
void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
u32 data_length, int task_attr, int data_dir, int flags)
{
int rc;
rc = target_init_cmd(se_cmd, se_sess, sense, unpacked_lun, data_length,
task_attr, data_dir, flags);
WARN(rc, "Invalid target_submit_cmd use. Driver must not use target_stop_session or call target_init_cmd directly.\n");
if (rc)
return;
if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
GFP_KERNEL))
return;
target_submit(se_cmd);
}
EXPORT_SYMBOL(target_submit_cmd);
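/*
 * Illustrative example: a simpler fabric that lets target-core allocate
 * the data buffers submits in one shot (the io-> fields are hypothetical
 * driver-local state):
 *
 *	se_cmd->tag = io->tag;
 *	target_submit_cmd(se_cmd, se_sess, io->cdb, io->sense_buf, io->lun,
 *			  io->data_len, TCM_SIMPLE_TAG, io->dma_dir,
 *			  TARGET_SCF_ACK_KREF);
 */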
static struct se_dev_plug *target_plug_device(struct se_device *se_dev)
{
struct se_dev_plug *se_plug;
if (!se_dev->transport->plug_device)
return NULL;
se_plug = se_dev->transport->plug_device(se_dev);
if (!se_plug)
return NULL;
se_plug->se_dev = se_dev;
/*
* We have a ref to the lun at this point, but the cmds could
* complete before we unplug, so grab a ref to the se_device so we
* can call back into the backend.
*/
config_group_get(&se_dev->dev_group);
return se_plug;
}
static void target_unplug_device(struct se_dev_plug *se_plug)
{
struct se_device *se_dev = se_plug->se_dev;
se_dev->transport->unplug_device(se_plug);
config_group_put(&se_dev->dev_group);
}
void target_queued_submit_work(struct work_struct *work)
{
struct se_cmd_queue *sq = container_of(work, struct se_cmd_queue, work);
struct se_cmd *se_cmd, *next_cmd;
struct se_dev_plug *se_plug = NULL;
struct se_device *se_dev = NULL;
struct llist_node *cmd_list;
cmd_list = llist_del_all(&sq->cmd_list);
if (!cmd_list)
/* Previous call took what we were queued to submit */
return;
cmd_list = llist_reverse_order(cmd_list);
llist_for_each_entry_safe(se_cmd, next_cmd, cmd_list, se_cmd_list) {
if (!se_dev) {
se_dev = se_cmd->se_dev;
se_plug = target_plug_device(se_dev);
}
target_submit(se_cmd);
}
if (se_plug)
target_unplug_device(se_plug);
}
/**
* target_queue_submission - queue the cmd to run on the LIO workqueue
* @se_cmd: command descriptor to submit
*/
void target_queue_submission(struct se_cmd *se_cmd)
{
struct se_device *se_dev = se_cmd->se_dev;
int cpu = se_cmd->cpuid;
struct se_cmd_queue *sq;
sq = &se_dev->queues[cpu].sq;
llist_add(&se_cmd->se_cmd_list, &sq->cmd_list);
queue_work_on(cpu, target_submission_wq, &sq->work);
}
EXPORT_SYMBOL_GPL(target_queue_submission);
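/*
 * Illustrative example: a driver that has already run target_init_cmd()
 * and target_submit_prep() can hand the command off to the LIO submission
 * workqueue, which executes it on the command's CPU, with
 *
 *	target_queue_submission(se_cmd);
 */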
static void target_complete_tmr_failure(struct work_struct *work)
{
struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
se_cmd->se_tfo->queue_tm_rsp(se_cmd);
transport_lun_remove_cmd(se_cmd);
transport_cmd_check_stop_to_fabric(se_cmd);
}
/**
* target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
* for TMR CDBs
*
* @se_cmd: command descriptor to submit
* @se_sess: associated se_sess for endpoint
* @sense: pointer to SCSI sense buffer
* @unpacked_lun: unpacked LUN to reference for struct se_lun
* @fabric_tmr_ptr: fabric context for TMR req
* @tm_type: Type of TM request
* @gfp: gfp type for caller
* @tag: referenced task tag for TMR_ABORT_TASK
* @flags: submit cmd flags
*
* Callable from all contexts.
**/
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
unsigned char *sense, u64 unpacked_lun,
void *fabric_tmr_ptr, unsigned char tm_type,
gfp_t gfp, u64 tag, int flags)
{
struct se_portal_group *se_tpg;
int ret;
se_tpg = se_sess->se_tpg;
BUG_ON(!se_tpg);
__target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun,
se_sess->cmd_cnt);
/*
* FIXME: Currently expect caller to handle se_cmd->se_tmr_req
* allocation failure.
*/
ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
if (ret < 0)
return -ENOMEM;
if (tm_type == TMR_ABORT_TASK)
se_cmd->se_tmr_req->ref_task_tag = tag;
/* See target_submit_cmd for commentary */
ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
if (ret) {
core_tmr_release_req(se_cmd->se_tmr_req);
return ret;
}
ret = transport_lookup_tmr_lun(se_cmd);
if (ret)
goto failure;
transport_generic_handle_tmr(se_cmd);
return 0;
/*
* For callback during failure handling, push this work off
* to process context with TMR_LUN_DOES_NOT_EXIST status.
*/
failure:
INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
schedule_work(&se_cmd->work);
return 0;
}
EXPORT_SYMBOL(target_submit_tmr);
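/*
 * Illustrative example: aborting an outstanding command with task tag
 * io->tag (a hypothetical driver-local value) looks roughly like
 *
 *	rc = target_submit_tmr(se_cmd, se_sess, io->sense_buf, io->lun,
 *			       io->tmr_ctx, TMR_ABORT_TASK, GFP_KERNEL,
 *			       io->tag, TARGET_SCF_ACK_KREF);
 *
 * A non-zero return means the request was not accepted and the caller
 * must clean up; otherwise the TMR response arrives later via
 * se_tfo->queue_tm_rsp().
 */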
/*
* Handle SAM-esque emulation for generic transport request failures.
*/
void transport_generic_request_failure(struct se_cmd *cmd,
sense_reason_t sense_reason)
{
int ret = 0, post_ret;
pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
sense_reason);
target_show_cmd("-----[ ", cmd);
/*
* For SAM Task Attribute emulation for failed struct se_cmd
*/
transport_complete_task_attr(cmd);
if (cmd->transport_complete_callback)
cmd->transport_complete_callback(cmd, false, &post_ret);
if (cmd->transport_state & CMD_T_ABORTED) {
INIT_WORK(&cmd->work, target_abort_work);
queue_work(target_completion_wq, &cmd->work);
return;
}
switch (sense_reason) {
case TCM_NON_EXISTENT_LUN:
case TCM_UNSUPPORTED_SCSI_OPCODE:
case TCM_INVALID_CDB_FIELD:
case TCM_INVALID_PARAMETER_LIST:
case TCM_PARAMETER_LIST_LENGTH_ERROR:
case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
case TCM_UNKNOWN_MODE_PAGE:
case TCM_WRITE_PROTECTED:
case TCM_ADDRESS_OUT_OF_RANGE:
case TCM_CHECK_CONDITION_ABORT_CMD:
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
case TCM_TOO_MANY_TARGET_DESCS:
case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
case TCM_TOO_MANY_SEGMENT_DESCS:
case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
case TCM_INVALID_FIELD_IN_COMMAND_IU:
case TCM_ALUA_TG_PT_STANDBY:
case TCM_ALUA_TG_PT_UNAVAILABLE:
case TCM_ALUA_STATE_TRANSITION:
case TCM_ALUA_OFFLINE:
break;
case TCM_OUT_OF_RESOURCES:
cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
goto queue_status;
case TCM_LUN_BUSY:
cmd->scsi_status = SAM_STAT_BUSY;
goto queue_status;
case TCM_RESERVATION_CONFLICT:
/*
* No SENSE Data payload for this case, set SCSI Status
* and queue the response to $FABRIC_MOD.
*
* Uses linux/include/scsi/scsi.h SAM status codes defs
*/
cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
/*
* For UA Interlock Code 11b, a RESERVATION CONFLICT will
* establish a UNIT ATTENTION with PREVIOUS RESERVATION
* CONFLICT STATUS.
*
* See spc4r17, section 7.4.6 Control Mode Page, Table 349
*/
if (cmd->se_sess &&
cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl
== TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
target_ua_allocate_lun(cmd->se_sess->se_node_acl,
cmd->orig_fe_lun, 0x2C,
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
}
goto queue_status;
default:
pr_err("Unknown transport error for CDB 0x%02x: %d\n",
cmd->t_task_cdb[0], sense_reason);
sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
break;
}
ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
if (ret)
goto queue_full;
check_stop:
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
queue_status:
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_status(cmd);
if (!ret)
goto check_stop;
queue_full:
transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}
EXPORT_SYMBOL(transport_generic_request_failure);
void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
{
sense_reason_t ret;
if (!cmd->execute_cmd) {
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto err;
}
if (do_checks) {
/*
* Check for an existing UNIT ATTENTION condition after
* target_handle_task_attr() has done SAM task attr
* checking, and possibly have already deferred execution
* out to target_restart_delayed_cmds() context.
*/
ret = target_scsi3_ua_check(cmd);
if (ret)
goto err;
ret = target_alua_state_check(cmd);
if (ret)
goto err;
ret = target_check_reservation(cmd);
if (ret) {
cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
goto err;
}
}
ret = cmd->execute_cmd(cmd);
if (!ret)
return;
err:
spin_lock_irq(&cmd->t_state_lock);
cmd->transport_state &= ~CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
transport_generic_request_failure(cmd, ret);
}
static int target_write_prot_action(struct se_cmd *cmd)
{
u32 sectors;
/*
* Perform WRITE_INSERT of PI using software emulation when backend
* device has PI enabled, if the transport has not already generated
* PI using hardware WRITE_INSERT offload.
*/
switch (cmd->prot_op) {
case TARGET_PROT_DOUT_INSERT:
if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
sbc_dif_generate(cmd);
break;
case TARGET_PROT_DOUT_STRIP:
if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
break;
sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
sectors, 0, cmd->t_prot_sg, 0);
if (unlikely(cmd->pi_err)) {
spin_lock_irq(&cmd->t_state_lock);
cmd->transport_state &= ~CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
transport_generic_request_failure(cmd, cmd->pi_err);
return -1;
}
break;
default:
break;
}
return 0;
}
static bool target_handle_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return false;
cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
/*
* Check for the existence of HEAD_OF_QUEUE, and if set allow the passed
* struct se_cmd to be executed immediately at the front of the queue.
*/
switch (cmd->sam_task_attr) {
case TCM_HEAD_TAG:
atomic_inc_mb(&dev->non_ordered);
pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
cmd->t_task_cdb[0]);
return false;
case TCM_ORDERED_TAG:
atomic_inc_mb(&dev->delayed_cmd_count);
pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
cmd->t_task_cdb[0]);
break;
default:
/*
* For SIMPLE and UNTAGGED Task Attribute commands
*/
atomic_inc_mb(&dev->non_ordered);
if (atomic_read(&dev->delayed_cmd_count) == 0)
return false;
break;
}
if (cmd->sam_task_attr != TCM_ORDERED_TAG) {
atomic_inc_mb(&dev->delayed_cmd_count);
/*
* We will account for this when we dequeue from the delayed
* list.
*/
atomic_dec_mb(&dev->non_ordered);
}
spin_lock_irq(&cmd->t_state_lock);
cmd->transport_state &= ~CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
spin_lock(&dev->delayed_cmd_lock);
list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
spin_unlock(&dev->delayed_cmd_lock);
pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn",
cmd->t_task_cdb[0], cmd->sam_task_attr);
/*
* We may have no non ordered cmds when this function started or we
* could have raced with the last simple/head cmd completing, so kick
* the delayed handler here.
*/
schedule_work(&dev->delayed_cmd_work);
return true;
}
void target_execute_cmd(struct se_cmd *cmd)
{
/*
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
*
* If the received CDB has already been aborted stop processing it here.
*/
if (target_cmd_interrupted(cmd))
return;
spin_lock_irq(&cmd->t_state_lock);
cmd->t_state = TRANSPORT_PROCESSING;
cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
if (target_write_prot_action(cmd))
return;
if (target_handle_task_attr(cmd))
return;
__target_execute_cmd(cmd, true);
}
EXPORT_SYMBOL(target_execute_cmd);
/*
* Process all commands up to the last received ORDERED task attribute which
* requires another blocking boundary
*/
void target_do_delayed_work(struct work_struct *work)
{
struct se_device *dev = container_of(work, struct se_device,
delayed_cmd_work);
spin_lock(&dev->delayed_cmd_lock);
while (!dev->ordered_sync_in_progress) {
struct se_cmd *cmd;
if (list_empty(&dev->delayed_cmd_list))
break;
cmd = list_entry(dev->delayed_cmd_list.next,
struct se_cmd, se_delayed_node);
if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
/*
* Check if we started with:
* [ordered] [simple] [ordered]
* and we are now at the last ordered so we have to wait
* for the simple cmd.
*/
if (atomic_read(&dev->non_ordered) > 0)
break;
dev->ordered_sync_in_progress = true;
}
list_del(&cmd->se_delayed_node);
atomic_dec_mb(&dev->delayed_cmd_count);
spin_unlock(&dev->delayed_cmd_lock);
if (cmd->sam_task_attr != TCM_ORDERED_TAG)
atomic_inc_mb(&dev->non_ordered);
cmd->transport_state |= CMD_T_SENT;
__target_execute_cmd(cmd, true);
spin_lock(&dev->delayed_cmd_lock);
}
spin_unlock(&dev->delayed_cmd_lock);
}
/*
* Called from I/O completion to determine which dormant/delayed
* and ordered cmds need to have their tasks added to the execution queue.
*/
static void transport_complete_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return;
if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
goto restart;
if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
atomic_dec_mb(&dev->non_ordered);
dev->dev_cur_ordered_id++;
} else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
atomic_dec_mb(&dev->non_ordered);
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
dev->dev_cur_ordered_id);
} else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
spin_lock(&dev->delayed_cmd_lock);
dev->ordered_sync_in_progress = false;
spin_unlock(&dev->delayed_cmd_lock);
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
dev->dev_cur_ordered_id);
}
cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
restart:
if (atomic_read(&dev->delayed_cmd_count) > 0)
schedule_work(&dev->delayed_cmd_work);
}
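/*
 * Re-attempt the fabric ->queue_data_in()/->queue_status() callbacks for a
 * command that previously hit a QUEUE_FULL condition (see
 * transport_handle_queue_full()).
 */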
static void transport_complete_qf(struct se_cmd *cmd)
{
int ret = 0;
transport_complete_task_attr(cmd);
/*
	 * If a fabric driver ->write_pending() or ->queue_data_in() callback
	 * has returned neither -ENOMEM nor -EAGAIN, assume it's fatal and
	 * the same callbacks should not be retried. Return CHECK_CONDITION
	 * if a scsi_status is not already set.
	 *
	 * If a fabric driver ->queue_status() has returned non-zero, always
	 * keep retrying no matter what.
*/
if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
if (cmd->scsi_status)
goto queue_status;
translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
goto queue_status;
}
/*
* Check if we need to send a sense buffer from
* the struct se_cmd in question. We do NOT want
	 * to take this path if the IO has been marked as
* needing to be treated like a "normal read". This
* is the case if it's a tape read, and either the
* FM, EOM, or ILI bits are set, but there is no
* sense data.
*/
if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
goto queue_status;
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
/* queue status if not treating this as a normal read */
if (cmd->scsi_status &&
!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
goto queue_status;
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_data_in(cmd);
break;
case DMA_TO_DEVICE:
if (cmd->se_cmd_flags & SCF_BIDI) {
ret = cmd->se_tfo->queue_data_in(cmd);
break;
}
fallthrough;
case DMA_NONE:
queue_status:
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_status(cmd);
break;
default:
break;
}
if (ret < 0) {
transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
return;
}
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
}
static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
int err, bool write_pending)
{
/*
* -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
* ->queue_data_in() callbacks from new process context.
*
* Otherwise for other errors, transport_complete_qf() will send
* CHECK_CONDITION via ->queue_status() instead of attempting to
* retry associated fabric driver data-transfer callbacks.
*/
if (err == -EAGAIN || err == -ENOMEM) {
cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
TRANSPORT_COMPLETE_QF_OK;
} else {
pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
}
spin_lock_irq(&dev->qf_cmd_lock);
list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
atomic_inc_mb(&dev->dev_qf_count);
spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
schedule_work(&cmd->se_dev->qf_work_queue);
}
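/*
 * For READ data, perform software DIF verification (DIN_STRIP) or generation
 * (DIN_INSERT) when the fabric session does not support the corresponding
 * protection offload. Returns true if a protection error was detected.
 */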
static bool target_read_prot_action(struct se_cmd *cmd)
{
switch (cmd->prot_op) {
case TARGET_PROT_DIN_STRIP:
if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
u32 sectors = cmd->data_length >>
ilog2(cmd->se_dev->dev_attrib.block_size);
cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
sectors, 0, cmd->t_prot_sg,
0);
if (cmd->pi_err)
return true;
}
break;
case TARGET_PROT_DIN_INSERT:
if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
break;
sbc_dif_generate(cmd);
break;
default:
break;
}
return false;
}
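/*
 * Workqueue handler for successful command completion: performs task
 * attribute accounting, runs any transport_complete_callback() (e.g. for
 * COMPARE_AND_WRITE), and queues data and/or status back to the fabric,
 * falling back to QUEUE_FULL handling when the fabric cannot accept it.
 */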
static void target_complete_ok_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
int ret;
/*
* Check if we need to move delayed/dormant tasks from cmds on the
* delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
* Attribute.
*/
transport_complete_task_attr(cmd);
/*
* Check to schedule QUEUE_FULL work, or execute an existing
* cmd->transport_qf_callback()
*/
if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
schedule_work(&cmd->se_dev->qf_work_queue);
/*
* Check if we need to send a sense buffer from
* the struct se_cmd in question. We do NOT want
	 * to take this path if the IO has been marked as
* needing to be treated like a "normal read". This
* is the case if it's a tape read, and either the
* FM, EOM, or ILI bits are set, but there is no
* sense data.
*/
if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
WARN_ON(!cmd->scsi_status);
ret = transport_send_check_condition_and_sense(
cmd, 0, 1);
if (ret)
goto queue_full;
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
}
/*
* Check for a callback, used by amongst other things
* XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
*/
if (cmd->transport_complete_callback) {
sense_reason_t rc;
bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
bool zero_dl = !(cmd->data_length);
int post_ret = 0;
rc = cmd->transport_complete_callback(cmd, true, &post_ret);
if (!rc && !post_ret) {
if (caw && zero_dl)
goto queue_rsp;
return;
} else if (rc) {
ret = transport_send_check_condition_and_sense(cmd,
rc, 0);
if (ret)
goto queue_full;
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
}
}
queue_rsp:
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
/*
* if this is a READ-type IO, but SCSI status
* is set, then skip returning data and just
* return the status -- unless this IO is marked
* as needing to be treated as a normal read,
* in which case we want to go ahead and return
* the data. This happens, for example, for tape
* reads with the FM, EOM, or ILI bits set, with
* no sense data.
*/
if (cmd->scsi_status &&
!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
goto queue_status;
atomic_long_add(cmd->data_length,
&cmd->se_lun->lun_stats.tx_data_octets);
/*
* Perform READ_STRIP of PI using software emulation when
* backend had PI enabled, if the transport will not be
* performing hardware READ_STRIP offload.
*/
if (target_read_prot_action(cmd)) {
ret = transport_send_check_condition_and_sense(cmd,
cmd->pi_err, 0);
if (ret)
goto queue_full;
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
}
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_data_in(cmd);
if (ret)
goto queue_full;
break;
case DMA_TO_DEVICE:
atomic_long_add(cmd->data_length,
&cmd->se_lun->lun_stats.rx_data_octets);
/*
* Check if we need to send READ payload for BIDI-COMMAND
*/
if (cmd->se_cmd_flags & SCF_BIDI) {
atomic_long_add(cmd->data_length,
&cmd->se_lun->lun_stats.tx_data_octets);
ret = cmd->se_tfo->queue_data_in(cmd);
if (ret)
goto queue_full;
break;
}
fallthrough;
case DMA_NONE:
queue_status:
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_status(cmd);
if (ret)
goto queue_full;
break;
default:
break;
}
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
queue_full:
pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
" data_direction: %d\n", cmd, cmd->data_direction);
transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}
void target_free_sgl(struct scatterlist *sgl, int nents)
{
sgl_free_n_order(sgl, nents, 0);
}
EXPORT_SYMBOL(target_free_sgl);
static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
{
/*
* Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
	 * emulation, and free + reset pointers if necessary.
*/
if (!cmd->t_data_sg_orig)
return;
kfree(cmd->t_data_sg);
cmd->t_data_sg = cmd->t_data_sg_orig;
cmd->t_data_sg_orig = NULL;
cmd->t_data_nents = cmd->t_data_nents_orig;
cmd->t_data_nents_orig = 0;
}
static inline void transport_free_pages(struct se_cmd *cmd)
{
if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
cmd->t_prot_sg = NULL;
cmd->t_prot_nents = 0;
}
if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
/*
* Release special case READ buffer payload required for
* SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
*/
if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
target_free_sgl(cmd->t_bidi_data_sg,
cmd->t_bidi_data_nents);
cmd->t_bidi_data_sg = NULL;
cmd->t_bidi_data_nents = 0;
}
transport_reset_sgl_orig(cmd);
return;
}
transport_reset_sgl_orig(cmd);
target_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
cmd->t_data_sg = NULL;
cmd->t_data_nents = 0;
target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
cmd->t_bidi_data_sg = NULL;
cmd->t_bidi_data_nents = 0;
}
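/*
 * Map the command's data scatterlist into one virtually contiguous buffer:
 * kmap() for a single-entry list, vmap() otherwise. Undone by
 * transport_kunmap_data_sg().
 */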
void *transport_kmap_data_sg(struct se_cmd *cmd)
{
struct scatterlist *sg = cmd->t_data_sg;
struct page **pages;
int i;
/*
* We need to take into account a possible offset here for fabrics like
	 * tcm_loop that may be using a contig buffer from the SCSI midlayer for
	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd().
*/
if (!cmd->t_data_nents)
return NULL;
BUG_ON(!sg);
if (cmd->t_data_nents == 1)
return kmap(sg_page(sg)) + sg->offset;
/* >1 page. use vmap */
pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL);
if (!pages)
return NULL;
/* convert sg[] to pages[] */
for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
pages[i] = sg_page(sg);
}
cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
kfree(pages);
if (!cmd->t_data_vmap)
return NULL;
return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
EXPORT_SYMBOL(transport_kmap_data_sg);
void transport_kunmap_data_sg(struct se_cmd *cmd)
{
if (!cmd->t_data_nents) {
return;
} else if (cmd->t_data_nents == 1) {
kunmap(sg_page(cmd->t_data_sg));
return;
}
vunmap(cmd->t_data_vmap);
cmd->t_data_vmap = NULL;
}
EXPORT_SYMBOL(transport_kunmap_data_sg);
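/*
 * Illustrative pairing only (not taken from this file): a CDB emulation
 * handler typically wraps its buffer access as
 *
 *	buf = transport_kmap_data_sg(cmd);
 *	if (!buf)
 *		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 *	... parse or fill the contiguous buffer ...
 *	transport_kunmap_data_sg(cmd);
 */
/**
 * target_alloc_sgl - allocate a scatterlist covering @length bytes
 * @sgl: output pointer for the allocated scatterlist
 * @nents: output pointer for the number of entries
 * @length: total number of bytes to allocate
 * @zero_page: if true, allocate zeroed pages (__GFP_ZERO)
 * @chainable: if true, allow the scatterlist to be chained
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */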
int
target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
bool zero_page, bool chainable)
{
gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);
*sgl = sgl_alloc_order(length, 0, chainable, gfp, nents);
return *sgl ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(target_alloc_sgl);
/*
* Allocate any required resources to execute the command. For writes we
* might not have the payload yet, so notify the fabric via a call to
* ->write_pending instead. Otherwise place it on the execution queue.
*/
sense_reason_t
transport_generic_new_cmd(struct se_cmd *cmd)
{
unsigned long flags;
int ret = 0;
bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
if (cmd->prot_op != TARGET_PROT_NORMAL &&
!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
cmd->prot_length, true, false);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
* Determine if the TCM fabric module has already allocated physical
* memory, and is directly calling transport_generic_map_mem_to_cmd()
* beforehand.
*/
if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
cmd->data_length) {
if ((cmd->se_cmd_flags & SCF_BIDI) ||
(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
u32 bidi_length;
if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
bidi_length = cmd->t_task_nolb *
cmd->se_dev->dev_attrib.block_size;
else
bidi_length = cmd->data_length;
ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
&cmd->t_bidi_data_nents,
bidi_length, zero_flag, false);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
cmd->data_length, zero_flag, false);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
} else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
cmd->data_length) {
/*
* Special case for COMPARE_AND_WRITE with fabrics
* using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
*/
u32 caw_length = cmd->t_task_nolb *
cmd->se_dev->dev_attrib.block_size;
ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
&cmd->t_bidi_data_nents,
caw_length, zero_flag, false);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
	 * If this command is not a write we can execute it right here;
	 * for write buffers we need to notify the fabric driver first
* and let it call back once the write buffers are ready.
*/
target_add_to_state_list(cmd);
if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
target_execute_cmd(cmd);
return 0;
}
spin_lock_irqsave(&cmd->t_state_lock, flags);
cmd->t_state = TRANSPORT_WRITE_PENDING;
/*
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
*/
if (cmd->transport_state & CMD_T_STOP &&
!cmd->se_tfo->write_pending_must_be_called) {
pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
__func__, __LINE__, cmd->tag);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete_all(&cmd->t_transport_stop_comp);
return 0;
}
cmd->transport_state &= ~CMD_T_ACTIVE;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
ret = cmd->se_tfo->write_pending(cmd);
if (ret)
goto queue_full;
return 0;
queue_full:
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
return 0;
}
EXPORT_SYMBOL(transport_generic_new_cmd);
static void transport_write_pending_qf(struct se_cmd *cmd)
{
unsigned long flags;
int ret;
bool stop;
spin_lock_irqsave(&cmd->t_state_lock, flags);
stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
if (stop) {
pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
__func__, __LINE__, cmd->tag);
complete_all(&cmd->t_transport_stop_comp);
return;
}
ret = cmd->se_tfo->write_pending(cmd);
if (ret) {
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
cmd);
transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
}
}
static bool
__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
unsigned long *flags);
static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
{
unsigned long flags;
spin_lock_irqsave(&cmd->t_state_lock, flags);
__transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
/*
* Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has
* finished.
*/
void target_put_cmd_and_wait(struct se_cmd *cmd)
{
DECLARE_COMPLETION_ONSTACK(compl);
WARN_ON_ONCE(cmd->abrt_compl);
cmd->abrt_compl = &compl;
target_put_sess_cmd(cmd);
wait_for_completion(&compl);
}
/*
* This function is called by frontend drivers after processing of a command
* has finished.
*
* The protocol for ensuring that either the regular frontend command
* processing flow or target_handle_abort() code drops one reference is as
* follows:
 * - Calling .queue_data_in(), .queue_status() or .queue_tm_rsp() will cause
* the frontend driver to call this function synchronously or asynchronously.
* That will cause one reference to be dropped.
* - During regular command processing the target core sets CMD_T_COMPLETE
* before invoking one of the .queue_*() functions.
* - The code that aborts commands skips commands and TMFs for which
* CMD_T_COMPLETE has been set.
* - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for
* commands that will be aborted.
 * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set,
 *   transport_generic_free_cmd() skips its call to target_put_sess_cmd().
* - For aborted commands for which CMD_T_TAS has been set .queue_status() will
* be called and will drop a reference.
* - For aborted commands for which CMD_T_TAS has not been set .aborted_task()
* will be called. target_handle_abort() will drop the final reference.
*/
int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
DECLARE_COMPLETION_ONSTACK(compl);
int ret = 0;
bool aborted = false, tas = false;
if (wait_for_tasks)
target_wait_free_cmd(cmd, &aborted, &tas);
if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) {
/*
* Handle WRITE failure case where transport_generic_new_cmd()
* has already added se_cmd to state_list, but fabric has
* failed command before I/O submission.
*/
if (cmd->state_active)
target_remove_from_state_list(cmd);
if (cmd->se_lun)
transport_lun_remove_cmd(cmd);
}
if (aborted)
cmd->free_compl = &compl;
ret = target_put_sess_cmd(cmd);
if (aborted) {
pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
wait_for_completion(&compl);
ret = 1;
}
return ret;
}
EXPORT_SYMBOL(transport_generic_free_cmd);
/**
* target_get_sess_cmd - Verify the session is accepting cmds and take ref
* @se_cmd: command descriptor to add
* @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
*/
int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
{
int ret = 0;
/*
* Add a second kref if the fabric caller is expecting to handle
* fabric acknowledgement that requires two target_put_sess_cmd()
* invocations before se_cmd descriptor release.
*/
if (ack_kref) {
kref_get(&se_cmd->cmd_kref);
se_cmd->se_cmd_flags |= SCF_ACK_KREF;
}
/*
* Users like xcopy do not use counters since they never do a stop
* and wait.
*/
if (se_cmd->cmd_cnt) {
if (!percpu_ref_tryget_live(&se_cmd->cmd_cnt->refcnt))
ret = -ESHUTDOWN;
}
if (ret && ack_kref)
target_put_sess_cmd(se_cmd);
return ret;
}
EXPORT_SYMBOL(target_get_sess_cmd);
static void target_free_cmd_mem(struct se_cmd *cmd)
{
transport_free_pages(cmd);
if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
core_tmr_release_req(cmd->se_tmr_req);
if (cmd->t_task_cdb != cmd->__t_task_cdb)
kfree(cmd->t_task_cdb);
}
static void target_release_cmd_kref(struct kref *kref)
{
struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
struct target_cmd_counter *cmd_cnt = se_cmd->cmd_cnt;
struct completion *free_compl = se_cmd->free_compl;
struct completion *abrt_compl = se_cmd->abrt_compl;
target_free_cmd_mem(se_cmd);
se_cmd->se_tfo->release_cmd(se_cmd);
if (free_compl)
complete(free_compl);
if (abrt_compl)
complete(abrt_compl);
if (cmd_cnt)
percpu_ref_put(&cmd_cnt->refcnt);
}
/**
* target_put_sess_cmd - decrease the command reference count
* @se_cmd: command to drop a reference from
*
* Returns 1 if and only if this target_put_sess_cmd() call caused the
* refcount to drop to zero. Returns zero otherwise.
*/
int target_put_sess_cmd(struct se_cmd *se_cmd)
{
return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
}
EXPORT_SYMBOL(target_put_sess_cmd);
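/*
 * Illustrative pairing only (not from this file): a fabric that passes
 * ack_kref=true to target_get_sess_cmd() ends up issuing two puts, e.g.
 *
 *	target_get_sess_cmd(se_cmd, true);
 *	...
 *	target_put_sess_cmd(se_cmd);	(fabric acknowledgement)
 *	target_put_sess_cmd(se_cmd);	(final put, invokes ->release_cmd())
 */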
static const char *data_dir_name(enum dma_data_direction d)
{
switch (d) {
case DMA_BIDIRECTIONAL: return "BIDI";
case DMA_TO_DEVICE: return "WRITE";
case DMA_FROM_DEVICE: return "READ";
case DMA_NONE: return "NONE";
}
return "(?)";
}
static const char *cmd_state_name(enum transport_state_table t)
{
switch (t) {
case TRANSPORT_NO_STATE: return "NO_STATE";
case TRANSPORT_NEW_CMD: return "NEW_CMD";
case TRANSPORT_WRITE_PENDING: return "WRITE_PENDING";
case TRANSPORT_PROCESSING: return "PROCESSING";
case TRANSPORT_COMPLETE: return "COMPLETE";
case TRANSPORT_ISTATE_PROCESSING:
return "ISTATE_PROCESSING";
case TRANSPORT_COMPLETE_QF_WP: return "COMPLETE_QF_WP";
case TRANSPORT_COMPLETE_QF_OK: return "COMPLETE_QF_OK";
case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR";
}
return "(?)";
}
static void target_append_str(char **str, const char *txt)
{
char *prev = *str;
*str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) :
kstrdup(txt, GFP_ATOMIC);
kfree(prev);
}
/*
* Convert a transport state bitmask into a string. The caller is
* responsible for freeing the returned pointer.
*/
static char *target_ts_to_str(u32 ts)
{
char *str = NULL;
if (ts & CMD_T_ABORTED)
target_append_str(&str, "aborted");
if (ts & CMD_T_ACTIVE)
target_append_str(&str, "active");
if (ts & CMD_T_COMPLETE)
target_append_str(&str, "complete");
if (ts & CMD_T_SENT)
target_append_str(&str, "sent");
if (ts & CMD_T_STOP)
target_append_str(&str, "stop");
if (ts & CMD_T_FABRIC_STOP)
target_append_str(&str, "fabric_stop");
return str;
}
static const char *target_tmf_name(enum tcm_tmreq_table tmf)
{
switch (tmf) {
case TMR_ABORT_TASK: return "ABORT_TASK";
case TMR_ABORT_TASK_SET: return "ABORT_TASK_SET";
case TMR_CLEAR_ACA: return "CLEAR_ACA";
case TMR_CLEAR_TASK_SET: return "CLEAR_TASK_SET";
case TMR_LUN_RESET: return "LUN_RESET";
case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET";
case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET";
case TMR_LUN_RESET_PRO: return "LUN_RESET_PRO";
case TMR_UNKNOWN: break;
}
return "(?)";
}
void target_show_cmd(const char *pfx, struct se_cmd *cmd)
{
char *ts_str = target_ts_to_str(cmd->transport_state);
const u8 *cdb = cmd->t_task_cdb;
struct se_tmr_req *tmf = cmd->se_tmr_req;
if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n",
pfx, cdb[0], cdb[1], cmd->tag,
data_dir_name(cmd->data_direction),
cmd->se_tfo->get_cmd_state(cmd),
cmd_state_name(cmd->t_state), cmd->data_length,
kref_read(&cmd->cmd_kref), ts_str);
} else {
pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n",
pfx, target_tmf_name(tmf->function), cmd->tag,
tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd),
cmd_state_name(cmd->t_state),
kref_read(&cmd->cmd_kref), ts_str);
}
kfree(ts_str);
}
EXPORT_SYMBOL(target_show_cmd);
static void target_stop_cmd_counter_confirm(struct percpu_ref *ref)
{
struct target_cmd_counter *cmd_cnt = container_of(ref,
struct target_cmd_counter,
refcnt);
complete_all(&cmd_cnt->stop_done);
}
/**
* target_stop_cmd_counter - Stop new IO from being added to the counter.
* @cmd_cnt: counter to stop
*/
void target_stop_cmd_counter(struct target_cmd_counter *cmd_cnt)
{
pr_debug("Stopping command counter.\n");
if (!atomic_cmpxchg(&cmd_cnt->stopped, 0, 1))
percpu_ref_kill_and_confirm(&cmd_cnt->refcnt,
target_stop_cmd_counter_confirm);
}
EXPORT_SYMBOL_GPL(target_stop_cmd_counter);
/**
* target_stop_session - Stop new IO from being queued on the session.
* @se_sess: session to stop
*/
void target_stop_session(struct se_session *se_sess)
{
target_stop_cmd_counter(se_sess->cmd_cnt);
}
EXPORT_SYMBOL(target_stop_session);
/**
* target_wait_for_cmds - Wait for outstanding cmds.
* @cmd_cnt: counter to wait for active I/O for.
*/
void target_wait_for_cmds(struct target_cmd_counter *cmd_cnt)
{
int ret;
WARN_ON_ONCE(!atomic_read(&cmd_cnt->stopped));
do {
pr_debug("Waiting for running cmds to complete.\n");
ret = wait_event_timeout(cmd_cnt->refcnt_wq,
percpu_ref_is_zero(&cmd_cnt->refcnt),
180 * HZ);
} while (ret <= 0);
wait_for_completion(&cmd_cnt->stop_done);
pr_debug("Waiting for cmds done.\n");
}
EXPORT_SYMBOL_GPL(target_wait_for_cmds);
/**
* target_wait_for_sess_cmds - Wait for outstanding commands
* @se_sess: session to wait for active I/O
*/
void target_wait_for_sess_cmds(struct se_session *se_sess)
{
target_wait_for_cmds(se_sess->cmd_cnt);
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
/*
 * Prevent new percpu_ref_tryget_live() calls from succeeding and wait until
* all references to the LUN have been released. Called during LUN shutdown.
*/
void transport_clear_lun_ref(struct se_lun *lun)
{
percpu_ref_kill(&lun->lun_ref);
wait_for_completion(&lun->lun_shutdown_comp);
}
static bool
__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
bool *aborted, bool *tas, unsigned long *flags)
__releases(&cmd->t_state_lock)
__acquires(&cmd->t_state_lock)
{
lockdep_assert_held(&cmd->t_state_lock);
if (fabric_stop)
cmd->transport_state |= CMD_T_FABRIC_STOP;
if (cmd->transport_state & CMD_T_ABORTED)
*aborted = true;
if (cmd->transport_state & CMD_T_TAS)
*tas = true;
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
return false;
if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
return false;
if (!(cmd->transport_state & CMD_T_ACTIVE))
return false;
if (fabric_stop && *aborted)
return false;
cmd->transport_state |= CMD_T_STOP;
target_show_cmd("wait_for_tasks: Stopping ", cmd);
spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp,
180 * HZ))
target_show_cmd("wait for tasks: ", cmd);
spin_lock_irqsave(&cmd->t_state_lock, *flags);
cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
"t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
return true;
}
/**
* transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp
* @cmd: command to wait on
*/
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
unsigned long flags;
bool ret, aborted = false, tas = false;
spin_lock_irqsave(&cmd->t_state_lock, flags);
ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return ret;
}
EXPORT_SYMBOL(transport_wait_for_tasks);
struct sense_detail {
u8 key;
u8 asc;
u8 ascq;
bool add_sense_info;
};
static const struct sense_detail sense_detail_table[] = {
[TCM_NO_SENSE] = {
.key = NOT_READY
},
[TCM_NON_EXISTENT_LUN] = {
.key = ILLEGAL_REQUEST,
.asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */
},
[TCM_UNSUPPORTED_SCSI_OPCODE] = {
.key = ILLEGAL_REQUEST,
.asc = 0x20, /* INVALID COMMAND OPERATION CODE */
},
[TCM_SECTOR_COUNT_TOO_MANY] = {
.key = ILLEGAL_REQUEST,
.asc = 0x20, /* INVALID COMMAND OPERATION CODE */
},
[TCM_UNKNOWN_MODE_PAGE] = {
.key = ILLEGAL_REQUEST,
.asc = 0x24, /* INVALID FIELD IN CDB */
},
[TCM_CHECK_CONDITION_ABORT_CMD] = {
.key = ABORTED_COMMAND,
.asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
.ascq = 0x03,
},
[TCM_INCORRECT_AMOUNT_OF_DATA] = {
.key = ABORTED_COMMAND,
.asc = 0x0c, /* WRITE ERROR */
.ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
},
[TCM_INVALID_CDB_FIELD] = {
.key = ILLEGAL_REQUEST,
.asc = 0x24, /* INVALID FIELD IN CDB */
},
[TCM_INVALID_PARAMETER_LIST] = {
.key = ILLEGAL_REQUEST,
.asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
},
[TCM_TOO_MANY_TARGET_DESCS] = {
.key = ILLEGAL_REQUEST,
.asc = 0x26,
.ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
},
[TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
.key = ILLEGAL_REQUEST,
.asc = 0x26,
.ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
},
[TCM_TOO_MANY_SEGMENT_DESCS] = {
.key = ILLEGAL_REQUEST,
.asc = 0x26,
.ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
},
[TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
.key = ILLEGAL_REQUEST,
.asc = 0x26,
.ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
},
[TCM_PARAMETER_LIST_LENGTH_ERROR] = {
.key = ILLEGAL_REQUEST,
.asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
},
[TCM_UNEXPECTED_UNSOLICITED_DATA] = {
.key = ILLEGAL_REQUEST,
.asc = 0x0c, /* WRITE ERROR */
.ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
},
[TCM_SERVICE_CRC_ERROR] = {
.key = ABORTED_COMMAND,
.asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
.ascq = 0x05, /* N/A */
},
[TCM_SNACK_REJECTED] = {
.key = ABORTED_COMMAND,
.asc = 0x11, /* READ ERROR */
.ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
},
[TCM_WRITE_PROTECTED] = {
.key = DATA_PROTECT,
.asc = 0x27, /* WRITE PROTECTED */
},
[TCM_ADDRESS_OUT_OF_RANGE] = {
.key = ILLEGAL_REQUEST,
.asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
},
[TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
.key = UNIT_ATTENTION,
},
[TCM_MISCOMPARE_VERIFY] = {
.key = MISCOMPARE,
.asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
.ascq = 0x00,
.add_sense_info = true,
},
[TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
.key = ABORTED_COMMAND,
.asc = 0x10,
.ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
.add_sense_info = true,
},
[TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
.key = ABORTED_COMMAND,
.asc = 0x10,
.ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
.add_sense_info = true,
},
[TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
.key = ABORTED_COMMAND,
.asc = 0x10,
.ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
.add_sense_info = true,
},
[TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
.key = COPY_ABORTED,
.asc = 0x0d,
.ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
},
[TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
/*
* Returning ILLEGAL REQUEST would cause immediate IO errors on
* Solaris initiators. Returning NOT READY instead means the
* operations will be retried a finite number of times and we
* can survive intermittent errors.
*/
.key = NOT_READY,
.asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
},
[TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
/*
		 * From spc4r22 sections 5.7.7 and 5.7.8:
		 * If a PERSISTENT RESERVE OUT command with a REGISTER service action
		 * or a REGISTER AND IGNORE EXISTING KEY service action or
		 * REGISTER AND MOVE service action is attempted,
		 * but there are insufficient device server resources to complete the
		 * operation, then the command shall be terminated with CHECK CONDITION
		 * status, with the sense key set to ILLEGAL REQUEST, and the additional
* sense code set to INSUFFICIENT REGISTRATION RESOURCES.
*/
.key = ILLEGAL_REQUEST,
.asc = 0x55,
.ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
},
[TCM_INVALID_FIELD_IN_COMMAND_IU] = {
.key = ILLEGAL_REQUEST,
.asc = 0x0e,
.ascq = 0x03, /* INVALID FIELD IN COMMAND INFORMATION UNIT */
},
[TCM_ALUA_TG_PT_STANDBY] = {
.key = NOT_READY,
.asc = 0x04,
.ascq = ASCQ_04H_ALUA_TG_PT_STANDBY,
},
[TCM_ALUA_TG_PT_UNAVAILABLE] = {
.key = NOT_READY,
.asc = 0x04,
.ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE,
},
[TCM_ALUA_STATE_TRANSITION] = {
.key = NOT_READY,
.asc = 0x04,
.ascq = ASCQ_04H_ALUA_STATE_TRANSITION,
},
[TCM_ALUA_OFFLINE] = {
.key = NOT_READY,
.asc = 0x04,
.ascq = ASCQ_04H_ALUA_OFFLINE,
},
};
/**
* translate_sense_reason - translate a sense reason into T10 key, asc and ascq
* @cmd: SCSI command in which the resulting sense buffer or SCSI status will
* be stored.
* @reason: LIO sense reason code. If this argument has the value
* TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If
* dequeuing a unit attention fails due to multiple commands being processed
* concurrently, set the command status to BUSY.
 */
static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{
const struct sense_detail *sd;
u8 *buffer = cmd->sense_buffer;
int r = (__force int)reason;
u8 key, asc, ascq;
bool desc_format = target_sense_desc_format(cmd->se_dev);
if (r < ARRAY_SIZE(sense_detail_table) && sense_detail_table[r].key)
sd = &sense_detail_table[r];
else
sd = &sense_detail_table[(__force int)
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
key = sd->key;
if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
&ascq)) {
cmd->scsi_status = SAM_STAT_BUSY;
return;
}
} else {
WARN_ON_ONCE(sd->asc == 0);
asc = sd->asc;
ascq = sd->ascq;
}
cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
if (sd->add_sense_info)
WARN_ON_ONCE(scsi_set_sense_information(buffer,
cmd->scsi_sense_length,
cmd->sense_info) < 0);
}
int
transport_send_check_condition_and_sense(struct se_cmd *cmd,
sense_reason_t reason, int from_transport)
{
unsigned long flags;
WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
spin_lock_irqsave(&cmd->t_state_lock, flags);
if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return 0;
}
cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
if (!from_transport)
translate_sense_reason(cmd, reason);
trace_target_cmd_complete(cmd);
return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
/**
* target_send_busy - Send SCSI BUSY status back to the initiator
* @cmd: SCSI command for which to send a BUSY reply.
*
* Note: Only call this function if target_submit_cmd*() failed.
*/
int target_send_busy(struct se_cmd *cmd)
{
WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
cmd->scsi_status = SAM_STAT_BUSY;
trace_target_cmd_complete(cmd);
return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(target_send_busy);
static void target_tmr_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
struct se_device *dev = cmd->se_dev;
struct se_tmr_req *tmr = cmd->se_tmr_req;
int ret;
if (cmd->transport_state & CMD_T_ABORTED)
goto aborted;
switch (tmr->function) {
case TMR_ABORT_TASK:
core_tmr_abort_task(dev, tmr, cmd->se_sess);
break;
case TMR_ABORT_TASK_SET:
case TMR_CLEAR_ACA:
case TMR_CLEAR_TASK_SET:
tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
break;
case TMR_LUN_RESET:
ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
TMR_FUNCTION_REJECTED;
if (tmr->response == TMR_FUNCTION_COMPLETE) {
target_dev_ua_allocate(dev, 0x29,
ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
}
break;
case TMR_TARGET_WARM_RESET:
tmr->response = TMR_FUNCTION_REJECTED;
break;
case TMR_TARGET_COLD_RESET:
tmr->response = TMR_FUNCTION_REJECTED;
break;
default:
pr_err("Unknown TMR function: 0x%02x.\n",
tmr->function);
tmr->response = TMR_FUNCTION_REJECTED;
break;
}
if (cmd->transport_state & CMD_T_ABORTED)
goto aborted;
cmd->se_tfo->queue_tm_rsp(cmd);
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
aborted:
target_handle_abort(cmd);
}
int transport_generic_handle_tmr(
struct se_cmd *cmd)
{
unsigned long flags;
bool aborted = false;
spin_lock_irqsave(&cmd->t_state_lock, flags);
if (cmd->transport_state & CMD_T_ABORTED) {
aborted = true;
} else {
cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
cmd->transport_state |= CMD_T_ACTIVE;
}
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
if (aborted) {
pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n",
cmd->se_tmr_req->function,
cmd->se_tmr_req->ref_task_tag, cmd->tag);
target_handle_abort(cmd);
return 0;
}
INIT_WORK(&cmd->work, target_tmr_work);
schedule_work(&cmd->work);
return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);
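/*
 * Report whether the backend device exposes a volatile write cache,
 * preferring the backend's get_write_cache() callback over the
 * emulate_write_cache device attribute.
 */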
bool
target_check_wce(struct se_device *dev)
{
bool wce = false;
if (dev->transport->get_write_cache)
wce = dev->transport->get_write_cache(dev);
else if (dev->dev_attrib.emulate_write_cache > 0)
wce = true;
return wce;
}
bool
target_check_fua(struct se_device *dev)
{
return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
}
|
linux-master
|
drivers/target/target_core_transport.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_hba.c
*
* This file contains the TCM HBA Transport related functions.
*
* (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
static LIST_HEAD(backend_list);
static DEFINE_MUTEX(backend_mutex);
static u32 hba_id_counter;
static DEFINE_SPINLOCK(hba_lock);
static LIST_HEAD(hba_list);
int transport_backend_register(const struct target_backend_ops *ops)
{
struct target_backend *tb, *old;
tb = kzalloc(sizeof(*tb), GFP_KERNEL);
if (!tb)
return -ENOMEM;
tb->ops = ops;
mutex_lock(&backend_mutex);
list_for_each_entry(old, &backend_list, list) {
if (!strcmp(old->ops->name, ops->name)) {
pr_err("backend %s already registered.\n", ops->name);
mutex_unlock(&backend_mutex);
kfree(tb);
return -EEXIST;
}
}
target_setup_backend_cits(tb);
list_add_tail(&tb->list, &backend_list);
mutex_unlock(&backend_mutex);
pr_debug("TCM: Registered subsystem plugin: %s struct module: %p\n",
ops->name, ops->owner);
return 0;
}
EXPORT_SYMBOL(transport_backend_register);
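/*
 * Illustrative only: a backend driver typically registers its
 * target_backend_ops from module_init() and unregisters on exit, e.g.
 *
 *	static int __init foo_module_init(void)
 *	{
 *		return transport_backend_register(&foo_ops);
 *	}
 *
 *	static void __exit foo_module_exit(void)
 *	{
 *		target_backend_unregister(&foo_ops);
 *	}
 *
 * where foo_ops is a hypothetical const struct target_backend_ops.
 */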
void target_backend_unregister(const struct target_backend_ops *ops)
{
struct target_backend *tb;
mutex_lock(&backend_mutex);
list_for_each_entry(tb, &backend_list, list) {
if (tb->ops == ops) {
list_del(&tb->list);
mutex_unlock(&backend_mutex);
/*
* Wait for any outstanding backend driver ->rcu_head
* callbacks to complete post TBO->free_device() ->
* call_rcu(), before allowing backend driver module
* unload of target_backend_ops->owner to proceed.
*/
rcu_barrier();
kfree(tb);
return;
}
}
mutex_unlock(&backend_mutex);
}
EXPORT_SYMBOL(target_backend_unregister);
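/*
 * Look up a registered backend by name and take a reference on its owning
 * module; returns NULL if no such backend is registered or the module
 * reference cannot be taken. The caller releases it with module_put().
 */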
static struct target_backend *core_get_backend(const char *name)
{
struct target_backend *tb;
mutex_lock(&backend_mutex);
list_for_each_entry(tb, &backend_list, list) {
if (!strcmp(tb->ops->name, name))
goto found;
}
mutex_unlock(&backend_mutex);
return NULL;
found:
if (tb->ops->owner && !try_module_get(tb->ops->owner))
tb = NULL;
mutex_unlock(&backend_mutex);
return tb;
}
struct se_hba *
core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
{
struct se_hba *hba;
int ret = 0;
hba = kzalloc(sizeof(*hba), GFP_KERNEL);
if (!hba) {
pr_err("Unable to allocate struct se_hba\n");
return ERR_PTR(-ENOMEM);
}
spin_lock_init(&hba->device_lock);
mutex_init(&hba->hba_access_mutex);
hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
hba->hba_flags |= hba_flags;
hba->backend = core_get_backend(plugin_name);
if (!hba->backend) {
ret = -EINVAL;
goto out_free_hba;
}
ret = hba->backend->ops->attach_hba(hba, plugin_dep_id);
if (ret < 0)
goto out_module_put;
spin_lock(&hba_lock);
hba->hba_id = hba_id_counter++;
list_add_tail(&hba->hba_node, &hba_list);
spin_unlock(&hba_lock);
pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target"
" Core\n", hba->hba_id);
return hba;
out_module_put:
module_put(hba->backend->ops->owner);
hba->backend = NULL;
out_free_hba:
kfree(hba);
return ERR_PTR(ret);
}
int
core_delete_hba(struct se_hba *hba)
{
WARN_ON(hba->dev_count);
hba->backend->ops->detach_hba(hba);
spin_lock(&hba_lock);
list_del(&hba->hba_node);
spin_unlock(&hba_lock);
pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
" Core\n", hba->hba_id);
module_put(hba->backend->ops->owner);
hba->backend = NULL;
kfree(hba);
return 0;
}
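/*
 * Use descriptor-format sense data when the device's block count exceeds
 * U32_MAX, i.e. when LBAs no longer fit the fixed-format 32-bit
 * INFORMATION field.
 */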
bool target_sense_desc_format(struct se_device *dev)
{
return (dev) ? dev->transport->get_blocks(dev) > U32_MAX : false;
}
|
linux-master
|
drivers/target/target_core_hba.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_fabric_configfs.c
*
* This file contains generic fabric module configfs infrastructure for
* TCM v4.x code
*
* (c) Copyright 2010-2013 Datera, Inc.
*
* Nicholas A. Bellinger <[email protected]>
*
****************************************************************************/
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/syscalls.h>
#include <linux/configfs.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
{ \
struct config_item_type *cit = &tf->tf_##_name##_cit; \
\
cit->ct_item_ops = _item_ops; \
cit->ct_group_ops = _group_ops; \
cit->ct_attrs = _attrs; \
cit->ct_owner = tf->tf_ops->module; \
pr_debug("Setup generic %s\n", __stringify(_name)); \
}
#define TF_CIT_SETUP_DRV(_name, _item_ops, _group_ops) \
static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
{ \
struct config_item_type *cit = &tf->tf_##_name##_cit; \
struct configfs_attribute **attrs = tf->tf_ops->tfc_##_name##_attrs; \
\
cit->ct_item_ops = _item_ops; \
cit->ct_group_ops = _group_ops; \
cit->ct_attrs = attrs; \
cit->ct_owner = tf->tf_ops->module; \
pr_debug("Setup generic %s\n", __stringify(_name)); \
}
static struct configfs_item_operations target_fabric_port_item_ops;
/* Start of tfc_tpg_mappedlun_cit */
static int target_fabric_mappedlun_link(
struct config_item *lun_acl_ci,
struct config_item *lun_ci)
{
struct se_dev_entry *deve;
struct se_lun *lun;
struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
struct se_lun_acl, se_lun_group);
struct se_portal_group *se_tpg;
struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
bool lun_access_ro;
if (!lun_ci->ci_type ||
lun_ci->ci_type->ct_item_ops != &target_fabric_port_item_ops) {
pr_err("Bad lun_ci, not a valid lun_ci pointer: %p\n", lun_ci);
return -EFAULT;
}
lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
/*
* Ensure that the source port exists
*/
if (!lun->lun_se_dev) {
pr_err("Source se_lun->lun_se_dev does not exist\n");
return -EINVAL;
}
if (lun->lun_shutdown) {
pr_err("Unable to create mappedlun symlink because"
" lun->lun_shutdown=true\n");
return -EINVAL;
}
se_tpg = lun->lun_tpg;
nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
tpg_ci = &nacl_ci->ci_group->cg_item;
wwn_ci = &tpg_ci->ci_group->cg_item;
tpg_ci_s = &lun_ci->ci_parent->ci_group->cg_item;
wwn_ci_s = &tpg_ci_s->ci_group->cg_item;
/*
* Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT
*/
if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) {
pr_err("Illegal Initiator ACL SymLink outside of %s\n",
config_item_name(wwn_ci));
return -EINVAL;
}
if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) {
pr_err("Illegal Initiator ACL Symlink outside of %s"
" TPGT: %s\n", config_item_name(wwn_ci),
config_item_name(tpg_ci));
return -EINVAL;
}
/*
* If this struct se_node_acl was dynamically generated with
* tpg_1/attrib/generate_node_acls=1, use the existing
* deve->lun_access_ro value, which will be true when
* tpg_1/attrib/demo_mode_write_protect=1
*/
rcu_read_lock();
deve = target_nacl_find_deve(lacl->se_lun_nacl, lacl->mapped_lun);
if (deve)
lun_access_ro = deve->lun_access_ro;
else
lun_access_ro =
(se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
se_tpg)) ? true : false;
rcu_read_unlock();
/*
	 * Determine the actual mapped LUN value the user wants.
	 *
	 * This is the LUN value that the SCSI Initiator will actually see
	 * as $FABRIC/$WWPN/$TPGT/lun/lun_* on its SCSI Initiator Ports.
*/
return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access_ro);
}
static void target_fabric_mappedlun_unlink(
struct config_item *lun_acl_ci,
struct config_item *lun_ci)
{
struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
struct se_lun_acl, se_lun_group);
struct se_lun *lun = container_of(to_config_group(lun_ci),
struct se_lun, lun_group);
core_dev_del_initiator_node_lun_acl(lun, lacl);
}
static struct se_lun_acl *item_to_lun_acl(struct config_item *item)
{
return container_of(to_config_group(item), struct se_lun_acl,
se_lun_group);
}
static ssize_t target_fabric_mappedlun_write_protect_show(
struct config_item *item, char *page)
{
struct se_lun_acl *lacl = item_to_lun_acl(item);
struct se_node_acl *se_nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t len = 0;
rcu_read_lock();
deve = target_nacl_find_deve(se_nacl, lacl->mapped_lun);
if (deve) {
len = sprintf(page, "%d\n", deve->lun_access_ro);
}
rcu_read_unlock();
return len;
}
static ssize_t target_fabric_mappedlun_write_protect_store(
struct config_item *item, const char *page, size_t count)
{
struct se_lun_acl *lacl = item_to_lun_acl(item);
struct se_node_acl *se_nacl = lacl->se_lun_nacl;
struct se_portal_group *se_tpg = se_nacl->se_tpg;
unsigned long wp;
int ret;
ret = kstrtoul(page, 0, &wp);
if (ret)
return ret;
if ((wp != 1) && (wp != 0))
return -EINVAL;
/* wp=1 means lun_access_ro=true */
core_update_device_list_access(lacl->mapped_lun, wp, lacl->se_lun_nacl);
pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
" Mapped LUN: %llu Write Protect bit to %s\n",
se_tpg->se_tpg_tfo->fabric_name,
se_nacl->initiatorname, lacl->mapped_lun, (wp) ? "ON" : "OFF");
return count;
}
CONFIGFS_ATTR(target_fabric_mappedlun_, write_protect);
static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
&target_fabric_mappedlun_attr_write_protect,
NULL,
};
static void target_fabric_mappedlun_release(struct config_item *item)
{
struct se_lun_acl *lacl = container_of(to_config_group(item),
struct se_lun_acl, se_lun_group);
struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
}
static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
.release = target_fabric_mappedlun_release,
.allow_link = target_fabric_mappedlun_link,
.drop_link = target_fabric_mappedlun_unlink,
};
TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL,
target_fabric_mappedlun_attrs);
/* End of tfc_tpg_mappedlun_cit */
/* Start of tfc_tpg_mappedlun_port_cit */
static struct config_group *target_core_mappedlun_stat_mkdir(
struct config_group *group,
const char *name)
{
return ERR_PTR(-ENOSYS);
}
static void target_core_mappedlun_stat_rmdir(
struct config_group *group,
struct config_item *item)
{
return;
}
static struct configfs_group_operations target_fabric_mappedlun_stat_group_ops = {
.make_group = target_core_mappedlun_stat_mkdir,
.drop_item = target_core_mappedlun_stat_rmdir,
};
TF_CIT_SETUP(tpg_mappedlun_stat, NULL, &target_fabric_mappedlun_stat_group_ops,
NULL);
/* End of tfc_tpg_mappedlun_port_cit */
TF_CIT_SETUP_DRV(tpg_nacl_attrib, NULL, NULL);
TF_CIT_SETUP_DRV(tpg_nacl_auth, NULL, NULL);
TF_CIT_SETUP_DRV(tpg_nacl_param, NULL, NULL);
/* Start of tfc_tpg_nacl_base_cit */
static struct config_group *target_fabric_make_mappedlun(
struct config_group *group,
const char *name)
{
struct se_node_acl *se_nacl = container_of(group,
struct se_node_acl, acl_group);
struct se_portal_group *se_tpg = se_nacl->se_tpg;
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
struct se_lun_acl *lacl = NULL;
char *buf;
unsigned long long mapped_lun;
int ret = 0;
buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
if (!buf) {
pr_err("Unable to allocate memory for name buf\n");
return ERR_PTR(-ENOMEM);
}
snprintf(buf, strlen(name) + 1, "%s", name);
/*
* Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID.
*/
if (strstr(buf, "lun_") != buf) {
pr_err("Unable to locate \"lun_\" from buf: %s"
" name: %s\n", buf, name);
ret = -EINVAL;
goto out;
}
/*
* Determine the Mapped LUN value. This is what the SCSI Initiator
* Port will actually see.
*/
ret = kstrtoull(buf + 4, 0, &mapped_lun);
if (ret)
goto out;
lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl,
mapped_lun, &ret);
if (!lacl) {
ret = -EINVAL;
goto out;
}
config_group_init_type_name(&lacl->se_lun_group, name,
&tf->tf_tpg_mappedlun_cit);
config_group_init_type_name(&lacl->ml_stat_grps.stat_group,
"statistics", &tf->tf_tpg_mappedlun_stat_cit);
configfs_add_default_group(&lacl->ml_stat_grps.stat_group,
&lacl->se_lun_group);
target_stat_setup_mappedlun_default_groups(lacl);
kfree(buf);
return &lacl->se_lun_group;
out:
kfree(lacl);
kfree(buf);
return ERR_PTR(ret);
}
static void target_fabric_drop_mappedlun(
struct config_group *group,
struct config_item *item)
{
struct se_lun_acl *lacl = container_of(to_config_group(item),
struct se_lun_acl, se_lun_group);
configfs_remove_default_groups(&lacl->ml_stat_grps.stat_group);
configfs_remove_default_groups(&lacl->se_lun_group);
config_item_put(item);
}
static void target_fabric_nacl_base_release(struct config_item *item)
{
struct se_node_acl *se_nacl = container_of(to_config_group(item),
struct se_node_acl, acl_group);
configfs_remove_default_groups(&se_nacl->acl_fabric_stat_group);
core_tpg_del_initiator_node_acl(se_nacl);
}
static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
.release = target_fabric_nacl_base_release,
};
static struct configfs_group_operations target_fabric_nacl_base_group_ops = {
.make_group = target_fabric_make_mappedlun,
.drop_item = target_fabric_drop_mappedlun,
};
TF_CIT_SETUP_DRV(tpg_nacl_base, &target_fabric_nacl_base_item_ops,
&target_fabric_nacl_base_group_ops);
/* End of tfc_tpg_nacl_base_cit */
/* Start of tfc_node_fabric_stats_cit */
/*
* This is used as a placeholder for struct se_node_acl->acl_fabric_stat_group
* to allow fabrics access to ->acl_fabric_stat_group->default_groups[]
*/
TF_CIT_SETUP(tpg_nacl_stat, NULL, NULL, NULL);
/* End of tfc_node_fabric_stats_cit */
/* Start of tfc_tpg_nacl_cit */
static struct config_group *target_fabric_make_nodeacl(
struct config_group *group,
const char *name)
{
struct se_portal_group *se_tpg = container_of(group,
struct se_portal_group, tpg_acl_group);
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
struct se_node_acl *se_nacl;
se_nacl = core_tpg_add_initiator_node_acl(se_tpg, name);
if (IS_ERR(se_nacl))
return ERR_CAST(se_nacl);
config_group_init_type_name(&se_nacl->acl_group, name,
&tf->tf_tpg_nacl_base_cit);
config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
&tf->tf_tpg_nacl_attrib_cit);
configfs_add_default_group(&se_nacl->acl_attrib_group,
&se_nacl->acl_group);
config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
&tf->tf_tpg_nacl_auth_cit);
configfs_add_default_group(&se_nacl->acl_auth_group,
&se_nacl->acl_group);
config_group_init_type_name(&se_nacl->acl_param_group, "param",
&tf->tf_tpg_nacl_param_cit);
configfs_add_default_group(&se_nacl->acl_param_group,
&se_nacl->acl_group);
config_group_init_type_name(&se_nacl->acl_fabric_stat_group,
"fabric_statistics", &tf->tf_tpg_nacl_stat_cit);
configfs_add_default_group(&se_nacl->acl_fabric_stat_group,
&se_nacl->acl_group);
if (tf->tf_ops->fabric_init_nodeacl) {
int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name);
if (ret) {
configfs_remove_default_groups(&se_nacl->acl_fabric_stat_group);
core_tpg_del_initiator_node_acl(se_nacl);
return ERR_PTR(ret);
}
}
return &se_nacl->acl_group;
}
static void target_fabric_drop_nodeacl(
struct config_group *group,
struct config_item *item)
{
struct se_node_acl *se_nacl = container_of(to_config_group(item),
struct se_node_acl, acl_group);
configfs_remove_default_groups(&se_nacl->acl_group);
/*
* struct se_node_acl free is done in target_fabric_nacl_base_release()
*/
config_item_put(item);
}
static struct configfs_group_operations target_fabric_nacl_group_ops = {
.make_group = target_fabric_make_nodeacl,
.drop_item = target_fabric_drop_nodeacl,
};
TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);
/* End of tfc_tpg_nacl_cit */
/* Start of tfc_tpg_np_base_cit */
static void target_fabric_np_base_release(struct config_item *item)
{
struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
struct se_tpg_np, tpg_np_group);
struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent;
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
tf->tf_ops->fabric_drop_np(se_tpg_np);
}
static struct configfs_item_operations target_fabric_np_base_item_ops = {
.release = target_fabric_np_base_release,
};
TF_CIT_SETUP_DRV(tpg_np_base, &target_fabric_np_base_item_ops, NULL);
/* End of tfc_tpg_np_base_cit */
/* Start of tfc_tpg_np_cit */
static struct config_group *target_fabric_make_np(
struct config_group *group,
const char *name)
{
struct se_portal_group *se_tpg = container_of(group,
struct se_portal_group, tpg_np_group);
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
struct se_tpg_np *se_tpg_np;
if (!tf->tf_ops->fabric_make_np) {
pr_err("tf->tf_ops.fabric_make_np is NULL\n");
return ERR_PTR(-ENOSYS);
}
se_tpg_np = tf->tf_ops->fabric_make_np(se_tpg, group, name);
if (!se_tpg_np || IS_ERR(se_tpg_np))
return ERR_PTR(-EINVAL);
se_tpg_np->tpg_np_parent = se_tpg;
config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
&tf->tf_tpg_np_base_cit);
return &se_tpg_np->tpg_np_group;
}
static void target_fabric_drop_np(
struct config_group *group,
struct config_item *item)
{
/*
* struct se_tpg_np is released via target_fabric_np_base_release()
*/
config_item_put(item);
}
static struct configfs_group_operations target_fabric_np_group_ops = {
.make_group = &target_fabric_make_np,
.drop_item = &target_fabric_drop_np,
};
TF_CIT_SETUP(tpg_np, NULL, &target_fabric_np_group_ops, NULL);
/* End of tfc_tpg_np_cit */
/* Start of tfc_tpg_port_cit */
static struct se_lun *item_to_lun(struct config_item *item)
{
return container_of(to_config_group(item), struct se_lun,
lun_group);
}
static ssize_t target_fabric_port_alua_tg_pt_gp_show(struct config_item *item,
char *page)
{
struct se_lun *lun = item_to_lun(item);
if (!lun->lun_se_dev)
return -ENODEV;
return core_alua_show_tg_pt_gp_info(lun, page);
}
static ssize_t target_fabric_port_alua_tg_pt_gp_store(struct config_item *item,
const char *page, size_t count)
{
struct se_lun *lun = item_to_lun(item);
if (!lun->lun_se_dev)
return -ENODEV;
return core_alua_store_tg_pt_gp_info(lun, page, count);
}
static ssize_t target_fabric_port_alua_tg_pt_offline_show(
struct config_item *item, char *page)
{
struct se_lun *lun = item_to_lun(item);
if (!lun->lun_se_dev)
return -ENODEV;
return core_alua_show_offline_bit(lun, page);
}
static ssize_t target_fabric_port_alua_tg_pt_offline_store(
struct config_item *item, const char *page, size_t count)
{
struct se_lun *lun = item_to_lun(item);
if (!lun->lun_se_dev)
return -ENODEV;
return core_alua_store_offline_bit(lun, page, count);
}
static ssize_t target_fabric_port_alua_tg_pt_status_show(
struct config_item *item, char *page)
{
struct se_lun *lun = item_to_lun(item);
if (!lun->lun_se_dev)
return -ENODEV;
return core_alua_show_secondary_status(lun, page);
}
static ssize_t target_fabric_port_alua_tg_pt_status_store(
struct config_item *item, const char *page, size_t count)
{
struct se_lun *lun = item_to_lun(item);
if (!lun->lun_se_dev)
return -ENODEV;
return core_alua_store_secondary_status(lun, page, count);
}
static ssize_t target_fabric_port_alua_tg_pt_write_md_show(
struct config_item *item, char *page)
{
struct se_lun *lun = item_to_lun(item);
if (!lun->lun_se_dev)
return -ENODEV;
return core_alua_show_secondary_write_metadata(lun, page);
}
static ssize_t target_fabric_port_alua_tg_pt_write_md_store(
struct config_item *item, const char *page, size_t count)
{
struct se_lun *lun = item_to_lun(item);
if (!lun->lun_se_dev)
return -ENODEV;
return core_alua_store_secondary_write_metadata(lun, page, count);
}
CONFIGFS_ATTR(target_fabric_port_, alua_tg_pt_gp);
CONFIGFS_ATTR(target_fabric_port_, alua_tg_pt_offline);
CONFIGFS_ATTR(target_fabric_port_, alua_tg_pt_status);
CONFIGFS_ATTR(target_fabric_port_, alua_tg_pt_write_md);
static struct configfs_attribute *target_fabric_port_attrs[] = {
&target_fabric_port_attr_alua_tg_pt_gp,
&target_fabric_port_attr_alua_tg_pt_offline,
&target_fabric_port_attr_alua_tg_pt_status,
&target_fabric_port_attr_alua_tg_pt_write_md,
NULL,
};
static int target_fabric_port_link(
struct config_item *lun_ci,
struct config_item *se_dev_ci)
{
struct config_item *tpg_ci;
struct se_lun *lun = container_of(to_config_group(lun_ci),
struct se_lun, lun_group);
struct se_portal_group *se_tpg;
struct se_device *dev;
struct target_fabric_configfs *tf;
int ret;
if (!se_dev_ci->ci_type ||
se_dev_ci->ci_type->ct_item_ops != &target_core_dev_item_ops) {
pr_err("Bad se_dev_ci, not a valid se_dev_ci pointer: %p\n", se_dev_ci);
return -EFAULT;
}
dev = container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
if (!target_dev_configured(dev)) {
pr_err("se_device not configured yet, cannot port link\n");
return -ENODEV;
}
tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
se_tpg = container_of(to_config_group(tpg_ci),
struct se_portal_group, tpg_group);
tf = se_tpg->se_tpg_wwn->wwn_tf;
if (lun->lun_se_dev != NULL) {
pr_err("Port Symlink already exists\n");
return -EEXIST;
}
ret = core_dev_add_lun(se_tpg, dev, lun);
if (ret) {
pr_err("core_dev_add_lun() failed: %d\n", ret);
goto out;
}
if (tf->tf_ops->fabric_post_link) {
/*
* Call the optional fabric_post_link() to allow a
		 * fabric module to set up any additional state once
		 * core_dev_add_lun() has been called.
*/
tf->tf_ops->fabric_post_link(se_tpg, lun);
}
return 0;
out:
return ret;
}
static void target_fabric_port_unlink(
struct config_item *lun_ci,
struct config_item *se_dev_ci)
{
struct se_lun *lun = container_of(to_config_group(lun_ci),
struct se_lun, lun_group);
struct se_portal_group *se_tpg = lun->lun_tpg;
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
if (tf->tf_ops->fabric_pre_unlink) {
/*
* Call the optional fabric_pre_unlink() to allow a
		 * fabric module to release any additional state before
* core_dev_del_lun() is called.
*/
tf->tf_ops->fabric_pre_unlink(se_tpg, lun);
}
core_dev_del_lun(se_tpg, lun);
}
static void target_fabric_port_release(struct config_item *item)
{
struct se_lun *lun = container_of(to_config_group(item),
struct se_lun, lun_group);
kfree_rcu(lun, rcu_head);
}
static struct configfs_item_operations target_fabric_port_item_ops = {
.release = target_fabric_port_release,
.allow_link = target_fabric_port_link,
.drop_link = target_fabric_port_unlink,
};
TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL, target_fabric_port_attrs);
/* End of tfc_tpg_port_cit */
/* Start of tfc_tpg_port_stat_cit */
static struct config_group *target_core_port_stat_mkdir(
struct config_group *group,
const char *name)
{
return ERR_PTR(-ENOSYS);
}
static void target_core_port_stat_rmdir(
struct config_group *group,
struct config_item *item)
{
return;
}
static struct configfs_group_operations target_fabric_port_stat_group_ops = {
.make_group = target_core_port_stat_mkdir,
.drop_item = target_core_port_stat_rmdir,
};
TF_CIT_SETUP(tpg_port_stat, NULL, &target_fabric_port_stat_group_ops, NULL);
/* End of tfc_tpg_port_stat_cit */
/* Start of tfc_tpg_lun_cit */
static struct config_group *target_fabric_make_lun(
struct config_group *group,
const char *name)
{
struct se_lun *lun;
struct se_portal_group *se_tpg = container_of(group,
struct se_portal_group, tpg_lun_group);
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
unsigned long long unpacked_lun;
int errno;
if (strstr(name, "lun_") != name) {
pr_err("Unable to locate \'_\" in"
" \"lun_$LUN_NUMBER\"\n");
return ERR_PTR(-EINVAL);
}
errno = kstrtoull(name + 4, 0, &unpacked_lun);
if (errno)
return ERR_PTR(errno);
lun = core_tpg_alloc_lun(se_tpg, unpacked_lun);
if (IS_ERR(lun))
return ERR_CAST(lun);
config_group_init_type_name(&lun->lun_group, name,
&tf->tf_tpg_port_cit);
config_group_init_type_name(&lun->port_stat_grps.stat_group,
"statistics", &tf->tf_tpg_port_stat_cit);
configfs_add_default_group(&lun->port_stat_grps.stat_group,
&lun->lun_group);
target_stat_setup_port_default_groups(lun);
return &lun->lun_group;
}
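/*
 * Illustrative usage, not part of this file: ->make_group() above runs when
 * userspace creates a "lun_$LUN_NUMBER" directory under a TPG's lun/ group,
 * e.g. (example path, error handling omitted):
 *
 *	#include <sys/stat.h>
 *
 *	mkdir("/sys/kernel/config/target/iscsi/"
 *	      "iqn.2003-01.org.linux-iscsi.example:t1/tpgt_1/lun/lun_0", 0755);
 *
 * The suffix is parsed with kstrtoull() using base 0, so "lun_0x10" and
 * "lun_16" name the same LUN.
 */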
static void target_fabric_drop_lun(
struct config_group *group,
struct config_item *item)
{
struct se_lun *lun = container_of(to_config_group(item),
struct se_lun, lun_group);
configfs_remove_default_groups(&lun->port_stat_grps.stat_group);
configfs_remove_default_groups(&lun->lun_group);
config_item_put(item);
}
static struct configfs_group_operations target_fabric_lun_group_ops = {
.make_group = &target_fabric_make_lun,
.drop_item = &target_fabric_drop_lun,
};
TF_CIT_SETUP(tpg_lun, NULL, &target_fabric_lun_group_ops, NULL);
/* End of tfc_tpg_lun_cit */
TF_CIT_SETUP_DRV(tpg_attrib, NULL, NULL);
TF_CIT_SETUP_DRV(tpg_auth, NULL, NULL);
TF_CIT_SETUP_DRV(tpg_param, NULL, NULL);
/* Start of tfc_tpg_base_cit */
static void target_fabric_tpg_release(struct config_item *item)
{
struct se_portal_group *se_tpg = container_of(to_config_group(item),
struct se_portal_group, tpg_group);
struct se_wwn *wwn = se_tpg->se_tpg_wwn;
struct target_fabric_configfs *tf = wwn->wwn_tf;
tf->tf_ops->fabric_drop_tpg(se_tpg);
}
static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
.release = target_fabric_tpg_release,
};
static ssize_t target_fabric_tpg_base_enable_show(struct config_item *item,
char *page)
{
return sysfs_emit(page, "%d\n", to_tpg(item)->enabled);
}
static ssize_t target_fabric_tpg_base_enable_store(struct config_item *item,
const char *page,
size_t count)
{
struct se_portal_group *se_tpg = to_tpg(item);
int ret;
bool op;
ret = kstrtobool(page, &op);
if (ret)
return ret;
if (se_tpg->enabled == op)
return count;
if (op)
ret = target_tpg_enable(se_tpg);
else
ret = target_tpg_disable(se_tpg);
if (ret)
return ret;
return count;
}
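/*
 * Illustrative usage, not part of this file: fabrics that implement
 * ->fabric_enable_tpg expose this "enable" attribute, which userspace flips
 * with a plain write, e.g. (example path, error handling omitted):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/sys/kernel/config/target/iscsi/"
 *		      "iqn.2003-01.org.linux-iscsi.example:t1/tpgt_1/enable",
 *		      O_WRONLY);
 *	write(fd, "1", 1);	// ends up in target_tpg_enable()
 *	close(fd);
 */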
static ssize_t target_fabric_tpg_base_rtpi_show(struct config_item *item, char *page)
{
struct se_portal_group *se_tpg = to_tpg(item);
return sysfs_emit(page, "%#x\n", se_tpg->tpg_rtpi);
}
static ssize_t target_fabric_tpg_base_rtpi_store(struct config_item *item,
const char *page, size_t count)
{
struct se_portal_group *se_tpg = to_tpg(item);
u16 val;
int ret;
ret = kstrtou16(page, 0, &val);
if (ret < 0)
return ret;
if (val == 0)
return -EINVAL;
if (se_tpg->enabled) {
pr_info("%s_TPG[%hu] - Can not change RTPI on enabled TPG",
se_tpg->se_tpg_tfo->fabric_name,
se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
return -EINVAL;
}
se_tpg->tpg_rtpi = val;
se_tpg->rtpi_manual = true;
return count;
}
CONFIGFS_ATTR(target_fabric_tpg_base_, enable);
CONFIGFS_ATTR(target_fabric_tpg_base_, rtpi);
static int
target_fabric_setup_tpg_base_cit(struct target_fabric_configfs *tf)
{
struct config_item_type *cit = &tf->tf_tpg_base_cit;
struct configfs_attribute **attrs = NULL;
size_t nr_attrs = 0;
int i = 0;
if (tf->tf_ops->tfc_tpg_base_attrs)
while (tf->tf_ops->tfc_tpg_base_attrs[nr_attrs] != NULL)
nr_attrs++;
if (tf->tf_ops->fabric_enable_tpg)
nr_attrs++;
/* + 1 for target_fabric_tpg_base_attr_rtpi */
nr_attrs++;
/* + 1 for final NULL in the array */
attrs = kcalloc(nr_attrs + 1, sizeof(*attrs), GFP_KERNEL);
if (!attrs)
return -ENOMEM;
if (tf->tf_ops->tfc_tpg_base_attrs)
for (; tf->tf_ops->tfc_tpg_base_attrs[i] != NULL; i++)
attrs[i] = tf->tf_ops->tfc_tpg_base_attrs[i];
if (tf->tf_ops->fabric_enable_tpg)
attrs[i++] = &target_fabric_tpg_base_attr_enable;
attrs[i++] = &target_fabric_tpg_base_attr_rtpi;
cit->ct_item_ops = &target_fabric_tpg_base_item_ops;
cit->ct_attrs = attrs;
cit->ct_owner = tf->tf_ops->module;
pr_debug("Setup generic tpg_base\n");
return 0;
}
/* End of tfc_tpg_base_cit */
/* Start of tfc_tpg_cit */
static struct config_group *target_fabric_make_tpg(
struct config_group *group,
const char *name)
{
struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
struct target_fabric_configfs *tf = wwn->wwn_tf;
struct se_portal_group *se_tpg;
if (!tf->tf_ops->fabric_make_tpg) {
pr_err("tf->tf_ops->fabric_make_tpg is NULL\n");
return ERR_PTR(-ENOSYS);
}
se_tpg = tf->tf_ops->fabric_make_tpg(wwn, name);
if (!se_tpg || IS_ERR(se_tpg))
return ERR_PTR(-EINVAL);
config_group_init_type_name(&se_tpg->tpg_group, name,
&tf->tf_tpg_base_cit);
config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
&tf->tf_tpg_lun_cit);
configfs_add_default_group(&se_tpg->tpg_lun_group,
&se_tpg->tpg_group);
config_group_init_type_name(&se_tpg->tpg_np_group, "np",
&tf->tf_tpg_np_cit);
configfs_add_default_group(&se_tpg->tpg_np_group,
&se_tpg->tpg_group);
config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
&tf->tf_tpg_nacl_cit);
configfs_add_default_group(&se_tpg->tpg_acl_group,
&se_tpg->tpg_group);
config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
&tf->tf_tpg_attrib_cit);
configfs_add_default_group(&se_tpg->tpg_attrib_group,
&se_tpg->tpg_group);
config_group_init_type_name(&se_tpg->tpg_auth_group, "auth",
&tf->tf_tpg_auth_cit);
configfs_add_default_group(&se_tpg->tpg_auth_group,
&se_tpg->tpg_group);
config_group_init_type_name(&se_tpg->tpg_param_group, "param",
&tf->tf_tpg_param_cit);
configfs_add_default_group(&se_tpg->tpg_param_group,
&se_tpg->tpg_group);
return &se_tpg->tpg_group;
}
static void target_fabric_drop_tpg(
struct config_group *group,
struct config_item *item)
{
struct se_portal_group *se_tpg = container_of(to_config_group(item),
struct se_portal_group, tpg_group);
configfs_remove_default_groups(&se_tpg->tpg_group);
config_item_put(item);
}
static void target_fabric_release_wwn(struct config_item *item)
{
struct se_wwn *wwn = container_of(to_config_group(item),
struct se_wwn, wwn_group);
struct target_fabric_configfs *tf = wwn->wwn_tf;
configfs_remove_default_groups(&wwn->fabric_stat_group);
configfs_remove_default_groups(&wwn->param_group);
tf->tf_ops->fabric_drop_wwn(wwn);
}
static struct configfs_item_operations target_fabric_tpg_item_ops = {
.release = target_fabric_release_wwn,
};
static struct configfs_group_operations target_fabric_tpg_group_ops = {
.make_group = target_fabric_make_tpg,
.drop_item = target_fabric_drop_tpg,
};
TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops,
NULL);
/* End of tfc_tpg_cit */
/* Start of tfc_wwn_fabric_stats_cit */
/*
* This is used as a placeholder for struct se_wwn->fabric_stat_group
* to allow fabrics access to ->fabric_stat_group->default_groups[]
*/
TF_CIT_SETUP(wwn_fabric_stats, NULL, NULL, NULL);
/* End of tfc_wwn_fabric_stats_cit */
static ssize_t
target_fabric_wwn_cmd_completion_affinity_show(struct config_item *item,
char *page)
{
struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn,
param_group);
return sprintf(page, "%d\n",
wwn->cmd_compl_affinity == WORK_CPU_UNBOUND ?
SE_COMPL_AFFINITY_CURR_CPU : wwn->cmd_compl_affinity);
}
static ssize_t
target_fabric_wwn_cmd_completion_affinity_store(struct config_item *item,
const char *page, size_t count)
{
struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn,
param_group);
int compl_val;
if (kstrtoint(page, 0, &compl_val))
return -EINVAL;
switch (compl_val) {
case SE_COMPL_AFFINITY_CPUID:
wwn->cmd_compl_affinity = compl_val;
break;
case SE_COMPL_AFFINITY_CURR_CPU:
wwn->cmd_compl_affinity = WORK_CPU_UNBOUND;
break;
default:
if (compl_val < 0 || compl_val >= nr_cpu_ids ||
!cpu_online(compl_val)) {
pr_err("Command completion value must be between %d and %d or an online CPU.\n",
SE_COMPL_AFFINITY_CPUID,
SE_COMPL_AFFINITY_CURR_CPU);
return -EINVAL;
}
wwn->cmd_compl_affinity = compl_val;
}
return count;
}
CONFIGFS_ATTR(target_fabric_wwn_, cmd_completion_affinity);
static struct configfs_attribute *target_fabric_wwn_param_attrs[] = {
&target_fabric_wwn_attr_cmd_completion_affinity,
NULL,
};
TF_CIT_SETUP(wwn_param, NULL, NULL, target_fabric_wwn_param_attrs);
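/*
 * Illustrative usage, not part of this file: the per-WWN
 * param/cmd_completion_affinity attribute set up above takes an online CPU id
 * or one of the SE_COMPL_AFFINITY_* sentinels. Pinning completion work to
 * CPU 2 might look like (example path, error handling omitted):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/sys/kernel/config/target/iscsi/"
 *		      "iqn.2003-01.org.linux-iscsi.example:t1/param/"
 *		      "cmd_completion_affinity", O_WRONLY);
 *	write(fd, "2", 1);
 *	close(fd);
 */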
/* Start of tfc_wwn_cit */
static struct config_group *target_fabric_make_wwn(
struct config_group *group,
const char *name)
{
struct target_fabric_configfs *tf = container_of(group,
struct target_fabric_configfs, tf_group);
struct se_wwn *wwn;
if (!tf->tf_ops->fabric_make_wwn) {
pr_err("tf->tf_ops.fabric_make_wwn is NULL\n");
return ERR_PTR(-ENOSYS);
}
wwn = tf->tf_ops->fabric_make_wwn(tf, group, name);
if (!wwn || IS_ERR(wwn))
return ERR_PTR(-EINVAL);
wwn->cmd_compl_affinity = SE_COMPL_AFFINITY_CPUID;
wwn->wwn_tf = tf;
config_group_init_type_name(&wwn->wwn_group, name, &tf->tf_tpg_cit);
config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics",
&tf->tf_wwn_fabric_stats_cit);
configfs_add_default_group(&wwn->fabric_stat_group, &wwn->wwn_group);
config_group_init_type_name(&wwn->param_group, "param",
&tf->tf_wwn_param_cit);
configfs_add_default_group(&wwn->param_group, &wwn->wwn_group);
if (tf->tf_ops->add_wwn_groups)
tf->tf_ops->add_wwn_groups(wwn);
return &wwn->wwn_group;
}
static void target_fabric_drop_wwn(
struct config_group *group,
struct config_item *item)
{
struct se_wwn *wwn = container_of(to_config_group(item),
struct se_wwn, wwn_group);
configfs_remove_default_groups(&wwn->wwn_group);
config_item_put(item);
}
static struct configfs_group_operations target_fabric_wwn_group_ops = {
.make_group = target_fabric_make_wwn,
.drop_item = target_fabric_drop_wwn,
};
TF_CIT_SETUP_DRV(wwn, NULL, &target_fabric_wwn_group_ops);
TF_CIT_SETUP_DRV(discovery, NULL, NULL);
int target_fabric_setup_cits(struct target_fabric_configfs *tf)
{
int ret;
target_fabric_setup_discovery_cit(tf);
target_fabric_setup_wwn_cit(tf);
target_fabric_setup_wwn_fabric_stats_cit(tf);
target_fabric_setup_wwn_param_cit(tf);
target_fabric_setup_tpg_cit(tf);
ret = target_fabric_setup_tpg_base_cit(tf);
if (ret)
return ret;
target_fabric_setup_tpg_port_cit(tf);
target_fabric_setup_tpg_port_stat_cit(tf);
target_fabric_setup_tpg_lun_cit(tf);
target_fabric_setup_tpg_np_cit(tf);
target_fabric_setup_tpg_np_base_cit(tf);
target_fabric_setup_tpg_attrib_cit(tf);
target_fabric_setup_tpg_auth_cit(tf);
target_fabric_setup_tpg_param_cit(tf);
target_fabric_setup_tpg_nacl_cit(tf);
target_fabric_setup_tpg_nacl_base_cit(tf);
target_fabric_setup_tpg_nacl_attrib_cit(tf);
target_fabric_setup_tpg_nacl_auth_cit(tf);
target_fabric_setup_tpg_nacl_param_cit(tf);
target_fabric_setup_tpg_nacl_stat_cit(tf);
target_fabric_setup_tpg_mappedlun_cit(tf);
target_fabric_setup_tpg_mappedlun_stat_cit(tf);
return 0;
}
| linux-master | drivers/target/target_core_fabric_configfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Shaohua Li <[email protected]>
* Copyright (C) 2014 Red Hat, Inc.
* Copyright (C) 2015 Arrikto, Inc.
* Copyright (C) 2017 Chinamobile, Inc.
*/
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <linux/xarray.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
#include <linux/configfs.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>
#include <linux/target_core_user.h>
/**
* DOC: Userspace I/O
* Userspace I/O
* -------------
*
* Define a shared-memory interface for LIO to pass SCSI commands and
* data to userspace for processing. This is to allow backends that
* are too complex for in-kernel support to be possible.
*
* It uses the UIO framework to do a lot of the device-creation and
* introspection work for us.
*
* See the .h file for how the ring is laid out. Note that while the
* command ring is defined, the particulars of the data area are
* not. Offset values in the command entry point to other locations
* internal to the mmap-ed area. There is separate space outside the
* command ring for data buffers. This leaves maximum flexibility for
* moving buffer allocations, or even page flipping or other
* allocation techniques, without altering the command ring layout.
*
* SECURITY:
* The user process must be assumed to be malicious. There's no way to
* prevent it breaking the command ring protocol if it wants, but in
* order to prevent other issues we must only ever read *data* from
* the shared memory area, not offsets or sizes. This applies to
* command ring entries as well as the mailbox. Extra code needed for
* this may have a 'UAM' comment.
*/
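/*
 * A minimal userspace sketch, for orientation only and not part of this
 * driver, assuming the UAPI definitions from <linux/target_core_user.h> and
 * a UIO map whose length was read from sysfs. Error handling, the data area,
 * bidi and sense data are omitted; real handlers normally build on
 * tcmu-runner/libtcmu instead.
 *
 *	#include <stdint.h>
 *	#include <unistd.h>
 *	#include <sys/mman.h>
 *	#include <linux/target_core_user.h>
 *
 *	static void handle_ring(int uio_fd, size_t map_len)
 *	{
 *		struct tcmu_mailbox *mb = mmap(NULL, map_len,
 *					       PROT_READ | PROT_WRITE,
 *					       MAP_SHARED, uio_fd, 0);
 *		uint32_t kick;
 *
 *		for (;;) {
 *			// block until the kernel calls uio_event_notify()
 *			read(uio_fd, &kick, sizeof(kick));
 *
 *			while (mb->cmd_tail != mb->cmd_head) {
 *				struct tcmu_cmd_entry *ent = (void *)
 *					((char *)mb + mb->cmdr_off + mb->cmd_tail);
 *				uint32_t len = tcmu_hdr_get_len(ent->hdr.len_op);
 *
 *				if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD) {
 *					// execute the SCSI command described by
 *					// ent->req here, then post the status
 *					ent->rsp.scsi_status = 0;	// SAM GOOD
 *				}
 *				// PAD and TMR entries are simply stepped over
 *				mb->cmd_tail = (mb->cmd_tail + len) % mb->cmdr_size;
 *			}
 *			// tell the kernel completions are ready (tcmu_irqcontrol())
 *			write(uio_fd, &kick, sizeof(kick));
 *		}
 *	}
 */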
#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
/* For mailbox plus cmd ring, the size is fixed 8MB */
#define MB_CMDR_SIZE_DEF (8 * 1024 * 1024)
/* Offset of cmd ring is size of mailbox */
#define CMDR_OFF ((__u32)sizeof(struct tcmu_mailbox))
#define CMDR_SIZE_DEF (MB_CMDR_SIZE_DEF - CMDR_OFF)
/*
* For data area, the default block size is PAGE_SIZE and
* the default total size is 256K * PAGE_SIZE.
*/
#define DATA_PAGES_PER_BLK_DEF 1
#define DATA_AREA_PAGES_DEF (256 * 1024)
#define TCMU_MBS_TO_PAGES(_mbs) ((size_t)_mbs << (20 - PAGE_SHIFT))
#define TCMU_PAGES_TO_MBS(_pages) (_pages >> (20 - PAGE_SHIFT))
/*
 * Default cap on global data pages (512K pages, i.e. 512K * PAGE_SIZE bytes)
 * above which the unmap work is scheduled to reclaim pages.
*/
#define TCMU_GLOBAL_MAX_PAGES_DEF (512 * 1024)
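/*
 * For orientation (illustrative arithmetic, assuming 4 KiB pages so
 * PAGE_SHIFT == 12): DATA_AREA_PAGES_DEF is 256K pages == 1 GiB of data area
 * per device, TCMU_GLOBAL_MAX_PAGES_DEF is 512K pages == 2 GiB across all
 * devices, and TCMU_MBS_TO_PAGES(2048) == 2048 << 8 == 524288 pages.
 */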
static u8 tcmu_kern_cmd_reply_supported;
static u8 tcmu_netlink_blocked;
static struct device *tcmu_root_device;
struct tcmu_hba {
u32 host_id;
};
#define TCMU_CONFIG_LEN 256
static DEFINE_MUTEX(tcmu_nl_cmd_mutex);
static LIST_HEAD(tcmu_nl_cmd_list);
struct tcmu_dev;
struct tcmu_nl_cmd {
/* wake up thread waiting for reply */
struct completion complete;
struct list_head nl_list;
struct tcmu_dev *udev;
int cmd;
int status;
};
struct tcmu_dev {
struct list_head node;
struct kref kref;
struct se_device se_dev;
struct se_dev_plug se_plug;
char *name;
struct se_hba *hba;
#define TCMU_DEV_BIT_OPEN 0
#define TCMU_DEV_BIT_BROKEN 1
#define TCMU_DEV_BIT_BLOCKED 2
#define TCMU_DEV_BIT_TMR_NOTIFY 3
#define TCMU_DEV_BIT_PLUGGED 4
unsigned long flags;
struct uio_info uio_info;
struct inode *inode;
uint64_t dev_size;
struct tcmu_mailbox *mb_addr;
void *cmdr;
u32 cmdr_size;
u32 cmdr_last_cleaned;
/* Offset of data area from start of mb */
/* Must add data_off and mb_addr to get the address */
size_t data_off;
int data_area_mb;
uint32_t max_blocks;
size_t mmap_pages;
struct mutex cmdr_lock;
struct list_head qfull_queue;
struct list_head tmr_queue;
uint32_t dbi_max;
uint32_t dbi_thresh;
unsigned long *data_bitmap;
struct xarray data_pages;
uint32_t data_pages_per_blk;
uint32_t data_blk_size;
struct xarray commands;
struct timer_list cmd_timer;
unsigned int cmd_time_out;
struct list_head inflight_queue;
struct timer_list qfull_timer;
int qfull_time_out;
struct list_head timedout_entry;
struct tcmu_nl_cmd curr_nl_cmd;
char dev_config[TCMU_CONFIG_LEN];
int nl_reply_supported;
};
#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
struct tcmu_cmd {
struct se_cmd *se_cmd;
struct tcmu_dev *tcmu_dev;
struct list_head queue_entry;
uint16_t cmd_id;
/*
 * Can't use se_cmd when cleaning up expired cmds, because if
 * the cmd has been completed then accessing se_cmd is off limits.
 */
uint32_t dbi_cnt;
uint32_t dbi_bidi_cnt;
uint32_t dbi_cur;
uint32_t *dbi;
uint32_t data_len_bidi;
unsigned long deadline;
#define TCMU_CMD_BIT_EXPIRED 0
#define TCMU_CMD_BIT_KEEP_BUF 1
unsigned long flags;
};
struct tcmu_tmr {
struct list_head queue_entry;
uint8_t tmr_type;
uint32_t tmr_cmd_cnt;
int16_t tmr_cmd_ids[];
};
/*
 * To avoid deadlock, the mutex lock order should always be:
*
* mutex_lock(&root_udev_mutex);
* ...
* mutex_lock(&tcmu_dev->cmdr_lock);
* mutex_unlock(&tcmu_dev->cmdr_lock);
* ...
* mutex_unlock(&root_udev_mutex);
*/
static DEFINE_MUTEX(root_udev_mutex);
static LIST_HEAD(root_udev);
static DEFINE_SPINLOCK(timed_out_udevs_lock);
static LIST_HEAD(timed_out_udevs);
static struct kmem_cache *tcmu_cmd_cache;
static atomic_t global_page_count = ATOMIC_INIT(0);
static struct delayed_work tcmu_unmap_work;
static int tcmu_global_max_pages = TCMU_GLOBAL_MAX_PAGES_DEF;
static int tcmu_set_global_max_data_area(const char *str,
const struct kernel_param *kp)
{
int ret, max_area_mb;
ret = kstrtoint(str, 10, &max_area_mb);
if (ret)
return -EINVAL;
if (max_area_mb <= 0) {
pr_err("global_max_data_area must be larger than 0.\n");
return -EINVAL;
}
tcmu_global_max_pages = TCMU_MBS_TO_PAGES(max_area_mb);
if (atomic_read(&global_page_count) > tcmu_global_max_pages)
schedule_delayed_work(&tcmu_unmap_work, 0);
else
cancel_delayed_work_sync(&tcmu_unmap_work);
return 0;
}
static int tcmu_get_global_max_data_area(char *buffer,
const struct kernel_param *kp)
{
return sprintf(buffer, "%d\n", TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
}
static const struct kernel_param_ops tcmu_global_max_data_area_op = {
.set = tcmu_set_global_max_data_area,
.get = tcmu_get_global_max_data_area,
};
module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL,
S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(global_max_data_area_mb,
"Max MBs allowed to be allocated to all the tcmu device's "
"data areas.");
static int tcmu_get_block_netlink(char *buffer,
const struct kernel_param *kp)
{
return sprintf(buffer, "%s\n", tcmu_netlink_blocked ?
"blocked" : "unblocked");
}
static int tcmu_set_block_netlink(const char *str,
const struct kernel_param *kp)
{
int ret;
u8 val;
ret = kstrtou8(str, 0, &val);
if (ret < 0)
return ret;
if (val > 1) {
pr_err("Invalid block netlink value %u\n", val);
return -EINVAL;
}
tcmu_netlink_blocked = val;
return 0;
}
static const struct kernel_param_ops tcmu_block_netlink_op = {
.set = tcmu_set_block_netlink,
.get = tcmu_get_block_netlink,
};
module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(block_netlink, "Block new netlink commands.");
static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd)
{
struct tcmu_dev *udev = nl_cmd->udev;
if (!tcmu_netlink_blocked) {
pr_err("Could not reset device's netlink interface. Netlink is not blocked.\n");
return -EBUSY;
}
if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name);
nl_cmd->status = -EINTR;
list_del(&nl_cmd->nl_list);
complete(&nl_cmd->complete);
}
return 0;
}
static int tcmu_set_reset_netlink(const char *str,
const struct kernel_param *kp)
{
struct tcmu_nl_cmd *nl_cmd, *tmp_cmd;
int ret;
u8 val;
ret = kstrtou8(str, 0, &val);
if (ret < 0)
return ret;
if (val != 1) {
pr_err("Invalid reset netlink value %u\n", val);
return -EINVAL;
}
mutex_lock(&tcmu_nl_cmd_mutex);
list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) {
ret = tcmu_fail_netlink_cmd(nl_cmd);
if (ret)
break;
}
mutex_unlock(&tcmu_nl_cmd_mutex);
return ret;
}
static const struct kernel_param_ops tcmu_reset_netlink_op = {
.set = tcmu_set_reset_netlink,
};
module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR);
MODULE_PARM_DESC(reset_netlink, "Reset netlink commands.");
/* multicast group */
enum tcmu_multicast_groups {
TCMU_MCGRP_CONFIG,
};
static const struct genl_multicast_group tcmu_mcgrps[] = {
[TCMU_MCGRP_CONFIG] = { .name = "config", },
};
static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
[TCMU_ATTR_DEVICE] = { .type = NLA_STRING },
[TCMU_ATTR_MINOR] = { .type = NLA_U32 },
[TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 },
[TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 },
[TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
};
static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
{
struct tcmu_dev *udev = NULL;
struct tcmu_nl_cmd *nl_cmd;
int dev_id, rc, ret = 0;
if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
!info->attrs[TCMU_ATTR_DEVICE_ID]) {
printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
return -EINVAL;
}
dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);
mutex_lock(&tcmu_nl_cmd_mutex);
list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) {
if (nl_cmd->udev->se_dev.dev_index == dev_id) {
udev = nl_cmd->udev;
break;
}
}
if (!udev) {
pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n",
completed_cmd, rc, dev_id);
ret = -ENODEV;
goto unlock;
}
list_del(&nl_cmd->nl_list);
pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n",
udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc,
nl_cmd->status);
if (nl_cmd->cmd != completed_cmd) {
pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n",
udev->name, completed_cmd, nl_cmd->cmd);
ret = -EINVAL;
goto unlock;
}
nl_cmd->status = rc;
complete(&nl_cmd->complete);
unlock:
mutex_unlock(&tcmu_nl_cmd_mutex);
return ret;
}
static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
{
return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
}
static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
{
return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
}
static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
struct genl_info *info)
{
return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
}
static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
{
if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
tcmu_kern_cmd_reply_supported =
nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
tcmu_kern_cmd_reply_supported);
}
return 0;
}
static const struct genl_small_ops tcmu_genl_ops[] = {
{
.cmd = TCMU_CMD_SET_FEATURES,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
.doit = tcmu_genl_set_features,
},
{
.cmd = TCMU_CMD_ADDED_DEVICE_DONE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
.doit = tcmu_genl_add_dev_done,
},
{
.cmd = TCMU_CMD_REMOVED_DEVICE_DONE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
.doit = tcmu_genl_rm_dev_done,
},
{
.cmd = TCMU_CMD_RECONFIG_DEVICE_DONE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
.doit = tcmu_genl_reconfig_dev_done,
},
};
/* Our generic netlink family */
static struct genl_family tcmu_genl_family __ro_after_init = {
.module = THIS_MODULE,
.hdrsize = 0,
.name = "TCM-USER",
.version = 2,
.maxattr = TCMU_ATTR_MAX,
.policy = tcmu_attr_policy,
.mcgrps = tcmu_mcgrps,
.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
.netnsok = true,
.small_ops = tcmu_genl_ops,
.n_small_ops = ARRAY_SIZE(tcmu_genl_ops),
.resv_start_op = TCMU_CMD_SET_FEATURES + 1,
};
#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
#define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
#define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
#define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])
static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
uint32_t i;
for (i = 0; i < len; i++)
clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
}
static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
struct tcmu_cmd *tcmu_cmd,
int prev_dbi, int length, int *iov_cnt)
{
XA_STATE(xas, &udev->data_pages, 0);
struct page *page;
int i, cnt, dbi, dpi;
int page_cnt = DIV_ROUND_UP(length, PAGE_SIZE);
dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
if (dbi == udev->dbi_thresh)
return -1;
dpi = dbi * udev->data_pages_per_blk;
/* Count the number of already allocated pages */
xas_set(&xas, dpi);
rcu_read_lock();
for (cnt = 0; xas_next(&xas) && cnt < page_cnt;)
cnt++;
rcu_read_unlock();
for (i = cnt; i < page_cnt; i++) {
/* try to get new zeroed page from the mm */
page = alloc_page(GFP_NOIO | __GFP_ZERO);
if (!page)
break;
if (xa_store(&udev->data_pages, dpi + i, page, GFP_NOIO)) {
__free_page(page);
break;
}
}
if (atomic_add_return(i - cnt, &global_page_count) >
tcmu_global_max_pages)
schedule_delayed_work(&tcmu_unmap_work, 0);
if (i && dbi > udev->dbi_max)
udev->dbi_max = dbi;
set_bit(dbi, udev->data_bitmap);
tcmu_cmd_set_dbi(tcmu_cmd, dbi);
if (dbi != prev_dbi + 1)
*iov_cnt += 1;
return i == page_cnt ? dbi : -1;
}
static int tcmu_get_empty_blocks(struct tcmu_dev *udev,
struct tcmu_cmd *tcmu_cmd, int length)
{
/* start value of dbi + 1 must not be a valid dbi */
int dbi = -2;
int blk_data_len, iov_cnt = 0;
uint32_t blk_size = udev->data_blk_size;
for (; length > 0; length -= blk_size) {
blk_data_len = min_t(uint32_t, length, blk_size);
dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len,
&iov_cnt);
if (dbi < 0)
return -1;
}
return iov_cnt;
}
static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
{
kfree(tcmu_cmd->dbi);
kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
}
static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd)
{
int i, len;
struct se_cmd *se_cmd = cmd->se_cmd;
uint32_t blk_size = cmd->tcmu_dev->data_blk_size;
cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, blk_size);
if (se_cmd->se_cmd_flags & SCF_BIDI) {
BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
for (i = 0, len = 0; i < se_cmd->t_bidi_data_nents; i++)
len += se_cmd->t_bidi_data_sg[i].length;
cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, blk_size);
cmd->dbi_cnt += cmd->dbi_bidi_cnt;
cmd->data_len_bidi = len;
}
}
static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
struct iovec **iov, int prev_dbi, int len)
{
/* Get the next dbi */
int dbi = tcmu_cmd_get_dbi(cmd);
/* Do not add more than udev->data_blk_size to iov */
len = min_t(int, len, udev->data_blk_size);
/*
* The following code will gather and map the blocks to the same iovec
* when the blocks are all next to each other.
*/
if (dbi != prev_dbi + 1) {
/* dbi is not next to previous dbi, so start new iov */
if (prev_dbi >= 0)
(*iov)++;
/* write offset relative to mb_addr */
(*iov)->iov_base = (void __user *)
(udev->data_off + dbi * udev->data_blk_size);
}
(*iov)->iov_len += len;
return dbi;
}
static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
struct iovec **iov, int data_length)
{
/* start value of dbi + 1 must not be a valid dbi */
int dbi = -2;
/* We prepare the IOVs for DMA_FROM_DEVICE transfer direction */
for (; data_length > 0; data_length -= udev->data_blk_size)
dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length);
}
static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
{
struct se_device *se_dev = se_cmd->se_dev;
struct tcmu_dev *udev = TCMU_DEV(se_dev);
struct tcmu_cmd *tcmu_cmd;
tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO);
if (!tcmu_cmd)
return NULL;
INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
tcmu_cmd->se_cmd = se_cmd;
tcmu_cmd->tcmu_dev = udev;
tcmu_cmd_set_block_cnts(tcmu_cmd);
tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
GFP_NOIO);
if (!tcmu_cmd->dbi) {
kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
return NULL;
}
return tcmu_cmd;
}
static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{
unsigned long offset = offset_in_page(vaddr);
void *start = vaddr - offset;
size = round_up(size+offset, PAGE_SIZE);
while (size) {
flush_dcache_page(vmalloc_to_page(start));
start += PAGE_SIZE;
size -= PAGE_SIZE;
}
}
/*
* Some ring helper functions. We don't assume size is a power of 2 so
* we can't use circ_buf.h.
*/
static inline size_t spc_used(size_t head, size_t tail, size_t size)
{
int diff = head - tail;
if (diff >= 0)
return diff;
else
return size + diff;
}
static inline size_t spc_free(size_t head, size_t tail, size_t size)
{
/* Keep 1 byte unused or we can't tell full from empty */
return (size - spc_used(head, tail, size) - 1);
}
static inline size_t head_to_end(size_t head, size_t size)
{
return size - head;
}
#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
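/*
 * Worked example for the ring helpers above (illustrative numbers only):
 * with size == 64, head == 16 and tail == 48, spc_used() == 64 + (16 - 48)
 * == 32, spc_free() == 64 - 32 - 1 == 31, and head_to_end(16, 64) == 48,
 * i.e. 48 contiguous bytes remain before the wrap even though only 31 of
 * them are actually free.
 */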
#define TCMU_SG_TO_DATA_AREA 1
#define TCMU_DATA_AREA_TO_SG 2
static inline void tcmu_copy_data(struct tcmu_dev *udev,
struct tcmu_cmd *tcmu_cmd, uint32_t direction,
struct scatterlist *sg, unsigned int sg_nents,
struct iovec **iov, size_t data_len)
{
/* start value of dbi + 1 must not be a valid dbi */
int dbi = -2;
size_t page_remaining, cp_len;
int page_cnt, page_inx, dpi;
struct sg_mapping_iter sg_iter;
unsigned int sg_flags;
struct page *page;
void *data_page_start, *data_addr;
if (direction == TCMU_SG_TO_DATA_AREA)
sg_flags = SG_MITER_ATOMIC | SG_MITER_FROM_SG;
else
sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
sg_miter_start(&sg_iter, sg, sg_nents, sg_flags);
while (data_len) {
if (direction == TCMU_SG_TO_DATA_AREA)
dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi,
data_len);
else
dbi = tcmu_cmd_get_dbi(tcmu_cmd);
page_cnt = DIV_ROUND_UP(data_len, PAGE_SIZE);
if (page_cnt > udev->data_pages_per_blk)
page_cnt = udev->data_pages_per_blk;
dpi = dbi * udev->data_pages_per_blk;
for (page_inx = 0; page_inx < page_cnt && data_len;
page_inx++, dpi++) {
page = xa_load(&udev->data_pages, dpi);
if (direction == TCMU_DATA_AREA_TO_SG)
flush_dcache_page(page);
data_page_start = kmap_atomic(page);
page_remaining = PAGE_SIZE;
while (page_remaining && data_len) {
if (!sg_miter_next(&sg_iter)) {
/* set length to 0 to abort outer loop */
data_len = 0;
pr_debug("%s: aborting data copy due to exhausted sg_list\n",
__func__);
break;
}
cp_len = min3(sg_iter.length, page_remaining,
data_len);
data_addr = data_page_start +
PAGE_SIZE - page_remaining;
if (direction == TCMU_SG_TO_DATA_AREA)
memcpy(data_addr, sg_iter.addr, cp_len);
else
memcpy(sg_iter.addr, data_addr, cp_len);
data_len -= cp_len;
page_remaining -= cp_len;
sg_iter.consumed = cp_len;
}
sg_miter_stop(&sg_iter);
kunmap_atomic(data_page_start);
if (direction == TCMU_SG_TO_DATA_AREA)
flush_dcache_page(page);
}
}
}
static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
struct iovec **iov)
{
struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
tcmu_copy_data(udev, tcmu_cmd, TCMU_SG_TO_DATA_AREA, se_cmd->t_data_sg,
se_cmd->t_data_nents, iov, se_cmd->data_length);
}
static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
bool bidi, uint32_t read_len)
{
struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
struct scatterlist *data_sg;
unsigned int data_nents;
if (!bidi) {
data_sg = se_cmd->t_data_sg;
data_nents = se_cmd->t_data_nents;
} else {
/*
* For bidi case, the first count blocks are for Data-Out
* buffer blocks, and before gathering the Data-In buffer
* the Data-Out buffer blocks should be skipped.
*/
tcmu_cmd_set_dbi_cur(tcmu_cmd,
tcmu_cmd->dbi_cnt - tcmu_cmd->dbi_bidi_cnt);
data_sg = se_cmd->t_bidi_data_sg;
data_nents = se_cmd->t_bidi_data_nents;
}
tcmu_copy_data(udev, tcmu_cmd, TCMU_DATA_AREA_TO_SG, data_sg,
data_nents, NULL, read_len);
}
static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
{
return thresh - bitmap_weight(bitmap, thresh);
}
/*
* We can't queue a command until we have space available on the cmd ring.
*
* Called with ring lock held.
*/
static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size)
{
struct tcmu_mailbox *mb = udev->mb_addr;
size_t space, cmd_needed;
u32 cmd_head;
tcmu_flush_dcache_range(mb, sizeof(*mb));
cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
/*
 * If the end-of-ring space is too small, we need room for a PAD entry plus
 * the original cmd, since cmds are laid out contiguously and never wrap.
*/
if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
cmd_needed = cmd_size;
else
cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);
space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
if (space < cmd_needed) {
pr_debug("no cmd space: %u %u %u\n", cmd_head,
udev->cmdr_last_cleaned, udev->cmdr_size);
return false;
}
return true;
}
/*
* We have to allocate data buffers before we can queue a command.
* Returns -1 on error (not enough space) or number of needed iovs on success
*
* Called with ring lock held.
*/
static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
int *iov_bidi_cnt)
{
int space, iov_cnt = 0, ret = 0;
if (!cmd->dbi_cnt)
goto wr_iov_cnts;
/* try to check and get the data blocks as needed */
space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
if (space < cmd->dbi_cnt) {
unsigned long blocks_left =
(udev->max_blocks - udev->dbi_thresh) + space;
if (blocks_left < cmd->dbi_cnt) {
pr_debug("no data space: only %lu available, but ask for %u\n",
blocks_left * udev->data_blk_size,
cmd->dbi_cnt * udev->data_blk_size);
return -1;
}
udev->dbi_thresh += cmd->dbi_cnt;
if (udev->dbi_thresh > udev->max_blocks)
udev->dbi_thresh = udev->max_blocks;
}
iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length);
if (iov_cnt < 0)
return -1;
if (cmd->dbi_bidi_cnt) {
ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi);
if (ret < 0)
return -1;
}
wr_iov_cnts:
*iov_bidi_cnt = ret;
return iov_cnt + ret;
}
static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
{
return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
sizeof(struct tcmu_cmd_entry));
}
static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
size_t base_command_size)
{
struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
size_t command_size;
command_size = base_command_size +
round_up(scsi_command_size(se_cmd->t_task_cdb),
TCMU_OP_ALIGN_SIZE);
WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));
return command_size;
}
static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
struct timer_list *timer)
{
if (!tmo)
return;
tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
if (!timer_pending(timer))
mod_timer(timer, tcmu_cmd->deadline);
pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd,
tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC);
}
static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
unsigned int tmo;
/*
 * For backwards compat: a qfull_time_out of 0 means don't queue at all,
 * a positive value is used directly, and otherwise fall back to
 * cmd_time_out, or to the default timeout if that is unset.
*/
if (!udev->qfull_time_out)
return -ETIMEDOUT;
else if (udev->qfull_time_out > 0)
tmo = udev->qfull_time_out;
else if (udev->cmd_time_out)
tmo = udev->cmd_time_out;
else
tmo = TCMU_TIME_OUT;
tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
pr_debug("adding cmd %p on dev %s to ring space wait queue\n",
tcmu_cmd, udev->name);
return 0;
}
static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size)
{
struct tcmu_cmd_entry_hdr *hdr;
struct tcmu_mailbox *mb = udev->mb_addr;
uint32_t cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
/* Insert a PAD if end-of-ring space is too small */
if (head_to_end(cmd_head, udev->cmdr_size) < cmd_size) {
size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
hdr = udev->cmdr + cmd_head;
tcmu_hdr_set_op(&hdr->len_op, TCMU_OP_PAD);
tcmu_hdr_set_len(&hdr->len_op, pad_size);
hdr->cmd_id = 0; /* not used for PAD */
hdr->kflags = 0;
hdr->uflags = 0;
tcmu_flush_dcache_range(hdr, sizeof(*hdr));
UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
tcmu_flush_dcache_range(mb, sizeof(*mb));
cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
WARN_ON(cmd_head != 0);
}
return cmd_head;
}
static void tcmu_unplug_device(struct se_dev_plug *se_plug)
{
struct se_device *se_dev = se_plug->se_dev;
struct tcmu_dev *udev = TCMU_DEV(se_dev);
clear_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags);
uio_event_notify(&udev->uio_info);
}
static struct se_dev_plug *tcmu_plug_device(struct se_device *se_dev)
{
struct tcmu_dev *udev = TCMU_DEV(se_dev);
if (!test_and_set_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags))
return &udev->se_plug;
return NULL;
}
/**
* queue_cmd_ring - queue cmd to ring or internally
* @tcmu_cmd: cmd to queue
* @scsi_err: TCM error code if failure (-1) returned.
*
* Returns:
* -1 we cannot queue internally or to the ring.
* 0 success
* 1 internally queued to wait for ring memory to free.
*/
static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
size_t base_command_size, command_size;
struct tcmu_mailbox *mb = udev->mb_addr;
struct tcmu_cmd_entry *entry;
struct iovec *iov;
int iov_cnt, iov_bidi_cnt;
uint32_t cmd_id, cmd_head;
uint64_t cdb_off;
uint32_t blk_size = udev->data_blk_size;
/* size of data buffer needed */
size_t data_length = (size_t)tcmu_cmd->dbi_cnt * blk_size;
*scsi_err = TCM_NO_SENSE;
if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) {
*scsi_err = TCM_LUN_BUSY;
return -1;
}
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
*scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -1;
}
if (!list_empty(&udev->qfull_queue))
goto queue;
if (data_length > (size_t)udev->max_blocks * blk_size) {
pr_warn("TCMU: Request of size %zu is too big for %zu data area\n",
data_length, (size_t)udev->max_blocks * blk_size);
*scsi_err = TCM_INVALID_CDB_FIELD;
return -1;
}
iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt);
if (iov_cnt < 0)
goto free_and_queue;
/*
* Must be a certain minimum size for response sense info, but
* also may be larger if the iov array is large.
*/
base_command_size = tcmu_cmd_get_base_cmd_size(iov_cnt);
command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
if (command_size > (udev->cmdr_size / 2)) {
pr_warn("TCMU: Request of size %zu is too big for %u cmd ring\n",
command_size, udev->cmdr_size);
tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
*scsi_err = TCM_INVALID_CDB_FIELD;
return -1;
}
if (!is_ring_space_avail(udev, command_size))
/*
* Don't leave commands partially setup because the unmap
* thread might need the blocks to make forward progress.
*/
goto free_and_queue;
if (xa_alloc(&udev->commands, &cmd_id, tcmu_cmd, XA_LIMIT(1, 0xffff),
GFP_NOWAIT) < 0) {
pr_err("tcmu: Could not allocate cmd id.\n");
tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
*scsi_err = TCM_OUT_OF_RESOURCES;
return -1;
}
tcmu_cmd->cmd_id = cmd_id;
pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
tcmu_cmd, udev->name);
cmd_head = ring_insert_padding(udev, command_size);
entry = udev->cmdr + cmd_head;
memset(entry, 0, command_size);
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
/* prepare iov list and copy data to data area if necessary */
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
iov = &entry->req.iov[0];
if (se_cmd->data_direction == DMA_TO_DEVICE ||
se_cmd->se_cmd_flags & SCF_BIDI)
scatter_data_area(udev, tcmu_cmd, &iov);
else
tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length);
entry->req.iov_cnt = iov_cnt - iov_bidi_cnt;
/* Handle BIDI commands */
if (se_cmd->se_cmd_flags & SCF_BIDI) {
iov++;
tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi);
entry->req.iov_bidi_cnt = iov_bidi_cnt;
}
tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);
entry->hdr.cmd_id = tcmu_cmd->cmd_id;
tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
/* All offsets relative to mb_addr, not start of entry! */
cdb_off = CMDR_OFF + cmd_head + base_command_size;
memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
entry->req.cdb_off = cdb_off;
tcmu_flush_dcache_range(entry, command_size);
UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
tcmu_flush_dcache_range(mb, sizeof(*mb));
list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
if (!test_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags))
uio_event_notify(&udev->uio_info);
return 0;
free_and_queue:
tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
queue:
if (add_to_qfull_queue(tcmu_cmd)) {
*scsi_err = TCM_OUT_OF_RESOURCES;
return -1;
}
return 1;
}
/**
* queue_tmr_ring - queue tmr info to ring or internally
* @udev: related tcmu_dev
* @tmr: tcmu_tmr containing tmr info to queue
*
* Returns:
* 0 success
* 1 internally queued to wait for ring memory to free.
*/
static int
queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr)
{
struct tcmu_tmr_entry *entry;
int cmd_size;
int id_list_sz;
struct tcmu_mailbox *mb = udev->mb_addr;
uint32_t cmd_head;
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
goto out_free;
id_list_sz = sizeof(tmr->tmr_cmd_ids[0]) * tmr->tmr_cmd_cnt;
cmd_size = round_up(sizeof(*entry) + id_list_sz, TCMU_OP_ALIGN_SIZE);
if (!list_empty(&udev->tmr_queue) ||
!is_ring_space_avail(udev, cmd_size)) {
list_add_tail(&tmr->queue_entry, &udev->tmr_queue);
pr_debug("adding tmr %p on dev %s to TMR ring space wait queue\n",
tmr, udev->name);
return 1;
}
cmd_head = ring_insert_padding(udev, cmd_size);
entry = udev->cmdr + cmd_head;
memset(entry, 0, cmd_size);
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_TMR);
tcmu_hdr_set_len(&entry->hdr.len_op, cmd_size);
entry->tmr_type = tmr->tmr_type;
entry->cmd_cnt = tmr->tmr_cmd_cnt;
memcpy(&entry->cmd_ids[0], &tmr->tmr_cmd_ids[0], id_list_sz);
tcmu_flush_dcache_range(entry, cmd_size);
UPDATE_HEAD(mb->cmd_head, cmd_size, udev->cmdr_size);
tcmu_flush_dcache_range(mb, sizeof(*mb));
uio_event_notify(&udev->uio_info);
out_free:
kfree(tmr);
return 0;
}
static sense_reason_t
tcmu_queue_cmd(struct se_cmd *se_cmd)
{
struct se_device *se_dev = se_cmd->se_dev;
struct tcmu_dev *udev = TCMU_DEV(se_dev);
struct tcmu_cmd *tcmu_cmd;
sense_reason_t scsi_ret = TCM_CHECK_CONDITION_ABORT_CMD;
int ret = -1;
tcmu_cmd = tcmu_alloc_cmd(se_cmd);
if (!tcmu_cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
mutex_lock(&udev->cmdr_lock);
if (!(se_cmd->transport_state & CMD_T_ABORTED))
ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
if (ret < 0)
tcmu_free_cmd(tcmu_cmd);
else
se_cmd->priv = tcmu_cmd;
mutex_unlock(&udev->cmdr_lock);
return scsi_ret;
}
static void tcmu_set_next_deadline(struct list_head *queue,
struct timer_list *timer)
{
struct tcmu_cmd *cmd;
if (!list_empty(queue)) {
cmd = list_first_entry(queue, struct tcmu_cmd, queue_entry);
mod_timer(timer, cmd->deadline);
} else
del_timer(timer);
}
static int
tcmu_tmr_type(enum tcm_tmreq_table tmf)
{
switch (tmf) {
case TMR_ABORT_TASK: return TCMU_TMR_ABORT_TASK;
case TMR_ABORT_TASK_SET: return TCMU_TMR_ABORT_TASK_SET;
case TMR_CLEAR_ACA: return TCMU_TMR_CLEAR_ACA;
case TMR_CLEAR_TASK_SET: return TCMU_TMR_CLEAR_TASK_SET;
case TMR_LUN_RESET: return TCMU_TMR_LUN_RESET;
case TMR_TARGET_WARM_RESET: return TCMU_TMR_TARGET_WARM_RESET;
case TMR_TARGET_COLD_RESET: return TCMU_TMR_TARGET_COLD_RESET;
case TMR_LUN_RESET_PRO: return TCMU_TMR_LUN_RESET_PRO;
default: return TCMU_TMR_UNKNOWN;
}
}
static void
tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
struct list_head *cmd_list)
{
int i = 0, cmd_cnt = 0;
bool unqueued = false;
struct tcmu_cmd *cmd;
struct se_cmd *se_cmd;
struct tcmu_tmr *tmr;
struct tcmu_dev *udev = TCMU_DEV(se_dev);
mutex_lock(&udev->cmdr_lock);
/* First we check for aborted commands in qfull_queue */
list_for_each_entry(se_cmd, cmd_list, state_list) {
i++;
if (!se_cmd->priv)
continue;
cmd = se_cmd->priv;
/* Commands on qfull queue have no id yet */
if (cmd->cmd_id) {
cmd_cnt++;
continue;
}
pr_debug("Removing aborted command %p from queue on dev %s.\n",
cmd, udev->name);
list_del_init(&cmd->queue_entry);
tcmu_free_cmd(cmd);
se_cmd->priv = NULL;
target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED);
unqueued = true;
}
if (unqueued)
tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
if (!test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags))
goto unlock;
pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n",
tcmu_tmr_type(tmf), udev->name, i, cmd_cnt);
tmr = kmalloc(struct_size(tmr, tmr_cmd_ids, cmd_cnt), GFP_NOIO);
if (!tmr)
goto unlock;
tmr->tmr_type = tcmu_tmr_type(tmf);
tmr->tmr_cmd_cnt = cmd_cnt;
if (cmd_cnt != 0) {
cmd_cnt = 0;
list_for_each_entry(se_cmd, cmd_list, state_list) {
if (!se_cmd->priv)
continue;
cmd = se_cmd->priv;
if (cmd->cmd_id)
tmr->tmr_cmd_ids[cmd_cnt++] = cmd->cmd_id;
}
}
queue_tmr_ring(udev, tmr);
unlock:
mutex_unlock(&udev->cmdr_lock);
}
static bool tcmu_handle_completion(struct tcmu_cmd *cmd,
struct tcmu_cmd_entry *entry, bool keep_buf)
{
struct se_cmd *se_cmd = cmd->se_cmd;
struct tcmu_dev *udev = cmd->tcmu_dev;
bool read_len_valid = false;
bool ret = true;
uint32_t read_len;
/*
* cmd has been completed already from timeout, just reclaim
* data area space and free cmd
*/
if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
WARN_ON_ONCE(se_cmd);
goto out;
}
if (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
pr_err("cmd_id %u already completed with KEEP_BUF, ring is broken\n",
entry->hdr.cmd_id);
set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
ret = false;
goto out;
}
list_del_init(&cmd->queue_entry);
tcmu_cmd_reset_dbi_cur(cmd);
if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
cmd->se_cmd);
entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
goto done;
}
read_len = se_cmd->data_length;
if (se_cmd->data_direction == DMA_FROM_DEVICE &&
(entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
read_len_valid = true;
if (entry->rsp.read_len < read_len)
read_len = entry->rsp.read_len;
}
if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
if (!read_len_valid)
goto done;
else
se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
}
if (se_cmd->se_cmd_flags & SCF_BIDI) {
/* Get Data-In buffer before clean up */
gather_data_area(udev, cmd, true, read_len);
} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
gather_data_area(udev, cmd, false, read_len);
} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
/* TODO: */
} else if (se_cmd->data_direction != DMA_NONE) {
pr_warn("TCMU: data direction was %d!\n",
se_cmd->data_direction);
}
done:
se_cmd->priv = NULL;
if (read_len_valid) {
pr_debug("read_len = %d\n", read_len);
target_complete_cmd_with_length(cmd->se_cmd,
entry->rsp.scsi_status, read_len);
} else
target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
out:
if (!keep_buf) {
tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
tcmu_free_cmd(cmd);
} else {
/*
* Keep this command after completion, since userspace still
* needs the data buffer. Mark it with TCMU_CMD_BIT_KEEP_BUF
* and reset potential TCMU_CMD_BIT_EXPIRED, so we don't accept
* a second completion later.
* Userspace can free the buffer later by writing the cmd_id
* to new action attribute free_kept_buf.
*/
clear_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
set_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags);
}
return ret;
}
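/*
 * Illustrative usage, not part of this file: once userspace is done with a
 * buffer kept via the KEEP_BUF path above, it releases it by writing the
 * cmd_id to the device's free_kept_buf action attribute, e.g. (example
 * configfs path and cmd_id, error handling omitted):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/sys/kernel/config/target/core/user_0/dev0/action/"
 *		      "free_kept_buf", O_WRONLY);
 *	write(fd, "42", 2);
 *	close(fd);
 */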
static int tcmu_run_tmr_queue(struct tcmu_dev *udev)
{
struct tcmu_tmr *tmr, *tmp;
LIST_HEAD(tmrs);
if (list_empty(&udev->tmr_queue))
return 1;
pr_debug("running %s's tmr queue\n", udev->name);
list_splice_init(&udev->tmr_queue, &tmrs);
list_for_each_entry_safe(tmr, tmp, &tmrs, queue_entry) {
list_del_init(&tmr->queue_entry);
pr_debug("removing tmr %p on dev %s from queue\n",
tmr, udev->name);
if (queue_tmr_ring(udev, tmr)) {
pr_debug("ran out of space during tmr queue run\n");
/*
* tmr was requeued, so just put all tmrs back in
* the queue
*/
list_splice_tail(&tmrs, &udev->tmr_queue);
return 0;
}
}
return 1;
}
static bool tcmu_handle_completions(struct tcmu_dev *udev)
{
struct tcmu_mailbox *mb;
struct tcmu_cmd *cmd;
bool free_space = false;
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
pr_err("ring broken, not handling completions\n");
return false;
}
mb = udev->mb_addr;
tcmu_flush_dcache_range(mb, sizeof(*mb));
while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
struct tcmu_cmd_entry *entry = udev->cmdr + udev->cmdr_last_cleaned;
bool keep_buf;
/*
 * Flush at most up to the end of the cmd ring, since the current
 * entry might be a PAD that is shorter than sizeof(*entry).
*/
size_t ring_left = head_to_end(udev->cmdr_last_cleaned,
udev->cmdr_size);
tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ?
ring_left : sizeof(*entry));
free_space = true;
if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD ||
tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_TMR) {
UPDATE_HEAD(udev->cmdr_last_cleaned,
tcmu_hdr_get_len(entry->hdr.len_op),
udev->cmdr_size);
continue;
}
WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
keep_buf = !!(entry->hdr.uflags & TCMU_UFLAG_KEEP_BUF);
if (keep_buf)
cmd = xa_load(&udev->commands, entry->hdr.cmd_id);
else
cmd = xa_erase(&udev->commands, entry->hdr.cmd_id);
if (!cmd) {
pr_err("cmd_id %u not found, ring is broken\n",
entry->hdr.cmd_id);
set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
return false;
}
if (!tcmu_handle_completion(cmd, entry, keep_buf))
break;
UPDATE_HEAD(udev->cmdr_last_cleaned,
tcmu_hdr_get_len(entry->hdr.len_op),
udev->cmdr_size);
}
if (free_space)
free_space = tcmu_run_tmr_queue(udev);
if (atomic_read(&global_page_count) > tcmu_global_max_pages &&
xa_empty(&udev->commands) && list_empty(&udev->qfull_queue)) {
/*
* Allocated blocks exceeded global block limit, currently no
* more pending or waiting commands so try to reclaim blocks.
*/
schedule_delayed_work(&tcmu_unmap_work, 0);
}
if (udev->cmd_time_out)
tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
return free_space;
}
static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
{
struct se_cmd *se_cmd;
if (!time_after_eq(jiffies, cmd->deadline))
return;
set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
list_del_init(&cmd->queue_entry);
se_cmd = cmd->se_cmd;
se_cmd->priv = NULL;
cmd->se_cmd = NULL;
pr_debug("Timing out inflight cmd %u on dev %s.\n",
cmd->cmd_id, cmd->tcmu_dev->name);
target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION);
}
static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
{
struct se_cmd *se_cmd;
if (!time_after_eq(jiffies, cmd->deadline))
return;
pr_debug("Timing out queued cmd %p on dev %s.\n",
cmd, cmd->tcmu_dev->name);
list_del_init(&cmd->queue_entry);
se_cmd = cmd->se_cmd;
tcmu_free_cmd(cmd);
se_cmd->priv = NULL;
target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
}
static void tcmu_device_timedout(struct tcmu_dev *udev)
{
spin_lock(&timed_out_udevs_lock);
if (list_empty(&udev->timedout_entry))
list_add_tail(&udev->timedout_entry, &timed_out_udevs);
spin_unlock(&timed_out_udevs_lock);
schedule_delayed_work(&tcmu_unmap_work, 0);
}
static void tcmu_cmd_timedout(struct timer_list *t)
{
struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);
pr_debug("%s cmd timeout has expired\n", udev->name);
tcmu_device_timedout(udev);
}
static void tcmu_qfull_timedout(struct timer_list *t)
{
struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);
pr_debug("%s qfull timeout has expired\n", udev->name);
tcmu_device_timedout(udev);
}
static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
{
struct tcmu_hba *tcmu_hba;
tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
if (!tcmu_hba)
return -ENOMEM;
tcmu_hba->host_id = host_id;
hba->hba_ptr = tcmu_hba;
return 0;
}
static void tcmu_detach_hba(struct se_hba *hba)
{
kfree(hba->hba_ptr);
hba->hba_ptr = NULL;
}
static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
{
struct tcmu_dev *udev;
udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
if (!udev)
return NULL;
kref_init(&udev->kref);
udev->name = kstrdup(name, GFP_KERNEL);
if (!udev->name) {
kfree(udev);
return NULL;
}
udev->hba = hba;
udev->cmd_time_out = TCMU_TIME_OUT;
udev->qfull_time_out = -1;
udev->data_pages_per_blk = DATA_PAGES_PER_BLK_DEF;
udev->max_blocks = DATA_AREA_PAGES_DEF / udev->data_pages_per_blk;
udev->cmdr_size = CMDR_SIZE_DEF;
udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF);
mutex_init(&udev->cmdr_lock);
INIT_LIST_HEAD(&udev->node);
INIT_LIST_HEAD(&udev->timedout_entry);
INIT_LIST_HEAD(&udev->qfull_queue);
INIT_LIST_HEAD(&udev->tmr_queue);
INIT_LIST_HEAD(&udev->inflight_queue);
xa_init_flags(&udev->commands, XA_FLAGS_ALLOC1);
timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
xa_init(&udev->data_pages);
return &udev->se_dev;
}
static void tcmu_dev_call_rcu(struct rcu_head *p)
{
struct se_device *dev = container_of(p, struct se_device, rcu_head);
struct tcmu_dev *udev = TCMU_DEV(dev);
kfree(udev->uio_info.name);
kfree(udev->name);
kfree(udev);
}
static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
{
if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ||
test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
kmem_cache_free(tcmu_cmd_cache, cmd);
return 0;
}
return -EINVAL;
}
static u32 tcmu_blocks_release(struct tcmu_dev *udev, unsigned long first,
unsigned long last)
{
struct page *page;
unsigned long dpi;
u32 pages_freed = 0;
first = first * udev->data_pages_per_blk;
last = (last + 1) * udev->data_pages_per_blk - 1;
xa_for_each_range(&udev->data_pages, dpi, page, first, last) {
xa_erase(&udev->data_pages, dpi);
/*
* While reaching here there may be page faults occurring on
* the to-be-released pages. A race condition may occur if
* unmap_mapping_range() is called before page faults on these
* pages have completed; a valid but stale map is created.
*
* If another command subsequently runs and needs to extend
* dbi_thresh, it may reuse the slot corresponding to the
* previous page in data_bitmap. Though we will allocate a new
* page for the slot in data_area, no page fault will happen
* because we have a valid map. Therefore the command's data
* will be lost.
*
* We lock and unlock pages that are to be released to ensure
* all page faults have completed. This way
* unmap_mapping_range() can ensure stale maps are cleanly
* removed.
*/
lock_page(page);
unlock_page(page);
__free_page(page);
pages_freed++;
}
atomic_sub(pages_freed, &global_page_count);
return pages_freed;
}
static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev)
{
struct tcmu_tmr *tmr, *tmp;
list_for_each_entry_safe(tmr, tmp, &udev->tmr_queue, queue_entry) {
list_del_init(&tmr->queue_entry);
kfree(tmr);
}
}
static void tcmu_dev_kref_release(struct kref *kref)
{
struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
struct se_device *dev = &udev->se_dev;
struct tcmu_cmd *cmd;
bool all_expired = true;
unsigned long i;
vfree(udev->mb_addr);
udev->mb_addr = NULL;
spin_lock_bh(&timed_out_udevs_lock);
if (!list_empty(&udev->timedout_entry))
list_del(&udev->timedout_entry);
spin_unlock_bh(&timed_out_udevs_lock);
/* Upper layer should drain all requests before calling this */
mutex_lock(&udev->cmdr_lock);
xa_for_each(&udev->commands, i, cmd) {
if (tcmu_check_and_free_pending_cmd(cmd) != 0)
all_expired = false;
}
/* There can be left over TMR cmds. Remove them. */
tcmu_remove_all_queued_tmr(udev);
if (!list_empty(&udev->qfull_queue))
all_expired = false;
xa_destroy(&udev->commands);
WARN_ON(!all_expired);
tcmu_blocks_release(udev, 0, udev->dbi_max);
bitmap_free(udev->data_bitmap);
mutex_unlock(&udev->cmdr_lock);
pr_debug("dev_kref_release\n");
call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}
static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
{
struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
LIST_HEAD(cmds);
sense_reason_t scsi_ret;
int ret;
if (list_empty(&udev->qfull_queue))
return;
pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
list_splice_init(&udev->qfull_queue, &cmds);
list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
list_del_init(&tcmu_cmd->queue_entry);
pr_debug("removing cmd %p on dev %s from queue\n",
tcmu_cmd, udev->name);
if (fail) {
/*
* We were not able to even start the command, so
* fail with busy to allow a retry in case runner
* was only temporarily down. If the device is being
* removed then LIO core will do the right thing and
* fail the retry.
*/
tcmu_cmd->se_cmd->priv = NULL;
target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
tcmu_free_cmd(tcmu_cmd);
continue;
}
ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
if (ret < 0) {
pr_debug("cmd %p on dev %s failed with %u\n",
tcmu_cmd, udev->name, scsi_ret);
/*
* Ignore scsi_ret for now. target_complete_cmd
* drops it.
*/
tcmu_cmd->se_cmd->priv = NULL;
target_complete_cmd(tcmu_cmd->se_cmd,
SAM_STAT_CHECK_CONDITION);
tcmu_free_cmd(tcmu_cmd);
} else if (ret > 0) {
pr_debug("ran out of space during cmdr queue run\n");
/*
* cmd was requeued, so just put all cmds back in
* the queue
*/
list_splice_tail(&cmds, &udev->qfull_queue);
break;
}
}
tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
}
static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
{
struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
mutex_lock(&udev->cmdr_lock);
if (tcmu_handle_completions(udev))
run_qfull_queue(udev, false);
mutex_unlock(&udev->cmdr_lock);
return 0;
}
/*
* mmap code from uio.c. Copied here because we want to hook mmap()
* and this stuff must come along.
*/
static int tcmu_find_mem_index(struct vm_area_struct *vma)
{
struct tcmu_dev *udev = vma->vm_private_data;
struct uio_info *info = &udev->uio_info;
if (vma->vm_pgoff < MAX_UIO_MAPS) {
if (info->mem[vma->vm_pgoff].size == 0)
return -1;
return (int)vma->vm_pgoff;
}
return -1;
}
static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi)
{
struct page *page;
mutex_lock(&udev->cmdr_lock);
page = xa_load(&udev->data_pages, dpi);
if (likely(page)) {
get_page(page);
lock_page(page);
mutex_unlock(&udev->cmdr_lock);
return page;
}
/*
 * Userspace messed up and passed in an address that is not in the
 * data iov passed to it.
 */
pr_err("Invalid addr to data page mapping (dpi %u) on device %s\n",
dpi, udev->name);
mutex_unlock(&udev->cmdr_lock);
return NULL;
}
static void tcmu_vma_open(struct vm_area_struct *vma)
{
struct tcmu_dev *udev = vma->vm_private_data;
pr_debug("vma_open\n");
kref_get(&udev->kref);
}
static void tcmu_vma_close(struct vm_area_struct *vma)
{
struct tcmu_dev *udev = vma->vm_private_data;
pr_debug("vma_close\n");
/* release ref from tcmu_vma_open */
kref_put(&udev->kref, tcmu_dev_kref_release);
}
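/*
 * Descriptive comment (added for clarity): fault handler for the mmap'ed
 * area. Command ring pages come from the vmalloc'ed mailbox region; data
 * area pages are looked up in the data_pages xarray and returned locked
 * (VM_FAULT_LOCKED).
 */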
static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
{
struct tcmu_dev *udev = vmf->vma->vm_private_data;
struct uio_info *info = &udev->uio_info;
struct page *page;
unsigned long offset;
void *addr;
vm_fault_t ret = 0;
int mi = tcmu_find_mem_index(vmf->vma);
if (mi < 0)
return VM_FAULT_SIGBUS;
/*
* We need to subtract mi because userspace uses offset = N*PAGE_SIZE
* to use mem[N].
*/
offset = (vmf->pgoff - mi) << PAGE_SHIFT;
if (offset < udev->data_off) {
/* For the vmalloc()ed cmd area pages */
addr = (void *)(unsigned long)info->mem[mi].addr + offset;
page = vmalloc_to_page(addr);
get_page(page);
} else {
uint32_t dpi;
/* For the dynamically growing data area pages */
dpi = (offset - udev->data_off) / PAGE_SIZE;
page = tcmu_try_get_data_page(udev, dpi);
if (!page)
return VM_FAULT_SIGBUS;
ret = VM_FAULT_LOCKED;
}
vmf->page = page;
return ret;
}
static const struct vm_operations_struct tcmu_vm_ops = {
.open = tcmu_vma_open,
.close = tcmu_vma_close,
.fault = tcmu_vma_fault,
};
static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
{
struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &tcmu_vm_ops;
vma->vm_private_data = udev;
/* Ensure the mmap is exactly the right size */
if (vma_pages(vma) != udev->mmap_pages)
return -EINVAL;
tcmu_vma_open(vma);
return 0;
}
static int tcmu_open(struct uio_info *info, struct inode *inode)
{
struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
/* O_EXCL not supported for char devs, so fake it? */
if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
return -EBUSY;
udev->inode = inode;
pr_debug("open\n");
return 0;
}
static int tcmu_release(struct uio_info *info, struct inode *inode)
{
struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
struct tcmu_cmd *cmd;
unsigned long i;
bool freed = false;
mutex_lock(&udev->cmdr_lock);
xa_for_each(&udev->commands, i, cmd) {
/* Cmds with KEEP_BUF set are no longer on the ring, but
 * userspace still holds the data buffer. If userspace closes,
 * we implicitly free these cmds and buffers, since after a new
 * open the (possibly new) userspace cannot find the cmd in the
 * ring and thus will never release the buffer by writing its
 * cmd_id to the free_kept_buf action attribute.
 */
if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags))
continue;
pr_debug("removing KEEP_BUF cmd %u on dev %s from ring\n",
cmd->cmd_id, udev->name);
freed = true;
xa_erase(&udev->commands, i);
tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
tcmu_free_cmd(cmd);
}
/*
 * We only freed data space, not ring space. Therefore we don't call
 * run_tmr_queue, but call run_qfull_queue if the tmr queue is empty.
 */
if (freed && list_empty(&udev->tmr_queue))
run_qfull_queue(udev, false);
mutex_unlock(&udev->cmdr_lock);
clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
pr_debug("close\n");
return 0;
}
static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
{
struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
if (!tcmu_kern_cmd_reply_supported)
return 0;
if (udev->nl_reply_supported <= 0)
return 0;
mutex_lock(&tcmu_nl_cmd_mutex);
if (tcmu_netlink_blocked) {
mutex_unlock(&tcmu_nl_cmd_mutex);
pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd,
udev->name);
return -EAGAIN;
}
if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
mutex_unlock(&tcmu_nl_cmd_mutex);
pr_warn("netlink cmd %d already executing on %s\n",
nl_cmd->cmd, udev->name);
return -EBUSY;
}
memset(nl_cmd, 0, sizeof(*nl_cmd));
nl_cmd->cmd = cmd;
nl_cmd->udev = udev;
init_completion(&nl_cmd->complete);
INIT_LIST_HEAD(&nl_cmd->nl_list);
list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list);
mutex_unlock(&tcmu_nl_cmd_mutex);
return 0;
}
static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev)
{
struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
if (!tcmu_kern_cmd_reply_supported)
return;
if (udev->nl_reply_supported <= 0)
return;
mutex_lock(&tcmu_nl_cmd_mutex);
list_del(&nl_cmd->nl_list);
memset(nl_cmd, 0, sizeof(*nl_cmd));
mutex_unlock(&tcmu_nl_cmd_mutex);
}
static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
{
struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
int ret;
if (!tcmu_kern_cmd_reply_supported)
return 0;
if (udev->nl_reply_supported <= 0)
return 0;
pr_debug("sleeping for nl reply\n");
wait_for_completion(&nl_cmd->complete);
mutex_lock(&tcmu_nl_cmd_mutex);
nl_cmd->cmd = TCMU_CMD_UNSPEC;
ret = nl_cmd->status;
mutex_unlock(&tcmu_nl_cmd_mutex);
return ret;
}
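/*
 * Descriptive comment (added for clarity): allocate and fill a generic
 * netlink message for @cmd, attaching the uio device name, minor number and
 * device index. On success the skb and message header are returned via
 * @buf and @hdr for tcmu_netlink_event_send().
 */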
static int tcmu_netlink_event_init(struct tcmu_dev *udev,
enum tcmu_genl_cmd cmd,
struct sk_buff **buf, void **hdr)
{
struct sk_buff *skb;
void *msg_header;
int ret = -ENOMEM;
skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb)
return ret;
msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
if (!msg_header)
goto free_skb;
ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
if (ret < 0)
goto free_skb;
ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
if (ret < 0)
goto free_skb;
ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
if (ret < 0)
goto free_skb;
*buf = skb;
*hdr = msg_header;
return ret;
free_skb:
nlmsg_free(skb);
return ret;
}
static int tcmu_netlink_event_send(struct tcmu_dev *udev,
enum tcmu_genl_cmd cmd,
struct sk_buff *skb, void *msg_header)
{
int ret;
genlmsg_end(skb, msg_header);
ret = tcmu_init_genl_cmd_reply(udev, cmd);
if (ret) {
nlmsg_free(skb);
return ret;
}
ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
TCMU_MCGRP_CONFIG, GFP_KERNEL);
/* Wait during an add as the listener may not be up yet */
if (ret == 0 ||
(ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE))
return tcmu_wait_genl_cmd_reply(udev);
else
tcmu_destroy_genl_cmd_reply(udev);
return ret;
}
static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
{
struct sk_buff *skb = NULL;
void *msg_header = NULL;
int ret = 0;
ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb,
&msg_header);
if (ret < 0)
return ret;
return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb,
msg_header);
}
static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
{
struct sk_buff *skb = NULL;
void *msg_header = NULL;
int ret = 0;
ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE,
&skb, &msg_header);
if (ret < 0)
return ret;
return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
skb, msg_header);
}
static int tcmu_update_uio_info(struct tcmu_dev *udev)
{
struct tcmu_hba *hba = udev->hba->hba_ptr;
struct uio_info *info;
char *str;
info = &udev->uio_info;
if (udev->dev_config[0])
str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id,
udev->name, udev->dev_config);
else
str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id,
udev->name);
if (!str)
return -ENOMEM;
/* If the old string exists, free it */
kfree(info->name);
info->name = str;
return 0;
}
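/*
 * Descriptive comment (added for clarity): configure the device. Allocate
 * the data block bitmap and the vmalloc'ed mailbox/command ring, fill in
 * the mailbox and uio_info, register the uio device and announce it to
 * userspace via a netlink ADDED_DEVICE event.
 */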
static int tcmu_configure_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
struct uio_info *info;
struct tcmu_mailbox *mb;
size_t data_size;
int ret = 0;
ret = tcmu_update_uio_info(udev);
if (ret)
return ret;
info = &udev->uio_info;
mutex_lock(&udev->cmdr_lock);
udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL);
mutex_unlock(&udev->cmdr_lock);
if (!udev->data_bitmap) {
ret = -ENOMEM;
goto err_bitmap_alloc;
}
mb = vzalloc(udev->cmdr_size + CMDR_OFF);
if (!mb) {
ret = -ENOMEM;
goto err_vzalloc;
}
/* mailbox fits in first part of CMDR space */
udev->mb_addr = mb;
udev->cmdr = (void *)mb + CMDR_OFF;
udev->data_off = udev->cmdr_size + CMDR_OFF;
data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT;
udev->mmap_pages = (data_size + udev->cmdr_size + CMDR_OFF) >> PAGE_SHIFT;
udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE;
udev->dbi_thresh = 0; /* Default in Idle state */
/* Initialise the mailbox of the ring buffer */
mb->version = TCMU_MAILBOX_VERSION;
mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC |
TCMU_MAILBOX_FLAG_CAP_READ_LEN |
TCMU_MAILBOX_FLAG_CAP_TMR |
TCMU_MAILBOX_FLAG_CAP_KEEP_BUF;
mb->cmdr_off = CMDR_OFF;
mb->cmdr_size = udev->cmdr_size;
WARN_ON(!PAGE_ALIGNED(udev->data_off));
WARN_ON(data_size % PAGE_SIZE);
info->version = __stringify(TCMU_MAILBOX_VERSION);
info->mem[0].name = "tcm-user command & data buffer";
info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
info->mem[0].size = data_size + udev->cmdr_size + CMDR_OFF;
info->mem[0].memtype = UIO_MEM_NONE;
info->irqcontrol = tcmu_irqcontrol;
info->irq = UIO_IRQ_CUSTOM;
info->mmap = tcmu_mmap;
info->open = tcmu_open;
info->release = tcmu_release;
ret = uio_register_device(tcmu_root_device, info);
if (ret)
goto err_register;
/* The user can set hw_block_size before enabling the device */
if (dev->dev_attrib.hw_block_size == 0)
dev->dev_attrib.hw_block_size = 512;
/* Other attributes can be configured in userspace */
if (!dev->dev_attrib.hw_max_sectors)
dev->dev_attrib.hw_max_sectors = 128;
if (!dev->dev_attrib.emulate_write_cache)
dev->dev_attrib.emulate_write_cache = 0;
dev->dev_attrib.hw_queue_depth = 128;
/* If user didn't explicitly disable netlink reply support, use
* module scope setting.
*/
if (udev->nl_reply_supported >= 0)
udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
/*
 * Get a ref in case userspace closes the uio device before
 * LIO has initiated tcmu_free_device.
 */
kref_get(&udev->kref);
ret = tcmu_send_dev_add_event(udev);
if (ret)
goto err_netlink;
mutex_lock(&root_udev_mutex);
list_add(&udev->node, &root_udev);
mutex_unlock(&root_udev_mutex);
return 0;
err_netlink:
kref_put(&udev->kref, tcmu_dev_kref_release);
uio_unregister_device(&udev->uio_info);
err_register:
vfree(udev->mb_addr);
udev->mb_addr = NULL;
err_vzalloc:
bitmap_free(udev->data_bitmap);
udev->data_bitmap = NULL;
err_bitmap_alloc:
kfree(info->name);
info->name = NULL;
return ret;
}
static void tcmu_free_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
/* release ref from init */
kref_put(&udev->kref, tcmu_dev_kref_release);
}
static void tcmu_destroy_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
del_timer_sync(&udev->cmd_timer);
del_timer_sync(&udev->qfull_timer);
mutex_lock(&root_udev_mutex);
list_del(&udev->node);
mutex_unlock(&root_udev_mutex);
tcmu_send_dev_remove_event(udev);
uio_unregister_device(&udev->uio_info);
/* release ref from configure */
kref_put(&udev->kref, tcmu_dev_kref_release);
}
static void tcmu_unblock_dev(struct tcmu_dev *udev)
{
mutex_lock(&udev->cmdr_lock);
clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags);
mutex_unlock(&udev->cmdr_lock);
}
static void tcmu_block_dev(struct tcmu_dev *udev)
{
mutex_lock(&udev->cmdr_lock);
if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
goto unlock;
/* complete IO that has executed successfully */
tcmu_handle_completions(udev);
/* fail IO waiting to be queued */
run_qfull_queue(udev, true);
unlock:
mutex_unlock(&udev->cmdr_lock);
}
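/*
 * Descriptive comment (added for clarity): reset the command ring on behalf
 * of the reset_ring action attribute. Outstanding commands are completed
 * with SAM_STAT_BUSY (@err_level 1) or CHECK CONDITION (@err_level 2), the
 * ring head/tail are cleared, queued TMRs are dropped and the qfull queue
 * is re-run.
 */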
static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
{
struct tcmu_mailbox *mb;
struct tcmu_cmd *cmd;
unsigned long i;
mutex_lock(&udev->cmdr_lock);
xa_for_each(&udev->commands, i, cmd) {
pr_debug("removing cmd %u on dev %s from ring %s\n",
cmd->cmd_id, udev->name,
test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ?
"(is expired)" :
(test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags) ?
"(is keep buffer)" : ""));
xa_erase(&udev->commands, i);
if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) &&
!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
WARN_ON(!cmd->se_cmd);
list_del_init(&cmd->queue_entry);
cmd->se_cmd->priv = NULL;
if (err_level == 1) {
/*
* Userspace was not able to start the
* command or it is retryable.
*/
target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY);
} else {
/* hard failure */
target_complete_cmd(cmd->se_cmd,
SAM_STAT_CHECK_CONDITION);
}
}
tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
tcmu_free_cmd(cmd);
}
mb = udev->mb_addr;
tcmu_flush_dcache_range(mb, sizeof(*mb));
pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned,
mb->cmd_tail, mb->cmd_head);
udev->cmdr_last_cleaned = 0;
mb->cmd_tail = 0;
mb->cmd_head = 0;
tcmu_flush_dcache_range(mb, sizeof(*mb));
clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
del_timer(&udev->cmd_timer);
/*
 * The ring is now empty and the qfull queue never contains aborted
 * commands, so the TMRs in the tmr queue do not reference relevant
 * cmd_ids. After a ring reset userspace should do a fresh start, so
 * even a LUN RESET message is no longer relevant.
 * Therefore remove all TMRs from the tmr queue.
 */
tcmu_remove_all_queued_tmr(udev);
run_qfull_queue(udev, false);
mutex_unlock(&udev->cmdr_lock);
}
enum {
Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_data_pages_per_blk,
Opt_cmd_ring_size_mb, Opt_err,
};
static match_table_t tokens = {
{Opt_dev_config, "dev_config=%s"},
{Opt_dev_size, "dev_size=%s"},
{Opt_hw_block_size, "hw_block_size=%d"},
{Opt_hw_max_sectors, "hw_max_sectors=%d"},
{Opt_nl_reply_supported, "nl_reply_supported=%d"},
{Opt_max_data_area_mb, "max_data_area_mb=%d"},
{Opt_data_pages_per_blk, "data_pages_per_blk=%d"},
{Opt_cmd_ring_size_mb, "cmd_ring_size_mb=%d"},
{Opt_err, NULL}
};
static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
{
int val, ret;
ret = match_int(arg, &val);
if (ret < 0) {
pr_err("match_int() failed for dev attrib. Error %d.\n",
ret);
return ret;
}
if (val <= 0) {
pr_err("Invalid dev attrib value %d. Must be greater than zero.\n",
val);
return -EINVAL;
}
*dev_attrib = val;
return 0;
}
static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
{
int val, ret;
uint32_t pages_per_blk = udev->data_pages_per_blk;
ret = match_int(arg, &val);
if (ret < 0) {
pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
ret);
return ret;
}
if (val <= 0) {
pr_err("Invalid max_data_area %d.\n", val);
return -EINVAL;
}
if (val > TCMU_PAGES_TO_MBS(tcmu_global_max_pages)) {
pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
val, TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
val = TCMU_PAGES_TO_MBS(tcmu_global_max_pages);
}
if (TCMU_MBS_TO_PAGES(val) < pages_per_blk) {
pr_err("Invalid max_data_area %d (%zu pages): smaller than data_pages_per_blk (%u pages).\n",
val, TCMU_MBS_TO_PAGES(val), pages_per_blk);
return -EINVAL;
}
mutex_lock(&udev->cmdr_lock);
if (udev->data_bitmap) {
pr_err("Cannot set max_data_area_mb after it has been enabled.\n");
ret = -EINVAL;
goto unlock;
}
udev->data_area_mb = val;
udev->max_blocks = TCMU_MBS_TO_PAGES(val) / pages_per_blk;
unlock:
mutex_unlock(&udev->cmdr_lock);
return ret;
}
static int tcmu_set_data_pages_per_blk(struct tcmu_dev *udev, substring_t *arg)
{
int val, ret;
ret = match_int(arg, &val);
if (ret < 0) {
pr_err("match_int() failed for data_pages_per_blk=. Error %d.\n",
ret);
return ret;
}
if (val > TCMU_MBS_TO_PAGES(udev->data_area_mb)) {
pr_err("Invalid data_pages_per_blk %d: greater than max_data_area_mb %d -> %zd pages).\n",
val, udev->data_area_mb,
TCMU_MBS_TO_PAGES(udev->data_area_mb));
return -EINVAL;
}
mutex_lock(&udev->cmdr_lock);
if (udev->data_bitmap) {
pr_err("Cannot set data_pages_per_blk after it has been enabled.\n");
ret = -EINVAL;
goto unlock;
}
udev->data_pages_per_blk = val;
udev->max_blocks = TCMU_MBS_TO_PAGES(udev->data_area_mb) / val;
unlock:
mutex_unlock(&udev->cmdr_lock);
return ret;
}
static int tcmu_set_cmd_ring_size(struct tcmu_dev *udev, substring_t *arg)
{
int val, ret;
ret = match_int(arg, &val);
if (ret < 0) {
pr_err("match_int() failed for cmd_ring_size_mb=. Error %d.\n",
ret);
return ret;
}
if (val <= 0) {
pr_err("Invalid cmd_ring_size_mb %d.\n", val);
return -EINVAL;
}
mutex_lock(&udev->cmdr_lock);
if (udev->data_bitmap) {
pr_err("Cannot set cmd_ring_size_mb after it has been enabled.\n");
ret = -EINVAL;
goto unlock;
}
udev->cmdr_size = (val << 20) - CMDR_OFF;
if (val > (MB_CMDR_SIZE_DEF >> 20)) {
pr_err("%d is too large. Adjusting cmd_ring_size_mb to global limit of %u\n",
val, (MB_CMDR_SIZE_DEF >> 20));
udev->cmdr_size = CMDR_SIZE_DEF;
}
unlock:
mutex_unlock(&udev->cmdr_lock);
return ret;
}
static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
const char *page, ssize_t count)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
return -ENOMEM;
orig = opts;
while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;
token = match_token(ptr, tokens, args);
switch (token) {
case Opt_dev_config:
if (match_strlcpy(udev->dev_config, &args[0],
TCMU_CONFIG_LEN) == 0) {
ret = -EINVAL;
break;
}
pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
break;
case Opt_dev_size:
ret = match_u64(&args[0], &udev->dev_size);
if (ret < 0)
pr_err("match_u64() failed for dev_size=. Error %d.\n",
ret);
break;
case Opt_hw_block_size:
ret = tcmu_set_dev_attrib(&args[0],
&(dev->dev_attrib.hw_block_size));
break;
case Opt_hw_max_sectors:
ret = tcmu_set_dev_attrib(&args[0],
&(dev->dev_attrib.hw_max_sectors));
break;
case Opt_nl_reply_supported:
ret = match_int(&args[0], &udev->nl_reply_supported);
if (ret < 0)
pr_err("match_int() failed for nl_reply_supported=. Error %d.\n",
ret);
break;
case Opt_max_data_area_mb:
ret = tcmu_set_max_blocks_param(udev, &args[0]);
break;
case Opt_data_pages_per_blk:
ret = tcmu_set_data_pages_per_blk(udev, &args[0]);
break;
case Opt_cmd_ring_size_mb:
ret = tcmu_set_cmd_ring_size(udev, &args[0]);
break;
default:
break;
}
if (ret)
break;
}
kfree(orig);
return (!ret) ? count : ret;
}
static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
ssize_t bl = 0;
bl = sprintf(b + bl, "Config: %s ",
udev->dev_config[0] ? udev->dev_config : "NULL");
bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
bl += sprintf(b + bl, "MaxDataAreaMB: %u ", udev->data_area_mb);
bl += sprintf(b + bl, "DataPagesPerBlk: %u ", udev->data_pages_per_blk);
bl += sprintf(b + bl, "CmdRingSizeMB: %u\n",
(udev->cmdr_size + CMDR_OFF) >> 20);
return bl;
}
static sector_t tcmu_get_blocks(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
return div_u64(udev->dev_size - dev->dev_attrib.block_size,
dev->dev_attrib.block_size);
}
static sense_reason_t
tcmu_parse_cdb(struct se_cmd *cmd)
{
return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
}
static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
}
static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
size_t count)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = container_of(da->da_dev,
struct tcmu_dev, se_dev);
u32 val;
int ret;
if (da->da_dev->export_count) {
pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
return -EINVAL;
}
ret = kstrtou32(page, 0, &val);
if (ret < 0)
return ret;
udev->cmd_time_out = val * MSEC_PER_SEC;
return count;
}
CONFIGFS_ATTR(tcmu_, cmd_time_out);
static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
udev->qfull_time_out :
udev->qfull_time_out / MSEC_PER_SEC);
}
static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
s32 val;
int ret;
ret = kstrtos32(page, 0, &val);
if (ret < 0)
return ret;
if (val >= 0) {
udev->qfull_time_out = val * MSEC_PER_SEC;
} else if (val == -1) {
udev->qfull_time_out = val;
} else {
printk(KERN_ERR "Invalid qfull timeout value %d\n", val);
return -EINVAL;
}
return count;
}
CONFIGFS_ATTR(tcmu_, qfull_time_out);
static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
return snprintf(page, PAGE_SIZE, "%u\n", udev->data_area_mb);
}
CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);
static ssize_t tcmu_data_pages_per_blk_show(struct config_item *item,
char *page)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
return snprintf(page, PAGE_SIZE, "%u\n", udev->data_pages_per_blk);
}
CONFIGFS_ATTR_RO(tcmu_, data_pages_per_blk);
static ssize_t tcmu_cmd_ring_size_mb_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
return snprintf(page, PAGE_SIZE, "%u\n",
(udev->cmdr_size + CMDR_OFF) >> 20);
}
CONFIGFS_ATTR_RO(tcmu_, cmd_ring_size_mb);
static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
}
static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
const char *reconfig_data)
{
struct sk_buff *skb = NULL;
void *msg_header = NULL;
int ret = 0;
ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
&skb, &msg_header);
if (ret < 0)
return ret;
ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data);
if (ret < 0) {
nlmsg_free(skb);
return ret;
}
return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
skb, msg_header);
}
static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
size_t count)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
int ret, len;
len = strlen(page);
if (!len || len > TCMU_CONFIG_LEN - 1)
return -EINVAL;
/* Check if device has been configured before */
if (target_dev_configured(&udev->se_dev)) {
ret = tcmu_send_dev_config_event(udev, page);
if (ret) {
pr_err("Unable to reconfigure device\n");
return ret;
}
strscpy(udev->dev_config, page, TCMU_CONFIG_LEN);
ret = tcmu_update_uio_info(udev);
if (ret)
return ret;
return count;
}
strscpy(udev->dev_config, page, TCMU_CONFIG_LEN);
return count;
}
CONFIGFS_ATTR(tcmu_, dev_config);
static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size);
}
static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
{
struct sk_buff *skb = NULL;
void *msg_header = NULL;
int ret = 0;
ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
&skb, &msg_header);
if (ret < 0)
return ret;
ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE,
size, TCMU_ATTR_PAD);
if (ret < 0) {
nlmsg_free(skb);
return ret;
}
return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
skb, msg_header);
}
static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
size_t count)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
u64 val;
int ret;
ret = kstrtou64(page, 0, &val);
if (ret < 0)
return ret;
/* Check if device has been configured before */
if (target_dev_configured(&udev->se_dev)) {
ret = tcmu_send_dev_size_event(udev, val);
if (ret) {
pr_err("Unable to reconfigure device\n");
return ret;
}
}
udev->dev_size = val;
return count;
}
CONFIGFS_ATTR(tcmu_, dev_size);
static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
char *page)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
}
static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
s8 val;
int ret;
ret = kstrtos8(page, 0, &val);
if (ret < 0)
return ret;
udev->nl_reply_supported = val;
return count;
}
CONFIGFS_ATTR(tcmu_, nl_reply_supported);
static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
char *page)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
struct se_dev_attrib, da_group);
return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
}
static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
{
struct sk_buff *skb = NULL;
void *msg_header = NULL;
int ret = 0;
ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
&skb, &msg_header);
if (ret < 0)
return ret;
ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val);
if (ret < 0) {
nlmsg_free(skb);
return ret;
}
return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
skb, msg_header);
}
static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
u8 val;
int ret;
ret = kstrtou8(page, 0, &val);
if (ret < 0)
return ret;
/* Check if device has been configured before */
if (target_dev_configured(&udev->se_dev)) {
ret = tcmu_send_emulate_write_cache(udev, val);
if (ret) {
pr_err("Unable to reconfigure device\n");
return ret;
}
}
da->emulate_write_cache = val;
return count;
}
CONFIGFS_ATTR(tcmu_, emulate_write_cache);
static ssize_t tcmu_tmr_notification_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
return snprintf(page, PAGE_SIZE, "%i\n",
test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags));
}
static ssize_t tcmu_tmr_notification_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
u8 val;
int ret;
ret = kstrtou8(page, 0, &val);
if (ret < 0)
return ret;
if (val > 1)
return -EINVAL;
if (val)
set_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags);
else
clear_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags);
return count;
}
CONFIGFS_ATTR(tcmu_, tmr_notification);
static ssize_t tcmu_block_dev_show(struct config_item *item, char *page)
{
struct se_device *se_dev = container_of(to_config_group(item),
struct se_device,
dev_action_group);
struct tcmu_dev *udev = TCMU_DEV(se_dev);
if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
return snprintf(page, PAGE_SIZE, "%s\n", "blocked");
else
return snprintf(page, PAGE_SIZE, "%s\n", "unblocked");
}
static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
size_t count)
{
struct se_device *se_dev = container_of(to_config_group(item),
struct se_device,
dev_action_group);
struct tcmu_dev *udev = TCMU_DEV(se_dev);
u8 val;
int ret;
if (!target_dev_configured(&udev->se_dev)) {
pr_err("Device is not configured.\n");
return -EINVAL;
}
ret = kstrtou8(page, 0, &val);
if (ret < 0)
return ret;
if (val > 1) {
pr_err("Invalid block value %d\n", val);
return -EINVAL;
}
if (!val)
tcmu_unblock_dev(udev);
else
tcmu_block_dev(udev);
return count;
}
CONFIGFS_ATTR(tcmu_, block_dev);
static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
size_t count)
{
struct se_device *se_dev = container_of(to_config_group(item),
struct se_device,
dev_action_group);
struct tcmu_dev *udev = TCMU_DEV(se_dev);
u8 val;
int ret;
if (!target_dev_configured(&udev->se_dev)) {
pr_err("Device is not configured.\n");
return -EINVAL;
}
ret = kstrtou8(page, 0, &val);
if (ret < 0)
return ret;
if (val != 1 && val != 2) {
pr_err("Invalid reset ring value %d\n", val);
return -EINVAL;
}
tcmu_reset_ring(udev, val);
return count;
}
CONFIGFS_ATTR_WO(tcmu_, reset_ring);
static ssize_t tcmu_free_kept_buf_store(struct config_item *item, const char *page,
size_t count)
{
struct se_device *se_dev = container_of(to_config_group(item),
struct se_device,
dev_action_group);
struct tcmu_dev *udev = TCMU_DEV(se_dev);
struct tcmu_cmd *cmd;
u16 cmd_id;
int ret;
if (!target_dev_configured(&udev->se_dev)) {
pr_err("Device is not configured.\n");
return -EINVAL;
}
ret = kstrtou16(page, 0, &cmd_id);
if (ret < 0)
return ret;
mutex_lock(&udev->cmdr_lock);
{
XA_STATE(xas, &udev->commands, cmd_id);
xas_lock(&xas);
cmd = xas_load(&xas);
if (!cmd) {
pr_err("free_kept_buf: cmd_id %d not found\n", cmd_id);
count = -EINVAL;
xas_unlock(&xas);
goto out_unlock;
}
if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
pr_err("free_kept_buf: cmd_id %d was not completed with KEEP_BUF\n",
cmd_id);
count = -EINVAL;
xas_unlock(&xas);
goto out_unlock;
}
xas_store(&xas, NULL);
xas_unlock(&xas);
}
tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
tcmu_free_cmd(cmd);
/*
 * We only freed data space, not ring space. Therefore we don't call
 * run_tmr_queue, but call run_qfull_queue if the tmr queue is empty.
 */
if (list_empty(&udev->tmr_queue))
run_qfull_queue(udev, false);
out_unlock:
mutex_unlock(&udev->cmdr_lock);
return count;
}
CONFIGFS_ATTR_WO(tcmu_, free_kept_buf);
static struct configfs_attribute *tcmu_attrib_attrs[] = {
&tcmu_attr_cmd_time_out,
&tcmu_attr_qfull_time_out,
&tcmu_attr_max_data_area_mb,
&tcmu_attr_data_pages_per_blk,
&tcmu_attr_cmd_ring_size_mb,
&tcmu_attr_dev_config,
&tcmu_attr_dev_size,
&tcmu_attr_emulate_write_cache,
&tcmu_attr_tmr_notification,
&tcmu_attr_nl_reply_supported,
NULL,
};
static struct configfs_attribute **tcmu_attrs;
static struct configfs_attribute *tcmu_action_attrs[] = {
&tcmu_attr_block_dev,
&tcmu_attr_reset_ring,
&tcmu_attr_free_kept_buf,
NULL,
};
static struct target_backend_ops tcmu_ops = {
.name = "user",
.owner = THIS_MODULE,
.transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH,
.transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR |
TRANSPORT_FLAG_PASSTHROUGH_ALUA,
.attach_hba = tcmu_attach_hba,
.detach_hba = tcmu_detach_hba,
.alloc_device = tcmu_alloc_device,
.configure_device = tcmu_configure_device,
.destroy_device = tcmu_destroy_device,
.free_device = tcmu_free_device,
.unplug_device = tcmu_unplug_device,
.plug_device = tcmu_plug_device,
.parse_cdb = tcmu_parse_cdb,
.tmr_notify = tcmu_tmr_notify,
.set_configfs_dev_params = tcmu_set_configfs_dev_params,
.show_configfs_dev_params = tcmu_show_configfs_dev_params,
.get_device_type = sbc_get_device_type,
.get_blocks = tcmu_get_blocks,
.tb_dev_action_attrs = tcmu_action_attrs,
};
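/*
 * Descriptive comment (added for clarity): reclaim data area pages when the
 * global page count exceeds tcmu_global_max_pages. For each configured,
 * non-idle device, shrink dbi_thresh/dbi_max past the last used block,
 * release the pages above it and unmap the corresponding range of the data
 * area.
 */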
static void find_free_blocks(void)
{
struct tcmu_dev *udev;
loff_t off;
u32 pages_freed, total_pages_freed = 0;
u32 start, end, block, total_blocks_freed = 0;
if (atomic_read(&global_page_count) <= tcmu_global_max_pages)
return;
mutex_lock(&root_udev_mutex);
list_for_each_entry(udev, &root_udev, node) {
mutex_lock(&udev->cmdr_lock);
if (!target_dev_configured(&udev->se_dev)) {
mutex_unlock(&udev->cmdr_lock);
continue;
}
/* Try to complete the finished commands first */
if (tcmu_handle_completions(udev))
run_qfull_queue(udev, false);
/* Skip udevs that are idle */
if (!udev->dbi_thresh) {
mutex_unlock(&udev->cmdr_lock);
continue;
}
end = udev->dbi_max + 1;
block = find_last_bit(udev->data_bitmap, end);
if (block == udev->dbi_max) {
/*
 * The last bit is dbi_max, so it is not possible to
 * reclaim any blocks.
 */
mutex_unlock(&udev->cmdr_lock);
continue;
} else if (block == end) {
/* The current udev will go to the idle state */
udev->dbi_thresh = start = 0;
udev->dbi_max = 0;
} else {
udev->dbi_thresh = start = block + 1;
udev->dbi_max = block;
}
/*
* Release the block pages.
*
* Also note that since tcmu_vma_fault() gets an extra page
* refcount, tcmu_blocks_release() won't free pages if pages
* are mapped. This means it is safe to call
* tcmu_blocks_release() before unmap_mapping_range() which
* drops the refcount of any pages it unmaps and thus releases
* them.
*/
pages_freed = tcmu_blocks_release(udev, start, end - 1);
/* Truncate the mapped data area starting at off */
off = udev->data_off + (loff_t)start * udev->data_blk_size;
unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
mutex_unlock(&udev->cmdr_lock);
total_pages_freed += pages_freed;
total_blocks_freed += end - start;
pr_debug("Freed %u pages (total %u) from %u blocks (total %u) from %s.\n",
pages_freed, total_pages_freed, end - start,
total_blocks_freed, udev->name);
}
mutex_unlock(&root_udev_mutex);
if (atomic_read(&global_page_count) > tcmu_global_max_pages)
schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
}
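/*
 * Descriptive comment (added for clarity): walk the devices on the
 * timed-out list, expire ring and qfull-queue commands whose deadlines have
 * passed, and re-arm the per-device timers.
 */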
static void check_timedout_devices(void)
{
struct tcmu_dev *udev, *tmp_dev;
struct tcmu_cmd *cmd, *tmp_cmd;
LIST_HEAD(devs);
spin_lock_bh(&timed_out_udevs_lock);
list_splice_init(&timed_out_udevs, &devs);
list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
list_del_init(&udev->timedout_entry);
spin_unlock_bh(&timed_out_udevs_lock);
mutex_lock(&udev->cmdr_lock);
/*
 * If cmd_time_out is disabled but qfull is set, the deadline
 * will only reflect the qfull timeout. Ignore it.
 */
if (udev->cmd_time_out) {
list_for_each_entry_safe(cmd, tmp_cmd,
&udev->inflight_queue,
queue_entry) {
tcmu_check_expired_ring_cmd(cmd);
}
tcmu_set_next_deadline(&udev->inflight_queue,
&udev->cmd_timer);
}
list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue,
queue_entry) {
tcmu_check_expired_queue_cmd(cmd);
}
tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
mutex_unlock(&udev->cmdr_lock);
spin_lock_bh(&timed_out_udevs_lock);
}
spin_unlock_bh(&timed_out_udevs_lock);
}
static void tcmu_unmap_work_fn(struct work_struct *work)
{
check_timedout_devices();
find_free_blocks();
}
static int __init tcmu_module_init(void)
{
int ret, i, k, len = 0;
BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn);
tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
sizeof(struct tcmu_cmd),
__alignof__(struct tcmu_cmd),
0, NULL);
if (!tcmu_cmd_cache)
return -ENOMEM;
tcmu_root_device = root_device_register("tcm_user");
if (IS_ERR(tcmu_root_device)) {
ret = PTR_ERR(tcmu_root_device);
goto out_free_cache;
}
ret = genl_register_family(&tcmu_genl_family);
if (ret < 0) {
goto out_unreg_device;
}
for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
len += sizeof(struct configfs_attribute *);
for (i = 0; passthrough_pr_attrib_attrs[i] != NULL; i++)
len += sizeof(struct configfs_attribute *);
for (i = 0; tcmu_attrib_attrs[i] != NULL; i++)
len += sizeof(struct configfs_attribute *);
len += sizeof(struct configfs_attribute *);
tcmu_attrs = kzalloc(len, GFP_KERNEL);
if (!tcmu_attrs) {
ret = -ENOMEM;
goto out_unreg_genl;
}
for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
tcmu_attrs[i] = passthrough_attrib_attrs[i];
for (k = 0; passthrough_pr_attrib_attrs[k] != NULL; k++)
tcmu_attrs[i++] = passthrough_pr_attrib_attrs[k];
for (k = 0; tcmu_attrib_attrs[k] != NULL; k++)
tcmu_attrs[i++] = tcmu_attrib_attrs[k];
tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
ret = transport_backend_register(&tcmu_ops);
if (ret)
goto out_attrs;
return 0;
out_attrs:
kfree(tcmu_attrs);
out_unreg_genl:
genl_unregister_family(&tcmu_genl_family);
out_unreg_device:
root_device_unregister(tcmu_root_device);
out_free_cache:
kmem_cache_destroy(tcmu_cmd_cache);
return ret;
}
static void __exit tcmu_module_exit(void)
{
cancel_delayed_work_sync(&tcmu_unmap_work);
target_backend_unregister(&tcmu_ops);
kfree(tcmu_attrs);
genl_unregister_family(&tcmu_genl_family);
root_device_unregister(tcmu_root_device);
kmem_cache_destroy(tcmu_cmd_cache);
}
MODULE_DESCRIPTION("TCM USER subsystem plugin");
MODULE_AUTHOR("Shaohua Li <[email protected]>");
MODULE_AUTHOR("Andy Grover <[email protected]>");
MODULE_LICENSE("GPL");
module_init(tcmu_module_init);
module_exit(tcmu_module_exit);
|
linux-master
|
drivers/target/target_core_user.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_rd.c
*
* This file contains the Storage Engine <-> Ramdisk transport
* specific functions.
*
* (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include "target_core_rd.h"
static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
return container_of(dev, struct rd_dev, dev);
}
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
struct rd_host *rd_host;
rd_host = kzalloc(sizeof(*rd_host), GFP_KERNEL);
if (!rd_host)
return -ENOMEM;
rd_host->rd_host_id = host_id;
hba->hba_ptr = rd_host;
pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
RD_HBA_VERSION, TARGET_CORE_VERSION);
return 0;
}
static void rd_detach_hba(struct se_hba *hba)
{
struct rd_host *rd_host = hba->hba_ptr;
pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
kfree(rd_host);
hba->hba_ptr = NULL;
}
static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
u32 sg_table_count)
{
struct page *pg;
struct scatterlist *sg;
u32 i, j, page_count = 0, sg_per_table;
for (i = 0; i < sg_table_count; i++) {
sg = sg_table[i].sg_table;
sg_per_table = sg_table[i].rd_sg_count;
for (j = 0; j < sg_per_table; j++) {
pg = sg_page(&sg[j]);
if (pg) {
__free_page(pg);
page_count++;
}
}
kfree(sg);
}
kfree(sg_table);
return page_count;
}
static void rd_release_device_space(struct rd_dev *rd_dev)
{
u32 page_count;
if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
return;
page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
rd_dev->sg_table_count);
pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
" Device ID: %u, pages %u in %u tables total bytes %lu\n",
rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
rd_dev->sg_table_array = NULL;
rd_dev->sg_table_count = 0;
}
/* rd_allocate_sgl_table():
 *
 * Allocate chained scatterlist tables backed by freshly allocated pages
 * initialised to init_payload, covering total_sg_needed pages of device
 * or protection space.
 */
static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
u32 total_sg_needed, unsigned char init_payload)
{
u32 i = 0, j, page_offset = 0, sg_per_table;
u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
sizeof(struct scatterlist));
struct page *pg;
struct scatterlist *sg;
unsigned char *p;
while (total_sg_needed) {
unsigned int chain_entry = 0;
sg_per_table = (total_sg_needed > max_sg_per_table) ?
max_sg_per_table : total_sg_needed;
/*
* Reserve extra element for chain entry
*/
if (sg_per_table < total_sg_needed)
chain_entry = 1;
sg = kmalloc_array(sg_per_table + chain_entry, sizeof(*sg),
GFP_KERNEL);
if (!sg)
return -ENOMEM;
sg_init_table(sg, sg_per_table + chain_entry);
if (i > 0) {
sg_chain(sg_table[i - 1].sg_table,
max_sg_per_table + 1, sg);
}
sg_table[i].sg_table = sg;
sg_table[i].rd_sg_count = sg_per_table;
sg_table[i].page_start_offset = page_offset;
sg_table[i++].page_end_offset = (page_offset + sg_per_table)
- 1;
for (j = 0; j < sg_per_table; j++) {
pg = alloc_pages(GFP_KERNEL, 0);
if (!pg) {
pr_err("Unable to allocate scatterlist"
" pages for struct rd_dev_sg_table\n");
return -ENOMEM;
}
sg_assign_page(&sg[j], pg);
sg[j].length = PAGE_SIZE;
p = kmap(pg);
memset(p, init_payload, PAGE_SIZE);
kunmap(pg);
}
page_offset += sg_per_table;
total_sg_needed -= sg_per_table;
}
return 0;
}
static int rd_build_device_space(struct rd_dev *rd_dev)
{
struct rd_dev_sg_table *sg_table;
u32 sg_tables, total_sg_needed;
u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
sizeof(struct scatterlist));
int rc;
if (rd_dev->rd_page_count <= 0) {
pr_err("Illegal page count: %u for Ramdisk device\n",
rd_dev->rd_page_count);
return -EINVAL;
}
/* Don't need backing pages for NULLIO */
if (rd_dev->rd_flags & RDF_NULLIO)
return 0;
total_sg_needed = rd_dev->rd_page_count;
sg_tables = (total_sg_needed / max_sg_per_table) + 1;
sg_table = kcalloc(sg_tables, sizeof(*sg_table), GFP_KERNEL);
if (!sg_table)
return -ENOMEM;
rd_dev->sg_table_array = sg_table;
rd_dev->sg_table_count = sg_tables;
rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
if (rc)
return rc;
pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
rd_dev->rd_dev_id, rd_dev->rd_page_count,
rd_dev->sg_table_count);
return 0;
}
static void rd_release_prot_space(struct rd_dev *rd_dev)
{
u32 page_count;
if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
return;
page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
rd_dev->sg_prot_count);
pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
" Device ID: %u, pages %u in %u tables total bytes %lu\n",
rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
rd_dev->sg_prot_count, (unsigned long)page_count * PAGE_SIZE);
rd_dev->sg_prot_array = NULL;
rd_dev->sg_prot_count = 0;
}
static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
{
struct rd_dev_sg_table *sg_table;
u32 total_sg_needed, sg_tables;
u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
sizeof(struct scatterlist));
int rc;
if (rd_dev->rd_flags & RDF_NULLIO)
return 0;
/*
 * prot_length = 8 bytes of DIF data per block
 * total sg needed = rd_page_count * (PAGE_SIZE / block_size) *
 *                   (prot_length / PAGE_SIZE) + pad
 * The PAGE_SIZE terms cancel each other out, leaving
 * rd_page_count * prot_length / block_size + pad.
 */
total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;
sg_tables = (total_sg_needed / max_sg_per_table) + 1;
sg_table = kcalloc(sg_tables, sizeof(*sg_table), GFP_KERNEL);
if (!sg_table)
return -ENOMEM;
rd_dev->sg_prot_array = sg_table;
rd_dev->sg_prot_count = sg_tables;
rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
if (rc)
return rc;
pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);
return 0;
}
static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
struct rd_dev *rd_dev;
struct rd_host *rd_host = hba->hba_ptr;
rd_dev = kzalloc(sizeof(*rd_dev), GFP_KERNEL);
if (!rd_dev)
return NULL;
rd_dev->rd_host = rd_host;
return &rd_dev->dev;
}
static int rd_configure_device(struct se_device *dev)
{
struct rd_dev *rd_dev = RD_DEV(dev);
struct rd_host *rd_host = dev->se_hba->hba_ptr;
int ret;
if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
pr_debug("Missing rd_pages= parameter\n");
return -EINVAL;
}
ret = rd_build_device_space(rd_dev);
if (ret < 0)
goto fail;
dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
dev->dev_attrib.hw_max_sectors = UINT_MAX;
dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
dev->dev_attrib.is_nonrot = 1;
rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
" %u pages in %u tables, %lu total bytes\n",
rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
rd_dev->sg_table_count,
(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
return 0;
fail:
rd_release_device_space(rd_dev);
return ret;
}
static void rd_dev_call_rcu(struct rcu_head *p)
{
struct se_device *dev = container_of(p, struct se_device, rcu_head);
struct rd_dev *rd_dev = RD_DEV(dev);
kfree(rd_dev);
}
static void rd_free_device(struct se_device *dev)
{
call_rcu(&dev->rcu_head, rd_dev_call_rcu);
}
static void rd_destroy_device(struct se_device *dev)
{
struct rd_dev *rd_dev = RD_DEV(dev);
rd_release_device_space(rd_dev);
}
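/*
 * Descriptive comment (added for clarity): return the scatterlist table
 * that covers ramdisk page @page, or NULL if the page lies outside the
 * allocated device space.
 */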
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
struct rd_dev_sg_table *sg_table;
u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
sizeof(struct scatterlist));
i = page / sg_per_table;
if (i < rd_dev->sg_table_count) {
sg_table = &rd_dev->sg_table_array[i];
if ((sg_table->page_start_offset <= page) &&
(sg_table->page_end_offset >= page))
return sg_table;
}
pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
page);
return NULL;
}
static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
struct rd_dev_sg_table *sg_table;
u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
sizeof(struct scatterlist));
i = page / sg_per_table;
if (i < rd_dev->sg_prot_count) {
sg_table = &rd_dev->sg_prot_array[i];
if ((sg_table->page_start_offset <= page) &&
(sg_table->page_end_offset >= page))
return sg_table;
}
pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
page);
return NULL;
}
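/*
 * Descriptive comment (added for clarity): locate the protection
 * scatterlist for the command's starting LBA, optionally verify the DIF
 * data, and copy it between the command's protection SGL and the ramdisk
 * protection space.
 */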
static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
{
struct se_device *se_dev = cmd->se_dev;
struct rd_dev *dev = RD_DEV(se_dev);
struct rd_dev_sg_table *prot_table;
struct scatterlist *prot_sg;
u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
u32 prot_offset, prot_page;
u32 prot_npages __maybe_unused;
u64 tmp;
sense_reason_t rc = 0;
tmp = cmd->t_task_lba * se_dev->prot_length;
prot_offset = do_div(tmp, PAGE_SIZE);
prot_page = tmp;
prot_table = rd_get_prot_table(dev, prot_page);
if (!prot_table)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
prot_sg = &prot_table->sg_table[prot_page -
prot_table->page_start_offset];
if (se_dev->dev_attrib.pi_prot_verify) {
if (is_read)
rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
prot_sg, prot_offset);
else
rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
cmd->t_prot_sg, 0);
}
if (!rc)
sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset);
return rc;
}
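/*
 * Descriptive comment (added for clarity): MEMCPY ramdisk I/O. Walk the
 * command's scatterlist with an sg_mapping_iter and copy data to or from
 * the backing ramdisk pages, crossing page and sg-table boundaries as
 * needed. DIF protection data is handled via rd_do_prot_rw() before writes
 * and after reads; NULLIO devices complete immediately.
 */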
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
enum dma_data_direction data_direction)
{
struct se_device *se_dev = cmd->se_dev;
struct rd_dev *dev = RD_DEV(se_dev);
struct rd_dev_sg_table *table;
struct scatterlist *rd_sg;
struct sg_mapping_iter m;
u32 rd_offset;
u32 rd_size;
u32 rd_page;
u32 src_len;
u64 tmp;
sense_reason_t rc;
if (dev->rd_flags & RDF_NULLIO) {
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
rd_offset = do_div(tmp, PAGE_SIZE);
rd_page = tmp;
rd_size = cmd->data_length;
table = rd_get_sg_table(dev, rd_page);
if (!table)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
rd_sg = &table->sg_table[rd_page - table->page_start_offset];
pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
dev->rd_dev_id,
data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
cmd->t_task_lba, rd_size, rd_page, rd_offset);
if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
data_direction == DMA_TO_DEVICE) {
rc = rd_do_prot_rw(cmd, false);
if (rc)
return rc;
}
src_len = PAGE_SIZE - rd_offset;
sg_miter_start(&m, sgl, sgl_nents,
data_direction == DMA_FROM_DEVICE ?
SG_MITER_TO_SG : SG_MITER_FROM_SG);
while (rd_size) {
u32 len;
void *rd_addr;
sg_miter_next(&m);
if (!(u32)m.length) {
pr_debug("RD[%u]: invalid sgl %p len %zu\n",
dev->rd_dev_id, m.addr, m.length);
sg_miter_stop(&m);
return TCM_INCORRECT_AMOUNT_OF_DATA;
}
len = min((u32)m.length, src_len);
if (len > rd_size) {
pr_debug("RD[%u]: size underrun page %d offset %d "
"size %d\n", dev->rd_dev_id,
rd_page, rd_offset, rd_size);
len = rd_size;
}
m.consumed = len;
rd_addr = sg_virt(rd_sg) + rd_offset;
if (data_direction == DMA_FROM_DEVICE)
memcpy(m.addr, rd_addr, len);
else
memcpy(rd_addr, m.addr, len);
rd_size -= len;
if (!rd_size)
continue;
src_len -= len;
if (src_len) {
rd_offset += len;
continue;
}
/* rd page completed, next one please */
rd_page++;
rd_offset = 0;
src_len = PAGE_SIZE;
if (rd_page <= table->page_end_offset) {
rd_sg++;
continue;
}
table = rd_get_sg_table(dev, rd_page);
if (!table) {
sg_miter_stop(&m);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/* since we increment, the first sg entry is correct */
rd_sg = table->sg_table;
}
sg_miter_stop(&m);
if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
data_direction == DMA_FROM_DEVICE) {
rc = rd_do_prot_rw(cmd, true);
if (rc)
return rc;
}
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
enum {
Opt_rd_pages, Opt_rd_nullio, Opt_rd_dummy, Opt_err
};
static match_table_t tokens = {
{Opt_rd_pages, "rd_pages=%d"},
{Opt_rd_nullio, "rd_nullio=%d"},
{Opt_rd_dummy, "rd_dummy=%d"},
{Opt_err, NULL}
};
static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
const char *page, ssize_t count)
{
struct rd_dev *rd_dev = RD_DEV(dev);
char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
int arg, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
return -ENOMEM;
orig = opts;
while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;
token = match_token(ptr, tokens, args);
switch (token) {
case Opt_rd_pages:
match_int(args, &arg);
rd_dev->rd_page_count = arg;
pr_debug("RAMDISK: Referencing Page"
" Count: %u\n", rd_dev->rd_page_count);
rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
break;
case Opt_rd_nullio:
match_int(args, &arg);
if (arg != 1)
break;
pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
rd_dev->rd_flags |= RDF_NULLIO;
break;
case Opt_rd_dummy:
match_int(args, &arg);
if (arg != 1)
break;
pr_debug("RAMDISK: Setting DUMMY flag: %d\n", arg);
rd_dev->rd_flags |= RDF_DUMMY;
break;
default:
break;
}
}
kfree(orig);
return count;
}
static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
struct rd_dev *rd_dev = RD_DEV(dev);
ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
rd_dev->rd_dev_id);
bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
" SG_table_count: %u nullio: %d dummy: %d\n",
rd_dev->rd_page_count,
PAGE_SIZE, rd_dev->sg_table_count,
!!(rd_dev->rd_flags & RDF_NULLIO),
!!(rd_dev->rd_flags & RDF_DUMMY));
return bl;
}
static u32 rd_get_device_type(struct se_device *dev)
{
if (RD_DEV(dev)->rd_flags & RDF_DUMMY)
return 0x3f; /* Unknown device type, not connected */
else
return sbc_get_device_type(dev);
}
static sector_t rd_get_blocks(struct se_device *dev)
{
struct rd_dev *rd_dev = RD_DEV(dev);
unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
dev->dev_attrib.block_size) - 1;
return blocks_long;
}
static int rd_init_prot(struct se_device *dev)
{
struct rd_dev *rd_dev = RD_DEV(dev);
if (!dev->dev_attrib.pi_prot_type)
return 0;
return rd_build_prot_space(rd_dev, dev->prot_length,
dev->dev_attrib.block_size);
}
static void rd_free_prot(struct se_device *dev)
{
struct rd_dev *rd_dev = RD_DEV(dev);
rd_release_prot_space(rd_dev);
}
static struct exec_cmd_ops rd_exec_cmd_ops = {
.execute_rw = rd_execute_rw,
};
static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
return sbc_parse_cdb(cmd, &rd_exec_cmd_ops);
}
static const struct target_backend_ops rd_mcp_ops = {
.name = "rd_mcp",
.inquiry_prod = "RAMDISK-MCP",
.inquiry_rev = RD_MCP_VERSION,
.attach_hba = rd_attach_hba,
.detach_hba = rd_detach_hba,
.alloc_device = rd_alloc_device,
.configure_device = rd_configure_device,
.destroy_device = rd_destroy_device,
.free_device = rd_free_device,
.parse_cdb = rd_parse_cdb,
.set_configfs_dev_params = rd_set_configfs_dev_params,
.show_configfs_dev_params = rd_show_configfs_dev_params,
.get_device_type = rd_get_device_type,
.get_blocks = rd_get_blocks,
.init_prot = rd_init_prot,
.free_prot = rd_free_prot,
.tb_dev_attrib_attrs = sbc_attrib_attrs,
};
int __init rd_module_init(void)
{
return transport_backend_register(&rd_mcp_ops);
}
void rd_module_exit(void)
{
target_backend_unregister(&rd_mcp_ops);
}
|
linux-master
|
drivers/target/target_core_rd.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_configfs.c
*
* This file contains ConfigFS logic for the Generic Target Engine project.
*
* (c) Copyright 2008-2013 Datera, Inc.
*
* Nicholas A. Bellinger <[email protected]>
*
* based on configfs Copyright (C) 2005 Oracle. All rights reserved.
*
****************************************************************************/
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/syscalls.h>
#include <linux/configfs.h>
#include <linux/spinlock.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_rd.h"
#include "target_core_xcopy.h"
#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
static void target_core_setup_##_name##_cit(struct target_backend *tb) \
{ \
struct config_item_type *cit = &tb->tb_##_name##_cit; \
\
cit->ct_item_ops = _item_ops; \
cit->ct_group_ops = _group_ops; \
cit->ct_attrs = _attrs; \
cit->ct_owner = tb->ops->owner; \
pr_debug("Setup generic %s\n", __stringify(_name)); \
}
#define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops) \
static void target_core_setup_##_name##_cit(struct target_backend *tb) \
{ \
struct config_item_type *cit = &tb->tb_##_name##_cit; \
\
cit->ct_item_ops = _item_ops; \
cit->ct_group_ops = _group_ops; \
cit->ct_attrs = tb->ops->tb_##_name##_attrs; \
cit->ct_owner = tb->ops->owner; \
pr_debug("Setup generic %s\n", __stringify(_name)); \
}
extern struct t10_alua_lu_gp *default_lu_gp;
static LIST_HEAD(g_tf_list);
static DEFINE_MUTEX(g_tf_lock);
static struct config_group target_core_hbagroup;
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;
static unsigned int target_devices;
static DEFINE_MUTEX(target_devices_lock);
static inline struct se_hba *
item_to_hba(struct config_item *item)
{
return container_of(to_config_group(item), struct se_hba, hba_group);
}
/*
* Attributes for /sys/kernel/config/target/
*/
static ssize_t target_core_item_version_show(struct config_item *item,
char *page)
{
return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
" on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION,
utsname()->sysname, utsname()->machine);
}
CONFIGFS_ATTR_RO(target_core_item_, version);
char db_root[DB_ROOT_LEN] = DB_ROOT_DEFAULT;
static char db_root_stage[DB_ROOT_LEN];
static ssize_t target_core_item_dbroot_show(struct config_item *item,
char *page)
{
return sprintf(page, "%s\n", db_root);
}
static ssize_t target_core_item_dbroot_store(struct config_item *item,
const char *page, size_t count)
{
ssize_t read_bytes;
struct file *fp;
ssize_t r = -EINVAL;
mutex_lock(&target_devices_lock);
if (target_devices) {
pr_err("db_root: cannot be changed because it's in use\n");
goto unlock;
}
if (count > (DB_ROOT_LEN - 1)) {
pr_err("db_root: count %d exceeds DB_ROOT_LEN-1: %u\n",
(int)count, DB_ROOT_LEN - 1);
goto unlock;
}
read_bytes = snprintf(db_root_stage, DB_ROOT_LEN, "%s", page);
if (!read_bytes)
goto unlock;
if (db_root_stage[read_bytes - 1] == '\n')
db_root_stage[read_bytes - 1] = '\0';
/* validate new db root before accepting it */
fp = filp_open(db_root_stage, O_RDONLY, 0);
if (IS_ERR(fp)) {
pr_err("db_root: cannot open: %s\n", db_root_stage);
goto unlock;
}
if (!S_ISDIR(file_inode(fp)->i_mode)) {
filp_close(fp, NULL);
pr_err("db_root: not a directory: %s\n", db_root_stage);
goto unlock;
}
filp_close(fp, NULL);
strncpy(db_root, db_root_stage, read_bytes);
pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
r = read_bytes;
unlock:
mutex_unlock(&target_devices_lock);
return r;
}
CONFIGFS_ATTR(target_core_item_, dbroot);
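/*
 * Illustrative usage from userspace (example path only, and assuming configfs
 * is mounted at /sys/kernel/config with no devices configured yet):
 *
 *	echo /etc/target > /sys/kernel/config/target/dbroot
 *	cat /sys/kernel/config/target/dbroot
 *
 * The written path must name an existing directory, and the store is rejected
 * once target_devices is non-zero.
 */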
static struct target_fabric_configfs *target_core_get_fabric(
const char *name)
{
struct target_fabric_configfs *tf;
if (!name)
return NULL;
mutex_lock(&g_tf_lock);
list_for_each_entry(tf, &g_tf_list, tf_list) {
const char *cmp_name = tf->tf_ops->fabric_alias;
if (!cmp_name)
cmp_name = tf->tf_ops->fabric_name;
if (!strcmp(cmp_name, name)) {
atomic_inc(&tf->tf_access_cnt);
mutex_unlock(&g_tf_lock);
return tf;
}
}
mutex_unlock(&g_tf_lock);
return NULL;
}
/*
* Called from struct target_core_group_ops->make_group()
*/
static struct config_group *target_core_register_fabric(
struct config_group *group,
const char *name)
{
struct target_fabric_configfs *tf;
int ret;
pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
" %s\n", group, name);
tf = target_core_get_fabric(name);
if (!tf) {
pr_debug("target_core_register_fabric() trying autoload for %s\n",
name);
/*
 * Below are some hardcoded request_module() calls to automatically
 * load fabric modules when the following is called:
 *
 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
 *
 * Note that this does not limit which TCM fabric module can be
 * registered, but simply provides auto-loading logic for mkdir(2)
 * system calls against known TCM fabric module names.
*/
if (!strncmp(name, "iscsi", 5)) {
/*
* Automatically load the LIO Target fabric module when the
* following is called:
*
* mkdir -p $CONFIGFS/target/iscsi
*/
ret = request_module("iscsi_target_mod");
if (ret < 0) {
pr_debug("request_module() failed for"
" iscsi_target_mod.ko: %d\n", ret);
return ERR_PTR(-EINVAL);
}
} else if (!strncmp(name, "loopback", 8)) {
/*
* Automatically load the tcm_loop fabric module when the
* following is called:
*
* mkdir -p $CONFIGFS/target/loopback
*/
ret = request_module("tcm_loop");
if (ret < 0) {
pr_debug("request_module() failed for"
" tcm_loop.ko: %d\n", ret);
return ERR_PTR(-EINVAL);
}
}
tf = target_core_get_fabric(name);
}
if (!tf) {
pr_debug("target_core_get_fabric() failed for %s\n",
name);
return ERR_PTR(-EINVAL);
}
pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
" %s\n", tf->tf_ops->fabric_name);
/*
 * On a successful target_core_get_fabric() lookup, the returned
* struct target_fabric_configfs *tf will contain a usage reference.
*/
pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
&tf->tf_wwn_cit);
config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit);
config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
&tf->tf_discovery_cit);
configfs_add_default_group(&tf->tf_disc_group, &tf->tf_group);
pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric: %s\n",
config_item_name(&tf->tf_group.cg_item));
return &tf->tf_group;
}
/*
* Called from struct target_core_group_ops->drop_item()
*/
static void target_core_deregister_fabric(
struct config_group *group,
struct config_item *item)
{
struct target_fabric_configfs *tf = container_of(
to_config_group(item), struct target_fabric_configfs, tf_group);
pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
" tf list\n", config_item_name(item));
pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
" %s\n", tf->tf_ops->fabric_name);
atomic_dec(&tf->tf_access_cnt);
pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
" %s\n", config_item_name(item));
configfs_remove_default_groups(&tf->tf_group);
config_item_put(item);
}
static struct configfs_group_operations target_core_fabric_group_ops = {
.make_group = &target_core_register_fabric,
.drop_item = &target_core_deregister_fabric,
};
/*
 * All item attributes appearing in /sys/kernel/config/target/ appear here.
*/
static struct configfs_attribute *target_core_fabric_item_attrs[] = {
&target_core_item_attr_version,
&target_core_item_attr_dbroot,
NULL,
};
/*
* Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
*/
static const struct config_item_type target_core_fabrics_item = {
.ct_group_ops = &target_core_fabric_group_ops,
.ct_attrs = target_core_fabric_item_attrs,
.ct_owner = THIS_MODULE,
};
static struct configfs_subsystem target_core_fabrics = {
.su_group = {
.cg_item = {
.ci_namebuf = "target",
.ci_type = &target_core_fabrics_item,
},
},
};
int target_depend_item(struct config_item *item)
{
return configfs_depend_item(&target_core_fabrics, item);
}
EXPORT_SYMBOL(target_depend_item);
void target_undepend_item(struct config_item *item)
{
return configfs_undepend_item(item);
}
EXPORT_SYMBOL(target_undepend_item);
/*##############################################################################
// Start functions called by external Target Fabrics Modules
//############################################################################*/
static int target_disable_feature(struct se_portal_group *se_tpg)
{
return 0;
}
static u32 target_default_get_inst_index(struct se_portal_group *se_tpg)
{
return 1;
}
static u32 target_default_sess_get_index(struct se_session *se_sess)
{
return 0;
}
static void target_set_default_node_attributes(struct se_node_acl *se_acl)
{
}
static int target_default_get_cmd_state(struct se_cmd *se_cmd)
{
return 0;
}
static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
{
if (tfo->fabric_alias) {
if (strlen(tfo->fabric_alias) >= TARGET_FABRIC_NAME_SIZE) {
pr_err("Passed alias: %s exceeds "
"TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_alias);
return -EINVAL;
}
}
if (!tfo->fabric_name) {
pr_err("Missing tfo->fabric_name\n");
return -EINVAL;
}
if (strlen(tfo->fabric_name) >= TARGET_FABRIC_NAME_SIZE) {
pr_err("Passed name: %s exceeds "
"TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_name);
return -EINVAL;
}
if (!tfo->tpg_get_wwn) {
pr_err("Missing tfo->tpg_get_wwn()\n");
return -EINVAL;
}
if (!tfo->tpg_get_tag) {
pr_err("Missing tfo->tpg_get_tag()\n");
return -EINVAL;
}
if (!tfo->release_cmd) {
pr_err("Missing tfo->release_cmd()\n");
return -EINVAL;
}
if (!tfo->write_pending) {
pr_err("Missing tfo->write_pending()\n");
return -EINVAL;
}
if (!tfo->queue_data_in) {
pr_err("Missing tfo->queue_data_in()\n");
return -EINVAL;
}
if (!tfo->queue_status) {
pr_err("Missing tfo->queue_status()\n");
return -EINVAL;
}
if (!tfo->queue_tm_rsp) {
pr_err("Missing tfo->queue_tm_rsp()\n");
return -EINVAL;
}
if (!tfo->aborted_task) {
pr_err("Missing tfo->aborted_task()\n");
return -EINVAL;
}
if (!tfo->check_stop_free) {
pr_err("Missing tfo->check_stop_free()\n");
return -EINVAL;
}
/*
* We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
* tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
* target_core_fabric_configfs.c WWN+TPG group context code.
*/
if (!tfo->fabric_make_wwn) {
pr_err("Missing tfo->fabric_make_wwn()\n");
return -EINVAL;
}
if (!tfo->fabric_drop_wwn) {
pr_err("Missing tfo->fabric_drop_wwn()\n");
return -EINVAL;
}
if (!tfo->fabric_make_tpg) {
pr_err("Missing tfo->fabric_make_tpg()\n");
return -EINVAL;
}
if (!tfo->fabric_drop_tpg) {
pr_err("Missing tfo->fabric_drop_tpg()\n");
return -EINVAL;
}
return 0;
}
static void target_set_default_ops(struct target_core_fabric_ops *tfo)
{
if (!tfo->tpg_check_demo_mode)
tfo->tpg_check_demo_mode = target_disable_feature;
if (!tfo->tpg_check_demo_mode_cache)
tfo->tpg_check_demo_mode_cache = target_disable_feature;
if (!tfo->tpg_check_demo_mode_write_protect)
tfo->tpg_check_demo_mode_write_protect = target_disable_feature;
if (!tfo->tpg_check_prod_mode_write_protect)
tfo->tpg_check_prod_mode_write_protect = target_disable_feature;
if (!tfo->tpg_get_inst_index)
tfo->tpg_get_inst_index = target_default_get_inst_index;
if (!tfo->sess_get_index)
tfo->sess_get_index = target_default_sess_get_index;
if (!tfo->set_default_node_attributes)
tfo->set_default_node_attributes = target_set_default_node_attributes;
if (!tfo->get_cmd_state)
tfo->get_cmd_state = target_default_get_cmd_state;
}
int target_register_template(const struct target_core_fabric_ops *fo)
{
struct target_core_fabric_ops *tfo;
struct target_fabric_configfs *tf;
int ret;
ret = target_fabric_tf_ops_check(fo);
if (ret)
return ret;
tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
if (!tf) {
pr_err("%s: could not allocate memory!\n", __func__);
return -ENOMEM;
}
tfo = kzalloc(sizeof(struct target_core_fabric_ops), GFP_KERNEL);
if (!tfo) {
kfree(tf);
pr_err("%s: could not allocate memory!\n", __func__);
return -ENOMEM;
}
memcpy(tfo, fo, sizeof(*tfo));
target_set_default_ops(tfo);
INIT_LIST_HEAD(&tf->tf_list);
atomic_set(&tf->tf_access_cnt, 0);
tf->tf_ops = tfo;
target_fabric_setup_cits(tf);
mutex_lock(&g_tf_lock);
list_add_tail(&tf->tf_list, &g_tf_list);
mutex_unlock(&g_tf_lock);
return 0;
}
EXPORT_SYMBOL(target_register_template);
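/*
 * Illustrative sketch only: a hypothetical fabric driver populates at least
 * the mandatory callbacks verified by target_fabric_tf_ops_check() before
 * calling target_register_template(), e.g.:
 *
 *	static const struct target_core_fabric_ops demo_fabric_ops = {
 *		.module			= THIS_MODULE,
 *		.fabric_name		= "demo",
 *		.tpg_get_wwn		= demo_tpg_get_wwn,
 *		.tpg_get_tag		= demo_tpg_get_tag,
 *		.check_stop_free	= demo_check_stop_free,
 *		.release_cmd		= demo_release_cmd,
 *		.write_pending		= demo_write_pending,
 *		.queue_data_in		= demo_queue_data_in,
 *		.queue_status		= demo_queue_status,
 *		.queue_tm_rsp		= demo_queue_tm_rsp,
 *		.aborted_task		= demo_aborted_task,
 *		.fabric_make_wwn	= demo_make_wwn,
 *		.fabric_drop_wwn	= demo_drop_wwn,
 *		.fabric_make_tpg	= demo_make_tpg,
 *		.fabric_drop_tpg	= demo_drop_tpg,
 *	};
 *
 *	ret = target_register_template(&demo_fabric_ops);
 *
 * All demo_*() helpers above are hypothetical; any optional callback left
 * NULL is filled in by target_set_default_ops().
 */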
void target_unregister_template(const struct target_core_fabric_ops *fo)
{
struct target_fabric_configfs *t;
mutex_lock(&g_tf_lock);
list_for_each_entry(t, &g_tf_list, tf_list) {
if (!strcmp(t->tf_ops->fabric_name, fo->fabric_name)) {
BUG_ON(atomic_read(&t->tf_access_cnt));
list_del(&t->tf_list);
mutex_unlock(&g_tf_lock);
/*
* Wait for any outstanding fabric se_deve_entry->rcu_head
* callbacks to complete post kfree_rcu(), before allowing
* fabric driver unload of TFO->module to proceed.
*/
rcu_barrier();
kfree(t->tf_tpg_base_cit.ct_attrs);
kfree(t->tf_ops);
kfree(t);
return;
}
}
mutex_unlock(&g_tf_lock);
}
EXPORT_SYMBOL(target_unregister_template);
/*##############################################################################
// Stop functions called by external Target Fabrics Modules
//############################################################################*/
static inline struct se_dev_attrib *to_attrib(struct config_item *item)
{
return container_of(to_config_group(item), struct se_dev_attrib,
da_group);
}
/* Start functions for struct config_item_type tb_dev_attrib_cit */
#define DEF_CONFIGFS_ATTRIB_SHOW(_name) \
static ssize_t _name##_show(struct config_item *item, char *page) \
{ \
return snprintf(page, PAGE_SIZE, "%u\n", to_attrib(item)->_name); \
}
DEF_CONFIGFS_ATTRIB_SHOW(emulate_model_alias);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_dpo);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_write);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_read);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_write_cache);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_ua_intlck_ctrl);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tas);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpu);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpws);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_caw);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_3pc);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_pr);
DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_type);
DEF_CONFIGFS_ATTRIB_SHOW(hw_pi_prot_type);
DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_verify);
DEF_CONFIGFS_ATTRIB_SHOW(enforce_pr_isids);
DEF_CONFIGFS_ATTRIB_SHOW(is_nonrot);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_rest_reord);
DEF_CONFIGFS_ATTRIB_SHOW(force_pr_aptpl);
DEF_CONFIGFS_ATTRIB_SHOW(hw_block_size);
DEF_CONFIGFS_ATTRIB_SHOW(block_size);
DEF_CONFIGFS_ATTRIB_SHOW(hw_max_sectors);
DEF_CONFIGFS_ATTRIB_SHOW(optimal_sectors);
DEF_CONFIGFS_ATTRIB_SHOW(hw_queue_depth);
DEF_CONFIGFS_ATTRIB_SHOW(queue_depth);
DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_lba_count);
DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_block_desc_count);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_rsoc);
#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name) \
static ssize_t _name##_store(struct config_item *item, const char *page,\
size_t count) \
{ \
struct se_dev_attrib *da = to_attrib(item); \
u32 val; \
int ret; \
\
ret = kstrtou32(page, 0, &val); \
if (ret < 0) \
return ret; \
da->_name = val; \
return count; \
}
DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_lba_count);
DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_block_desc_count);
DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity);
DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity_alignment);
DEF_CONFIGFS_ATTRIB_STORE_U32(max_write_same_len);
#define DEF_CONFIGFS_ATTRIB_STORE_BOOL(_name) \
static ssize_t _name##_store(struct config_item *item, const char *page, \
size_t count) \
{ \
struct se_dev_attrib *da = to_attrib(item); \
bool flag; \
int ret; \
\
ret = kstrtobool(page, &flag); \
if (ret < 0) \
return ret; \
da->_name = flag; \
return count; \
}
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_fua_write);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_3pc);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_pr);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(enforce_pr_isids);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(is_nonrot);
#define DEF_CONFIGFS_ATTRIB_STORE_STUB(_name) \
static ssize_t _name##_store(struct config_item *item, const char *page,\
size_t count) \
{ \
printk_once(KERN_WARNING \
"ignoring deprecated %s attribute\n", \
__stringify(_name)); \
return count; \
}
DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_dpo);
DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_fua_read);
static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
const char *configname;
configname = config_item_name(&dev->dev_group.cg_item);
if (strlen(configname) >= INQUIRY_MODEL_LEN) {
pr_warn("dev[%p]: Backstore name '%s' is too long for "
"INQUIRY_MODEL, truncating to 15 characters\n", dev,
configname);
}
/*
* XXX We can't use sizeof(dev->t10_wwn.model) (INQUIRY_MODEL_LEN + 1)
* here without potentially breaking existing setups, so continue to
* truncate one byte shorter than what can be carried in INQUIRY.
*/
strscpy(dev->t10_wwn.model, configname, INQUIRY_MODEL_LEN);
}
static ssize_t emulate_model_alias_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
struct se_device *dev = da->da_dev;
bool flag;
int ret;
if (dev->export_count) {
pr_err("dev[%p]: Unable to change model alias"
" while export_count is %d\n",
dev, dev->export_count);
return -EINVAL;
}
ret = kstrtobool(page, &flag);
if (ret < 0)
return ret;
BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
if (flag) {
dev_set_t10_wwn_model_alias(dev);
} else {
strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
sizeof(dev->t10_wwn.model));
}
da->emulate_model_alias = flag;
return count;
}
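/*
 * Example usage (configfs path layout assumed, backstore must not be
 * exported):
 *
 *	echo 1 > /sys/kernel/config/target/core/$HBA/$DEV/attrib/emulate_model_alias
 *
 * With the flag set, the INQUIRY model string mirrors the configfs backstore
 * name (truncated to INQUIRY_MODEL_LEN - 1); clearing it restores the
 * backend's inquiry_prod default.
 */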
static ssize_t emulate_write_cache_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
bool flag;
int ret;
ret = kstrtobool(page, &flag);
if (ret < 0)
return ret;
if (flag && da->da_dev->transport->get_write_cache) {
pr_err("emulate_write_cache not supported for this device\n");
return -EINVAL;
}
da->emulate_write_cache = flag;
pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
da->da_dev, flag);
return count;
}
static ssize_t emulate_ua_intlck_ctrl_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
u32 val;
int ret;
ret = kstrtou32(page, 0, &val);
if (ret < 0)
return ret;
if (val != TARGET_UA_INTLCK_CTRL_CLEAR
&& val != TARGET_UA_INTLCK_CTRL_NO_CLEAR
&& val != TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
pr_err("Illegal value %d\n", val);
return -EINVAL;
}
if (da->da_dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device"
" UA_INTRLCK_CTRL while export_count is %d\n",
da->da_dev, da->da_dev->export_count);
return -EINVAL;
}
da->emulate_ua_intlck_ctrl = val;
pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
da->da_dev, val);
return count;
}
static ssize_t emulate_tas_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
bool flag;
int ret;
ret = kstrtobool(page, &flag);
if (ret < 0)
return ret;
if (da->da_dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device TAS while"
" export_count is %d\n",
da->da_dev, da->da_dev->export_count);
return -EINVAL;
}
da->emulate_tas = flag;
pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
da->da_dev, flag ? "Enabled" : "Disabled");
return count;
}
static ssize_t emulate_tpu_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
struct se_device *dev = da->da_dev;
bool flag;
int ret;
ret = kstrtobool(page, &flag);
if (ret < 0)
return ret;
/*
* We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_configure_device().
*/
if (flag && !da->max_unmap_block_desc_count) {
if (!dev->transport->configure_unmap ||
!dev->transport->configure_unmap(dev)) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
}
da->emulate_tpu = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
da->da_dev, flag);
return count;
}
static ssize_t emulate_tpws_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
struct se_device *dev = da->da_dev;
bool flag;
int ret;
ret = kstrtobool(page, &flag);
if (ret < 0)
return ret;
/*
* We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_configure_device().
*/
if (flag && !da->max_unmap_block_desc_count) {
if (!dev->transport->configure_unmap ||
!dev->transport->configure_unmap(dev)) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
}
da->emulate_tpws = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
da->da_dev, flag);
return count;
}
static ssize_t pi_prot_type_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
int old_prot = da->pi_prot_type, ret;
struct se_device *dev = da->da_dev;
u32 flag;
ret = kstrtou32(page, 0, &flag);
if (ret < 0)
return ret;
if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
pr_err("Illegal value %d for pi_prot_type\n", flag);
return -EINVAL;
}
if (flag == 2) {
pr_err("DIF TYPE2 protection currently not supported\n");
return -ENOSYS;
}
if (da->hw_pi_prot_type) {
pr_warn("DIF protection enabled on underlying hardware,"
" ignoring\n");
return count;
}
if (!dev->transport->init_prot || !dev->transport->free_prot) {
/* 0 is only allowed value for non-supporting backends */
if (flag == 0)
return count;
pr_err("DIF protection not supported by backend: %s\n",
dev->transport->name);
return -ENOSYS;
}
if (!target_dev_configured(dev)) {
pr_err("DIF protection requires device to be configured\n");
return -ENODEV;
}
if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device PROT type while"
" export_count is %d\n", dev, dev->export_count);
return -EINVAL;
}
da->pi_prot_type = flag;
if (flag && !old_prot) {
ret = dev->transport->init_prot(dev);
if (ret) {
da->pi_prot_type = old_prot;
da->pi_prot_verify = (bool) da->pi_prot_type;
return ret;
}
} else if (!flag && old_prot) {
dev->transport->free_prot(dev);
}
da->pi_prot_verify = (bool) da->pi_prot_type;
pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
return count;
}
/* always zero, but attr needs to remain RW to avoid userspace breakage */
static ssize_t pi_prot_format_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "0\n");
}
static ssize_t pi_prot_format_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
struct se_device *dev = da->da_dev;
bool flag;
int ret;
ret = kstrtobool(page, &flag);
if (ret < 0)
return ret;
if (!flag)
return count;
if (!dev->transport->format_prot) {
pr_err("DIF protection format not supported by backend %s\n",
dev->transport->name);
return -ENOSYS;
}
if (!target_dev_configured(dev)) {
pr_err("DIF protection format requires device to be configured\n");
return -ENODEV;
}
if (dev->export_count) {
pr_err("dev[%p]: Unable to format SE Device PROT type while"
" export_count is %d\n", dev, dev->export_count);
return -EINVAL;
}
ret = dev->transport->format_prot(dev);
if (ret)
return ret;
pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
return count;
}
static ssize_t pi_prot_verify_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
bool flag;
int ret;
ret = kstrtobool(page, &flag);
if (ret < 0)
return ret;
if (!flag) {
da->pi_prot_verify = flag;
return count;
}
if (da->hw_pi_prot_type) {
pr_warn("DIF protection enabled on underlying hardware,"
" ignoring\n");
return count;
}
if (!da->pi_prot_type) {
pr_warn("DIF protection not supported by backend, ignoring\n");
return count;
}
da->pi_prot_verify = flag;
return count;
}
static ssize_t force_pr_aptpl_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
bool flag;
int ret;
ret = kstrtobool(page, &flag);
if (ret < 0)
return ret;
if (da->da_dev->export_count) {
pr_err("dev[%p]: Unable to set force_pr_aptpl while"
" export_count is %d\n",
da->da_dev, da->da_dev->export_count);
return -EINVAL;
}
da->force_pr_aptpl = flag;
pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", da->da_dev, flag);
return count;
}
static ssize_t emulate_rest_reord_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
bool flag;
int ret;
ret = kstrtobool(page, &flag);
if (ret < 0)
return ret;
if (flag != 0) {
printk(KERN_ERR "dev[%p]: SE Device emulation of restricted"
" reordering not implemented\n", da->da_dev);
return -ENOSYS;
}
da->emulate_rest_reord = flag;
pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n",
da->da_dev, flag);
return count;
}
static ssize_t unmap_zeroes_data_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
struct se_device *dev = da->da_dev;
bool flag;
int ret;
ret = kstrtobool(page, &flag);
if (ret < 0)
return ret;
if (da->da_dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device"
" unmap_zeroes_data while export_count is %d\n",
da->da_dev, da->da_dev->export_count);
return -EINVAL;
}
/*
* We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_configure_device().
*/
if (flag && !da->max_unmap_block_desc_count) {
if (!dev->transport->configure_unmap ||
!dev->transport->configure_unmap(dev)) {
pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set because max_unmap_block_desc_count is zero\n",
da->da_dev);
return -ENOSYS;
}
}
da->unmap_zeroes_data = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
da->da_dev, flag);
return count;
}
/*
* Note, this can only be called on unexported SE Device Object.
*/
static ssize_t queue_depth_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
struct se_device *dev = da->da_dev;
u32 val;
int ret;
ret = kstrtou32(page, 0, &val);
if (ret < 0)
return ret;
if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device TCQ while"
" export_count is %d\n",
dev, dev->export_count);
return -EINVAL;
}
if (!val) {
pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev);
return -EINVAL;
}
if (val > dev->dev_attrib.queue_depth) {
if (val > dev->dev_attrib.hw_queue_depth) {
pr_err("dev[%p]: Passed queue_depth:"
" %u exceeds TCM/SE_Device MAX"
" TCQ: %u\n", dev, val,
dev->dev_attrib.hw_queue_depth);
return -EINVAL;
}
}
da->queue_depth = dev->queue_depth = val;
pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, val);
return count;
}
static ssize_t optimal_sectors_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
u32 val;
int ret;
ret = kstrtou32(page, 0, &val);
if (ret < 0)
return ret;
if (da->da_dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device"
" optimal_sectors while export_count is %d\n",
da->da_dev, da->da_dev->export_count);
return -EINVAL;
}
if (val > da->hw_max_sectors) {
pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
" greater than hw_max_sectors: %u\n",
da->da_dev, val, da->hw_max_sectors);
return -EINVAL;
}
da->optimal_sectors = val;
pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
da->da_dev, val);
return count;
}
static ssize_t block_size_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
u32 val;
int ret;
ret = kstrtou32(page, 0, &val);
if (ret < 0)
return ret;
if (da->da_dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device block_size"
" while export_count is %d\n",
da->da_dev, da->da_dev->export_count);
return -EINVAL;
}
if (val != 512 && val != 1024 && val != 2048 && val != 4096) {
pr_err("dev[%p]: Illegal value for block_device: %u"
" for SE device, must be 512, 1024, 2048 or 4096\n",
da->da_dev, val);
return -EINVAL;
}
da->block_size = val;
pr_debug("dev[%p]: SE Device block_size changed to %u\n",
da->da_dev, val);
return count;
}
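/*
 * Example usage (configfs path layout assumed):
 *
 *	echo 4096 > /sys/kernel/config/target/core/$HBA/$DEV/attrib/block_size
 *
 * Only 512, 1024, 2048 and 4096 are accepted, and only while export_count
 * is zero.
 */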
static ssize_t alua_support_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = to_attrib(item);
u8 flags = da->da_dev->transport_flags;
return snprintf(page, PAGE_SIZE, "%d\n",
flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ? 0 : 1);
}
static ssize_t alua_support_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
struct se_device *dev = da->da_dev;
bool flag, oldflag;
int ret;
ret = kstrtobool(page, &flag);
if (ret < 0)
return ret;
oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA);
if (flag == oldflag)
return count;
if (!(dev->transport->transport_flags_changeable &
TRANSPORT_FLAG_PASSTHROUGH_ALUA)) {
pr_err("dev[%p]: Unable to change SE Device alua_support:"
" alua_support has fixed value\n", dev);
return -ENOSYS;
}
if (flag)
dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA;
else
dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_ALUA;
return count;
}
static ssize_t pgr_support_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = to_attrib(item);
u8 flags = da->da_dev->transport_flags;
return snprintf(page, PAGE_SIZE, "%d\n",
flags & TRANSPORT_FLAG_PASSTHROUGH_PGR ? 0 : 1);
}
static ssize_t pgr_support_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
struct se_device *dev = da->da_dev;
bool flag, oldflag;
int ret;
ret = kstrtobool(page, &flag);
if (ret < 0)
return ret;
oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR);
if (flag == oldflag)
return count;
if (!(dev->transport->transport_flags_changeable &
TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
pr_err("dev[%p]: Unable to change SE Device pgr_support:"
" pgr_support has fixed value\n", dev);
return -ENOSYS;
}
if (flag)
dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
else
dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_PGR;
return count;
}
static ssize_t emulate_rsoc_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
bool flag;
int ret;
ret = kstrtobool(page, &flag);
if (ret < 0)
return ret;
da->emulate_rsoc = flag;
pr_debug("dev[%p]: SE Device REPORT_SUPPORTED_OPERATION_CODES_EMULATION flag: %d\n",
da->da_dev, flag);
return count;
}
CONFIGFS_ATTR(, emulate_model_alias);
CONFIGFS_ATTR(, emulate_dpo);
CONFIGFS_ATTR(, emulate_fua_write);
CONFIGFS_ATTR(, emulate_fua_read);
CONFIGFS_ATTR(, emulate_write_cache);
CONFIGFS_ATTR(, emulate_ua_intlck_ctrl);
CONFIGFS_ATTR(, emulate_tas);
CONFIGFS_ATTR(, emulate_tpu);
CONFIGFS_ATTR(, emulate_tpws);
CONFIGFS_ATTR(, emulate_caw);
CONFIGFS_ATTR(, emulate_3pc);
CONFIGFS_ATTR(, emulate_pr);
CONFIGFS_ATTR(, emulate_rsoc);
CONFIGFS_ATTR(, pi_prot_type);
CONFIGFS_ATTR_RO(, hw_pi_prot_type);
CONFIGFS_ATTR(, pi_prot_format);
CONFIGFS_ATTR(, pi_prot_verify);
CONFIGFS_ATTR(, enforce_pr_isids);
CONFIGFS_ATTR(, is_nonrot);
CONFIGFS_ATTR(, emulate_rest_reord);
CONFIGFS_ATTR(, force_pr_aptpl);
CONFIGFS_ATTR_RO(, hw_block_size);
CONFIGFS_ATTR(, block_size);
CONFIGFS_ATTR_RO(, hw_max_sectors);
CONFIGFS_ATTR(, optimal_sectors);
CONFIGFS_ATTR_RO(, hw_queue_depth);
CONFIGFS_ATTR(, queue_depth);
CONFIGFS_ATTR(, max_unmap_lba_count);
CONFIGFS_ATTR(, max_unmap_block_desc_count);
CONFIGFS_ATTR(, unmap_granularity);
CONFIGFS_ATTR(, unmap_granularity_alignment);
CONFIGFS_ATTR(, unmap_zeroes_data);
CONFIGFS_ATTR(, max_write_same_len);
CONFIGFS_ATTR(, alua_support);
CONFIGFS_ATTR(, pgr_support);
/*
* dev_attrib attributes for devices using the target core SBC/SPC
* interpreter. Any backend using spc_parse_cdb should be using
* these.
*/
struct configfs_attribute *sbc_attrib_attrs[] = {
&attr_emulate_model_alias,
&attr_emulate_dpo,
&attr_emulate_fua_write,
&attr_emulate_fua_read,
&attr_emulate_write_cache,
&attr_emulate_ua_intlck_ctrl,
&attr_emulate_tas,
&attr_emulate_tpu,
&attr_emulate_tpws,
&attr_emulate_caw,
&attr_emulate_3pc,
&attr_emulate_pr,
&attr_pi_prot_type,
&attr_hw_pi_prot_type,
&attr_pi_prot_format,
&attr_pi_prot_verify,
&attr_enforce_pr_isids,
&attr_is_nonrot,
&attr_emulate_rest_reord,
&attr_force_pr_aptpl,
&attr_hw_block_size,
&attr_block_size,
&attr_hw_max_sectors,
&attr_optimal_sectors,
&attr_hw_queue_depth,
&attr_queue_depth,
&attr_max_unmap_lba_count,
&attr_max_unmap_block_desc_count,
&attr_unmap_granularity,
&attr_unmap_granularity_alignment,
&attr_unmap_zeroes_data,
&attr_max_write_same_len,
&attr_alua_support,
&attr_pgr_support,
&attr_emulate_rsoc,
NULL,
};
EXPORT_SYMBOL(sbc_attrib_attrs);
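/*
 * A backend using the SBC/SPC interpreter simply points its
 * target_backend_ops at this array, exactly as rd_mcp_ops does in
 * target_core_rd.c:
 *
 *	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
 */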
/*
* Minimal dev_attrib attributes for devices passing through CDBs.
* In this case we only provide a few read-only attributes for
* backwards compatibility.
*/
struct configfs_attribute *passthrough_attrib_attrs[] = {
&attr_hw_pi_prot_type,
&attr_hw_block_size,
&attr_hw_max_sectors,
&attr_hw_queue_depth,
&attr_emulate_pr,
&attr_alua_support,
&attr_pgr_support,
NULL,
};
EXPORT_SYMBOL(passthrough_attrib_attrs);
/*
* pr related dev_attrib attributes for devices passing through CDBs,
* but allowing in core pr emulation.
*/
struct configfs_attribute *passthrough_pr_attrib_attrs[] = {
&attr_enforce_pr_isids,
&attr_force_pr_aptpl,
NULL,
};
EXPORT_SYMBOL(passthrough_pr_attrib_attrs);
TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL);
TB_CIT_SETUP_DRV(dev_action, NULL, NULL);
/* End functions for struct config_item_type tb_dev_attrib_cit */
/* Start functions for struct config_item_type tb_dev_wwn_cit */
static struct t10_wwn *to_t10_wwn(struct config_item *item)
{
return container_of(to_config_group(item), struct t10_wwn, t10_wwn_group);
}
static ssize_t target_check_inquiry_data(char *buf)
{
size_t len;
int i;
len = strlen(buf);
/*
* SPC 4.3.1:
* ASCII data fields shall contain only ASCII printable characters
* (i.e., code values 20h to 7Eh) and may be terminated with one or
* more ASCII null (00h) characters.
*/
for (i = 0; i < len; i++) {
if (buf[i] < 0x20 || buf[i] > 0x7E) {
pr_err("Emulated T10 Inquiry Data contains non-ASCII-printable characters\n");
return -EINVAL;
}
}
return len;
}
/*
* STANDARD and VPD page 0x83 T10 Vendor Identification
*/
static ssize_t target_wwn_vendor_id_show(struct config_item *item,
char *page)
{
return sprintf(page, "%s\n", &to_t10_wwn(item)->vendor[0]);
}
static ssize_t target_wwn_vendor_id_store(struct config_item *item,
const char *page, size_t count)
{
struct t10_wwn *t10_wwn = to_t10_wwn(item);
struct se_device *dev = t10_wwn->t10_dev;
/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
unsigned char buf[INQUIRY_VENDOR_LEN + 2];
char *stripped = NULL;
ssize_t len;
ssize_t ret;
len = strscpy(buf, page, sizeof(buf));
if (len > 0) {
/* Strip any newline added from userspace. */
stripped = strstrip(buf);
len = strlen(stripped);
}
if (len < 0 || len > INQUIRY_VENDOR_LEN) {
pr_err("Emulated T10 Vendor Identification exceeds"
" INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN)
"\n");
return -EOVERFLOW;
}
ret = target_check_inquiry_data(stripped);
if (ret < 0)
return ret;
/*
* Check to see if any active exports exist. If they do exist, fail
* here as changing this information on the fly (underneath the
* initiator side OS dependent multipath code) could cause negative
* effects.
*/
if (dev->export_count) {
pr_err("Unable to set T10 Vendor Identification while"
" active %d exports exist\n", dev->export_count);
return -EINVAL;
}
BUILD_BUG_ON(sizeof(dev->t10_wwn.vendor) != INQUIRY_VENDOR_LEN + 1);
strscpy(dev->t10_wwn.vendor, stripped, sizeof(dev->t10_wwn.vendor));
pr_debug("Target_Core_ConfigFS: Set emulated T10 Vendor Identification:"
" %s\n", dev->t10_wwn.vendor);
return count;
}
static ssize_t target_wwn_product_id_show(struct config_item *item,
char *page)
{
return sprintf(page, "%s\n", &to_t10_wwn(item)->model[0]);
}
static ssize_t target_wwn_product_id_store(struct config_item *item,
const char *page, size_t count)
{
struct t10_wwn *t10_wwn = to_t10_wwn(item);
struct se_device *dev = t10_wwn->t10_dev;
/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
unsigned char buf[INQUIRY_MODEL_LEN + 2];
char *stripped = NULL;
ssize_t len;
ssize_t ret;
len = strscpy(buf, page, sizeof(buf));
if (len > 0) {
/* Strip any newline added from userspace. */
stripped = strstrip(buf);
len = strlen(stripped);
}
if (len < 0 || len > INQUIRY_MODEL_LEN) {
pr_err("Emulated T10 Vendor exceeds INQUIRY_MODEL_LEN: "
__stringify(INQUIRY_MODEL_LEN)
"\n");
return -EOVERFLOW;
}
ret = target_check_inquiry_data(stripped);
if (ret < 0)
return ret;
/*
* Check to see if any active exports exist. If they do exist, fail
* here as changing this information on the fly (underneath the
* initiator side OS dependent multipath code) could cause negative
* effects.
*/
if (dev->export_count) {
pr_err("Unable to set T10 Model while active %d exports exist\n",
dev->export_count);
return -EINVAL;
}
BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
strscpy(dev->t10_wwn.model, stripped, sizeof(dev->t10_wwn.model));
pr_debug("Target_Core_ConfigFS: Set emulated T10 Model Identification: %s\n",
dev->t10_wwn.model);
return count;
}
static ssize_t target_wwn_revision_show(struct config_item *item,
char *page)
{
return sprintf(page, "%s\n", &to_t10_wwn(item)->revision[0]);
}
static ssize_t target_wwn_revision_store(struct config_item *item,
const char *page, size_t count)
{
struct t10_wwn *t10_wwn = to_t10_wwn(item);
struct se_device *dev = t10_wwn->t10_dev;
/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
unsigned char buf[INQUIRY_REVISION_LEN + 2];
char *stripped = NULL;
ssize_t len;
ssize_t ret;
len = strscpy(buf, page, sizeof(buf));
if (len > 0) {
/* Strip any newline added from userspace. */
stripped = strstrip(buf);
len = strlen(stripped);
}
if (len < 0 || len > INQUIRY_REVISION_LEN) {
pr_err("Emulated T10 Revision exceeds INQUIRY_REVISION_LEN: "
__stringify(INQUIRY_REVISION_LEN)
"\n");
return -EOVERFLOW;
}
ret = target_check_inquiry_data(stripped);
if (ret < 0)
return ret;
/*
* Check to see if any active exports exist. If they do exist, fail
* here as changing this information on the fly (underneath the
* initiator side OS dependent multipath code) could cause negative
* effects.
*/
if (dev->export_count) {
pr_err("Unable to set T10 Revision while active %d exports exist\n",
dev->export_count);
return -EINVAL;
}
BUILD_BUG_ON(sizeof(dev->t10_wwn.revision) != INQUIRY_REVISION_LEN + 1);
strscpy(dev->t10_wwn.revision, stripped, sizeof(dev->t10_wwn.revision));
pr_debug("Target_Core_ConfigFS: Set emulated T10 Revision: %s\n",
dev->t10_wwn.revision);
return count;
}
static ssize_t
target_wwn_company_id_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%#08x\n",
to_t10_wwn(item)->company_id);
}
static ssize_t
target_wwn_company_id_store(struct config_item *item,
const char *page, size_t count)
{
struct t10_wwn *t10_wwn = to_t10_wwn(item);
struct se_device *dev = t10_wwn->t10_dev;
u32 val;
int ret;
/*
* The IEEE COMPANY_ID field should contain a 24-bit canonical
* form OUI assigned by the IEEE.
*/
ret = kstrtou32(page, 0, &val);
if (ret < 0)
return ret;
if (val >= 0x1000000)
return -EOVERFLOW;
/*
* Check to see if any active exports exist. If they do exist, fail
* here as changing this information on the fly (underneath the
* initiator side OS dependent multipath code) could cause negative
* effects.
*/
if (dev->export_count) {
pr_err("Unable to set Company ID while %u exports exist\n",
dev->export_count);
return -EINVAL;
}
t10_wwn->company_id = val;
pr_debug("Target_Core_ConfigFS: Set IEEE Company ID: %#08x\n",
t10_wwn->company_id);
return count;
}
/*
* VPD page 0x80 Unit serial
*/
static ssize_t target_wwn_vpd_unit_serial_show(struct config_item *item,
char *page)
{
return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
&to_t10_wwn(item)->unit_serial[0]);
}
static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item,
const char *page, size_t count)
{
struct t10_wwn *t10_wwn = to_t10_wwn(item);
struct se_device *dev = t10_wwn->t10_dev;
unsigned char buf[INQUIRY_VPD_SERIAL_LEN] = { };
/*
* If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
* from the struct scsi_device level firmware, do not allow
* VPD Unit Serial to be emulated.
*
* Note this struct scsi_device could also be emulating VPD
* information from its drivers/scsi LLD. But for now we assume
* it is doing 'the right thing' wrt a world wide unique
* VPD Unit Serial Number that OS dependent multipath can depend on.
*/
if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
pr_err("Underlying SCSI device firmware provided VPD"
" Unit Serial, ignoring request\n");
return -EOPNOTSUPP;
}
if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
pr_err("Emulated VPD Unit Serial exceeds"
" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
return -EOVERFLOW;
}
/*
* Check to see if any active $FABRIC_MOD exports exist. If they
* do exist, fail here as changing this information on the fly
* (underneath the initiator side OS dependent multipath code)
* could cause negative effects.
*/
if (dev->export_count) {
pr_err("Unable to set VPD Unit Serial while"
" active %d $FABRIC_MOD exports exist\n",
dev->export_count);
return -EINVAL;
}
/*
* This currently assumes ASCII encoding for emulated VPD Unit Serial.
*
* Also, strip any newline added from the userspace
* echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
*/
snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
"%s", strstrip(buf));
dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;
pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
" %s\n", dev->t10_wwn.unit_serial);
return count;
}
/*
* VPD page 0x83 Protocol Identifier
*/
static ssize_t target_wwn_vpd_protocol_identifier_show(struct config_item *item,
char *page)
{
struct t10_wwn *t10_wwn = to_t10_wwn(item);
struct t10_vpd *vpd;
unsigned char buf[VPD_TMP_BUF_SIZE] = { };
ssize_t len = 0;
spin_lock(&t10_wwn->t10_vpd_lock);
list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
if (!vpd->protocol_identifier_set)
continue;
transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
if (len + strlen(buf) >= PAGE_SIZE)
break;
len += sprintf(page+len, "%s", buf);
}
spin_unlock(&t10_wwn->t10_vpd_lock);
return len;
}
/*
* Generic wrapper for dumping VPD identifiers by association.
*/
#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc) \
static ssize_t target_wwn_##_name##_show(struct config_item *item, \
char *page) \
{ \
struct t10_wwn *t10_wwn = to_t10_wwn(item); \
struct t10_vpd *vpd; \
unsigned char buf[VPD_TMP_BUF_SIZE]; \
ssize_t len = 0; \
\
spin_lock(&t10_wwn->t10_vpd_lock); \
list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { \
if (vpd->association != _assoc) \
continue; \
\
memset(buf, 0, VPD_TMP_BUF_SIZE); \
transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \
if (len + strlen(buf) >= PAGE_SIZE) \
break; \
len += sprintf(page+len, "%s", buf); \
\
memset(buf, 0, VPD_TMP_BUF_SIZE); \
transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
if (len + strlen(buf) >= PAGE_SIZE) \
break; \
len += sprintf(page+len, "%s", buf); \
\
memset(buf, 0, VPD_TMP_BUF_SIZE); \
transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
if (len + strlen(buf) >= PAGE_SIZE) \
break; \
len += sprintf(page+len, "%s", buf); \
} \
spin_unlock(&t10_wwn->t10_vpd_lock); \
\
return len; \
}
/* VPD page 0x83 Association: Logical Unit */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
/* VPD page 0x83 Association: Target Port */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
/* VPD page 0x83 Association: SCSI Target Device */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
CONFIGFS_ATTR(target_wwn_, vendor_id);
CONFIGFS_ATTR(target_wwn_, product_id);
CONFIGFS_ATTR(target_wwn_, revision);
CONFIGFS_ATTR(target_wwn_, company_id);
CONFIGFS_ATTR(target_wwn_, vpd_unit_serial);
CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_target_port);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device);
static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
&target_wwn_attr_vendor_id,
&target_wwn_attr_product_id,
&target_wwn_attr_revision,
&target_wwn_attr_company_id,
&target_wwn_attr_vpd_unit_serial,
&target_wwn_attr_vpd_protocol_identifier,
&target_wwn_attr_vpd_assoc_logical_unit,
&target_wwn_attr_vpd_assoc_target_port,
&target_wwn_attr_vpd_assoc_scsi_target_device,
NULL,
};
TB_CIT_SETUP(dev_wwn, NULL, NULL, target_core_dev_wwn_attrs);
/* End functions for struct config_item_type tb_dev_wwn_cit */
/* Start functions for struct config_item_type tb_dev_pr_cit */
static struct se_device *pr_to_dev(struct config_item *item)
{
return container_of(to_config_group(item), struct se_device,
dev_pr_group);
}
static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
char *page)
{
struct se_node_acl *se_nacl;
struct t10_pr_registration *pr_reg;
char i_buf[PR_REG_ISID_ID_LEN] = { };
pr_reg = dev->dev_pr_res_holder;
if (!pr_reg)
return sprintf(page, "No SPC-3 Reservation holder\n");
se_nacl = pr_reg->pr_reg_nacl;
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
se_nacl->se_tpg->se_tpg_tfo->fabric_name,
se_nacl->initiatorname, i_buf);
}
static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
char *page)
{
struct se_session *sess = dev->reservation_holder;
struct se_node_acl *se_nacl;
ssize_t len;
if (sess) {
se_nacl = sess->se_node_acl;
len = sprintf(page,
"SPC-2 Reservation: %s Initiator: %s\n",
se_nacl->se_tpg->se_tpg_tfo->fabric_name,
se_nacl->initiatorname);
} else {
len = sprintf(page, "No SPC-2 Reservation holder\n");
}
return len;
}
static ssize_t target_pr_res_holder_show(struct config_item *item, char *page)
{
struct se_device *dev = pr_to_dev(item);
int ret;
if (!dev->dev_attrib.emulate_pr)
return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
return sprintf(page, "Passthrough\n");
spin_lock(&dev->dev_reservation_lock);
if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
ret = target_core_dev_pr_show_spc2_res(dev, page);
else
ret = target_core_dev_pr_show_spc3_res(dev, page);
spin_unlock(&dev->dev_reservation_lock);
return ret;
}
static ssize_t target_pr_res_pr_all_tgt_pts_show(struct config_item *item,
char *page)
{
struct se_device *dev = pr_to_dev(item);
ssize_t len = 0;
spin_lock(&dev->dev_reservation_lock);
if (!dev->dev_pr_res_holder) {
len = sprintf(page, "No SPC-3 Reservation holder\n");
} else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) {
len = sprintf(page, "SPC-3 Reservation: All Target"
" Ports registration\n");
} else {
len = sprintf(page, "SPC-3 Reservation: Single"
" Target Port registration\n");
}
spin_unlock(&dev->dev_reservation_lock);
return len;
}
static ssize_t target_pr_res_pr_generation_show(struct config_item *item,
char *page)
{
return sprintf(page, "0x%08x\n", pr_to_dev(item)->t10_pr.pr_generation);
}
static ssize_t target_pr_res_pr_holder_tg_port_show(struct config_item *item,
char *page)
{
struct se_device *dev = pr_to_dev(item);
struct se_node_acl *se_nacl;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg;
const struct target_core_fabric_ops *tfo;
ssize_t len = 0;
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
if (!pr_reg) {
len = sprintf(page, "No SPC-3 Reservation holder\n");
goto out_unlock;
}
se_nacl = pr_reg->pr_reg_nacl;
se_tpg = se_nacl->se_tpg;
tfo = se_tpg->se_tpg_tfo;
len += sprintf(page+len, "SPC-3 Reservation: %s"
" Target Node Endpoint: %s\n", tfo->fabric_name,
tfo->tpg_get_wwn(se_tpg));
len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
" Identifier Tag: %hu %s Portal Group Tag: %hu"
" %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi,
tfo->fabric_name, tfo->tpg_get_tag(se_tpg),
tfo->fabric_name, pr_reg->pr_aptpl_target_lun);
out_unlock:
spin_unlock(&dev->dev_reservation_lock);
return len;
}
static ssize_t target_pr_res_pr_registered_i_pts_show(struct config_item *item,
char *page)
{
struct se_device *dev = pr_to_dev(item);
const struct target_core_fabric_ops *tfo;
struct t10_pr_registration *pr_reg;
unsigned char buf[384];
char i_buf[PR_REG_ISID_ID_LEN];
ssize_t len = 0;
int reg_count = 0;
len += sprintf(page+len, "SPC-3 PR Registrations:\n");
spin_lock(&dev->t10_pr.registration_lock);
list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
pr_reg_list) {
memset(buf, 0, 384);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
core_pr_dump_initiator_port(pr_reg, i_buf,
PR_REG_ISID_ID_LEN);
sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
tfo->fabric_name,
pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key,
pr_reg->pr_res_generation);
if (len + strlen(buf) >= PAGE_SIZE)
break;
len += sprintf(page+len, "%s", buf);
reg_count++;
}
spin_unlock(&dev->t10_pr.registration_lock);
if (!reg_count)
len += sprintf(page+len, "None\n");
return len;
}
static ssize_t target_pr_res_pr_type_show(struct config_item *item, char *page)
{
struct se_device *dev = pr_to_dev(item);
struct t10_pr_registration *pr_reg;
ssize_t len = 0;
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
if (pr_reg) {
len = sprintf(page, "SPC-3 Reservation Type: %s\n",
core_scsi3_pr_dump_type(pr_reg->pr_res_type));
} else {
len = sprintf(page, "No SPC-3 Reservation holder\n");
}
spin_unlock(&dev->dev_reservation_lock);
return len;
}
static ssize_t target_pr_res_type_show(struct config_item *item, char *page)
{
struct se_device *dev = pr_to_dev(item);
if (!dev->dev_attrib.emulate_pr)
return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
return sprintf(page, "SPC_PASSTHROUGH\n");
if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
return sprintf(page, "SPC2_RESERVATIONS\n");
return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
}
static ssize_t target_pr_res_aptpl_active_show(struct config_item *item,
char *page)
{
struct se_device *dev = pr_to_dev(item);
if (!dev->dev_attrib.emulate_pr ||
(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
return 0;
return sprintf(page, "APTPL Bit Status: %s\n",
(dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
}
static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item,
char *page)
{
struct se_device *dev = pr_to_dev(item);
if (!dev->dev_attrib.emulate_pr ||
(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
return 0;
return sprintf(page, "Ready to process PR APTPL metadata..\n");
}
enum {
Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
};
static match_table_t tokens = {
{Opt_initiator_fabric, "initiator_fabric=%s"},
{Opt_initiator_node, "initiator_node=%s"},
{Opt_initiator_sid, "initiator_sid=%s"},
{Opt_sa_res_key, "sa_res_key=%s"},
{Opt_res_holder, "res_holder=%d"},
{Opt_res_type, "res_type=%d"},
{Opt_res_scope, "res_scope=%d"},
{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
{Opt_mapped_lun, "mapped_lun=%u"},
{Opt_target_fabric, "target_fabric=%s"},
{Opt_target_node, "target_node=%s"},
{Opt_tpgt, "tpgt=%d"},
{Opt_port_rtpi, "port_rtpi=%d"},
{Opt_target_lun, "target_lun=%u"},
{Opt_err, NULL}
};
static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
const char *page, size_t count)
{
struct se_device *dev = pr_to_dev(item);
unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
unsigned char *t_fabric = NULL, *t_port = NULL;
char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
unsigned long long tmp_ll;
u64 sa_res_key = 0;
u64 mapped_lun = 0, target_lun = 0;
int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
u16 tpgt = 0;
u8 type = 0;
if (!dev->dev_attrib.emulate_pr ||
(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
return count;
if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
return count;
if (dev->export_count) {
pr_debug("Unable to process APTPL metadata while"
" active fabric exports exist\n");
return -EINVAL;
}
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
return -ENOMEM;
orig = opts;
while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;
token = match_token(ptr, tokens, args);
switch (token) {
case Opt_initiator_fabric:
i_fabric = match_strdup(args);
if (!i_fabric) {
ret = -ENOMEM;
goto out;
}
break;
case Opt_initiator_node:
i_port = match_strdup(args);
if (!i_port) {
ret = -ENOMEM;
goto out;
}
if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
pr_err("APTPL metadata initiator_node="
" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
PR_APTPL_MAX_IPORT_LEN);
ret = -EINVAL;
break;
}
break;
case Opt_initiator_sid:
isid = match_strdup(args);
if (!isid) {
ret = -ENOMEM;
goto out;
}
if (strlen(isid) >= PR_REG_ISID_LEN) {
pr_err("APTPL metadata initiator_isid"
"= exceeds PR_REG_ISID_LEN: %d\n",
PR_REG_ISID_LEN);
ret = -EINVAL;
break;
}
break;
case Opt_sa_res_key:
ret = match_u64(args, &tmp_ll);
if (ret < 0) {
pr_err("kstrtoull() failed for sa_res_key=\n");
goto out;
}
sa_res_key = (u64)tmp_ll;
break;
/*
* PR APTPL Metadata for Reservation
*/
case Opt_res_holder:
ret = match_int(args, &arg);
if (ret)
goto out;
res_holder = arg;
break;
case Opt_res_type:
ret = match_int(args, &arg);
if (ret)
goto out;
type = (u8)arg;
break;
case Opt_res_scope:
ret = match_int(args, &arg);
if (ret)
goto out;
break;
case Opt_res_all_tg_pt:
ret = match_int(args, &arg);
if (ret)
goto out;
all_tg_pt = (int)arg;
break;
case Opt_mapped_lun:
ret = match_u64(args, &tmp_ll);
if (ret)
goto out;
mapped_lun = (u64)tmp_ll;
break;
/*
* PR APTPL Metadata for Target Port
*/
case Opt_target_fabric:
t_fabric = match_strdup(args);
if (!t_fabric) {
ret = -ENOMEM;
goto out;
}
break;
case Opt_target_node:
t_port = match_strdup(args);
if (!t_port) {
ret = -ENOMEM;
goto out;
}
if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
pr_err("APTPL metadata target_node="
" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
PR_APTPL_MAX_TPORT_LEN);
ret = -EINVAL;
break;
}
break;
case Opt_tpgt:
ret = match_int(args, &arg);
if (ret)
goto out;
tpgt = (u16)arg;
break;
case Opt_port_rtpi:
ret = match_int(args, &arg);
if (ret)
goto out;
break;
case Opt_target_lun:
ret = match_u64(args, &tmp_ll);
if (ret)
goto out;
target_lun = (u64)tmp_ll;
break;
default:
break;
}
}
if (!i_port || !t_port || !sa_res_key) {
pr_err("Illegal parameters for APTPL registration\n");
ret = -EINVAL;
goto out;
}
if (res_holder && !(type)) {
pr_err("Illegal PR type: 0x%02x for reservation"
" holder\n", type);
ret = -EINVAL;
goto out;
}
ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key,
i_port, isid, mapped_lun, t_port, tpgt, target_lun,
res_holder, all_tg_pt, type);
out:
kfree(i_fabric);
kfree(i_port);
kfree(isid);
kfree(t_fabric);
kfree(t_port);
kfree(orig);
return (ret == 0) ? count : ret;
}
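/*
 * Illustrative usage (all identifier values below are hypothetical; the
 * attribute lives under the device's pr/ group, path layout assumed):
 *
 *	echo "initiator_fabric=iSCSI,initiator_node=iqn.1994-05.com.example:init,sa_res_key=0x1234,res_holder=1,res_type=3,mapped_lun=0,target_fabric=iSCSI,target_node=iqn.2003-01.org.example:tgt,tpgt=1,target_lun=0" \
 *		> /sys/kernel/config/target/core/$HBA/$DEV/pr/res_aptpl_metadata
 *
 * initiator_node=, target_node= and a non-zero sa_res_key= are mandatory;
 * a res_holder=1 entry additionally requires a non-zero res_type=.
 */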
CONFIGFS_ATTR_RO(target_pr_, res_holder);
CONFIGFS_ATTR_RO(target_pr_, res_pr_all_tgt_pts);
CONFIGFS_ATTR_RO(target_pr_, res_pr_generation);
CONFIGFS_ATTR_RO(target_pr_, res_pr_holder_tg_port);
CONFIGFS_ATTR_RO(target_pr_, res_pr_registered_i_pts);
CONFIGFS_ATTR_RO(target_pr_, res_pr_type);
CONFIGFS_ATTR_RO(target_pr_, res_type);
CONFIGFS_ATTR_RO(target_pr_, res_aptpl_active);
CONFIGFS_ATTR(target_pr_, res_aptpl_metadata);
static struct configfs_attribute *target_core_dev_pr_attrs[] = {
&target_pr_attr_res_holder,
&target_pr_attr_res_pr_all_tgt_pts,
&target_pr_attr_res_pr_generation,
&target_pr_attr_res_pr_holder_tg_port,
&target_pr_attr_res_pr_registered_i_pts,
&target_pr_attr_res_pr_type,
&target_pr_attr_res_type,
&target_pr_attr_res_aptpl_active,
&target_pr_attr_res_aptpl_metadata,
NULL,
};
TB_CIT_SETUP(dev_pr, NULL, NULL, target_core_dev_pr_attrs);
/* End functions for struct config_item_type tb_dev_pr_cit */
/* Start functions for struct config_item_type tb_dev_cit */
static inline struct se_device *to_device(struct config_item *item)
{
return container_of(to_config_group(item), struct se_device, dev_group);
}
static ssize_t target_dev_info_show(struct config_item *item, char *page)
{
struct se_device *dev = to_device(item);
int bl = 0;
ssize_t read_bytes = 0;
transport_dump_dev_state(dev, page, &bl);
read_bytes += bl;
read_bytes += dev->transport->show_configfs_dev_params(dev,
page+read_bytes);
return read_bytes;
}
static ssize_t target_dev_control_store(struct config_item *item,
const char *page, size_t count)
{
struct se_device *dev = to_device(item);
return dev->transport->set_configfs_dev_params(dev, page, count);
}
static ssize_t target_dev_alias_show(struct config_item *item, char *page)
{
struct se_device *dev = to_device(item);
if (!(dev->dev_flags & DF_USING_ALIAS))
return 0;
return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
}
static ssize_t target_dev_alias_store(struct config_item *item,
const char *page, size_t count)
{
struct se_device *dev = to_device(item);
struct se_hba *hba = dev->se_hba;
ssize_t read_bytes;
if (count > (SE_DEV_ALIAS_LEN-1)) {
pr_err("alias count: %d exceeds"
" SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
SE_DEV_ALIAS_LEN-1);
return -EINVAL;
}
read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page);
if (!read_bytes)
return -EINVAL;
if (dev->dev_alias[read_bytes - 1] == '\n')
dev->dev_alias[read_bytes - 1] = '\0';
dev->dev_flags |= DF_USING_ALIAS;
pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&dev->dev_group.cg_item),
dev->dev_alias);
return read_bytes;
}
static ssize_t target_dev_udev_path_show(struct config_item *item, char *page)
{
struct se_device *dev = to_device(item);
if (!(dev->dev_flags & DF_USING_UDEV_PATH))
return 0;
return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
}
static ssize_t target_dev_udev_path_store(struct config_item *item,
const char *page, size_t count)
{
struct se_device *dev = to_device(item);
struct se_hba *hba = dev->se_hba;
ssize_t read_bytes;
if (count > (SE_UDEV_PATH_LEN-1)) {
pr_err("udev_path count: %d exceeds"
" SE_UDEV_PATH_LEN-1: %u\n", (int)count,
SE_UDEV_PATH_LEN-1);
return -EINVAL;
}
read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN,
"%s", page);
if (!read_bytes)
return -EINVAL;
if (dev->udev_path[read_bytes - 1] == '\n')
dev->udev_path[read_bytes - 1] = '\0';
dev->dev_flags |= DF_USING_UDEV_PATH;
pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&dev->dev_group.cg_item),
dev->udev_path);
return read_bytes;
}
static ssize_t target_dev_enable_show(struct config_item *item, char *page)
{
struct se_device *dev = to_device(item);
return snprintf(page, PAGE_SIZE, "%d\n", target_dev_configured(dev));
}
static ssize_t target_dev_enable_store(struct config_item *item,
const char *page, size_t count)
{
struct se_device *dev = to_device(item);
char *ptr;
int ret;
ptr = strstr(page, "1");
if (!ptr) {
pr_err("For dev_enable ops, only valid value"
" is \"1\"\n");
return -EINVAL;
}
ret = target_configure_device(dev);
if (ret)
return ret;
return count;
}
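/*
 * Example usage (configfs path layout assumed): after setting backend
 * parameters via the control attribute, a backstore is brought online with:
 *
 *	echo 1 > /sys/kernel/config/target/core/$HBA/$DEV/enable
 *
 * Only a write whose payload contains "1" triggers target_configure_device();
 * anything else returns -EINVAL.
 */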
static ssize_t target_dev_alua_lu_gp_show(struct config_item *item, char *page)
{
struct se_device *dev = to_device(item);
struct config_item *lu_ci;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem;
ssize_t len = 0;
lu_gp_mem = dev->dev_alua_lu_gp_mem;
if (!lu_gp_mem)
return 0;
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
if (lu_gp) {
lu_ci = &lu_gp->lu_gp_group.cg_item;
len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
config_item_name(lu_ci), lu_gp->lu_gp_id);
}
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
return len;
}
static ssize_t target_dev_alua_lu_gp_store(struct config_item *item,
const char *page, size_t count)
{
struct se_device *dev = to_device(item);
struct se_hba *hba = dev->se_hba;
struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
struct t10_alua_lu_gp_member *lu_gp_mem;
unsigned char buf[LU_GROUP_NAME_BUF] = { };
int move = 0;
lu_gp_mem = dev->dev_alua_lu_gp_mem;
if (!lu_gp_mem)
return count;
if (count > LU_GROUP_NAME_BUF) {
pr_err("ALUA LU Group Alias too large!\n");
return -EINVAL;
}
memcpy(buf, page, count);
/*
* Any ALUA logical unit alias besides "NULL" means we will be
* making a new group association.
*/
if (strcmp(strstrip(buf), "NULL")) {
/*
* core_alua_get_lu_gp_by_name() will increment reference to
* struct t10_alua_lu_gp. This reference is released with
* core_alua_put_lu_gp_from_name() below.
*/
lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
if (!lu_gp_new)
return -ENODEV;
}
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
if (lu_gp) {
/*
* Clearing an existing lu_gp association, and replacing
* with NULL
*/
if (!lu_gp_new) {
pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
" %hu\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&dev->dev_group.cg_item),
config_item_name(&lu_gp->lu_gp_group.cg_item),
lu_gp->lu_gp_id);
__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
return count;
}
/*
* Removing existing association of lu_gp_mem with lu_gp
*/
__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
move = 1;
}
/*
* Associate lu_gp_mem with lu_gp_new.
*/
__core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
" core/alua/lu_gps/%s, ID: %hu\n",
(move) ? "Moving" : "Adding",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&dev->dev_group.cg_item),
config_item_name(&lu_gp_new->lu_gp_group.cg_item),
lu_gp_new->lu_gp_id);
core_alua_put_lu_gp_from_name(lu_gp_new);
return count;
}
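/*
* Illustrative usage only (paths are hypothetical): writing a LU group
* alias associates the device with that group, while writing the
* literal string "NULL" drops an existing association, e.g.:
*
*   echo default_lu_gp > /sys/kernel/config/target/core/$HBA/$DEV/alua_lu_gp
*   echo NULL > /sys/kernel/config/target/core/$HBA/$DEV/alua_lu_gp
*/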
static ssize_t target_dev_lba_map_show(struct config_item *item, char *page)
{
struct se_device *dev = to_device(item);
struct t10_alua_lba_map *map;
struct t10_alua_lba_map_member *mem;
char *b = page;
int bl = 0;
char state;
spin_lock(&dev->t10_alua.lba_map_lock);
if (!list_empty(&dev->t10_alua.lba_map_list))
bl += sprintf(b + bl, "%u %u\n",
dev->t10_alua.lba_map_segment_size,
dev->t10_alua.lba_map_segment_multiplier);
list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
bl += sprintf(b + bl, "%llu %llu",
map->lba_map_first_lba, map->lba_map_last_lba);
list_for_each_entry(mem, &map->lba_map_mem_list,
lba_map_mem_list) {
switch (mem->lba_map_mem_alua_state) {
case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
state = 'O';
break;
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
state = 'A';
break;
case ALUA_ACCESS_STATE_STANDBY:
state = 'S';
break;
case ALUA_ACCESS_STATE_UNAVAILABLE:
state = 'U';
break;
default:
state = '.';
break;
}
bl += sprintf(b + bl, " %d:%c",
mem->lba_map_mem_alua_pg_id, state);
}
bl += sprintf(b + bl, "\n");
}
spin_unlock(&dev->t10_alua.lba_map_lock);
return bl;
}
static ssize_t target_dev_lba_map_store(struct config_item *item,
const char *page, size_t count)
{
struct se_device *dev = to_device(item);
struct t10_alua_lba_map *lba_map = NULL;
struct list_head lba_list;
char *map_entries, *orig, *ptr;
char state;
int pg_num = -1, pg;
int ret = 0, num = 0, pg_id, alua_state;
unsigned long start_lba = -1, end_lba = -1;
unsigned long segment_size = -1, segment_mult = -1;
orig = map_entries = kstrdup(page, GFP_KERNEL);
if (!map_entries)
return -ENOMEM;
INIT_LIST_HEAD(&lba_list);
while ((ptr = strsep(&map_entries, "\n")) != NULL) {
if (!*ptr)
continue;
if (num == 0) {
if (sscanf(ptr, "%lu %lu\n",
&segment_size, &segment_mult) != 2) {
pr_err("Invalid line %d\n", num);
ret = -EINVAL;
break;
}
num++;
continue;
}
if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
pr_err("Invalid line %d\n", num);
ret = -EINVAL;
break;
}
ptr = strchr(ptr, ' ');
if (!ptr) {
pr_err("Invalid line %d, missing end lba\n", num);
ret = -EINVAL;
break;
}
ptr++;
ptr = strchr(ptr, ' ');
if (!ptr) {
pr_err("Invalid line %d, missing state definitions\n",
num);
ret = -EINVAL;
break;
}
ptr++;
lba_map = core_alua_allocate_lba_map(&lba_list,
start_lba, end_lba);
if (IS_ERR(lba_map)) {
ret = PTR_ERR(lba_map);
break;
}
pg = 0;
while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
switch (state) {
case 'O':
alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
break;
case 'A':
alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
break;
case 'S':
alua_state = ALUA_ACCESS_STATE_STANDBY;
break;
case 'U':
alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
break;
default:
pr_err("Invalid ALUA state '%c'\n", state);
ret = -EINVAL;
goto out;
}
ret = core_alua_allocate_lba_map_mem(lba_map,
pg_id, alua_state);
if (ret) {
pr_err("Invalid target descriptor %d:%c "
"at line %d\n",
pg_id, state, num);
break;
}
pg++;
ptr = strchr(ptr, ' ');
if (ptr)
ptr++;
else
break;
}
if (pg_num == -1)
pg_num = pg;
else if (pg != pg_num) {
pr_err("Only %d from %d port groups definitions "
"at line %d\n", pg, pg_num, num);
ret = -EINVAL;
break;
}
num++;
}
out:
if (ret) {
core_alua_free_lba_map(&lba_list);
count = ret;
} else
core_alua_set_lba_map(dev, &lba_list,
segment_size, segment_mult);
kfree(orig);
return count;
}
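/*
* Sketch of the text format parsed above (values are made up): the
* first non-empty line carries "<segment_size> <segment_multiplier>",
* and every following line carries "<first_lba> <last_lba>" plus one
* "<pg_id>:<state>" pair per target port group, with the state being
* one of O/A/S/U as decoded in the switch statement, e.g.:
*
*   64 1024
*   0 65535 0:O 1:A
*   65536 131071 0:A 1:O
*/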
CONFIGFS_ATTR_RO(target_dev_, info);
CONFIGFS_ATTR_WO(target_dev_, control);
CONFIGFS_ATTR(target_dev_, alias);
CONFIGFS_ATTR(target_dev_, udev_path);
CONFIGFS_ATTR(target_dev_, enable);
CONFIGFS_ATTR(target_dev_, alua_lu_gp);
CONFIGFS_ATTR(target_dev_, lba_map);
static struct configfs_attribute *target_core_dev_attrs[] = {
&target_dev_attr_info,
&target_dev_attr_control,
&target_dev_attr_alias,
&target_dev_attr_udev_path,
&target_dev_attr_enable,
&target_dev_attr_alua_lu_gp,
&target_dev_attr_lba_map,
NULL,
};
static void target_core_dev_release(struct config_item *item)
{
struct config_group *dev_cg = to_config_group(item);
struct se_device *dev =
container_of(dev_cg, struct se_device, dev_group);
target_free_device(dev);
}
/*
* Used in target_core_fabric_configfs.c to verify valid se_device symlink
* within target_fabric_port_link()
*/
struct configfs_item_operations target_core_dev_item_ops = {
.release = target_core_dev_release,
};
TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs);
/* End functions for struct config_item_type tb_dev_cit */
/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
static inline struct t10_alua_lu_gp *to_lu_gp(struct config_item *item)
{
return container_of(to_config_group(item), struct t10_alua_lu_gp,
lu_gp_group);
}
static ssize_t target_lu_gp_lu_gp_id_show(struct config_item *item, char *page)
{
struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
if (!lu_gp->lu_gp_valid_id)
return 0;
return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
}
static ssize_t target_lu_gp_lu_gp_id_store(struct config_item *item,
const char *page, size_t count)
{
struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
unsigned long lu_gp_id;
int ret;
ret = kstrtoul(page, 0, &lu_gp_id);
if (ret < 0) {
pr_err("kstrtoul() returned %d for"
" lu_gp_id\n", ret);
return ret;
}
if (lu_gp_id > 0x0000ffff) {
pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
" 0x0000ffff\n", lu_gp_id);
return -EINVAL;
}
ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
if (ret < 0)
return -EINVAL;
pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit"
" Group: core/alua/lu_gps/%s to ID: %hu\n",
config_item_name(&alua_lu_gp_cg->cg_item),
lu_gp->lu_gp_id);
return count;
}
static ssize_t target_lu_gp_members_show(struct config_item *item, char *page)
{
struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
struct se_device *dev;
struct se_hba *hba;
struct t10_alua_lu_gp_member *lu_gp_mem;
ssize_t len = 0, cur_len;
unsigned char buf[LU_GROUP_NAME_BUF] = { };
spin_lock(&lu_gp->lu_gp_lock);
list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
dev = lu_gp_mem->lu_gp_mem_dev;
hba = dev->se_hba;
cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&dev->dev_group.cg_item));
cur_len++; /* Extra byte for NULL terminator */
if ((cur_len + len) > PAGE_SIZE) {
pr_warn("Ran out of lu_gp_show_attr"
"_members buffer\n");
break;
}
memcpy(page+len, buf, cur_len);
len += cur_len;
}
spin_unlock(&lu_gp->lu_gp_lock);
return len;
}
CONFIGFS_ATTR(target_lu_gp_, lu_gp_id);
CONFIGFS_ATTR_RO(target_lu_gp_, members);
static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
&target_lu_gp_attr_lu_gp_id,
&target_lu_gp_attr_members,
NULL,
};
static void target_core_alua_lu_gp_release(struct config_item *item)
{
struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
struct t10_alua_lu_gp, lu_gp_group);
core_alua_free_lu_gp(lu_gp);
}
static struct configfs_item_operations target_core_alua_lu_gp_ops = {
.release = target_core_alua_lu_gp_release,
};
static const struct config_item_type target_core_alua_lu_gp_cit = {
.ct_item_ops = &target_core_alua_lu_gp_ops,
.ct_attrs = target_core_alua_lu_gp_attrs,
.ct_owner = THIS_MODULE,
};
/* End functions for struct config_item_type target_core_alua_lu_gp_cit */
/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */
static struct config_group *target_core_alua_create_lu_gp(
struct config_group *group,
const char *name)
{
struct t10_alua_lu_gp *lu_gp;
struct config_group *alua_lu_gp_cg = NULL;
struct config_item *alua_lu_gp_ci = NULL;
lu_gp = core_alua_allocate_lu_gp(name, 0);
if (IS_ERR(lu_gp))
return NULL;
alua_lu_gp_cg = &lu_gp->lu_gp_group;
alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;
config_group_init_type_name(alua_lu_gp_cg, name,
&target_core_alua_lu_gp_cit);
pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit"
" Group: core/alua/lu_gps/%s\n",
config_item_name(alua_lu_gp_ci));
return alua_lu_gp_cg;
}
static void target_core_alua_drop_lu_gp(
struct config_group *group,
struct config_item *item)
{
struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
struct t10_alua_lu_gp, lu_gp_group);
pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit"
" Group: core/alua/lu_gps/%s, ID: %hu\n",
config_item_name(item), lu_gp->lu_gp_id);
/*
* core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
* -> target_core_alua_lu_gp_release()
*/
config_item_put(item);
}
static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
.make_group = &target_core_alua_create_lu_gp,
.drop_item = &target_core_alua_drop_lu_gp,
};
static const struct config_item_type target_core_alua_lu_gps_cit = {
.ct_item_ops = NULL,
.ct_group_ops = &target_core_alua_lu_gps_group_ops,
.ct_owner = THIS_MODULE,
};
/* End functions for struct config_item_type target_core_alua_lu_gps_cit */
/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
static inline struct t10_alua_tg_pt_gp *to_tg_pt_gp(struct config_item *item)
{
return container_of(to_config_group(item), struct t10_alua_tg_pt_gp,
tg_pt_gp_group);
}
static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
char *page)
{
return sprintf(page, "%d\n",
to_tg_pt_gp(item)->tg_pt_gp_alua_access_state);
}
static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
const char *page, size_t count)
{
struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
unsigned long tmp;
int new_state, ret;
if (!tg_pt_gp->tg_pt_gp_valid_id) {
pr_err("Unable to do implicit ALUA on invalid tg_pt_gp ID\n");
return -EINVAL;
}
if (!target_dev_configured(dev)) {
pr_err("Unable to set alua_access_state while device is"
" not configured\n");
return -ENODEV;
}
ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract new ALUA access state from"
" %s\n", page);
return ret;
}
new_state = (int)tmp;
if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) {
pr_err("Unable to process implicit configfs ALUA"
" transition while TPGS_IMPLICIT_ALUA is disabled\n");
return -EINVAL;
}
if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
/* LBA DEPENDENT is only allowed with implicit ALUA */
pr_err("Unable to process implicit configfs ALUA transition"
" while explicit ALUA management is enabled\n");
return -EINVAL;
}
ret = core_alua_do_port_transition(tg_pt_gp, dev,
NULL, NULL, new_state, 0);
return (!ret) ? count : -EINVAL;
}
static ssize_t target_tg_pt_gp_alua_access_status_show(struct config_item *item,
char *page)
{
struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
return sprintf(page, "%s\n",
core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
}
static ssize_t target_tg_pt_gp_alua_access_status_store(
struct config_item *item, const char *page, size_t count)
{
struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
unsigned long tmp;
int new_status, ret;
if (!tg_pt_gp->tg_pt_gp_valid_id) {
pr_err("Unable to set ALUA access status on invalid tg_pt_gp ID\n");
return -EINVAL;
}
ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract new ALUA access status"
" from %s\n", page);
return ret;
}
new_status = (int)tmp;
if ((new_status != ALUA_STATUS_NONE) &&
(new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
(new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
pr_err("Illegal ALUA access status: 0x%02x\n",
new_status);
return -EINVAL;
}
tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
return count;
}
static ssize_t target_tg_pt_gp_alua_access_type_show(struct config_item *item,
char *page)
{
return core_alua_show_access_type(to_tg_pt_gp(item), page);
}
static ssize_t target_tg_pt_gp_alua_access_type_store(struct config_item *item,
const char *page, size_t count)
{
return core_alua_store_access_type(to_tg_pt_gp(item), page, count);
}
#define ALUA_SUPPORTED_STATE_ATTR(_name, _bit) \
static ssize_t target_tg_pt_gp_alua_support_##_name##_show( \
struct config_item *item, char *p) \
{ \
struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item); \
return sprintf(p, "%d\n", \
!!(t->tg_pt_gp_alua_supported_states & _bit)); \
} \
\
static ssize_t target_tg_pt_gp_alua_support_##_name##_store( \
struct config_item *item, const char *p, size_t c) \
{ \
struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item); \
unsigned long tmp; \
int ret; \
\
if (!t->tg_pt_gp_valid_id) { \
pr_err("Unable to set " #_name " ALUA state on invalid tg_pt_gp ID\n"); \
return -EINVAL; \
} \
\
ret = kstrtoul(p, 0, &tmp); \
if (ret < 0) { \
pr_err("Invalid value '%s', must be '0' or '1'\n", p); \
return -EINVAL; \
} \
if (tmp > 1) { \
pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
return -EINVAL; \
} \
if (tmp) \
t->tg_pt_gp_alua_supported_states |= _bit; \
else \
t->tg_pt_gp_alua_supported_states &= ~_bit; \
\
return c; \
}
ALUA_SUPPORTED_STATE_ATTR(transitioning, ALUA_T_SUP);
ALUA_SUPPORTED_STATE_ATTR(offline, ALUA_O_SUP);
ALUA_SUPPORTED_STATE_ATTR(lba_dependent, ALUA_LBD_SUP);
ALUA_SUPPORTED_STATE_ATTR(unavailable, ALUA_U_SUP);
ALUA_SUPPORTED_STATE_ATTR(standby, ALUA_S_SUP);
ALUA_SUPPORTED_STATE_ATTR(active_optimized, ALUA_AO_SUP);
ALUA_SUPPORTED_STATE_ATTR(active_nonoptimized, ALUA_AN_SUP);
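/*
* Each ALUA_SUPPORTED_STATE_ATTR() invocation above expands to a
* show/store pair such as target_tg_pt_gp_alua_support_offline_show()
* and target_tg_pt_gp_alua_support_offline_store(), which the matching
* CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_offline) declaration
* further below wires up as a configfs attribute.
*/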
static ssize_t target_tg_pt_gp_alua_write_metadata_show(
struct config_item *item, char *page)
{
return sprintf(page, "%d\n",
to_tg_pt_gp(item)->tg_pt_gp_write_metadata);
}
static ssize_t target_tg_pt_gp_alua_write_metadata_store(
struct config_item *item, const char *page, size_t count)
{
struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
unsigned long tmp;
int ret;
ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract alua_write_metadata\n");
return ret;
}
if ((tmp != 0) && (tmp != 1)) {
pr_err("Illegal value for alua_write_metadata:"
" %lu\n", tmp);
return -EINVAL;
}
tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;
return count;
}
static ssize_t target_tg_pt_gp_nonop_delay_msecs_show(struct config_item *item,
char *page)
{
return core_alua_show_nonop_delay_msecs(to_tg_pt_gp(item), page);
}
static ssize_t target_tg_pt_gp_nonop_delay_msecs_store(struct config_item *item,
const char *page, size_t count)
{
return core_alua_store_nonop_delay_msecs(to_tg_pt_gp(item), page,
count);
}
static ssize_t target_tg_pt_gp_trans_delay_msecs_show(struct config_item *item,
char *page)
{
return core_alua_show_trans_delay_msecs(to_tg_pt_gp(item), page);
}
static ssize_t target_tg_pt_gp_trans_delay_msecs_store(struct config_item *item,
const char *page, size_t count)
{
return core_alua_store_trans_delay_msecs(to_tg_pt_gp(item), page,
count);
}
static ssize_t target_tg_pt_gp_implicit_trans_secs_show(
struct config_item *item, char *page)
{
return core_alua_show_implicit_trans_secs(to_tg_pt_gp(item), page);
}
static ssize_t target_tg_pt_gp_implicit_trans_secs_store(
struct config_item *item, const char *page, size_t count)
{
return core_alua_store_implicit_trans_secs(to_tg_pt_gp(item), page,
count);
}
static ssize_t target_tg_pt_gp_preferred_show(struct config_item *item,
char *page)
{
return core_alua_show_preferred_bit(to_tg_pt_gp(item), page);
}
static ssize_t target_tg_pt_gp_preferred_store(struct config_item *item,
const char *page, size_t count)
{
return core_alua_store_preferred_bit(to_tg_pt_gp(item), page, count);
}
static ssize_t target_tg_pt_gp_tg_pt_gp_id_show(struct config_item *item,
char *page)
{
struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
if (!tg_pt_gp->tg_pt_gp_valid_id)
return 0;
return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
}
static ssize_t target_tg_pt_gp_tg_pt_gp_id_store(struct config_item *item,
const char *page, size_t count)
{
struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
unsigned long tg_pt_gp_id;
int ret;
ret = kstrtoul(page, 0, &tg_pt_gp_id);
if (ret < 0) {
pr_err("ALUA tg_pt_gp_id: invalid value '%s' for tg_pt_gp_id\n",
page);
return ret;
}
if (tg_pt_gp_id > 0x0000ffff) {
pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum: 0x0000ffff\n",
tg_pt_gp_id);
return -EINVAL;
}
ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
if (ret < 0)
return -EINVAL;
pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
"core/alua/tg_pt_gps/%s to ID: %hu\n",
config_item_name(&alua_tg_pt_gp_cg->cg_item),
tg_pt_gp->tg_pt_gp_id);
return count;
}
static ssize_t target_tg_pt_gp_members_show(struct config_item *item,
char *page)
{
struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
struct se_lun *lun;
ssize_t len = 0, cur_len;
unsigned char buf[TG_PT_GROUP_NAME_BUF] = { };
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
lun_tg_pt_gp_link) {
struct se_portal_group *tpg = lun->lun_tpg;
cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
"/%s\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_wwn(tpg),
tpg->se_tpg_tfo->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item));
cur_len++; /* Extra byte for NULL terminator */
if ((cur_len + len) > PAGE_SIZE) {
pr_warn("Ran out of lu_gp_show_attr"
"_members buffer\n");
break;
}
memcpy(page+len, buf, cur_len);
len += cur_len;
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
return len;
}
CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_state);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_status);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_type);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_transitioning);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_offline);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_lba_dependent);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_unavailable);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_standby);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_optimized);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_nonoptimized);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_write_metadata);
CONFIGFS_ATTR(target_tg_pt_gp_, nonop_delay_msecs);
CONFIGFS_ATTR(target_tg_pt_gp_, trans_delay_msecs);
CONFIGFS_ATTR(target_tg_pt_gp_, implicit_trans_secs);
CONFIGFS_ATTR(target_tg_pt_gp_, preferred);
CONFIGFS_ATTR(target_tg_pt_gp_, tg_pt_gp_id);
CONFIGFS_ATTR_RO(target_tg_pt_gp_, members);
static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
&target_tg_pt_gp_attr_alua_access_state,
&target_tg_pt_gp_attr_alua_access_status,
&target_tg_pt_gp_attr_alua_access_type,
&target_tg_pt_gp_attr_alua_support_transitioning,
&target_tg_pt_gp_attr_alua_support_offline,
&target_tg_pt_gp_attr_alua_support_lba_dependent,
&target_tg_pt_gp_attr_alua_support_unavailable,
&target_tg_pt_gp_attr_alua_support_standby,
&target_tg_pt_gp_attr_alua_support_active_nonoptimized,
&target_tg_pt_gp_attr_alua_support_active_optimized,
&target_tg_pt_gp_attr_alua_write_metadata,
&target_tg_pt_gp_attr_nonop_delay_msecs,
&target_tg_pt_gp_attr_trans_delay_msecs,
&target_tg_pt_gp_attr_implicit_trans_secs,
&target_tg_pt_gp_attr_preferred,
&target_tg_pt_gp_attr_tg_pt_gp_id,
&target_tg_pt_gp_attr_members,
NULL,
};
static void target_core_alua_tg_pt_gp_release(struct config_item *item)
{
struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
struct t10_alua_tg_pt_gp, tg_pt_gp_group);
core_alua_free_tg_pt_gp(tg_pt_gp);
}
static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
.release = target_core_alua_tg_pt_gp_release,
};
static const struct config_item_type target_core_alua_tg_pt_gp_cit = {
.ct_item_ops = &target_core_alua_tg_pt_gp_ops,
.ct_attrs = target_core_alua_tg_pt_gp_attrs,
.ct_owner = THIS_MODULE,
};
/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */
static struct config_group *target_core_alua_create_tg_pt_gp(
struct config_group *group,
const char *name)
{
struct t10_alua *alua = container_of(group, struct t10_alua,
alua_tg_pt_gps_group);
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct config_group *alua_tg_pt_gp_cg = NULL;
struct config_item *alua_tg_pt_gp_ci = NULL;
tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
if (!tg_pt_gp)
return NULL;
alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;
config_group_init_type_name(alua_tg_pt_gp_cg, name,
&target_core_alua_tg_pt_gp_cit);
pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
" Group: alua/tg_pt_gps/%s\n",
config_item_name(alua_tg_pt_gp_ci));
return alua_tg_pt_gp_cg;
}
static void target_core_alua_drop_tg_pt_gp(
struct config_group *group,
struct config_item *item)
{
struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
struct t10_alua_tg_pt_gp, tg_pt_gp_group);
pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
" Group: alua/tg_pt_gps/%s, ID: %hu\n",
config_item_name(item), tg_pt_gp->tg_pt_gp_id);
/*
* core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
* -> target_core_alua_tg_pt_gp_release().
*/
config_item_put(item);
}
static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
.make_group = &target_core_alua_create_tg_pt_gp,
.drop_item = &target_core_alua_drop_tg_pt_gp,
};
TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);
/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */
/* Start functions for struct config_item_type target_core_alua_cit */
/*
* target_core_alua_cit is a ConfigFS group that lives under
* /sys/kernel/config/target/core/alua. There are default groups
* core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
* target_core_alua_cit in target_core_init_configfs() below.
*/
static const struct config_item_type target_core_alua_cit = {
.ct_item_ops = NULL,
.ct_attrs = NULL,
.ct_owner = THIS_MODULE,
};
/* End functions for struct config_item_type target_core_alua_cit */
/* Start functions for struct config_item_type tb_dev_stat_cit */
static struct config_group *target_core_stat_mkdir(
struct config_group *group,
const char *name)
{
return ERR_PTR(-ENOSYS);
}
static void target_core_stat_rmdir(
struct config_group *group,
struct config_item *item)
{
return;
}
static struct configfs_group_operations target_core_stat_group_ops = {
.make_group = &target_core_stat_mkdir,
.drop_item = &target_core_stat_rmdir,
};
TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL);
/* End functions for struct config_item_type tb_dev_stat_cit */
/* Start functions for struct config_item_type target_core_hba_cit */
static struct config_group *target_core_make_subdev(
struct config_group *group,
const char *name)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct config_item *hba_ci = &group->cg_item;
struct se_hba *hba = item_to_hba(hba_ci);
struct target_backend *tb = hba->backend;
struct se_device *dev;
int errno = -ENOMEM, ret;
ret = mutex_lock_interruptible(&hba->hba_access_mutex);
if (ret)
return ERR_PTR(ret);
dev = target_alloc_device(hba, name);
if (!dev)
goto out_unlock;
config_group_init_type_name(&dev->dev_group, name, &tb->tb_dev_cit);
config_group_init_type_name(&dev->dev_action_group, "action",
&tb->tb_dev_action_cit);
configfs_add_default_group(&dev->dev_action_group, &dev->dev_group);
config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
&tb->tb_dev_attrib_cit);
configfs_add_default_group(&dev->dev_attrib.da_group, &dev->dev_group);
config_group_init_type_name(&dev->dev_pr_group, "pr",
&tb->tb_dev_pr_cit);
configfs_add_default_group(&dev->dev_pr_group, &dev->dev_group);
config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
&tb->tb_dev_wwn_cit);
configfs_add_default_group(&dev->t10_wwn.t10_wwn_group,
&dev->dev_group);
config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
"alua", &tb->tb_dev_alua_tg_pt_gps_cit);
configfs_add_default_group(&dev->t10_alua.alua_tg_pt_gps_group,
&dev->dev_group);
config_group_init_type_name(&dev->dev_stat_grps.stat_group,
"statistics", &tb->tb_dev_stat_cit);
configfs_add_default_group(&dev->dev_stat_grps.stat_group,
&dev->dev_group);
/*
* Add core/$HBA/$DEV/alua/default_tg_pt_gp
*/
tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
if (!tg_pt_gp)
goto out_free_device;
dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
configfs_add_default_group(&tg_pt_gp->tg_pt_gp_group,
&dev->t10_alua.alua_tg_pt_gps_group);
/*
* Add core/$HBA/$DEV/statistics/ default groups
*/
target_stat_setup_dev_default_groups(dev);
mutex_lock(&target_devices_lock);
target_devices++;
mutex_unlock(&target_devices_lock);
mutex_unlock(&hba->hba_access_mutex);
return &dev->dev_group;
out_free_device:
target_free_device(dev);
out_unlock:
mutex_unlock(&hba->hba_access_mutex);
return ERR_PTR(errno);
}
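/*
* For reference, a successful mkdir of core/$HBA/$DEV through the group
* operation above leaves this default configfs layout behind (a sketch
* based on the config_group_init_type_name() calls in this function):
*
*   core/$HBA/$DEV/action
*   core/$HBA/$DEV/attrib
*   core/$HBA/$DEV/pr
*   core/$HBA/$DEV/wwn
*   core/$HBA/$DEV/alua/default_tg_pt_gp
*   core/$HBA/$DEV/statistics
*/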
static void target_core_drop_subdev(
struct config_group *group,
struct config_item *item)
{
struct config_group *dev_cg = to_config_group(item);
struct se_device *dev =
container_of(dev_cg, struct se_device, dev_group);
struct se_hba *hba;
hba = item_to_hba(&dev->se_hba->hba_group.cg_item);
mutex_lock(&hba->hba_access_mutex);
configfs_remove_default_groups(&dev->dev_stat_grps.stat_group);
configfs_remove_default_groups(&dev->t10_alua.alua_tg_pt_gps_group);
/*
* core_alua_free_tg_pt_gp() for ->default_tg_pt_gp is called
* directly from target_core_alua_tg_pt_gp_release().
*/
dev->t10_alua.default_tg_pt_gp = NULL;
configfs_remove_default_groups(dev_cg);
/*
* se_dev is released from target_core_dev_item_ops->release()
*/
config_item_put(item);
mutex_lock(&target_devices_lock);
target_devices--;
mutex_unlock(&target_devices_lock);
mutex_unlock(&hba->hba_access_mutex);
}
static struct configfs_group_operations target_core_hba_group_ops = {
.make_group = target_core_make_subdev,
.drop_item = target_core_drop_subdev,
};
static inline struct se_hba *to_hba(struct config_item *item)
{
return container_of(to_config_group(item), struct se_hba, hba_group);
}
static ssize_t target_hba_info_show(struct config_item *item, char *page)
{
struct se_hba *hba = to_hba(item);
return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
hba->hba_id, hba->backend->ops->name,
TARGET_CORE_VERSION);
}
static ssize_t target_hba_mode_show(struct config_item *item, char *page)
{
struct se_hba *hba = to_hba(item);
int hba_mode = 0;
if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
hba_mode = 1;
return sprintf(page, "%d\n", hba_mode);
}
static ssize_t target_hba_mode_store(struct config_item *item,
const char *page, size_t count)
{
struct se_hba *hba = to_hba(item);
unsigned long mode_flag;
int ret;
if (hba->backend->ops->pmode_enable_hba == NULL)
return -EINVAL;
ret = kstrtoul(page, 0, &mode_flag);
if (ret < 0) {
pr_err("Unable to extract hba mode flag: %d\n", ret);
return ret;
}
if (hba->dev_count) {
pr_err("Unable to set hba_mode with active devices\n");
return -EINVAL;
}
ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag);
if (ret < 0)
return -EINVAL;
if (ret > 0)
hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
else if (ret == 0)
hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
return count;
}
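/*
* Illustrative usage only (the HBA name is hypothetical): hba_mode can
* only be toggled for backends that implement ->pmode_enable_hba() and
* while no devices are active on the HBA, e.g.:
*
*   echo 1 > /sys/kernel/config/target/core/pscsi_0/hba_mode
*/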
CONFIGFS_ATTR_RO(target_, hba_info);
CONFIGFS_ATTR(target_, hba_mode);
static void target_core_hba_release(struct config_item *item)
{
struct se_hba *hba = container_of(to_config_group(item),
struct se_hba, hba_group);
core_delete_hba(hba);
}
static struct configfs_attribute *target_core_hba_attrs[] = {
&target_attr_hba_info,
&target_attr_hba_mode,
NULL,
};
static struct configfs_item_operations target_core_hba_item_ops = {
.release = target_core_hba_release,
};
static const struct config_item_type target_core_hba_cit = {
.ct_item_ops = &target_core_hba_item_ops,
.ct_group_ops = &target_core_hba_group_ops,
.ct_attrs = target_core_hba_attrs,
.ct_owner = THIS_MODULE,
};
static struct config_group *target_core_call_addhbatotarget(
struct config_group *group,
const char *name)
{
char *se_plugin_str, *str, *str2;
struct se_hba *hba;
char buf[TARGET_CORE_NAME_MAX_LEN] = { };
unsigned long plugin_dep_id = 0;
int ret;
if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
pr_err("Passed *name strlen(): %d exceeds"
" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
TARGET_CORE_NAME_MAX_LEN);
return ERR_PTR(-ENAMETOOLONG);
}
snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
str = strstr(buf, "_");
if (!str) {
pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
return ERR_PTR(-EINVAL);
}
se_plugin_str = buf;
/*
* Special case for subsystem plugins that have "_" in their names.
* Namely rd_direct and rd_mcp.
*/
str2 = strstr(str+1, "_");
if (str2) {
*str2 = '\0'; /* Terminate for *se_plugin_str */
str2++; /* Skip to start of plugin dependent ID */
str = str2;
} else {
*str = '\0'; /* Terminate for *se_plugin_str */
str++; /* Skip to start of plugin dependent ID */
}
ret = kstrtoul(str, 0, &plugin_dep_id);
if (ret < 0) {
pr_err("kstrtoul() returned %d for"
" plugin_dep_id\n", ret);
return ERR_PTR(ret);
}
/*
* Load up TCM subsystem plugins if they have not already been loaded.
*/
transport_subsystem_check_init();
hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
if (IS_ERR(hba))
return ERR_CAST(hba);
config_group_init_type_name(&hba->hba_group, name,
&target_core_hba_cit);
return &hba->hba_group;
}
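/*
* Illustrative only: the HBA directory name is parsed above as
* $SUBSYSTEM_PLUGIN_$HOST_ID, so a hypothetical
* "mkdir /sys/kernel/config/target/core/fileio_1" yields plugin
* "fileio" with plugin_dep_id 1, while "rd_mcp_2" keeps the embedded
* underscore and yields plugin "rd_mcp" with plugin_dep_id 2.
*/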
static void target_core_call_delhbafromtarget(
struct config_group *group,
struct config_item *item)
{
/*
* core_delete_hba() is called from target_core_hba_item_ops->release()
* -> target_core_hba_release()
*/
config_item_put(item);
}
static struct configfs_group_operations target_core_group_ops = {
.make_group = target_core_call_addhbatotarget,
.drop_item = target_core_call_delhbafromtarget,
};
static const struct config_item_type target_core_cit = {
.ct_item_ops = NULL,
.ct_group_ops = &target_core_group_ops,
.ct_attrs = NULL,
.ct_owner = THIS_MODULE,
};
/* Stop functions for struct config_item_type target_core_hba_cit */
void target_setup_backend_cits(struct target_backend *tb)
{
target_core_setup_dev_cit(tb);
target_core_setup_dev_action_cit(tb);
target_core_setup_dev_attrib_cit(tb);
target_core_setup_dev_pr_cit(tb);
target_core_setup_dev_wwn_cit(tb);
target_core_setup_dev_alua_tg_pt_gps_cit(tb);
target_core_setup_dev_stat_cit(tb);
}
static void target_init_dbroot(void)
{
struct file *fp;
snprintf(db_root_stage, DB_ROOT_LEN, DB_ROOT_PREFERRED);
fp = filp_open(db_root_stage, O_RDONLY, 0);
if (IS_ERR(fp)) {
pr_err("db_root: cannot open: %s\n", db_root_stage);
return;
}
if (!S_ISDIR(file_inode(fp)->i_mode)) {
filp_close(fp, NULL);
pr_err("db_root: not a valid directory: %s\n", db_root_stage);
return;
}
filp_close(fp, NULL);
strncpy(db_root, db_root_stage, DB_ROOT_LEN);
pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
}
static int __init target_core_init_configfs(void)
{
struct configfs_subsystem *subsys = &target_core_fabrics;
struct t10_alua_lu_gp *lu_gp;
int ret;
pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
" Engine: %s on %s/%s on "UTS_RELEASE"\n",
TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
config_group_init(&subsys->su_group);
mutex_init(&subsys->su_mutex);
ret = init_se_kmem_caches();
if (ret < 0)
return ret;
/*
* Create $CONFIGFS/target/core default group for HBA <-> Storage Object
* and ALUA Logical Unit Group and Target Port Group infrastructure.
*/
config_group_init_type_name(&target_core_hbagroup, "core",
&target_core_cit);
configfs_add_default_group(&target_core_hbagroup, &subsys->su_group);
/*
* Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
*/
config_group_init_type_name(&alua_group, "alua", &target_core_alua_cit);
configfs_add_default_group(&alua_group, &target_core_hbagroup);
/*
* Add ALUA Logical Unit Group and Target Port Group ConfigFS
* groups under /sys/kernel/config/target/core/alua/
*/
config_group_init_type_name(&alua_lu_gps_group, "lu_gps",
&target_core_alua_lu_gps_cit);
configfs_add_default_group(&alua_lu_gps_group, &alua_group);
/*
* Add core/alua/lu_gps/default_lu_gp
*/
lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
if (IS_ERR(lu_gp)) {
ret = -ENOMEM;
goto out_global;
}
config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
&target_core_alua_lu_gp_cit);
configfs_add_default_group(&lu_gp->lu_gp_group, &alua_lu_gps_group);
default_lu_gp = lu_gp;
/*
* Register the target_core_mod subsystem with configfs.
*/
ret = configfs_register_subsystem(subsys);
if (ret < 0) {
pr_err("Error %d while registering subsystem %s\n",
ret, subsys->su_group.cg_item.ci_namebuf);
goto out_global;
}
pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
" Infrastructure: "TARGET_CORE_VERSION" on %s/%s"
" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
/*
* Register built-in RAMDISK subsystem logic for virtual LUN 0
*/
ret = rd_module_init();
if (ret < 0)
goto out;
ret = core_dev_setup_virtual_lun0();
if (ret < 0)
goto out;
ret = target_xcopy_setup_pt();
if (ret < 0)
goto out;
target_init_dbroot();
return 0;
out:
configfs_unregister_subsystem(subsys);
core_dev_release_virtual_lun0();
rd_module_exit();
out_global:
if (default_lu_gp) {
core_alua_free_lu_gp(default_lu_gp);
default_lu_gp = NULL;
}
release_se_kmem_caches();
return ret;
}
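/*
* At module load the function above therefore leaves the following
* default configfs skeleton in place (a sketch; fabric and backend
* directories are only created later by userspace):
*
*   /sys/kernel/config/target/core
*   /sys/kernel/config/target/core/alua
*   /sys/kernel/config/target/core/alua/lu_gps
*   /sys/kernel/config/target/core/alua/lu_gps/default_lu_gp
*/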
static void __exit target_core_exit_configfs(void)
{
configfs_remove_default_groups(&alua_lu_gps_group);
configfs_remove_default_groups(&alua_group);
configfs_remove_default_groups(&target_core_hbagroup);
/*
* We expect subsys->su_group.default_groups to be released
* by configfs subsystem provider logic.
*/
configfs_unregister_subsystem(&target_core_fabrics);
core_alua_free_lu_gp(default_lu_gp);
default_lu_gp = NULL;
pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
" Infrastructure\n");
core_dev_release_virtual_lun0();
rd_module_exit();
target_xcopy_release_pt();
release_se_kmem_caches();
}
MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
MODULE_AUTHOR("[email protected]");
MODULE_LICENSE("GPL");
module_init(target_core_init_configfs);
module_exit(target_core_exit_configfs);
| linux-master | drivers/target/target_core_configfs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SCSI Primary Commands (SPC) parsing and emulation.
*
* (c) Copyright 2002-2013 Datera, Inc.
*
* Nicholas A. Bellinger <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
#include "target_core_xcopy.h"
static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
/*
* Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
*/
buf[5] = 0x80;
/*
* Set TPGS field for explicit and/or implicit ALUA access type
* and operation.
*
* See spc4r17 section 6.4.2 Table 135
*/
rcu_read_lock();
tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
if (tg_pt_gp)
buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
rcu_read_unlock();
}
static u16
spc_find_scsi_transport_vd(int proto_id)
{
switch (proto_id) {
case SCSI_PROTOCOL_FCP:
return SCSI_VERSION_DESCRIPTOR_FCP4;
case SCSI_PROTOCOL_ISCSI:
return SCSI_VERSION_DESCRIPTOR_ISCSI;
case SCSI_PROTOCOL_SAS:
return SCSI_VERSION_DESCRIPTOR_SAS3;
case SCSI_PROTOCOL_SBP:
return SCSI_VERSION_DESCRIPTOR_SBP3;
case SCSI_PROTOCOL_SRP:
return SCSI_VERSION_DESCRIPTOR_SRP;
default:
pr_warn("Cannot find VERSION DESCRIPTOR value for unknown SCSI"
" transport PROTOCOL IDENTIFIER %#x\n", proto_id);
return 0;
}
}
sense_reason_t
spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
{
struct se_lun *lun = cmd->se_lun;
struct se_portal_group *tpg = lun->lun_tpg;
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
/* Set RMB (removable media) for tape devices */
if (dev->transport->get_device_type(dev) == TYPE_TAPE)
buf[1] = 0x80;
buf[2] = 0x06; /* SPC-4 */
/*
* NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
*
* SPC4 says:
* A RESPONSE DATA FORMAT field set to 2h indicates that the
* standard INQUIRY data is in the format defined in this
* standard. Response data format values less than 2h are
* obsolete. Response data format values greater than 2h are
* reserved.
*/
buf[3] = 2;
/*
* Enable SCCS and TPGS fields for Emulated ALUA
*/
spc_fill_alua_data(lun, buf);
/*
* Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
*/
if (dev->dev_attrib.emulate_3pc)
buf[5] |= 0x8;
/*
* Set Protection (PROTECT) bit when DIF has been enabled on the
* device, and the fabric supports VERIFY + PASS. Also report
* PROTECT=1 if sess_prot_type has been configured to allow T10-PI
* to unprotected devices.
*/
if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
if (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)
buf[5] |= 0x1;
}
/*
* Set MULTIP bit to indicate presence of multiple SCSI target ports
*/
if (dev->export_count > 1)
buf[6] |= 0x10;
buf[7] = 0x2; /* CmdQue=1 */
/*
* ASCII data fields described as being left-aligned shall have any
* unused bytes at the end of the field (i.e., highest offset) and the
* unused bytes shall be filled with ASCII space characters (20h).
*/
memset(&buf[8], 0x20,
INQUIRY_VENDOR_LEN + INQUIRY_MODEL_LEN + INQUIRY_REVISION_LEN);
memcpy(&buf[8], dev->t10_wwn.vendor,
strnlen(dev->t10_wwn.vendor, INQUIRY_VENDOR_LEN));
memcpy(&buf[16], dev->t10_wwn.model,
strnlen(dev->t10_wwn.model, INQUIRY_MODEL_LEN));
memcpy(&buf[32], dev->t10_wwn.revision,
strnlen(dev->t10_wwn.revision, INQUIRY_REVISION_LEN));
/*
* Set the VERSION DESCRIPTOR fields
*/
put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SAM5, &buf[58]);
put_unaligned_be16(spc_find_scsi_transport_vd(tpg->proto_id), &buf[60]);
put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SPC4, &buf[62]);
if (cmd->se_dev->transport->get_device_type(dev) == TYPE_DISK)
put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SBC3, &buf[64]);
buf[4] = 91; /* Set additional length to 91 */
return 0;
}
EXPORT_SYMBOL(spc_emulate_inquiry_std);
/* unit serial number */
static sense_reason_t
spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
u16 len;
if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
len++; /* Extra Byte for NULL Terminator */
buf[3] = len;
}
return 0;
}
/*
* Generate NAA IEEE Registered Extended designator
*/
void spc_gen_naa_6h_vendor_specific(struct se_device *dev,
unsigned char *buf)
{
unsigned char *p = &dev->t10_wwn.unit_serial[0];
u32 company_id = dev->t10_wwn.company_id;
int cnt, off = 0;
bool next = true;
/*
* Start NAA IEEE Registered Extended Identifier/Designator
*/
buf[off] = 0x6 << 4;
/* IEEE COMPANY_ID */
buf[off++] |= (company_id >> 20) & 0xf;
buf[off++] = (company_id >> 12) & 0xff;
buf[off++] = (company_id >> 4) & 0xff;
buf[off] = (company_id & 0xf) << 4;
/*
* Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
* byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
* format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
* to complete the payload. These are based on the VPD=0x80 PRODUCT SERIAL
* NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
* per-device uniqueness.
*/
for (cnt = off + 13; *p && off < cnt; p++) {
int val = hex_to_bin(*p);
if (val < 0)
continue;
if (next) {
next = false;
buf[off++] |= val;
} else {
next = true;
buf[off] = val << 4;
}
}
}
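/*
* Worked example of the packing above, with a purely hypothetical
* company_id of 0x001405 and a unit_serial starting "abcd":
*
*   buf[0] = 0x60 | 0x0 = 0x60  (NAA 6h + company_id bits 23..20)
*   buf[1] = 0x01               (company_id bits 19..12)
*   buf[2] = 0x40               (company_id bits 11..4)
*   buf[3] = 0x50 | 0xa = 0x5a  (company_id bits 3..0 + first serial nibble)
*   buf[4] = 0xbc, buf[5] = 0xd?, ...  one serial hex digit per nibble.
*/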
/*
* Device identification VPD, for a complete list of
* DESIGNATOR TYPEs see spc4r17 Table 459.
*/
sense_reason_t
spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
struct se_lun *lun = cmd->se_lun;
struct se_portal_group *tpg = NULL;
struct t10_alua_lu_gp_member *lu_gp_mem;
struct t10_alua_tg_pt_gp *tg_pt_gp;
unsigned char *prod = &dev->t10_wwn.model[0];
u32 off = 0;
u16 len = 0, id_len;
off = 4;
/*
* NAA IEEE Registered Extended Assigned designator format, see
* spc4r17 section 7.7.3.6.5
*
* We depend upon a target_core_mod/ConfigFS provided
* /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
* value in order to return the NAA id.
*/
if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
goto check_t10_vend_desc;
/* CODE SET == Binary */
buf[off++] = 0x1;
/* Set ASSOCIATION == addressed logical unit: 00b */
buf[off] = 0x00;
/* Identifier/Designator type == NAA identifier */
buf[off++] |= 0x3;
off++;
/* Identifier/Designator length */
buf[off++] = 0x10;
/* NAA IEEE Registered Extended designator */
spc_gen_naa_6h_vendor_specific(dev, &buf[off]);
len = 20;
off = (len + 4);
check_t10_vend_desc:
/*
* T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
*/
id_len = 8; /* For Vendor field */
if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL)
id_len += sprintf(&buf[off+12], "%s:%s", prod,
&dev->t10_wwn.unit_serial[0]);
buf[off] = 0x2; /* ASCII */
buf[off+1] = 0x1; /* T10 Vendor ID */
buf[off+2] = 0x0;
/* left align Vendor ID and pad with spaces */
memset(&buf[off+4], 0x20, INQUIRY_VENDOR_LEN);
memcpy(&buf[off+4], dev->t10_wwn.vendor,
strnlen(dev->t10_wwn.vendor, INQUIRY_VENDOR_LEN));
/* Extra Byte for NULL Terminator */
id_len++;
/* Identifier Length */
buf[off+3] = id_len;
/* Header size for Designation descriptor */
len += (id_len + 4);
off += (id_len + 4);
if (1) {
struct t10_alua_lu_gp *lu_gp;
u32 padding, scsi_name_len, scsi_target_len;
u16 lu_gp_id = 0;
u16 tg_pt_gp_id = 0;
u16 tpgt;
tpg = lun->lun_tpg;
/*
* Relative target port identifier, see spc4r17
* section 7.7.3.7
*
* Get the PROTOCOL IDENTIFIER as defined by spc4r17
* section 7.5.1 Table 362
*/
buf[off] = tpg->proto_id << 4;
buf[off++] |= 0x1; /* CODE SET == Binary */
buf[off] = 0x80; /* Set PIV=1 */
/* Set ASSOCIATION == target port: 01b */
buf[off] |= 0x10;
/* DESIGNATOR TYPE == Relative target port identifier */
buf[off++] |= 0x4;
off++; /* Skip over Reserved */
buf[off++] = 4; /* DESIGNATOR LENGTH */
/* Skip over Obsolete field in RTPI payload
* in Table 472 */
off += 2;
put_unaligned_be16(lun->lun_tpg->tpg_rtpi, &buf[off]);
off += 2;
len += 8; /* Header size + Designation descriptor */
/*
* Target port group identifier, see spc4r17
* section 7.7.3.8
*
* Get the PROTOCOL IDENTIFIER as defined by spc4r17
* section 7.5.1 Table 362
*/
rcu_read_lock();
tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
if (!tg_pt_gp) {
rcu_read_unlock();
goto check_lu_gp;
}
tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
rcu_read_unlock();
buf[off] = tpg->proto_id << 4;
buf[off++] |= 0x1; /* CODE SET == Binary */
buf[off] = 0x80; /* Set PIV=1 */
/* Set ASSOCIATION == target port: 01b */
buf[off] |= 0x10;
/* DESIGNATOR TYPE == Target port group identifier */
buf[off++] |= 0x5;
off++; /* Skip over Reserved */
buf[off++] = 4; /* DESIGNATOR LENGTH */
off += 2; /* Skip over Reserved Field */
put_unaligned_be16(tg_pt_gp_id, &buf[off]);
off += 2;
len += 8; /* Header size + Designation descriptor */
/*
* Logical Unit Group identifier, see spc4r17
* section 7.7.3.8
*/
check_lu_gp:
lu_gp_mem = dev->dev_alua_lu_gp_mem;
if (!lu_gp_mem)
goto check_scsi_name;
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
if (!lu_gp) {
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
goto check_scsi_name;
}
lu_gp_id = lu_gp->lu_gp_id;
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
buf[off++] |= 0x1; /* CODE SET == Binary */
/* DESIGNATOR TYPE == Logical Unit Group identifier */
buf[off++] |= 0x6;
off++; /* Skip over Reserved */
buf[off++] = 4; /* DESIGNATOR LENGTH */
off += 2; /* Skip over Reserved Field */
put_unaligned_be16(lu_gp_id, &buf[off]);
off += 2;
len += 8; /* Header size + Designation descriptor */
/*
* SCSI name string designator, see spc4r17
* section 7.7.3.11
*
* Get the PROTOCOL IDENTIFIER as defined by spc4r17
* section 7.5.1 Table 362
*/
check_scsi_name:
buf[off] = tpg->proto_id << 4;
buf[off++] |= 0x3; /* CODE SET == UTF-8 */
buf[off] = 0x80; /* Set PIV=1 */
/* Set ASSOCIATION == target port: 01b */
buf[off] |= 0x10;
/* DESIGNATOR TYPE == SCSI name string */
buf[off++] |= 0x8;
off += 2; /* Skip over Reserved and length */
/*
* SCSI name string identifier containing $FABRIC_MOD
* dependent information. For LIO-Target and iSCSI
* Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
* UTF-8 encoding.
*/
tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
scsi_name_len += 1 /* Include NULL terminator */;
/*
* The null-terminated, null-padded (see 4.4.2) SCSI
* NAME STRING field contains a UTF-8 format string.
* The number of bytes in the SCSI NAME STRING field
* (i.e., the value in the DESIGNATOR LENGTH field)
* shall be no larger than 256 and shall be a multiple
* of four.
*/
padding = ((-scsi_name_len) & 3);
if (padding)
scsi_name_len += padding;
if (scsi_name_len > 256)
scsi_name_len = 256;
buf[off-1] = scsi_name_len;
off += scsi_name_len;
/* Header size + Designation descriptor */
len += (scsi_name_len + 4);
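/*
* Worked example of the padding math above: if the sprintf() produced
* 21 characters, the NUL terminator makes scsi_name_len = 22, so
* padding = (-22) & 3 = 2 and the DESIGNATOR LENGTH written at
* buf[off-1] becomes 24, satisfying the multiple-of-four rule quoted
* above.
*/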
/*
* Target device designator
*/
buf[off] = tpg->proto_id << 4;
buf[off++] |= 0x3; /* CODE SET == UTF-8 */
buf[off] = 0x80; /* Set PIV=1 */
/* Set ASSOCIATION == target device: 10b */
buf[off] |= 0x20;
/* DESIGNATOR TYPE == SCSI name string */
buf[off++] |= 0x8;
off += 2; /* Skip over Reserved and length */
/*
* SCSI name string identifier containing $FABRIC_MOD
* dependent information. For LIO-Target and iSCSI
* Target Port, this means "<iSCSI name>" in
* UTF-8 encoding.
*/
scsi_target_len = sprintf(&buf[off], "%s",
tpg->se_tpg_tfo->tpg_get_wwn(tpg));
scsi_target_len += 1 /* Include NULL terminator */;
/*
* The null-terminated, null-padded (see 4.4.2) SCSI
* NAME STRING field contains a UTF-8 format string.
* The number of bytes in the SCSI NAME STRING field
* (i.e., the value in the DESIGNATOR LENGTH field)
* shall be no larger than 256 and shall be a multiple
* of four.
*/
padding = ((-scsi_target_len) & 3);
if (padding)
scsi_target_len += padding;
if (scsi_target_len > 256)
scsi_target_len = 256;
buf[off-1] = scsi_target_len;
off += scsi_target_len;
/* Header size + Designation descriptor */
len += (scsi_target_len + 4);
}
put_unaligned_be16(len, &buf[2]); /* Page Length for VPD 0x83 */
return 0;
}
EXPORT_SYMBOL(spc_emulate_evpd_83);
/* Extended INQUIRY Data VPD Page */
static sense_reason_t
spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
buf[3] = 0x3c;
/*
* Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
* only for TYPE3 protection.
*/
if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT ||
cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
buf[4] = 0x5;
else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
buf[4] = 0x4;
}
/* logical unit supports type 1 and type 3 protection */
if ((dev->transport->get_device_type(dev) == TYPE_DISK) &&
(sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) &&
(dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)) {
buf[4] |= (0x3 << 3);
}
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
/* If WriteCache emulation is enabled, set V_SUP */
if (target_check_wce(dev))
buf[6] = 0x01;
/* If an LBA map is present set R_SUP */
spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
if (!list_empty(&dev->t10_alua.lba_map_list))
buf[8] = 0x10;
spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
return 0;
}
/* Block Limits VPD page */
static sense_reason_t
spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
u32 mtl = 0;
int have_tp = 0, opt, min;
u32 io_max_blocks;
/*
* Following spc3r22 section 6.5.3 Block Limits VPD page, when
* emulate_tpu=1 or emulate_tpws=1 we expect a
* different page length for Thin Provisioning.
*/
if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
have_tp = 1;
buf[0] = dev->transport->get_device_type(dev);
buf[3] = have_tp ? 0x3c : 0x10;
/* Set WSNZ to 1 */
buf[4] = 0x01;
/*
* Set MAXIMUM COMPARE AND WRITE LENGTH
*/
if (dev->dev_attrib.emulate_caw)
buf[5] = 0x01;
/*
* Set OPTIMAL TRANSFER LENGTH GRANULARITY
*/
if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
else
put_unaligned_be16(1, &buf[6]);
/*
* Set MAXIMUM TRANSFER LENGTH
*
* XXX: Currently assumes single PAGE_SIZE per scatterlist for fabrics
* enforcing maximum HW scatter-gather-list entry limit
*/
if (cmd->se_tfo->max_data_sg_nents) {
mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE) /
dev->dev_attrib.block_size;
}
io_max_blocks = mult_frac(dev->dev_attrib.hw_max_sectors,
dev->dev_attrib.hw_block_size,
dev->dev_attrib.block_size);
put_unaligned_be32(min_not_zero(mtl, io_max_blocks), &buf[8]);
/*
* Set OPTIMAL TRANSFER LENGTH
*/
if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
else
put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
/*
* Exit now if we don't support TP.
*/
if (!have_tp)
goto max_write_same;
/*
* Set MAXIMUM UNMAP LBA COUNT
*/
put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);
/*
* Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
*/
put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
&buf[24]);
/*
* Set OPTIMAL UNMAP GRANULARITY
*/
put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);
/*
* UNMAP GRANULARITY ALIGNMENT
*/
put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
&buf[32]);
if (dev->dev_attrib.unmap_granularity_alignment != 0)
buf[32] |= 0x80; /* Set the UGAVALID bit */
/*
* MAXIMUM WRITE SAME LENGTH
*/
max_write_same:
put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);
return 0;
}
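/*
* Example of the MAXIMUM TRANSFER LENGTH clamp above, using made-up
* numbers: with max_data_sg_nents = 2048, PAGE_SIZE = 4096 and a
* 512-byte block_size, mtl = 2048 * 4096 / 512 = 16384 blocks; the
* value reported at buf[8] is min_not_zero(mtl, io_max_blocks), so the
* fabric limit only applies when it is non-zero and smaller than the
* hw_max_sectors-derived limit.
*/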
/* Block Device Characteristics VPD page */
static sense_reason_t
spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
buf[0] = dev->transport->get_device_type(dev);
buf[3] = 0x3c;
buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;
return 0;
}
/* Thin Provisioning VPD */
static sense_reason_t
spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
/*
* From spc3r22 section 6.5.4 Thin Provisioning VPD page:
*
* The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
* zero, then the page length shall be set to 0004h. If the DP bit
* is set to one, then the page length shall be set to the value
* defined in table 162.
*/
buf[0] = dev->transport->get_device_type(dev);
/*
* Set Hardcoded length mentioned above for DP=0
*/
put_unaligned_be16(0x0004, &buf[2]);
/*
* The THRESHOLD EXPONENT field indicates the threshold set size in
* LBAs as a power of 2 (i.e., the threshold set size is equal to
* 2^(threshold exponent)).
*
* Note that this is currently set to 0x00 as mkp says it will be
* changing again. We can enable this once it has settled in T10
* and is actually used by Linux/SCSI ML code.
*/
buf[4] = 0x00;
/*
* A TPU bit set to one indicates that the device server supports
* the UNMAP command (see 5.25). A TPU bit set to zero indicates
* that the device server does not support the UNMAP command.
*/
if (dev->dev_attrib.emulate_tpu != 0)
buf[5] = 0x80;
/*
* A TPWS bit set to one indicates that the device server supports
* the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
* A TPWS bit set to zero indicates that the device server does not
* support the use of the WRITE SAME (16) command to unmap LBAs.
*/
if (dev->dev_attrib.emulate_tpws != 0)
buf[5] |= 0x40 | 0x20;
/*
* A set unmap_zeroes_data attribute means that the underlying device
* supports REQ_OP_DISCARD and has the discard_zeroes_data bit set. This
* satisfies the SBC requirements for LBPRZ, meaning that a subsequent
* read will return zeroes after an UNMAP or WRITE SAME (16) to an LBA.
* See sbc4r36 6.6.4.
*/
if (((dev->dev_attrib.emulate_tpu != 0) ||
(dev->dev_attrib.emulate_tpws != 0)) &&
(dev->dev_attrib.unmap_zeroes_data != 0))
buf[5] |= 0x04;
return 0;
}
/* Referrals VPD page */
static sense_reason_t
spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
buf[0] = dev->transport->get_device_type(dev);
buf[3] = 0x0c;
put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
put_unaligned_be32(dev->t10_alua.lba_map_segment_multiplier, &buf[12]);
return 0;
}
static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
static struct {
uint8_t page;
sense_reason_t (*emulate)(struct se_cmd *, unsigned char *);
} evpd_handlers[] = {
{ .page = 0x00, .emulate = spc_emulate_evpd_00 },
{ .page = 0x80, .emulate = spc_emulate_evpd_80 },
{ .page = 0x83, .emulate = spc_emulate_evpd_83 },
{ .page = 0x86, .emulate = spc_emulate_evpd_86 },
{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
{ .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
};
/* supported vital product data pages */
static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
{
int p;
/*
* Only report the INQUIRY EVPD=1 pages after a valid NAA
* Registered Extended LUN WWN has been set via ConfigFS
* during device creation/restart.
*/
if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
buf[3] = ARRAY_SIZE(evpd_handlers);
for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
buf[p + 4] = evpd_handlers[p].page;
}
return 0;
}
static sense_reason_t
spc_emulate_inquiry(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned char *rbuf;
unsigned char *cdb = cmd->t_task_cdb;
unsigned char *buf;
sense_reason_t ret;
int p;
int len = 0;
buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
if (!buf) {
pr_err("Unable to allocate response buffer for INQUIRY\n");
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
buf[0] = dev->transport->get_device_type(dev);
if (!(cdb[1] & 0x1)) {
if (cdb[2]) {
pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
cdb[2]);
ret = TCM_INVALID_CDB_FIELD;
goto out;
}
ret = spc_emulate_inquiry_std(cmd, buf);
len = buf[4] + 5;
goto out;
}
for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
if (cdb[2] == evpd_handlers[p].page) {
buf[1] = cdb[2];
ret = evpd_handlers[p].emulate(cmd, buf);
len = get_unaligned_be16(&buf[2]) + 4;
goto out;
}
}
pr_debug("Unknown VPD Code: 0x%02x\n", cdb[2]);
ret = TCM_INVALID_CDB_FIELD;
out:
rbuf = transport_kmap_data_sg(cmd);
if (rbuf) {
memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
transport_kunmap_data_sg(cmd);
}
kfree(buf);
if (!ret)
target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, len);
return ret;
}
static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)
{
p[0] = 0x01;
p[1] = 0x0a;
/* No changeable values for now */
if (pc == 1)
goto out;
out:
return 12;
}
static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
{
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
p[0] = 0x0a;
p[1] = 0x0a;
/* No changeable values for now */
if (pc == 1)
goto out;
/* GLTSD: No implicit save of log parameters */
p[2] = (1 << 1);
if (target_sense_desc_format(dev))
/* D_SENSE: Descriptor format sense data for 64bit sectors */
p[2] |= (1 << 2);
/*
* From spc4r23, 7.4.7 Control mode page
*
* The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
* restrictions on the algorithm used for reordering commands
* having the SIMPLE task attribute (see SAM-4).
*
* Table 368 -- QUEUE ALGORITHM MODIFIER field
* Code Description
* 0h Restricted reordering
* 1h Unrestricted reordering allowed
* 2h to 7h Reserved
* 8h to Fh Vendor specific
*
* A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
* the device server shall order the processing sequence of commands
* having the SIMPLE task attribute such that data integrity is maintained
* for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
* requests is halted at any time, the final value of all data observable
* on the medium shall be the same as if all the commands had been processed
* with the ORDERED task attribute).
*
* A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
* device server may reorder the processing sequence of commands having the
* SIMPLE task attribute in any manner. Any data integrity exposures related to
* command sequence order shall be explicitly handled by the application client
* through the selection of appropriate commands and task attributes.
*/
p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
/*
* From spc4r17, section 7.4.6 Control mode Page
*
* Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
*
* 00b: The logical unit shall clear any unit attention condition
* reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
* status and shall not establish a unit attention condition when a com-
* mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
* status.
*
* 10b: The logical unit shall not clear any unit attention condition
* reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
* status and shall not establish a unit attention condition when
* a command is completed with BUSY, TASK SET FULL, or RESERVATION
* CONFLICT status.
*
* 11b: The logical unit shall not clear any unit attention condition
* reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
* status and shall establish a unit attention condition for the
* initiator port associated with the I_T nexus on which the BUSY,
* TASK SET FULL, or RESERVATION CONFLICT status is being returned.
* Depending on the status, the additional sense code shall be set to
* PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
* RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
* command, a unit attention condition shall be established only once
* for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
* of the number of commands completed with one of those status codes.
*/
switch (dev->dev_attrib.emulate_ua_intlck_ctrl) {
case TARGET_UA_INTLCK_CTRL_ESTABLISH_UA:
p[4] = 0x30;
break;
case TARGET_UA_INTLCK_CTRL_NO_CLEAR:
p[4] = 0x20;
break;
default: /* TARGET_UA_INTLCK_CTRL_CLEAR */
p[4] = 0x00;
break;
}
/*
* From spc4r17, section 7.4.6 Control mode Page
*
* Task Aborted Status (TAS) bit set to zero.
*
* A task aborted status (TAS) bit set to zero specifies that aborted
* tasks shall be terminated by the device server without any response
* to the application client. A TAS bit set to one specifies that tasks
* aborted by the actions of an I_T nexus other than the I_T nexus on
* which the command was received shall be completed with TASK ABORTED
* status (see SAM-4).
*/
p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
/*
* From spc4r30, section 7.5.7 Control mode page
*
* Application Tag Owner (ATO) bit set to one.
*
* If the ATO bit is set to one the device server shall not modify the
* LOGICAL BLOCK APPLICATION TAG field and, depending on the protection
* type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
* TAG field.
*/
if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
if (dev->dev_attrib.pi_prot_type || sess->sess_prot_type)
p[5] |= 0x80;
}
p[8] = 0xff;
p[9] = 0xff;
p[11] = 30;
out:
return 12;
}
static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
{
struct se_device *dev = cmd->se_dev;
p[0] = 0x08;
p[1] = 0x12;
/* No changeable values for now */
if (pc == 1)
goto out;
if (target_check_wce(dev))
p[2] = 0x04; /* Write Cache Enable */
p[12] = 0x20; /* Disabled Read Ahead */
out:
return 20;
}
static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)
{
p[0] = 0x1c;
p[1] = 0x0a;
/* No changeable values for now */
if (pc == 1)
goto out;
out:
return 12;
}
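/*
 * Mode pages emulated for MODE SENSE and validated for MODE SELECT.
 * The page-0x3f "return all pages" case walks this table directly.
 */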
static struct {
uint8_t page;
uint8_t subpage;
int (*emulate)(struct se_cmd *, u8, unsigned char *);
} modesense_handlers[] = {
{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
{ .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
{ .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
{ .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
};
static void spc_modesense_write_protect(unsigned char *buf, int type)
{
/*
* I believe that the WP bit (bit 7) in the mode header is the same for
* all device types..
*/
switch (type) {
case TYPE_DISK:
case TYPE_TAPE:
default:
buf[0] |= 0x80; /* WP bit */
break;
}
}
static void spc_modesense_dpofua(unsigned char *buf, int type)
{
switch (type) {
case TYPE_DISK:
buf[0] |= 0x10; /* DPOFUA bit */
break;
default:
break;
}
}
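/*
 * Emit the BLOCK DESCRIPTOR LENGTH byte (8) followed by the 8-byte
 * short block descriptor: a 4-byte NUMBER OF LOGICAL BLOCKS (capped at
 * 0xffffffff) and the 3-byte BLOCK LENGTH. The 32-bit block_size write
 * also covers the reserved byte in front of BLOCK LENGTH, which stays
 * zero for any block size below 16 MiB.
 */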
static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
{
*buf++ = 8;
put_unaligned_be32(min(blocks, 0xffffffffull), buf);
buf += 4;
put_unaligned_be32(block_size, buf);
return 9;
}
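/*
 * Long-LBA variant for MODE SENSE (10): capacities that still fit in
 * 32 bits fall back to the short descriptor (prefixed by 3 bytes of
 * header padding, LONGLBA left clear); larger capacities set LONGLBA
 * and emit a 16-byte descriptor with the 64-bit block count and the
 * block size.
 */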
static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
{
if (blocks <= 0xffffffff)
return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3;
*buf++ = 1; /* LONGLBA */
buf += 2;
*buf++ = 16;
put_unaligned_be64(blocks, buf);
buf += 12;
put_unaligned_be32(block_size, buf);
return 17;
}
static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
char *cdb = cmd->t_task_cdb;
unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;
int type = dev->transport->get_device_type(dev);
int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
bool dbd = !!(cdb[1] & 0x08);
bool llba = ten ? !!(cdb[1] & 0x10) : false;
u8 pc = cdb[2] >> 6;
u8 page = cdb[2] & 0x3f;
u8 subpage = cdb[3];
int length = 0;
int ret;
int i;
memset(buf, 0, SE_MODE_PAGE_BUF);
/*
* Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
* MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
*/
length = ten ? 3 : 2;
/* DEVICE-SPECIFIC PARAMETER */
if (cmd->se_lun->lun_access_ro || target_lun_is_rdonly(cmd))
spc_modesense_write_protect(&buf[length], type);
/*
* SBC only allows us to enable FUA and DPO together. Fortunately
* DPO is explicitly specified as a hint, so a noop is a perfectly
* valid implementation.
*/
if (target_check_fua(dev))
spc_modesense_dpofua(&buf[length], type);
++length;
/* BLOCK DESCRIPTOR */
/*
* For now we only include a block descriptor for disk (SBC)
* devices; other command sets use a slightly different format.
*/
if (!dbd && type == TYPE_DISK) {
u64 blocks = dev->transport->get_blocks(dev);
u32 block_size = dev->dev_attrib.block_size;
if (ten) {
if (llba) {
length += spc_modesense_long_blockdesc(&buf[length],
blocks, block_size);
} else {
length += 3;
length += spc_modesense_blockdesc(&buf[length],
blocks, block_size);
}
} else {
length += spc_modesense_blockdesc(&buf[length], blocks,
block_size);
}
} else {
if (ten)
length += 4;
else
length += 1;
}
if (page == 0x3f) {
if (subpage != 0x00 && subpage != 0xff) {
pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
return TCM_INVALID_CDB_FIELD;
}
for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
/*
* Tricky way to say all subpage 00h for
* subpage==0, all subpages for subpage==0xff
* (and we just checked above that those are
* the only two possibilities).
*/
if ((modesense_handlers[i].subpage & ~subpage) == 0) {
ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]);
if (!ten && length + ret >= 255)
break;
length += ret;
}
}
goto set_length;
}
for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
if (modesense_handlers[i].page == page &&
modesense_handlers[i].subpage == subpage) {
length += modesense_handlers[i].emulate(cmd, pc, &buf[length]);
goto set_length;
}
/*
* We don't intend to implement:
* - obsolete page 03h "format parameters" (checked by Solaris)
*/
if (page != 0x03)
pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
page, subpage);
return TCM_UNKNOWN_MODE_PAGE;
set_length:
if (ten)
put_unaligned_be16(length - 2, buf);
else
buf[0] = length - 1;
rbuf = transport_kmap_data_sg(cmd);
if (rbuf) {
memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length));
transport_kunmap_data_sg(cmd);
}
target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, length);
return 0;
}
static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
{
char *cdb = cmd->t_task_cdb;
bool ten = cdb[0] == MODE_SELECT_10;
int off = ten ? 8 : 4;
bool pf = !!(cdb[1] & 0x10);
u8 page, subpage;
unsigned char *buf;
unsigned char tbuf[SE_MODE_PAGE_BUF];
int length;
sense_reason_t ret = 0;
int i;
if (!cmd->data_length) {
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
if (cmd->data_length < off + 2)
return TCM_PARAMETER_LIST_LENGTH_ERROR;
buf = transport_kmap_data_sg(cmd);
if (!buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
if (!pf) {
ret = TCM_INVALID_CDB_FIELD;
goto out;
}
page = buf[off] & 0x3f;
subpage = buf[off] & 0x40 ? buf[off + 1] : 0;
for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
if (modesense_handlers[i].page == page &&
modesense_handlers[i].subpage == subpage) {
memset(tbuf, 0, SE_MODE_PAGE_BUF);
length = modesense_handlers[i].emulate(cmd, 0, tbuf);
goto check_contents;
}
ret = TCM_UNKNOWN_MODE_PAGE;
goto out;
check_contents:
if (cmd->data_length < off + length) {
ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
goto out;
}
if (memcmp(buf + off, tbuf, length))
ret = TCM_INVALID_PARAMETER_LIST;
out:
transport_kunmap_data_sg(cmd);
if (!ret)
target_complete_cmd(cmd, SAM_STAT_GOOD);
return ret;
}
static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
{
unsigned char *cdb = cmd->t_task_cdb;
unsigned char *rbuf;
u8 ua_asc = 0, ua_ascq = 0;
unsigned char buf[SE_SENSE_BUF];
bool desc_format = target_sense_desc_format(cmd->se_dev);
memset(buf, 0, SE_SENSE_BUF);
if (cdb[1] & 0x01) {
pr_err("REQUEST_SENSE description emulation not"
" supported\n");
return TCM_INVALID_CDB_FIELD;
}
rbuf = transport_kmap_data_sg(cmd);
if (!rbuf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))
scsi_build_sense_buffer(desc_format, buf, UNIT_ATTENTION,
ua_asc, ua_ascq);
else
scsi_build_sense_buffer(desc_format, buf, NO_SENSE, 0x0, 0x0);
memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
transport_kunmap_data_sg(cmd);
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
{
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
struct scsi_lun slun;
unsigned char *buf;
u32 lun_count = 0, offset = 8;
__be32 len;
buf = transport_kmap_data_sg(cmd);
if (cmd->data_length && !buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
* If no struct se_session pointer is present, this struct se_cmd is
* coming via a target_core_mod PASSTHROUGH op, and not through
* a $FABRIC_MOD. In that case, report LUN=0 only.
*/
if (!sess)
goto done;
nacl = sess->se_node_acl;
rcu_read_lock();
hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
/*
* We determine the correct LUN LIST LENGTH even once we
* have reached the initial allocation length.
* See SPC2-R20 7.19.
*/
lun_count++;
if (offset >= cmd->data_length)
continue;
int_to_scsilun(deve->mapped_lun, &slun);
memcpy(buf + offset, &slun,
min(8u, cmd->data_length - offset));
offset += 8;
}
rcu_read_unlock();
/*
* See SPC3 r07, page 159.
*/
done:
/*
* If no LUNs are accessible, report virtual LUN 0.
*/
if (lun_count == 0) {
int_to_scsilun(0, &slun);
if (cmd->data_length > 8)
memcpy(buf + offset, &slun,
min(8u, cmd->data_length - offset));
lun_count = 1;
}
if (buf) {
len = cpu_to_be32(lun_count * 8);
memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
transport_kunmap_data_sg(cmd);
}
target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 8 + lun_count * 8);
return 0;
}
EXPORT_SYMBOL(spc_emulate_report_luns);
static sense_reason_t
spc_emulate_testunitready(struct se_cmd *cmd)
{
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
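/*
 * Usage-bit fixups for REPORT SUPPORTED OPERATION CODES: the DPO/FUA
 * bits (mask 0x18) are only advertised when the backend honours FUA.
 * The mask lives in byte 1 of the fixed-length CDBs and in byte 10 of
 * the 32-byte variable-length CDB.
 */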
static void set_dpofua_usage_bits(u8 *usage_bits, struct se_device *dev)
{
if (!target_check_fua(dev))
usage_bits[1] &= ~0x18;
else
usage_bits[1] |= 0x18;
}
static void set_dpofua_usage_bits32(u8 *usage_bits, struct se_device *dev)
{
if (!target_check_fua(dev))
usage_bits[10] &= ~0x18;
else
usage_bits[10] |= 0x18;
}
static struct target_opcode_descriptor tcm_opcode_read6 = {
.support = SCSI_SUPPORT_FULL,
.opcode = READ_6,
.cdb_size = 6,
.usage_bits = {READ_6, 0x1f, 0xff, 0xff,
0xff, SCSI_CONTROL_MASK},
};
static struct target_opcode_descriptor tcm_opcode_read10 = {
.support = SCSI_SUPPORT_FULL,
.opcode = READ_10,
.cdb_size = 10,
.usage_bits = {READ_10, 0xf8, 0xff, 0xff,
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
0xff, SCSI_CONTROL_MASK},
.update_usage_bits = set_dpofua_usage_bits,
};
static struct target_opcode_descriptor tcm_opcode_read12 = {
.support = SCSI_SUPPORT_FULL,
.opcode = READ_12,
.cdb_size = 12,
.usage_bits = {READ_12, 0xf8, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
.update_usage_bits = set_dpofua_usage_bits,
};
static struct target_opcode_descriptor tcm_opcode_read16 = {
.support = SCSI_SUPPORT_FULL,
.opcode = READ_16,
.cdb_size = 16,
.usage_bits = {READ_16, 0xf8, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
.update_usage_bits = set_dpofua_usage_bits,
};
static struct target_opcode_descriptor tcm_opcode_write6 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_6,
.cdb_size = 6,
.usage_bits = {WRITE_6, 0x1f, 0xff, 0xff,
0xff, SCSI_CONTROL_MASK},
};
static struct target_opcode_descriptor tcm_opcode_write10 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_10,
.cdb_size = 10,
.usage_bits = {WRITE_10, 0xf8, 0xff, 0xff,
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
0xff, SCSI_CONTROL_MASK},
.update_usage_bits = set_dpofua_usage_bits,
};
static struct target_opcode_descriptor tcm_opcode_write_verify10 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_VERIFY,
.cdb_size = 10,
.usage_bits = {WRITE_VERIFY, 0xf0, 0xff, 0xff,
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
0xff, SCSI_CONTROL_MASK},
.update_usage_bits = set_dpofua_usage_bits,
};
static struct target_opcode_descriptor tcm_opcode_write12 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_12,
.cdb_size = 12,
.usage_bits = {WRITE_12, 0xf8, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
.update_usage_bits = set_dpofua_usage_bits,
};
static struct target_opcode_descriptor tcm_opcode_write16 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_16,
.cdb_size = 16,
.usage_bits = {WRITE_16, 0xf8, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
.update_usage_bits = set_dpofua_usage_bits,
};
static struct target_opcode_descriptor tcm_opcode_write_verify16 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_VERIFY_16,
.cdb_size = 16,
.usage_bits = {WRITE_VERIFY_16, 0xf0, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
.update_usage_bits = set_dpofua_usage_bits,
};
static bool tcm_is_ws_enabled(struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct exec_cmd_ops *ops = cmd->protocol_data;
struct se_device *dev = cmd->se_dev;
return (dev->dev_attrib.emulate_tpws && !!ops->execute_unmap) ||
!!ops->execute_write_same;
}
static struct target_opcode_descriptor tcm_opcode_write_same32 = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = VARIABLE_LENGTH_CMD,
.service_action = WRITE_SAME_32,
.cdb_size = 32,
.usage_bits = {VARIABLE_LENGTH_CMD, SCSI_CONTROL_MASK, 0x00, 0x00,
0x00, 0x00, SCSI_GROUP_NUMBER_MASK, 0x18,
0x00, WRITE_SAME_32, 0xe8, 0x00,
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff},
.enabled = tcm_is_ws_enabled,
.update_usage_bits = set_dpofua_usage_bits32,
};
static bool tcm_is_caw_enabled(struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
return dev->dev_attrib.emulate_caw;
}
static struct target_opcode_descriptor tcm_opcode_compare_write = {
.support = SCSI_SUPPORT_FULL,
.opcode = COMPARE_AND_WRITE,
.cdb_size = 16,
.usage_bits = {COMPARE_AND_WRITE, 0x18, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0x00, 0x00,
0x00, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
.enabled = tcm_is_caw_enabled,
.update_usage_bits = set_dpofua_usage_bits,
};
static struct target_opcode_descriptor tcm_opcode_read_capacity = {
.support = SCSI_SUPPORT_FULL,
.opcode = READ_CAPACITY,
.cdb_size = 10,
.usage_bits = {READ_CAPACITY, 0x00, 0xff, 0xff,
0xff, 0xff, 0x00, 0x00,
0x01, SCSI_CONTROL_MASK},
};
static struct target_opcode_descriptor tcm_opcode_read_capacity16 = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = SERVICE_ACTION_IN_16,
.service_action = SAI_READ_CAPACITY_16,
.cdb_size = 16,
.usage_bits = {SERVICE_ACTION_IN_16, SAI_READ_CAPACITY_16, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xff,
0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};
static bool tcm_is_rep_ref_enabled(struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
spin_lock(&dev->t10_alua.lba_map_lock);
if (list_empty(&dev->t10_alua.lba_map_list)) {
spin_unlock(&dev->t10_alua.lba_map_lock);
return false;
}
spin_unlock(&dev->t10_alua.lba_map_lock);
return true;
}
static struct target_opcode_descriptor tcm_opcode_read_report_refferals = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = SERVICE_ACTION_IN_16,
.service_action = SAI_REPORT_REFERRALS,
.cdb_size = 16,
.usage_bits = {SERVICE_ACTION_IN_16, SAI_REPORT_REFERRALS, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xff,
0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
.enabled = tcm_is_rep_ref_enabled,
};
static struct target_opcode_descriptor tcm_opcode_sync_cache = {
.support = SCSI_SUPPORT_FULL,
.opcode = SYNCHRONIZE_CACHE,
.cdb_size = 10,
.usage_bits = {SYNCHRONIZE_CACHE, 0x02, 0xff, 0xff,
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
0xff, SCSI_CONTROL_MASK},
};
static struct target_opcode_descriptor tcm_opcode_sync_cache16 = {
.support = SCSI_SUPPORT_FULL,
.opcode = SYNCHRONIZE_CACHE_16,
.cdb_size = 16,
.usage_bits = {SYNCHRONIZE_CACHE_16, 0x02, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
};
static bool tcm_is_unmap_enabled(struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct exec_cmd_ops *ops = cmd->protocol_data;
struct se_device *dev = cmd->se_dev;
return ops->execute_unmap && dev->dev_attrib.emulate_tpu;
}
static struct target_opcode_descriptor tcm_opcode_unmap = {
.support = SCSI_SUPPORT_FULL,
.opcode = UNMAP,
.cdb_size = 10,
.usage_bits = {UNMAP, 0x00, 0x00, 0x00,
0x00, 0x00, SCSI_GROUP_NUMBER_MASK, 0xff,
0xff, SCSI_CONTROL_MASK},
.enabled = tcm_is_unmap_enabled,
};
static struct target_opcode_descriptor tcm_opcode_write_same = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_SAME,
.cdb_size = 10,
.usage_bits = {WRITE_SAME, 0xe8, 0xff, 0xff,
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
0xff, SCSI_CONTROL_MASK},
.enabled = tcm_is_ws_enabled,
};
static struct target_opcode_descriptor tcm_opcode_write_same16 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_SAME_16,
.cdb_size = 16,
.usage_bits = {WRITE_SAME_16, 0xe8, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
.enabled = tcm_is_ws_enabled,
};
static struct target_opcode_descriptor tcm_opcode_verify = {
.support = SCSI_SUPPORT_FULL,
.opcode = VERIFY,
.cdb_size = 10,
.usage_bits = {VERIFY, 0x00, 0xff, 0xff,
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
0xff, SCSI_CONTROL_MASK},
};
static struct target_opcode_descriptor tcm_opcode_verify16 = {
.support = SCSI_SUPPORT_FULL,
.opcode = VERIFY_16,
.cdb_size = 16,
.usage_bits = {VERIFY_16, 0x00, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
};
static struct target_opcode_descriptor tcm_opcode_start_stop = {
.support = SCSI_SUPPORT_FULL,
.opcode = START_STOP,
.cdb_size = 6,
.usage_bits = {START_STOP, 0x01, 0x00, 0x00,
0x01, SCSI_CONTROL_MASK},
};
static struct target_opcode_descriptor tcm_opcode_mode_select = {
.support = SCSI_SUPPORT_FULL,
.opcode = MODE_SELECT,
.cdb_size = 6,
.usage_bits = {MODE_SELECT, 0x10, 0x00, 0x00,
0xff, SCSI_CONTROL_MASK},
};
static struct target_opcode_descriptor tcm_opcode_mode_select10 = {
.support = SCSI_SUPPORT_FULL,
.opcode = MODE_SELECT_10,
.cdb_size = 10,
.usage_bits = {MODE_SELECT_10, 0x10, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff,
0xff, SCSI_CONTROL_MASK},
};
static struct target_opcode_descriptor tcm_opcode_mode_sense = {
.support = SCSI_SUPPORT_FULL,
.opcode = MODE_SENSE,
.cdb_size = 6,
.usage_bits = {MODE_SENSE, 0x08, 0xff, 0xff,
0xff, SCSI_CONTROL_MASK},
};
static struct target_opcode_descriptor tcm_opcode_mode_sense10 = {
.support = SCSI_SUPPORT_FULL,
.opcode = MODE_SENSE_10,
.cdb_size = 10,
.usage_bits = {MODE_SENSE_10, 0x18, 0xff, 0xff,
0x00, 0x00, 0x00, 0xff,
0xff, SCSI_CONTROL_MASK},
};
static struct target_opcode_descriptor tcm_opcode_pri_read_keys = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_IN,
.service_action = PRI_READ_KEYS,
.cdb_size = 10,
.usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_KEYS, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff,
0xff, SCSI_CONTROL_MASK},
};
static struct target_opcode_descriptor tcm_opcode_pri_read_resrv = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_IN,
.service_action = PRI_READ_RESERVATION,
.cdb_size = 10,
.usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_RESERVATION, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff,
0xff, SCSI_CONTROL_MASK},
};
static bool tcm_is_pr_enabled(struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
if (!dev->dev_attrib.emulate_pr)
return false;
if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
return true;
switch (descr->opcode) {
case RESERVE:
case RESERVE_10:
case RELEASE:
case RELEASE_10:
/*
* The pr_ops which are used by the backend modules don't
* support these commands.
*/
return false;
case PERSISTENT_RESERVE_OUT:
switch (descr->service_action) {
case PRO_REGISTER_AND_MOVE:
case PRO_REPLACE_LOST_RESERVATION:
/*
* The backend modules don't have access to ports and
* I_T nexuses so they can't handle these types of
* requests.
*/
return false;
}
break;
case PERSISTENT_RESERVE_IN:
if (descr->service_action == PRI_READ_FULL_STATUS)
return false;
break;
}
return true;
}
static struct target_opcode_descriptor tcm_opcode_pri_read_caps = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_IN,
.service_action = PRI_REPORT_CAPABILITIES,
.cdb_size = 10,
.usage_bits = {PERSISTENT_RESERVE_IN, PRI_REPORT_CAPABILITIES, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff,
0xff, SCSI_CONTROL_MASK},
.enabled = tcm_is_pr_enabled,
};
static struct target_opcode_descriptor tcm_opcode_pri_read_full_status = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_IN,
.service_action = PRI_READ_FULL_STATUS,
.cdb_size = 10,
.usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_FULL_STATUS, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff,
0xff, SCSI_CONTROL_MASK},
.enabled = tcm_is_pr_enabled,
};
static struct target_opcode_descriptor tcm_opcode_pro_register = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
.service_action = PRO_REGISTER,
.cdb_size = 10,
.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_REGISTER, 0xff, 0x00,
0x00, 0xff, 0xff, 0xff,
0xff, SCSI_CONTROL_MASK},
.enabled = tcm_is_pr_enabled,
};
static struct target_opcode_descriptor tcm_opcode_pro_reserve = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
.service_action = PRO_RESERVE,
.cdb_size = 10,
.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_RESERVE, 0xff, 0x00,
0x00, 0xff, 0xff, 0xff,
0xff, SCSI_CONTROL_MASK},
.enabled = tcm_is_pr_enabled,
};
static struct target_opcode_descriptor tcm_opcode_pro_release = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
.service_action = PRO_RELEASE,
.cdb_size = 10,
.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_RELEASE, 0xff, 0x00,
0x00, 0xff, 0xff, 0xff,
0xff, SCSI_CONTROL_MASK},
.enabled = tcm_is_pr_enabled,
};
static struct target_opcode_descriptor tcm_opcode_pro_clear = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
.service_action = PRO_CLEAR,
.cdb_size = 10,
.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_CLEAR, 0xff, 0x00,
0x00, 0xff, 0xff, 0xff,
0xff, SCSI_CONTROL_MASK},
.enabled = tcm_is_pr_enabled,
};
static struct target_opcode_descriptor tcm_opcode_pro_preempt = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
.service_action = PRO_PREEMPT,
.cdb_size = 10,
.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_PREEMPT, 0xff, 0x00,
0x00, 0xff, 0xff, 0xff,
0xff, SCSI_CONTROL_MASK},
.enabled = tcm_is_pr_enabled,
};
static struct target_opcode_descriptor tcm_opcode_pro_preempt_abort = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
.service_action = PRO_PREEMPT_AND_ABORT,
.cdb_size = 10,
.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_PREEMPT_AND_ABORT, 0xff, 0x00,
0x00, 0xff, 0xff, 0xff,
0xff, SCSI_CONTROL_MASK},
.enabled = tcm_is_pr_enabled,
};
static struct target_opcode_descriptor tcm_opcode_pro_reg_ign_exist = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
.service_action = PRO_REGISTER_AND_IGNORE_EXISTING_KEY,
.cdb_size = 10,
.usage_bits = {
PERSISTENT_RESERVE_OUT, PRO_REGISTER_AND_IGNORE_EXISTING_KEY,
0xff, 0x00,
0x00, 0xff, 0xff, 0xff,
0xff, SCSI_CONTROL_MASK},
.enabled = tcm_is_pr_enabled,
};
static struct target_opcode_descriptor tcm_opcode_pro_register_move = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
.service_action = PRO_REGISTER_AND_MOVE,
.cdb_size = 10,
.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_REGISTER_AND_MOVE, 0xff, 0x00,
0x00, 0xff, 0xff, 0xff,
0xff, SCSI_CONTROL_MASK},
.enabled = tcm_is_pr_enabled,
};
static struct target_opcode_descriptor tcm_opcode_release = {
.support = SCSI_SUPPORT_FULL,
.opcode = RELEASE,
.cdb_size = 6,
.usage_bits = {RELEASE, 0x00, 0x00, 0x00,
0x00, SCSI_CONTROL_MASK},
.enabled = tcm_is_pr_enabled,
};
static struct target_opcode_descriptor tcm_opcode_release10 = {
.support = SCSI_SUPPORT_FULL,
.opcode = RELEASE_10,
.cdb_size = 10,
.usage_bits = {RELEASE_10, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff,
0xff, SCSI_CONTROL_MASK},
.enabled = tcm_is_pr_enabled,
};
static struct target_opcode_descriptor tcm_opcode_reserve = {
.support = SCSI_SUPPORT_FULL,
.opcode = RESERVE,
.cdb_size = 6,
.usage_bits = {RESERVE, 0x00, 0x00, 0x00,
0x00, SCSI_CONTROL_MASK},
.enabled = tcm_is_pr_enabled,
};
static struct target_opcode_descriptor tcm_opcode_reserve10 = {
.support = SCSI_SUPPORT_FULL,
.opcode = RESERVE_10,
.cdb_size = 10,
.usage_bits = {RESERVE_10, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff,
0xff, SCSI_CONTROL_MASK},
.enabled = tcm_is_pr_enabled,
};
static struct target_opcode_descriptor tcm_opcode_request_sense = {
.support = SCSI_SUPPORT_FULL,
.opcode = REQUEST_SENSE,
.cdb_size = 6,
.usage_bits = {REQUEST_SENSE, 0x00, 0x00, 0x00,
0xff, SCSI_CONTROL_MASK},
};
static struct target_opcode_descriptor tcm_opcode_inquiry = {
.support = SCSI_SUPPORT_FULL,
.opcode = INQUIRY,
.cdb_size = 6,
.usage_bits = {INQUIRY, 0x01, 0xff, 0xff,
0xff, SCSI_CONTROL_MASK},
};
static bool tcm_is_3pc_enabled(struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
return dev->dev_attrib.emulate_3pc;
}
static struct target_opcode_descriptor tcm_opcode_extended_copy_lid1 = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = EXTENDED_COPY,
.cdb_size = 16,
.usage_bits = {EXTENDED_COPY, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xff,
0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
.enabled = tcm_is_3pc_enabled,
};
static struct target_opcode_descriptor tcm_opcode_rcv_copy_res_op_params = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = RECEIVE_COPY_RESULTS,
.service_action = RCR_SA_OPERATING_PARAMETERS,
.cdb_size = 16,
.usage_bits = {RECEIVE_COPY_RESULTS, RCR_SA_OPERATING_PARAMETERS,
0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xff,
0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
.enabled = tcm_is_3pc_enabled,
};
static struct target_opcode_descriptor tcm_opcode_report_luns = {
.support = SCSI_SUPPORT_FULL,
.opcode = REPORT_LUNS,
.cdb_size = 12,
.usage_bits = {REPORT_LUNS, 0x00, 0xff, 0x00,
0x00, 0x00, 0xff, 0xff,
0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};
static struct target_opcode_descriptor tcm_opcode_test_unit_ready = {
.support = SCSI_SUPPORT_FULL,
.opcode = TEST_UNIT_READY,
.cdb_size = 6,
.usage_bits = {TEST_UNIT_READY, 0x00, 0x00, 0x00,
0x00, SCSI_CONTROL_MASK},
};
static struct target_opcode_descriptor tcm_opcode_report_target_pgs = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = MAINTENANCE_IN,
.service_action = MI_REPORT_TARGET_PGS,
.cdb_size = 12,
.usage_bits = {MAINTENANCE_IN, 0xE0 | MI_REPORT_TARGET_PGS, 0x00, 0x00,
0x00, 0x00, 0xff, 0xff,
0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};
static bool spc_rsoc_enabled(struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
return dev->dev_attrib.emulate_rsoc;
}
static struct target_opcode_descriptor tcm_opcode_report_supp_opcodes = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = MAINTENANCE_IN,
.service_action = MI_REPORT_SUPPORTED_OPERATION_CODES,
.cdb_size = 12,
.usage_bits = {MAINTENANCE_IN, MI_REPORT_SUPPORTED_OPERATION_CODES,
0x87, 0xff,
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
.enabled = spc_rsoc_enabled,
};
static bool tcm_is_set_tpg_enabled(struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct t10_alua_tg_pt_gp *l_tg_pt_gp;
struct se_lun *l_lun = cmd->se_lun;
rcu_read_lock();
l_tg_pt_gp = rcu_dereference(l_lun->lun_tg_pt_gp);
if (!l_tg_pt_gp) {
rcu_read_unlock();
return false;
}
if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
rcu_read_unlock();
return false;
}
rcu_read_unlock();
return true;
}
static struct target_opcode_descriptor tcm_opcode_set_tpg = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = MAINTENANCE_OUT,
.service_action = MO_SET_TARGET_PGS,
.cdb_size = 12,
.usage_bits = {MAINTENANCE_OUT, MO_SET_TARGET_PGS, 0x00, 0x00,
0x00, 0x00, 0xff, 0xff,
0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
.enabled = tcm_is_set_tpg_enabled,
};
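/*
 * Master table of opcode descriptors reported by REPORT SUPPORTED
 * OPERATION CODES. Descriptors with an ->enabled() callback are
 * filtered at report time against the device and session state.
 */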
static struct target_opcode_descriptor *tcm_supported_opcodes[] = {
&tcm_opcode_read6,
&tcm_opcode_read10,
&tcm_opcode_read12,
&tcm_opcode_read16,
&tcm_opcode_write6,
&tcm_opcode_write10,
&tcm_opcode_write_verify10,
&tcm_opcode_write12,
&tcm_opcode_write16,
&tcm_opcode_write_verify16,
&tcm_opcode_write_same32,
&tcm_opcode_compare_write,
&tcm_opcode_read_capacity,
&tcm_opcode_read_capacity16,
&tcm_opcode_read_report_refferals,
&tcm_opcode_sync_cache,
&tcm_opcode_sync_cache16,
&tcm_opcode_unmap,
&tcm_opcode_write_same,
&tcm_opcode_write_same16,
&tcm_opcode_verify,
&tcm_opcode_verify16,
&tcm_opcode_start_stop,
&tcm_opcode_mode_select,
&tcm_opcode_mode_select10,
&tcm_opcode_mode_sense,
&tcm_opcode_mode_sense10,
&tcm_opcode_pri_read_keys,
&tcm_opcode_pri_read_resrv,
&tcm_opcode_pri_read_caps,
&tcm_opcode_pri_read_full_status,
&tcm_opcode_pro_register,
&tcm_opcode_pro_reserve,
&tcm_opcode_pro_release,
&tcm_opcode_pro_clear,
&tcm_opcode_pro_preempt,
&tcm_opcode_pro_preempt_abort,
&tcm_opcode_pro_reg_ign_exist,
&tcm_opcode_pro_register_move,
&tcm_opcode_release,
&tcm_opcode_release10,
&tcm_opcode_reserve,
&tcm_opcode_reserve10,
&tcm_opcode_request_sense,
&tcm_opcode_inquiry,
&tcm_opcode_extended_copy_lid1,
&tcm_opcode_rcv_copy_res_op_params,
&tcm_opcode_report_luns,
&tcm_opcode_test_unit_ready,
&tcm_opcode_report_target_pgs,
&tcm_opcode_report_supp_opcodes,
&tcm_opcode_set_tpg,
};
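/*
 * Optional command timeouts descriptor, appended when CTDP is set: a
 * 12-byte trailer carrying the descriptor length (0x0a), the
 * command-specific byte and the nominal/recommended timeouts from the
 * opcode descriptor.
 */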
static int
spc_rsoc_encode_command_timeouts_descriptor(unsigned char *buf, u8 ctdp,
struct target_opcode_descriptor *descr)
{
if (!ctdp)
return 0;
put_unaligned_be16(0xa, buf);
buf[3] = descr->specific_timeout;
put_unaligned_be32(descr->nominal_timeout, &buf[4]);
put_unaligned_be32(descr->recommended_timeout, &buf[8]);
return 12;
}
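/*
 * all_commands parameter data: an 8-byte command descriptor per opcode
 * (operation code, service action, CTDP/SERVACTV flags, CDB length),
 * optionally followed by the timeouts descriptor above.
 */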
static int
spc_rsoc_encode_command_descriptor(unsigned char *buf, u8 ctdp,
struct target_opcode_descriptor *descr)
{
int td_size = 0;
buf[0] = descr->opcode;
put_unaligned_be16(descr->service_action, &buf[2]);
buf[5] = (ctdp << 1) | descr->serv_action_valid;
put_unaligned_be16(descr->cdb_size, &buf[6]);
td_size = spc_rsoc_encode_command_timeouts_descriptor(&buf[8], ctdp,
descr);
return 8 + td_size;
}
static int
spc_rsoc_encode_one_command_descriptor(unsigned char *buf, u8 ctdp,
struct target_opcode_descriptor *descr,
struct se_device *dev)
{
int td_size = 0;
if (!descr) {
buf[1] = (ctdp << 7) | SCSI_SUPPORT_NOT_SUPPORTED;
return 2;
}
buf[1] = (ctdp << 7) | SCSI_SUPPORT_FULL;
put_unaligned_be16(descr->cdb_size, &buf[2]);
memcpy(&buf[4], descr->usage_bits, descr->cdb_size);
if (descr->update_usage_bits)
descr->update_usage_bits(&buf[4], dev);
td_size = spc_rsoc_encode_command_timeouts_descriptor(
&buf[4 + descr->cdb_size], ctdp, descr);
return 4 + descr->cdb_size + td_size;
}
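/*
 * Resolve the opcode descriptor for the one_command reporting options
 * (1-3), validating the opcode/service-action combination as required
 * by SPC-4.
 */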
static sense_reason_t
spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
{
struct target_opcode_descriptor *descr;
struct se_session *sess = cmd->se_sess;
unsigned char *cdb = cmd->t_task_cdb;
u8 opts = cdb[2] & 0x3;
u8 requested_opcode;
u16 requested_sa;
int i;
requested_opcode = cdb[3];
requested_sa = ((u16)cdb[4]) << 8 | cdb[5];
*opcode = NULL;
if (opts > 3) {
pr_debug("TARGET_CORE[%s]: Invalid REPORT SUPPORTED OPERATION CODES"
" with unsupported REPORTING OPTIONS %#x for 0x%08llx from %s\n",
cmd->se_tfo->fabric_name, opts,
cmd->se_lun->unpacked_lun,
sess->se_node_acl->initiatorname);
return TCM_INVALID_CDB_FIELD;
}
for (i = 0; i < ARRAY_SIZE(tcm_supported_opcodes); i++) {
descr = tcm_supported_opcodes[i];
if (descr->opcode != requested_opcode)
continue;
switch (opts) {
case 0x1:
/*
* If the REQUESTED OPERATION CODE field specifies an
* operation code for which the device server implements
* service actions, then the device server shall
* terminate the command with CHECK CONDITION status,
* with the sense key set to ILLEGAL REQUEST, and the
* additional sense code set to INVALID FIELD IN CDB
*/
if (descr->serv_action_valid)
return TCM_INVALID_CDB_FIELD;
if (!descr->enabled || descr->enabled(descr, cmd))
*opcode = descr;
break;
case 0x2:
/*
* If the REQUESTED OPERATION CODE field specifies an
* operation code for which the device server does not
* implement service actions, then the device server
* shall terminate the command with CHECK CONDITION
* status, with the sense key set to ILLEGAL REQUEST,
* and the additional sense code set to INVALID FIELD IN CDB.
*/
if (descr->serv_action_valid &&
descr->service_action == requested_sa) {
if (!descr->enabled || descr->enabled(descr,
cmd))
*opcode = descr;
} else if (!descr->serv_action_valid)
return TCM_INVALID_CDB_FIELD;
break;
case 0x3:
/*
* The command support data for the operation code and
* service action as specified in the REQUESTED OPERATION
* CODE field and REQUESTED SERVICE ACTION field shall
* be returned in the one_command parameter data format.
*/
if (descr->service_action == requested_sa)
if (!descr->enabled || descr->enabled(descr,
cmd))
*opcode = descr;
break;
}
}
return 0;
}
static sense_reason_t
spc_emulate_report_supp_op_codes(struct se_cmd *cmd)
{
int descr_num = ARRAY_SIZE(tcm_supported_opcodes);
struct target_opcode_descriptor *descr = NULL;
unsigned char *cdb = cmd->t_task_cdb;
u8 rctd = (cdb[2] >> 7) & 0x1;
unsigned char *buf = NULL;
int response_length = 0;
u8 opts = cdb[2] & 0x3;
unsigned char *rbuf;
sense_reason_t ret = 0;
int i;
if (!cmd->se_dev->dev_attrib.emulate_rsoc)
return TCM_UNSUPPORTED_SCSI_OPCODE;
rbuf = transport_kmap_data_sg(cmd);
if (cmd->data_length && !rbuf) {
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out;
}
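/*
 * Size the response first: the ALL_COMMANDS format needs room for
 * every descriptor (8 bytes each, plus 12 when RCTD requests timeout
 * descriptors), while the one-command formats are sized from the
 * matched descriptor, or 2 bytes when the opcode is unsupported.
 */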
if (opts == 0)
response_length = 4 + (8 + rctd * 12) * descr_num;
else {
ret = spc_rsoc_get_descr(cmd, &descr);
if (ret)
goto out;
if (descr)
response_length = 4 + descr->cdb_size + rctd * 12;
else
response_length = 2;
}
buf = kzalloc(response_length, GFP_KERNEL);
if (!buf) {
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out;
}
response_length = 0;
if (opts == 0) {
response_length += 4;
for (i = 0; i < ARRAY_SIZE(tcm_supported_opcodes); i++) {
descr = tcm_supported_opcodes[i];
if (descr->enabled && !descr->enabled(descr, cmd))
continue;
response_length += spc_rsoc_encode_command_descriptor(
&buf[response_length], rctd, descr);
}
put_unaligned_be32(response_length - 3, buf);
} else {
response_length = spc_rsoc_encode_one_command_descriptor(
&buf[response_length], rctd, descr,
cmd->se_dev);
}
memcpy(rbuf, buf, min_t(u32, response_length, cmd->data_length));
out:
kfree(buf);
transport_kunmap_data_sg(cmd);
if (!ret)
target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, response_length);
return ret;
}
sense_reason_t
spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
struct se_device *dev = cmd->se_dev;
unsigned char *cdb = cmd->t_task_cdb;
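/*
 * Filter reservation opcodes up front: SPC-2 RESERVE/RELEASE and the
 * SPC-3 persistent reservation opcodes are rejected when PR emulation
 * is disabled, and the SPC-2 variants also when the backend handles
 * persistent reservations itself (passthrough PGR).
 */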
switch (cdb[0]) {
case RESERVE:
case RESERVE_10:
case RELEASE:
case RELEASE_10:
if (!dev->dev_attrib.emulate_pr)
return TCM_UNSUPPORTED_SCSI_OPCODE;
if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
return TCM_UNSUPPORTED_SCSI_OPCODE;
break;
case PERSISTENT_RESERVE_IN:
case PERSISTENT_RESERVE_OUT:
if (!dev->dev_attrib.emulate_pr)
return TCM_UNSUPPORTED_SCSI_OPCODE;
break;
}
switch (cdb[0]) {
case MODE_SELECT:
*size = cdb[4];
cmd->execute_cmd = spc_emulate_modeselect;
break;
case MODE_SELECT_10:
*size = get_unaligned_be16(&cdb[7]);
cmd->execute_cmd = spc_emulate_modeselect;
break;
case MODE_SENSE:
*size = cdb[4];
cmd->execute_cmd = spc_emulate_modesense;
break;
case MODE_SENSE_10:
*size = get_unaligned_be16(&cdb[7]);
cmd->execute_cmd = spc_emulate_modesense;
break;
case LOG_SELECT:
case LOG_SENSE:
*size = get_unaligned_be16(&cdb[7]);
break;
case PERSISTENT_RESERVE_IN:
*size = get_unaligned_be16(&cdb[7]);
cmd->execute_cmd = target_scsi3_emulate_pr_in;
break;
case PERSISTENT_RESERVE_OUT:
*size = get_unaligned_be32(&cdb[5]);
cmd->execute_cmd = target_scsi3_emulate_pr_out;
break;
case RELEASE:
case RELEASE_10:
if (cdb[0] == RELEASE_10)
*size = get_unaligned_be16(&cdb[7]);
else
*size = cmd->data_length;
cmd->execute_cmd = target_scsi2_reservation_release;
break;
case RESERVE:
case RESERVE_10:
/*
* The SPC-2 RESERVE does not contain a size in the SCSI CDB.
* Assume the passthrough or $FABRIC_MOD will tell us about it.
*/
if (cdb[0] == RESERVE_10)
*size = get_unaligned_be16(&cdb[7]);
else
*size = cmd->data_length;
cmd->execute_cmd = target_scsi2_reservation_reserve;
break;
case REQUEST_SENSE:
*size = cdb[4];
cmd->execute_cmd = spc_emulate_request_sense;
break;
case INQUIRY:
*size = get_unaligned_be16(&cdb[3]);
/*
* Do implicit HEAD_OF_QUEUE processing for INQUIRY.
* See spc4r17 section 5.3
*/
cmd->sam_task_attr = TCM_HEAD_TAG;
cmd->execute_cmd = spc_emulate_inquiry;
break;
case SECURITY_PROTOCOL_IN:
case SECURITY_PROTOCOL_OUT:
*size = get_unaligned_be32(&cdb[6]);
break;
case EXTENDED_COPY:
*size = get_unaligned_be32(&cdb[10]);
cmd->execute_cmd = target_do_xcopy;
break;
case RECEIVE_COPY_RESULTS:
*size = get_unaligned_be32(&cdb[10]);
cmd->execute_cmd = target_do_receive_copy_results;
break;
case READ_ATTRIBUTE:
case WRITE_ATTRIBUTE:
*size = get_unaligned_be32(&cdb[10]);
break;
case RECEIVE_DIAGNOSTIC:
case SEND_DIAGNOSTIC:
*size = get_unaligned_be16(&cdb[3]);
break;
case WRITE_BUFFER:
*size = get_unaligned_be24(&cdb[6]);
break;
case REPORT_LUNS:
cmd->execute_cmd = spc_emulate_report_luns;
*size = get_unaligned_be32(&cdb[6]);
/*
* Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
* See spc4r17 section 5.3
*/
cmd->sam_task_attr = TCM_HEAD_TAG;
break;
case TEST_UNIT_READY:
cmd->execute_cmd = spc_emulate_testunitready;
*size = 0;
break;
case MAINTENANCE_IN:
if (dev->transport->get_device_type(dev) != TYPE_ROM) {
/*
* MAINTENANCE_IN from SCC-2
* Check for emulated MI_REPORT_TARGET_PGS
*/
if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) {
cmd->execute_cmd =
target_emulate_report_target_port_groups;
}
if ((cdb[1] & 0x1f) ==
MI_REPORT_SUPPORTED_OPERATION_CODES)
cmd->execute_cmd =
spc_emulate_report_supp_op_codes;
*size = get_unaligned_be32(&cdb[6]);
} else {
/*
* GPCMD_SEND_KEY from multi media commands
*/
*size = get_unaligned_be16(&cdb[8]);
}
break;
case MAINTENANCE_OUT:
if (dev->transport->get_device_type(dev) != TYPE_ROM) {
/*
* MAINTENANCE_OUT from SCC-2
* Check for emulated MO_SET_TARGET_PGS.
*/
if (cdb[1] == MO_SET_TARGET_PGS) {
cmd->execute_cmd =
target_emulate_set_target_port_groups;
}
*size = get_unaligned_be32(&cdb[6]);
} else {
/*
* GPCMD_SEND_KEY from multi media commands
*/
*size = get_unaligned_be16(&cdb[8]);
}
break;
default:
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
return 0;
}
EXPORT_SYMBOL(spc_parse_cdb);
| linux-master | drivers/target/target_core_spc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains the iSCSI Target specific utility functions.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <net/ipv6.h> /* ipv6_addr_equal() */
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
extern struct list_head g_tiqn_list;
extern spinlock_t tiqn_lock;
int iscsit_add_r2t_to_list(
struct iscsit_cmd *cmd,
u32 offset,
u32 xfer_len,
int recovery,
u32 r2t_sn)
{
struct iscsi_r2t *r2t;
lockdep_assert_held(&cmd->r2t_lock);
WARN_ON_ONCE((s32)xfer_len < 0);
r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
if (!r2t) {
pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
return -1;
}
INIT_LIST_HEAD(&r2t->r2t_list);
r2t->recovery_r2t = recovery;
r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
r2t->offset = offset;
r2t->xfer_len = xfer_len;
list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
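/*
 * Drop r2t_lock (held by the caller) around queueing: the immediate
 * queue takes its own lock and wakes the connection's TX thread.
 */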
spin_unlock_bh(&cmd->r2t_lock);
iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
spin_lock_bh(&cmd->r2t_lock);
return 0;
}
struct iscsi_r2t *iscsit_get_r2t_for_eos(
struct iscsit_cmd *cmd,
u32 offset,
u32 length)
{
struct iscsi_r2t *r2t;
spin_lock_bh(&cmd->r2t_lock);
list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
if ((r2t->offset <= offset) &&
(r2t->offset + r2t->xfer_len) >= (offset + length)) {
spin_unlock_bh(&cmd->r2t_lock);
return r2t;
}
}
spin_unlock_bh(&cmd->r2t_lock);
pr_err("Unable to locate R2T for Offset: %u, Length:"
" %u\n", offset, length);
return NULL;
}
struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsit_cmd *cmd)
{
struct iscsi_r2t *r2t;
spin_lock_bh(&cmd->r2t_lock);
list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
if (!r2t->sent_r2t) {
spin_unlock_bh(&cmd->r2t_lock);
return r2t;
}
}
spin_unlock_bh(&cmd->r2t_lock);
pr_err("Unable to locate next R2T to send for ITT:"
" 0x%08x.\n", cmd->init_task_tag);
return NULL;
}
void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsit_cmd *cmd)
{
lockdep_assert_held(&cmd->r2t_lock);
list_del(&r2t->r2t_list);
kmem_cache_free(lio_r2t_cache, r2t);
}
void iscsit_free_r2ts_from_list(struct iscsit_cmd *cmd)
{
struct iscsi_r2t *r2t, *r2t_tmp;
spin_lock_bh(&cmd->r2t_lock);
list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
iscsit_free_r2t(r2t, cmd);
spin_unlock_bh(&cmd->r2t_lock);
}
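/*
 * Sleep on the session tag pool's sbitmap wait queue until a command
 * tag frees up or a signal becomes pending; TASK_RUNNING callers never
 * sleep and simply get the -1 failure back.
 */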
static int iscsit_wait_for_tag(struct se_session *se_sess, int state, int *cpup)
{
int tag = -1;
DEFINE_SBQ_WAIT(wait);
struct sbq_wait_state *ws;
struct sbitmap_queue *sbq;
if (state == TASK_RUNNING)
return tag;
sbq = &se_sess->sess_tag_pool;
ws = &sbq->ws[0];
for (;;) {
sbitmap_prepare_to_wait(sbq, ws, &wait, state);
if (signal_pending_state(state, current))
break;
tag = sbitmap_queue_get(sbq, cpup);
if (tag >= 0)
break;
schedule();
}
sbitmap_finish_wait(sbq, ws, &wait);
return tag;
}
/*
* May be called from software interrupt (timer) context for allocating
* iSCSI NopINs.
*/
struct iscsit_cmd *iscsit_allocate_cmd(struct iscsit_conn *conn, int state)
{
struct iscsit_cmd *cmd;
struct se_session *se_sess = conn->sess->se_sess;
int size, tag, cpu;
tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
tag = iscsit_wait_for_tag(se_sess, state, &cpu);
if (tag < 0)
return NULL;
size = sizeof(struct iscsit_cmd) + conn->conn_transport->priv_size;
cmd = (struct iscsit_cmd *)(se_sess->sess_cmd_map + (tag * size));
memset(cmd, 0, size);
cmd->se_cmd.map_tag = tag;
cmd->se_cmd.map_cpu = cpu;
cmd->conn = conn;
cmd->data_direction = DMA_NONE;
INIT_LIST_HEAD(&cmd->i_conn_node);
INIT_LIST_HEAD(&cmd->datain_list);
INIT_LIST_HEAD(&cmd->cmd_r2t_list);
spin_lock_init(&cmd->datain_lock);
spin_lock_init(&cmd->dataout_timeout_lock);
spin_lock_init(&cmd->istate_lock);
spin_lock_init(&cmd->error_lock);
spin_lock_init(&cmd->r2t_lock);
timer_setup(&cmd->dataout_timer, iscsit_handle_dataout_timeout, 0);
return cmd;
}
EXPORT_SYMBOL(iscsit_allocate_cmd);
struct iscsi_seq *iscsit_get_seq_holder_for_datain(
struct iscsit_cmd *cmd,
u32 seq_send_order)
{
u32 i;
for (i = 0; i < cmd->seq_count; i++)
if (cmd->seq_list[i].seq_send_order == seq_send_order)
return &cmd->seq_list[i];
return NULL;
}
struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsit_cmd *cmd)
{
u32 i;
if (!cmd->seq_list) {
pr_err("struct iscsit_cmd->seq_list is NULL!\n");
return NULL;
}
for (i = 0; i < cmd->seq_count; i++) {
if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
continue;
if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
cmd->seq_send_order++;
return &cmd->seq_list[i];
}
}
return NULL;
}
struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
struct iscsit_cmd *cmd,
u32 r2t_sn)
{
struct iscsi_r2t *r2t;
spin_lock_bh(&cmd->r2t_lock);
list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
if (r2t->r2t_sn == r2t_sn) {
spin_unlock_bh(&cmd->r2t_lock);
return r2t;
}
}
spin_unlock_bh(&cmd->r2t_lock);
return NULL;
}
static inline int iscsit_check_received_cmdsn(struct iscsit_session *sess, u32 cmdsn)
{
u32 max_cmdsn;
int ret;
/*
* This is the proper method of checking received CmdSN against
* ExpCmdSN and MaxCmdSN values, as well as accounting for out
* of order CmdSNs due to multiple connection sessions and/or
* CRC failures.
*/
max_cmdsn = atomic_read(&sess->max_cmd_sn);
if (iscsi_sna_gt(cmdsn, max_cmdsn)) {
pr_err("Received CmdSN: 0x%08x is greater than"
" MaxCmdSN: 0x%08x, ignoring.\n", cmdsn, max_cmdsn);
ret = CMDSN_MAXCMDSN_OVERRUN;
} else if (cmdsn == sess->exp_cmd_sn) {
sess->exp_cmd_sn++;
pr_debug("Received CmdSN matches ExpCmdSN,"
" incremented ExpCmdSN to: 0x%08x\n",
sess->exp_cmd_sn);
ret = CMDSN_NORMAL_OPERATION;
} else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
pr_debug("Received CmdSN: 0x%08x is greater"
" than ExpCmdSN: 0x%08x, not acknowledging.\n",
cmdsn, sess->exp_cmd_sn);
ret = CMDSN_HIGHER_THAN_EXP;
} else {
pr_err("Received CmdSN: 0x%08x is less than"
" ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
sess->exp_cmd_sn);
ret = CMDSN_LOWER_THAN_EXP;
}
return ret;
}
/*
* Commands may be received out of order if MC/S is in use.
* Ensure they are executed in CmdSN order.
*/
int iscsit_sequence_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
unsigned char *buf, __be32 cmdsn)
{
int ret, cmdsn_ret;
bool reject = false;
u8 reason = ISCSI_REASON_BOOKMARK_NO_RESOURCES;
mutex_lock(&conn->sess->cmdsn_mutex);
cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, be32_to_cpu(cmdsn));
switch (cmdsn_ret) {
case CMDSN_NORMAL_OPERATION:
ret = iscsit_execute_cmd(cmd, 0);
if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
iscsit_execute_ooo_cmdsns(conn->sess);
else if (ret < 0) {
reject = true;
ret = CMDSN_ERROR_CANNOT_RECOVER;
}
break;
case CMDSN_HIGHER_THAN_EXP:
ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, be32_to_cpu(cmdsn));
if (ret < 0) {
reject = true;
ret = CMDSN_ERROR_CANNOT_RECOVER;
break;
}
ret = CMDSN_HIGHER_THAN_EXP;
break;
case CMDSN_LOWER_THAN_EXP:
case CMDSN_MAXCMDSN_OVERRUN:
default:
cmd->i_state = ISTATE_REMOVE;
iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
/*
* Existing callers for iscsit_sequence_cmd() will silently
* ignore commands with CMDSN_LOWER_THAN_EXP, so force this
* return for CMDSN_MAXCMDSN_OVERRUN as well.
*/
ret = CMDSN_LOWER_THAN_EXP;
break;
}
mutex_unlock(&conn->sess->cmdsn_mutex);
if (reject)
iscsit_reject_cmd(cmd, reason, buf);
return ret;
}
EXPORT_SYMBOL(iscsit_sequence_cmd);
int iscsit_check_unsolicited_dataout(struct iscsit_cmd *cmd, unsigned char *buf)
{
struct iscsit_conn *conn = cmd->conn;
struct se_cmd *se_cmd = &cmd->se_cmd;
struct iscsi_data *hdr = (struct iscsi_data *) buf;
u32 payload_length = ntoh24(hdr->dlength);
if (conn->sess->sess_ops->InitialR2T) {
pr_err("Received unexpected unsolicited data"
" while InitialR2T=Yes, protocol error.\n");
transport_send_check_condition_and_sense(se_cmd,
TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
return -1;
}
if ((cmd->first_burst_len + payload_length) >
conn->sess->sess_ops->FirstBurstLength) {
pr_err("Total %u bytes exceeds FirstBurstLength: %u"
" for this Unsolicited DataOut Burst.\n",
(cmd->first_burst_len + payload_length),
conn->sess->sess_ops->FirstBurstLength);
transport_send_check_condition_and_sense(se_cmd,
TCM_INCORRECT_AMOUNT_OF_DATA, 0);
return -1;
}
if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
return 0;
if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) &&
((cmd->first_burst_len + payload_length) !=
conn->sess->sess_ops->FirstBurstLength)) {
pr_err("Unsolicited non-immediate data received %u"
" does not equal FirstBurstLength: %u, and does"
" not equal ExpXferLen %u.\n",
(cmd->first_burst_len + payload_length),
conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length);
transport_send_check_condition_and_sense(se_cmd,
TCM_INCORRECT_AMOUNT_OF_DATA, 0);
return -1;
}
return 0;
}
struct iscsit_cmd *iscsit_find_cmd_from_itt(
struct iscsit_conn *conn,
itt_t init_task_tag)
{
struct iscsit_cmd *cmd;
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
if (cmd->init_task_tag == init_task_tag) {
spin_unlock_bh(&conn->cmd_lock);
return cmd;
}
}
spin_unlock_bh(&conn->cmd_lock);
pr_err("Unable to locate ITT: 0x%08x on CID: %hu",
init_task_tag, conn->cid);
return NULL;
}
EXPORT_SYMBOL(iscsit_find_cmd_from_itt);
struct iscsit_cmd *iscsit_find_cmd_from_itt_or_dump(
struct iscsit_conn *conn,
itt_t init_task_tag,
u32 length)
{
struct iscsit_cmd *cmd;
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT)
continue;
if (cmd->init_task_tag == init_task_tag) {
spin_unlock_bh(&conn->cmd_lock);
return cmd;
}
}
spin_unlock_bh(&conn->cmd_lock);
pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
" dumping payload\n", init_task_tag, conn->cid);
if (length)
iscsit_dump_data_payload(conn, length, 1);
return NULL;
}
EXPORT_SYMBOL(iscsit_find_cmd_from_itt_or_dump);
struct iscsit_cmd *iscsit_find_cmd_from_ttt(
struct iscsit_conn *conn,
u32 targ_xfer_tag)
{
struct iscsit_cmd *cmd = NULL;
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
if (cmd->targ_xfer_tag == targ_xfer_tag) {
spin_unlock_bh(&conn->cmd_lock);
return cmd;
}
}
spin_unlock_bh(&conn->cmd_lock);
pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
targ_xfer_tag, conn->cid);
return NULL;
}
int iscsit_find_cmd_for_recovery(
struct iscsit_session *sess,
struct iscsit_cmd **cmd_ptr,
struct iscsi_conn_recovery **cr_ptr,
itt_t init_task_tag)
{
struct iscsit_cmd *cmd = NULL;
struct iscsi_conn_recovery *cr;
/*
* Scan through the inactive connection recovery list's command list.
* If init_task_tag matches, the command still belongs to an inactive
* connection recovery entry.
*/
spin_lock(&sess->cr_i_lock);
list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
spin_lock(&cr->conn_recovery_cmd_lock);
list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
if (cmd->init_task_tag == init_task_tag) {
spin_unlock(&cr->conn_recovery_cmd_lock);
spin_unlock(&sess->cr_i_lock);
*cr_ptr = cr;
*cmd_ptr = cmd;
return -2;
}
}
spin_unlock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&sess->cr_i_lock);
/*
* Scan through the active connection recovery list's command list.
* If init_task_tag matches, the command is ready to be reassigned.
*/
spin_lock(&sess->cr_a_lock);
list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
spin_lock(&cr->conn_recovery_cmd_lock);
list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
if (cmd->init_task_tag == init_task_tag) {
spin_unlock(&cr->conn_recovery_cmd_lock);
spin_unlock(&sess->cr_a_lock);
*cr_ptr = cr;
*cmd_ptr = cmd;
return 0;
}
}
spin_unlock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&sess->cr_a_lock);
return -1;
}
void iscsit_add_cmd_to_immediate_queue(
struct iscsit_cmd *cmd,
struct iscsit_conn *conn,
u8 state)
{
struct iscsi_queue_req *qr;
qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
if (!qr) {
pr_err("Unable to allocate memory for"
" struct iscsi_queue_req\n");
return;
}
INIT_LIST_HEAD(&qr->qr_list);
qr->cmd = cmd;
qr->state = state;
spin_lock_bh(&conn->immed_queue_lock);
list_add_tail(&qr->qr_list, &conn->immed_queue_list);
atomic_inc(&cmd->immed_queue_count);
atomic_set(&conn->check_immediate_queue, 1);
spin_unlock_bh(&conn->immed_queue_lock);
wake_up(&conn->queues_wq);
}
EXPORT_SYMBOL(iscsit_add_cmd_to_immediate_queue);
struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsit_conn *conn)
{
struct iscsi_queue_req *qr;
spin_lock_bh(&conn->immed_queue_lock);
if (list_empty(&conn->immed_queue_list)) {
spin_unlock_bh(&conn->immed_queue_lock);
return NULL;
}
qr = list_first_entry(&conn->immed_queue_list,
struct iscsi_queue_req, qr_list);
list_del(&qr->qr_list);
if (qr->cmd)
atomic_dec(&qr->cmd->immed_queue_count);
spin_unlock_bh(&conn->immed_queue_lock);
return qr;
}
static void iscsit_remove_cmd_from_immediate_queue(
struct iscsit_cmd *cmd,
struct iscsit_conn *conn)
{
struct iscsi_queue_req *qr, *qr_tmp;
spin_lock_bh(&conn->immed_queue_lock);
if (!atomic_read(&cmd->immed_queue_count)) {
spin_unlock_bh(&conn->immed_queue_lock);
return;
}
list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
if (qr->cmd != cmd)
continue;
atomic_dec(&qr->cmd->immed_queue_count);
list_del(&qr->qr_list);
kmem_cache_free(lio_qr_cache, qr);
}
spin_unlock_bh(&conn->immed_queue_lock);
if (atomic_read(&cmd->immed_queue_count)) {
pr_err("ITT: 0x%08x immed_queue_count: %d\n",
cmd->init_task_tag,
atomic_read(&cmd->immed_queue_count));
}
}
int iscsit_add_cmd_to_response_queue(
struct iscsit_cmd *cmd,
struct iscsit_conn *conn,
u8 state)
{
struct iscsi_queue_req *qr;
qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
if (!qr) {
pr_err("Unable to allocate memory for"
" struct iscsi_queue_req\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&qr->qr_list);
qr->cmd = cmd;
qr->state = state;
spin_lock_bh(&conn->response_queue_lock);
list_add_tail(&qr->qr_list, &conn->response_queue_list);
atomic_inc(&cmd->response_queue_count);
spin_unlock_bh(&conn->response_queue_lock);
wake_up(&conn->queues_wq);
return 0;
}
struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsit_conn *conn)
{
struct iscsi_queue_req *qr;
spin_lock_bh(&conn->response_queue_lock);
if (list_empty(&conn->response_queue_list)) {
spin_unlock_bh(&conn->response_queue_lock);
return NULL;
}
qr = list_first_entry(&conn->response_queue_list,
struct iscsi_queue_req, qr_list);
list_del(&qr->qr_list);
if (qr->cmd)
atomic_dec(&qr->cmd->response_queue_count);
spin_unlock_bh(&conn->response_queue_lock);
return qr;
}
static void iscsit_remove_cmd_from_response_queue(
struct iscsit_cmd *cmd,
struct iscsit_conn *conn)
{
struct iscsi_queue_req *qr, *qr_tmp;
spin_lock_bh(&conn->response_queue_lock);
if (!atomic_read(&cmd->response_queue_count)) {
spin_unlock_bh(&conn->response_queue_lock);
return;
}
list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
qr_list) {
if (qr->cmd != cmd)
continue;
atomic_dec(&qr->cmd->response_queue_count);
list_del(&qr->qr_list);
kmem_cache_free(lio_qr_cache, qr);
}
spin_unlock_bh(&conn->response_queue_lock);
if (atomic_read(&cmd->response_queue_count)) {
pr_err("ITT: 0x%08x response_queue_count: %d\n",
cmd->init_task_tag,
atomic_read(&cmd->response_queue_count));
}
}
bool iscsit_conn_all_queues_empty(struct iscsit_conn *conn)
{
bool empty;
spin_lock_bh(&conn->immed_queue_lock);
empty = list_empty(&conn->immed_queue_list);
spin_unlock_bh(&conn->immed_queue_lock);
if (!empty)
return empty;
spin_lock_bh(&conn->response_queue_lock);
empty = list_empty(&conn->response_queue_list);
spin_unlock_bh(&conn->response_queue_lock);
return empty;
}
void iscsit_free_queue_reqs_for_conn(struct iscsit_conn *conn)
{
struct iscsi_queue_req *qr, *qr_tmp;
spin_lock_bh(&conn->immed_queue_lock);
list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
list_del(&qr->qr_list);
if (qr->cmd)
atomic_dec(&qr->cmd->immed_queue_count);
kmem_cache_free(lio_qr_cache, qr);
}
spin_unlock_bh(&conn->immed_queue_lock);
spin_lock_bh(&conn->response_queue_lock);
list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
qr_list) {
list_del(&qr->qr_list);
if (qr->cmd)
atomic_dec(&qr->cmd->response_queue_count);
kmem_cache_free(lio_qr_cache, qr);
}
spin_unlock_bh(&conn->response_queue_lock);
}
void iscsit_release_cmd(struct iscsit_cmd *cmd)
{
struct iscsit_session *sess;
struct se_cmd *se_cmd = &cmd->se_cmd;
WARN_ON(!list_empty(&cmd->i_conn_node));
if (cmd->conn)
sess = cmd->conn->sess;
else
sess = cmd->sess;
BUG_ON(!sess || !sess->se_sess);
kfree(cmd->buf_ptr);
kfree(cmd->pdu_list);
kfree(cmd->seq_list);
kfree(cmd->tmr_req);
kfree(cmd->overflow_buf);
kfree(cmd->iov_data);
kfree(cmd->text_in_ptr);
target_free_tag(sess->se_sess, se_cmd);
}
EXPORT_SYMBOL(iscsit_release_cmd);
void __iscsit_free_cmd(struct iscsit_cmd *cmd, bool check_queues)
{
struct iscsit_conn *conn = cmd->conn;
WARN_ON(!list_empty(&cmd->i_conn_node));
if (cmd->data_direction == DMA_TO_DEVICE) {
iscsit_stop_dataout_timer(cmd);
iscsit_free_r2ts_from_list(cmd);
}
if (cmd->data_direction == DMA_FROM_DEVICE)
iscsit_free_all_datain_reqs(cmd);
if (conn && check_queues) {
iscsit_remove_cmd_from_immediate_queue(cmd, conn);
iscsit_remove_cmd_from_response_queue(cmd, conn);
}
if (conn && conn->conn_transport->iscsit_unmap_cmd)
conn->conn_transport->iscsit_unmap_cmd(conn, cmd);
}
void iscsit_free_cmd(struct iscsit_cmd *cmd, bool shutdown)
{
struct se_cmd *se_cmd = cmd->se_cmd.se_tfo ? &cmd->se_cmd : NULL;
int rc;
WARN_ON(!list_empty(&cmd->i_conn_node));
__iscsit_free_cmd(cmd, shutdown);
if (se_cmd) {
rc = transport_generic_free_cmd(se_cmd, shutdown);
if (!rc && shutdown && se_cmd->se_sess) {
__iscsit_free_cmd(cmd, shutdown);
target_put_sess_cmd(se_cmd);
}
} else {
iscsit_release_cmd(cmd);
}
}
EXPORT_SYMBOL(iscsit_free_cmd);
bool iscsit_check_session_usage_count(struct iscsit_session *sess,
bool can_sleep)
{
spin_lock_bh(&sess->session_usage_lock);
if (sess->session_usage_count != 0) {
sess->session_waiting_on_uc = 1;
spin_unlock_bh(&sess->session_usage_lock);
if (!can_sleep)
return true;
wait_for_completion(&sess->session_waiting_on_uc_comp);
return false;
}
spin_unlock_bh(&sess->session_usage_lock);
return false;
}
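/*
 * Editor's note (illustration only, not part of the original driver): the
 * session usage count implements a simple reference count with one waiter.
 * Assuming the declarations from iscsi_target_util.h, a user of the session
 * would typically do:
 *
 *	iscsit_inc_session_usage_count(sess);
 *	...use the session without holding session_usage_lock...
 *	iscsit_dec_session_usage_count(sess);
 *
 * while a teardown path calls iscsit_check_session_usage_count(sess, true),
 * which sleeps until the final iscsit_dec_session_usage_count() drops the
 * count to zero and completes session_waiting_on_uc_comp.
 */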
void iscsit_dec_session_usage_count(struct iscsit_session *sess)
{
spin_lock_bh(&sess->session_usage_lock);
sess->session_usage_count--;
if (!sess->session_usage_count && sess->session_waiting_on_uc)
complete(&sess->session_waiting_on_uc_comp);
spin_unlock_bh(&sess->session_usage_lock);
}
void iscsit_inc_session_usage_count(struct iscsit_session *sess)
{
spin_lock_bh(&sess->session_usage_lock);
sess->session_usage_count++;
spin_unlock_bh(&sess->session_usage_lock);
}
struct iscsit_conn *iscsit_get_conn_from_cid(struct iscsit_session *sess, u16 cid)
{
struct iscsit_conn *conn;
spin_lock_bh(&sess->conn_lock);
list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
if ((conn->cid == cid) &&
(conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
iscsit_inc_conn_usage_count(conn);
spin_unlock_bh(&sess->conn_lock);
return conn;
}
}
spin_unlock_bh(&sess->conn_lock);
return NULL;
}
struct iscsit_conn *iscsit_get_conn_from_cid_rcfr(struct iscsit_session *sess, u16 cid)
{
struct iscsit_conn *conn;
spin_lock_bh(&sess->conn_lock);
list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
if (conn->cid == cid) {
iscsit_inc_conn_usage_count(conn);
spin_lock(&conn->state_lock);
atomic_set(&conn->connection_wait_rcfr, 1);
spin_unlock(&conn->state_lock);
spin_unlock_bh(&sess->conn_lock);
return conn;
}
}
spin_unlock_bh(&sess->conn_lock);
return NULL;
}
void iscsit_check_conn_usage_count(struct iscsit_conn *conn)
{
spin_lock_bh(&conn->conn_usage_lock);
if (conn->conn_usage_count != 0) {
conn->conn_waiting_on_uc = 1;
spin_unlock_bh(&conn->conn_usage_lock);
wait_for_completion(&conn->conn_waiting_on_uc_comp);
return;
}
spin_unlock_bh(&conn->conn_usage_lock);
}
void iscsit_dec_conn_usage_count(struct iscsit_conn *conn)
{
spin_lock_bh(&conn->conn_usage_lock);
conn->conn_usage_count--;
if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
complete(&conn->conn_waiting_on_uc_comp);
spin_unlock_bh(&conn->conn_usage_lock);
}
void iscsit_inc_conn_usage_count(struct iscsit_conn *conn)
{
spin_lock_bh(&conn->conn_usage_lock);
conn->conn_usage_count++;
spin_unlock_bh(&conn->conn_usage_lock);
}
static int iscsit_add_nopin(struct iscsit_conn *conn, int want_response)
{
u8 state;
struct iscsit_cmd *cmd;
cmd = iscsit_allocate_cmd(conn, TASK_RUNNING);
if (!cmd)
return -1;
cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
ISTATE_SEND_NOPIN_NO_RESPONSE;
cmd->init_task_tag = RESERVED_ITT;
cmd->targ_xfer_tag = (want_response) ?
session_get_next_ttt(conn->sess) : 0xFFFFFFFF;
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
if (want_response)
iscsit_start_nopin_response_timer(conn);
iscsit_add_cmd_to_immediate_queue(cmd, conn, state);
return 0;
}
void iscsit_handle_nopin_response_timeout(struct timer_list *t)
{
struct iscsit_conn *conn = from_timer(conn, t, nopin_response_timer);
struct iscsit_session *sess = conn->sess;
iscsit_inc_conn_usage_count(conn);
spin_lock_bh(&conn->nopin_timer_lock);
if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
spin_unlock_bh(&conn->nopin_timer_lock);
iscsit_dec_conn_usage_count(conn);
return;
}
pr_err("Did not receive response to NOPIN on CID: %hu, failing"
" connection for I_T Nexus %s,i,0x%6phN,%s,t,0x%02x\n",
conn->cid, sess->sess_ops->InitiatorName, sess->isid,
sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
spin_unlock_bh(&conn->nopin_timer_lock);
iscsit_fill_cxn_timeout_err_stats(sess);
iscsit_cause_connection_reinstatement(conn, 0);
iscsit_dec_conn_usage_count(conn);
}
void iscsit_mod_nopin_response_timer(struct iscsit_conn *conn)
{
struct iscsit_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
spin_lock_bh(&conn->nopin_timer_lock);
if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
spin_unlock_bh(&conn->nopin_timer_lock);
return;
}
mod_timer(&conn->nopin_response_timer,
(get_jiffies_64() + na->nopin_response_timeout * HZ));
spin_unlock_bh(&conn->nopin_timer_lock);
}
void iscsit_start_nopin_response_timer(struct iscsit_conn *conn)
{
struct iscsit_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
spin_lock_bh(&conn->nopin_timer_lock);
if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
spin_unlock_bh(&conn->nopin_timer_lock);
return;
}
conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
mod_timer(&conn->nopin_response_timer,
jiffies + na->nopin_response_timeout * HZ);
pr_debug("Started NOPIN Response Timer on CID: %d to %u"
" seconds\n", conn->cid, na->nopin_response_timeout);
spin_unlock_bh(&conn->nopin_timer_lock);
}
void iscsit_stop_nopin_response_timer(struct iscsit_conn *conn)
{
spin_lock_bh(&conn->nopin_timer_lock);
if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
spin_unlock_bh(&conn->nopin_timer_lock);
return;
}
conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
spin_unlock_bh(&conn->nopin_timer_lock);
del_timer_sync(&conn->nopin_response_timer);
spin_lock_bh(&conn->nopin_timer_lock);
conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
spin_unlock_bh(&conn->nopin_timer_lock);
}
void iscsit_handle_nopin_timeout(struct timer_list *t)
{
struct iscsit_conn *conn = from_timer(conn, t, nopin_timer);
iscsit_inc_conn_usage_count(conn);
spin_lock_bh(&conn->nopin_timer_lock);
if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
spin_unlock_bh(&conn->nopin_timer_lock);
iscsit_dec_conn_usage_count(conn);
return;
}
conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
spin_unlock_bh(&conn->nopin_timer_lock);
iscsit_add_nopin(conn, 1);
iscsit_dec_conn_usage_count(conn);
}
void __iscsit_start_nopin_timer(struct iscsit_conn *conn)
{
struct iscsit_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
lockdep_assert_held(&conn->nopin_timer_lock);
/*
* NOPIN timeout is disabled.
*/
if (!na->nopin_timeout)
return;
if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
return;
conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ);
pr_debug("Started NOPIN Timer on CID: %d at %u second"
" interval\n", conn->cid, na->nopin_timeout);
}
void iscsit_start_nopin_timer(struct iscsit_conn *conn)
{
spin_lock_bh(&conn->nopin_timer_lock);
__iscsit_start_nopin_timer(conn);
spin_unlock_bh(&conn->nopin_timer_lock);
}
void iscsit_stop_nopin_timer(struct iscsit_conn *conn)
{
spin_lock_bh(&conn->nopin_timer_lock);
if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
spin_unlock_bh(&conn->nopin_timer_lock);
return;
}
conn->nopin_timer_flags |= ISCSI_TF_STOP;
spin_unlock_bh(&conn->nopin_timer_lock);
del_timer_sync(&conn->nopin_timer);
spin_lock_bh(&conn->nopin_timer_lock);
conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
spin_unlock_bh(&conn->nopin_timer_lock);
}
void iscsit_login_timeout(struct timer_list *t)
{
struct iscsit_conn *conn = from_timer(conn, t, login_timer);
struct iscsi_login *login = conn->login;
pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n");
spin_lock_bh(&conn->login_timer_lock);
login->login_failed = 1;
if (conn->login_kworker) {
pr_debug("Sending SIGINT to conn->login_kworker %s/%d\n",
conn->login_kworker->comm, conn->login_kworker->pid);
send_sig(SIGINT, conn->login_kworker, 1);
} else {
schedule_delayed_work(&conn->login_work, 0);
}
spin_unlock_bh(&conn->login_timer_lock);
}
void iscsit_start_login_timer(struct iscsit_conn *conn, struct task_struct *kthr)
{
pr_debug("Login timer started\n");
conn->login_kworker = kthr;
mod_timer(&conn->login_timer, jiffies + TA_LOGIN_TIMEOUT * HZ);
}
int iscsit_set_login_timer_kworker(struct iscsit_conn *conn, struct task_struct *kthr)
{
struct iscsi_login *login = conn->login;
int ret = 0;
spin_lock_bh(&conn->login_timer_lock);
if (login->login_failed) {
/* The timer has already expired */
ret = -1;
} else {
conn->login_kworker = kthr;
}
spin_unlock_bh(&conn->login_timer_lock);
return ret;
}
void iscsit_stop_login_timer(struct iscsit_conn *conn)
{
pr_debug("Login timer stopped\n");
timer_delete_sync(&conn->login_timer);
}
int iscsit_send_tx_data(
struct iscsit_cmd *cmd,
struct iscsit_conn *conn,
int use_misc)
{
int tx_sent, tx_size;
u32 iov_count;
struct kvec *iov;
send_data:
tx_size = cmd->tx_size;
if (!use_misc) {
iov = &cmd->iov_data[0];
iov_count = cmd->iov_data_count;
} else {
iov = &cmd->iov_misc[0];
iov_count = cmd->iov_misc_count;
}
tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
if (tx_size != tx_sent) {
if (tx_sent == -EAGAIN) {
pr_err("tx_data() returned -EAGAIN\n");
goto send_data;
} else
return -1;
}
cmd->tx_size = 0;
return 0;
}
int iscsit_fe_sendpage_sg(
struct iscsit_cmd *cmd,
struct iscsit_conn *conn)
{
struct scatterlist *sg = cmd->first_data_sg;
struct bio_vec bvec;
struct msghdr msghdr = { .msg_flags = MSG_SPLICE_PAGES, };
struct kvec iov;
u32 tx_hdr_size, data_len;
u32 offset = cmd->first_data_sg_off;
int tx_sent, iov_off;
send_hdr:
tx_hdr_size = ISCSI_HDR_LEN;
if (conn->conn_ops->HeaderDigest)
tx_hdr_size += ISCSI_CRC_LEN;
iov.iov_base = cmd->pdu;
iov.iov_len = tx_hdr_size;
tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
if (tx_hdr_size != tx_sent) {
if (tx_sent == -EAGAIN) {
pr_err("tx_data() returned -EAGAIN\n");
goto send_hdr;
}
return -1;
}
data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
/*
* Set iov_off used by padding and data digest tx_data() calls below
* in order to determine proper offset into cmd->iov_data[]
*/
if (conn->conn_ops->DataDigest) {
data_len -= ISCSI_CRC_LEN;
if (cmd->padding)
iov_off = (cmd->iov_data_count - 2);
else
iov_off = (cmd->iov_data_count - 1);
} else {
iov_off = (cmd->iov_data_count - 1);
}
/*
* Perform sendpage() for each page in the scatterlist
*/
while (data_len) {
u32 space = (sg->length - offset);
u32 sub_len = min_t(u32, data_len, space);
send_pg:
bvec_set_page(&bvec, sg_page(sg), sub_len, sg->offset + offset);
iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, sub_len);
tx_sent = conn->sock->ops->sendmsg(conn->sock, &msghdr,
sub_len);
if (tx_sent != sub_len) {
if (tx_sent == -EAGAIN) {
pr_err("sendmsg/splice returned -EAGAIN\n");
goto send_pg;
}
pr_err("sendmsg/splice failure: %d\n", tx_sent);
return -1;
}
data_len -= sub_len;
offset = 0;
sg = sg_next(sg);
}
send_padding:
if (cmd->padding) {
struct kvec *iov_p = &cmd->iov_data[iov_off++];
tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
if (cmd->padding != tx_sent) {
if (tx_sent == -EAGAIN) {
pr_err("tx_data() returned -EAGAIN\n");
goto send_padding;
}
return -1;
}
}
send_datacrc:
if (conn->conn_ops->DataDigest) {
struct kvec *iov_d = &cmd->iov_data[iov_off];
tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
if (ISCSI_CRC_LEN != tx_sent) {
if (tx_sent == -EAGAIN) {
pr_err("tx_data() returned -EAGAIN\n");
goto send_datacrc;
}
return -1;
}
}
return 0;
}
/*
 * This function is mainly used to send an ISCSI_TARG_LOGIN_RSP PDU back to
 * the Initiator when an exception condition occurs, with the errors set in
 * status_class and status_detail.
 *
 * Parameters: iSCSI Connection, Status Class, Status Detail.
 * Returns: 0 on success, -1 on error.
 */
int iscsit_tx_login_rsp(struct iscsit_conn *conn, u8 status_class, u8 status_detail)
{
struct iscsi_login_rsp *hdr;
struct iscsi_login *login = conn->conn_login;
login->login_failed = 1;
iscsit_collect_login_stats(conn, status_class, status_detail);
memset(&login->rsp[0], 0, ISCSI_HDR_LEN);
hdr = (struct iscsi_login_rsp *)&login->rsp[0];
hdr->opcode = ISCSI_OP_LOGIN_RSP;
hdr->status_class = status_class;
hdr->status_detail = status_detail;
hdr->itt = conn->login_itt;
return conn->conn_transport->iscsit_put_login_tx(conn, login, 0);
}
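/*
 * Editor's sketch (not part of the original driver): rejecting a login after
 * a failed CHAP exchange would typically use iscsit_tx_login_rsp() as below.
 * Both status constants appear elsewhere in this driver; the wrapper name is
 * hypothetical.
 */
static int __maybe_unused example_reject_login_auth_failed(struct iscsit_conn *conn)
{
	return iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
				   ISCSI_LOGIN_STATUS_AUTH_FAILED);
}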
void iscsit_print_session_params(struct iscsit_session *sess)
{
struct iscsit_conn *conn;
pr_debug("-----------------------------[Session Params for"
" SID: %u]-----------------------------\n", sess->sid);
spin_lock_bh(&sess->conn_lock);
list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
iscsi_dump_conn_ops(conn->conn_ops);
spin_unlock_bh(&sess->conn_lock);
iscsi_dump_sess_ops(sess->sess_ops);
}
int rx_data(
struct iscsit_conn *conn,
struct kvec *iov,
int iov_count,
int data)
{
int rx_loop = 0, total_rx = 0;
struct msghdr msg;
if (!conn || !conn->sock || !conn->conn_ops)
return -1;
memset(&msg, 0, sizeof(struct msghdr));
iov_iter_kvec(&msg.msg_iter, ITER_DEST, iov, iov_count, data);
while (msg_data_left(&msg)) {
rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL);
if (rx_loop <= 0) {
pr_debug("rx_loop: %d total_rx: %d\n",
rx_loop, total_rx);
return rx_loop;
}
total_rx += rx_loop;
pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
rx_loop, total_rx, data);
}
return total_rx;
}
int tx_data(
struct iscsit_conn *conn,
struct kvec *iov,
int iov_count,
int data)
{
struct msghdr msg;
int total_tx = 0;
if (!conn || !conn->sock || !conn->conn_ops)
return -1;
if (data <= 0) {
pr_err("Data length is: %d\n", data);
return -1;
}
memset(&msg, 0, sizeof(struct msghdr));
iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, iov, iov_count, data);
while (msg_data_left(&msg)) {
int tx_loop = sock_sendmsg(conn->sock, &msg);
if (tx_loop <= 0) {
pr_debug("tx_loop: %d total_tx %d\n",
tx_loop, total_tx);
return tx_loop;
}
total_tx += tx_loop;
pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
tx_loop, total_tx, data);
}
return total_tx;
}
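/*
 * Editor's sketch (not part of the original driver): callers in this file
 * drive tx_data() by filling a kvec array with the PDU header and payload
 * and passing the total byte count; tx_data() then loops over sock_sendmsg()
 * until everything has been sent or the socket fails.  Buffer names here are
 * hypothetical.
 */
static int __maybe_unused example_send_hdr_and_payload(
	struct iscsit_conn *conn,
	void *hdr, u32 hdr_len,
	void *payload, u32 payload_len)
{
	struct kvec iov[2];
	int total = hdr_len + payload_len;

	iov[0].iov_base = hdr;
	iov[0].iov_len  = hdr_len;
	iov[1].iov_base = payload;
	iov[1].iov_len  = payload_len;

	return (tx_data(conn, iov, 2, total) == total) ? 0 : -1;
}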
void iscsit_collect_login_stats(
struct iscsit_conn *conn,
u8 status_class,
u8 status_detail)
{
struct iscsi_param *intrname = NULL;
struct iscsi_tiqn *tiqn;
struct iscsi_login_stats *ls;
tiqn = iscsit_snmp_get_tiqn(conn);
if (!tiqn)
return;
ls = &tiqn->login_stats;
spin_lock(&ls->lock);
if (status_class == ISCSI_STATUS_CLS_SUCCESS)
ls->accepts++;
else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
ls->redirects++;
ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
(status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
ls->authenticate_fails++;
ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHENTICATE;
} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
(status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
ls->authorize_fails++;
ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
(status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
ls->negotiate_fails++;
ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
} else {
ls->other_fails++;
ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
}
/* Save initiator name, ip address and time, if it is a failed login */
if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
if (conn->param_list)
intrname = iscsi_find_param_from_key(INITIATORNAME,
conn->param_list);
strscpy(ls->last_intr_fail_name,
(intrname ? intrname->value : "Unknown"),
sizeof(ls->last_intr_fail_name));
ls->last_intr_fail_ip_family = conn->login_family;
ls->last_intr_fail_sockaddr = conn->login_sockaddr;
ls->last_fail_time = get_jiffies_64();
}
spin_unlock(&ls->lock);
}
struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsit_conn *conn)
{
struct iscsi_portal_group *tpg;
if (!conn)
return NULL;
tpg = conn->tpg;
if (!tpg)
return NULL;
if (!tpg->tpg_tiqn)
return NULL;
return tpg->tpg_tiqn;
}
void iscsit_fill_cxn_timeout_err_stats(struct iscsit_session *sess)
{
struct iscsi_portal_group *tpg = sess->tpg;
struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
if (!tiqn)
return;
spin_lock_bh(&tiqn->sess_err_stats.lock);
strscpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
sess->sess_ops->InitiatorName,
sizeof(tiqn->sess_err_stats.last_sess_fail_rem_name));
tiqn->sess_err_stats.last_sess_failure_type =
ISCSI_SESS_ERR_CXN_TIMEOUT;
tiqn->sess_err_stats.cxn_timeout_errors++;
atomic_long_inc(&sess->conn_timeout_errors);
spin_unlock_bh(&tiqn->sess_err_stats.lock);
}
|
linux-master
|
drivers/target/iscsi/iscsi_target_util.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
* This file contains error recovery level zero functions used by
* the iSCSI Target driver.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <linux/sched/signal.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
/*
* Used to set values in struct iscsit_cmd that iscsit_dataout_check_sequence()
 * checks against to determine whether a PDU's Offset+Length is within the current
* DataOUT Sequence. Used for DataSequenceInOrder=Yes only.
*/
void iscsit_set_dataout_sequence_values(
struct iscsit_cmd *cmd)
{
struct iscsit_conn *conn = cmd->conn;
/*
* Still set seq_start_offset and seq_end_offset for Unsolicited
* DataOUT, even if DataSequenceInOrder=No.
*/
if (cmd->unsolicited_data) {
cmd->seq_start_offset = cmd->write_data_done;
cmd->seq_end_offset = min(cmd->se_cmd.data_length,
conn->sess->sess_ops->FirstBurstLength);
return;
}
if (!conn->sess->sess_ops->DataSequenceInOrder)
return;
if (!cmd->seq_start_offset && !cmd->seq_end_offset) {
cmd->seq_start_offset = cmd->write_data_done;
cmd->seq_end_offset = (cmd->se_cmd.data_length >
conn->sess->sess_ops->MaxBurstLength) ?
(cmd->write_data_done +
conn->sess->sess_ops->MaxBurstLength) : cmd->se_cmd.data_length;
} else {
cmd->seq_start_offset = cmd->seq_end_offset;
cmd->seq_end_offset = ((cmd->seq_end_offset +
conn->sess->sess_ops->MaxBurstLength) >=
cmd->se_cmd.data_length) ? cmd->se_cmd.data_length :
(cmd->seq_end_offset +
conn->sess->sess_ops->MaxBurstLength);
}
}
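/*
 * Editor's note: a worked example added for this document.  With
 * DataSequenceInOrder=Yes, MaxBurstLength=65536 and a solicited write of
 * 163840 bytes, successive calls to iscsit_set_dataout_sequence_values()
 * yield the windows [0, 65536), [65536, 131072) and finally
 * [131072, 163840); the last window is clamped to the command's data_length.
 * For unsolicited data the window is always [write_data_done,
 * min(data_length, FirstBurstLength)).
 */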
static int iscsit_dataout_within_command_recovery_check(
struct iscsit_cmd *cmd,
unsigned char *buf)
{
struct iscsit_conn *conn = cmd->conn;
struct iscsi_data *hdr = (struct iscsi_data *) buf;
u32 payload_length = ntoh24(hdr->dlength);
/*
* We do the within-command recovery checks here as it is
* the first function called in iscsi_check_pre_dataout().
* Basically, if we are in within-command recovery and
* the PDU does not contain the offset the sequence needs,
* dump the payload.
*
* This only applies to DataPDUInOrder=Yes, for
* DataPDUInOrder=No we only re-request the failed PDU
* and check that all PDUs in a sequence are received
* upon end of sequence.
*/
if (conn->sess->sess_ops->DataSequenceInOrder) {
if ((cmd->cmd_flags & ICF_WITHIN_COMMAND_RECOVERY) &&
cmd->write_data_done != be32_to_cpu(hdr->offset))
goto dump;
cmd->cmd_flags &= ~ICF_WITHIN_COMMAND_RECOVERY;
} else {
struct iscsi_seq *seq;
seq = iscsit_get_seq_holder(cmd, be32_to_cpu(hdr->offset),
payload_length);
if (!seq)
return DATAOUT_CANNOT_RECOVER;
/*
* Set the struct iscsi_seq pointer to reuse later.
*/
cmd->seq_ptr = seq;
if (conn->sess->sess_ops->DataPDUInOrder) {
if (seq->status ==
DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY &&
(seq->offset != be32_to_cpu(hdr->offset) ||
seq->data_sn != be32_to_cpu(hdr->datasn)))
goto dump;
} else {
if (seq->status ==
DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY &&
seq->data_sn != be32_to_cpu(hdr->datasn))
goto dump;
}
if (seq->status == DATAOUT_SEQUENCE_COMPLETE)
goto dump;
if (seq->status != DATAOUT_SEQUENCE_COMPLETE)
seq->status = 0;
}
return DATAOUT_NORMAL;
dump:
pr_err("Dumping DataOUT PDU Offset: %u Length: %d DataSN:"
" 0x%08x\n", hdr->offset, payload_length, hdr->datasn);
return iscsit_dump_data_payload(conn, payload_length, 1);
}
static int iscsit_dataout_check_unsolicited_sequence(
struct iscsit_cmd *cmd,
unsigned char *buf)
{
u32 first_burst_len;
struct iscsit_conn *conn = cmd->conn;
struct iscsi_data *hdr = (struct iscsi_data *) buf;
u32 payload_length = ntoh24(hdr->dlength);
if ((be32_to_cpu(hdr->offset) < cmd->seq_start_offset) ||
((be32_to_cpu(hdr->offset) + payload_length) > cmd->seq_end_offset)) {
pr_err("Command ITT: 0x%08x with Offset: %u,"
" Length: %u outside of Unsolicited Sequence %u:%u while"
" DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
be32_to_cpu(hdr->offset), payload_length, cmd->seq_start_offset,
cmd->seq_end_offset);
return DATAOUT_CANNOT_RECOVER;
}
first_burst_len = (cmd->first_burst_len + payload_length);
if (first_burst_len > conn->sess->sess_ops->FirstBurstLength) {
pr_err("Total %u bytes exceeds FirstBurstLength: %u"
" for this Unsolicited DataOut Burst.\n",
first_burst_len, conn->sess->sess_ops->FirstBurstLength);
transport_send_check_condition_and_sense(&cmd->se_cmd,
TCM_INCORRECT_AMOUNT_OF_DATA, 0);
return DATAOUT_CANNOT_RECOVER;
}
/*
* Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity
* checks for the current Unsolicited DataOUT Sequence.
*/
if (hdr->flags & ISCSI_FLAG_CMD_FINAL) {
/*
* Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No, end of
* sequence checks are handled in
* iscsit_dataout_datapduinorder_no_fbit().
*/
if (!conn->sess->sess_ops->DataPDUInOrder)
goto out;
if ((first_burst_len != cmd->se_cmd.data_length) &&
(first_burst_len != conn->sess->sess_ops->FirstBurstLength)) {
pr_err("Unsolicited non-immediate data"
" received %u does not equal FirstBurstLength: %u, and"
" does not equal ExpXferLen %u.\n", first_burst_len,
conn->sess->sess_ops->FirstBurstLength,
cmd->se_cmd.data_length);
transport_send_check_condition_and_sense(&cmd->se_cmd,
TCM_INCORRECT_AMOUNT_OF_DATA, 0);
return DATAOUT_CANNOT_RECOVER;
}
} else {
if (first_burst_len == conn->sess->sess_ops->FirstBurstLength) {
pr_err("Command ITT: 0x%08x reached"
" FirstBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
" error.\n", cmd->init_task_tag,
conn->sess->sess_ops->FirstBurstLength);
return DATAOUT_CANNOT_RECOVER;
}
if (first_burst_len == cmd->se_cmd.data_length) {
pr_err("Command ITT: 0x%08x reached"
" ExpXferLen: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
" error.\n", cmd->init_task_tag, cmd->se_cmd.data_length);
return DATAOUT_CANNOT_RECOVER;
}
}
out:
return DATAOUT_NORMAL;
}
static int iscsit_dataout_check_sequence(
struct iscsit_cmd *cmd,
unsigned char *buf)
{
u32 next_burst_len;
struct iscsit_conn *conn = cmd->conn;
struct iscsi_seq *seq = NULL;
struct iscsi_data *hdr = (struct iscsi_data *) buf;
u32 payload_length = ntoh24(hdr->dlength);
/*
* For DataSequenceInOrder=Yes: Check that the offset and offset+length
* is within range as defined by iscsi_set_dataout_sequence_values().
*
 * For DataSequenceInOrder=No: Check that a struct iscsi_seq exists for the
 * offset+length tuple.
*/
if (conn->sess->sess_ops->DataSequenceInOrder) {
/*
 * Due to the possibility of recovery DataOUT sent by the initiator
 * fulfilling a Recovery R2T, it is best to just dump the payload
 * here instead of erroring out.
*/
if ((be32_to_cpu(hdr->offset) < cmd->seq_start_offset) ||
((be32_to_cpu(hdr->offset) + payload_length) > cmd->seq_end_offset)) {
pr_err("Command ITT: 0x%08x with Offset: %u,"
" Length: %u outside of Sequence %u:%u while"
" DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
be32_to_cpu(hdr->offset), payload_length, cmd->seq_start_offset,
cmd->seq_end_offset);
if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
return DATAOUT_CANNOT_RECOVER;
return DATAOUT_WITHIN_COMMAND_RECOVERY;
}
next_burst_len = (cmd->next_burst_len + payload_length);
} else {
seq = iscsit_get_seq_holder(cmd, be32_to_cpu(hdr->offset),
payload_length);
if (!seq)
return DATAOUT_CANNOT_RECOVER;
/*
* Set the struct iscsi_seq pointer to reuse later.
*/
cmd->seq_ptr = seq;
if (seq->status == DATAOUT_SEQUENCE_COMPLETE) {
if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
return DATAOUT_CANNOT_RECOVER;
return DATAOUT_WITHIN_COMMAND_RECOVERY;
}
next_burst_len = (seq->next_burst_len + payload_length);
}
if (next_burst_len > conn->sess->sess_ops->MaxBurstLength) {
pr_err("Command ITT: 0x%08x, NextBurstLength: %u and"
" Length: %u exceeds MaxBurstLength: %u. protocol"
" error.\n", cmd->init_task_tag,
(next_burst_len - payload_length),
payload_length, conn->sess->sess_ops->MaxBurstLength);
return DATAOUT_CANNOT_RECOVER;
}
/*
* Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity
* checks for the current DataOUT Sequence.
*/
if (hdr->flags & ISCSI_FLAG_CMD_FINAL) {
/*
* Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No, end of
* sequence checks are handled in
* iscsit_dataout_datapduinorder_no_fbit().
*/
if (!conn->sess->sess_ops->DataPDUInOrder)
goto out;
if (conn->sess->sess_ops->DataSequenceInOrder) {
if ((next_burst_len <
conn->sess->sess_ops->MaxBurstLength) &&
((cmd->write_data_done + payload_length) <
cmd->se_cmd.data_length)) {
pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
" before end of DataOUT sequence, protocol"
" error.\n", cmd->init_task_tag);
return DATAOUT_CANNOT_RECOVER;
}
} else {
if (next_burst_len < seq->xfer_len) {
pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
" before end of DataOUT sequence, protocol"
" error.\n", cmd->init_task_tag);
return DATAOUT_CANNOT_RECOVER;
}
}
} else {
if (conn->sess->sess_ops->DataSequenceInOrder) {
if (next_burst_len ==
conn->sess->sess_ops->MaxBurstLength) {
pr_err("Command ITT: 0x%08x reached"
" MaxBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is"
" not set, protocol error.", cmd->init_task_tag,
conn->sess->sess_ops->MaxBurstLength);
return DATAOUT_CANNOT_RECOVER;
}
if ((cmd->write_data_done + payload_length) ==
cmd->se_cmd.data_length) {
pr_err("Command ITT: 0x%08x reached"
" last DataOUT PDU in sequence but ISCSI_FLAG_"
"CMD_FINAL is not set, protocol error.\n",
cmd->init_task_tag);
return DATAOUT_CANNOT_RECOVER;
}
} else {
if (next_burst_len == seq->xfer_len) {
pr_err("Command ITT: 0x%08x reached"
" last DataOUT PDU in sequence but ISCSI_FLAG_"
"CMD_FINAL is not set, protocol error.\n",
cmd->init_task_tag);
return DATAOUT_CANNOT_RECOVER;
}
}
}
out:
return DATAOUT_NORMAL;
}
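/*
 * Editor's note: a worked example for the DataSequenceInOrder=Yes checks
 * above.  With MaxBurstLength=65536, an initiator might send DataOUT PDUs of
 * 32768, 24576 and 8192 bytes within one burst: next_burst_len grows to
 * 32768, 57344 and 65536.  The third PDU must carry ISCSI_FLAG_CMD_FINAL
 * (the burst reached MaxBurstLength); a missing F bit there, or any further
 * PDU in the same burst, is treated as a protocol error.
 */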
static int iscsit_dataout_check_datasn(
struct iscsit_cmd *cmd,
unsigned char *buf)
{
u32 data_sn = 0;
struct iscsit_conn *conn = cmd->conn;
struct iscsi_data *hdr = (struct iscsi_data *) buf;
u32 payload_length = ntoh24(hdr->dlength);
/*
* Considering the target has no method of re-requesting DataOUT
 * by DataSN, if we receive a greater DataSN than expected we
* assume the functions for DataPDUInOrder=[Yes,No] below will
* handle it.
*
* If the DataSN is less than expected, dump the payload.
*/
if (conn->sess->sess_ops->DataSequenceInOrder)
data_sn = cmd->data_sn;
else {
struct iscsi_seq *seq = cmd->seq_ptr;
data_sn = seq->data_sn;
}
if (be32_to_cpu(hdr->datasn) > data_sn) {
pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
" higher than expected 0x%08x.\n", cmd->init_task_tag,
be32_to_cpu(hdr->datasn), data_sn);
goto recover;
} else if (be32_to_cpu(hdr->datasn) < data_sn) {
pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
" lower than expected 0x%08x, discarding payload.\n",
cmd->init_task_tag, be32_to_cpu(hdr->datasn), data_sn);
goto dump;
}
return DATAOUT_NORMAL;
recover:
if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
pr_err("Unable to perform within-command recovery"
" while ERL=0.\n");
return DATAOUT_CANNOT_RECOVER;
}
dump:
if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
return DATAOUT_CANNOT_RECOVER;
return DATAOUT_WITHIN_COMMAND_RECOVERY;
}
static int iscsit_dataout_pre_datapduinorder_yes(
struct iscsit_cmd *cmd,
unsigned char *buf)
{
int dump = 0, recovery = 0;
struct iscsit_conn *conn = cmd->conn;
struct iscsi_data *hdr = (struct iscsi_data *) buf;
u32 payload_length = ntoh24(hdr->dlength);
/*
* For DataSequenceInOrder=Yes: If the offset is greater than the global
 * DataPDUInOrder=Yes offset counter in struct iscsit_cmd, a protocol error has
* occurred and fail the connection.
*
* For DataSequenceInOrder=No: If the offset is greater than the per
* sequence DataPDUInOrder=Yes offset counter in struct iscsi_seq a protocol
* error has occurred and fail the connection.
*/
if (conn->sess->sess_ops->DataSequenceInOrder) {
if (be32_to_cpu(hdr->offset) != cmd->write_data_done) {
pr_err("Command ITT: 0x%08x, received offset"
" %u different than expected %u.\n", cmd->init_task_tag,
be32_to_cpu(hdr->offset), cmd->write_data_done);
recovery = 1;
goto recover;
}
} else {
struct iscsi_seq *seq = cmd->seq_ptr;
if (be32_to_cpu(hdr->offset) > seq->offset) {
pr_err("Command ITT: 0x%08x, received offset"
" %u greater than expected %u.\n", cmd->init_task_tag,
be32_to_cpu(hdr->offset), seq->offset);
recovery = 1;
goto recover;
} else if (be32_to_cpu(hdr->offset) < seq->offset) {
pr_err("Command ITT: 0x%08x, received offset"
" %u less than expected %u, discarding payload.\n",
cmd->init_task_tag, be32_to_cpu(hdr->offset),
seq->offset);
dump = 1;
goto dump;
}
}
return DATAOUT_NORMAL;
recover:
if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
pr_err("Unable to perform within-command recovery"
" while ERL=0.\n");
return DATAOUT_CANNOT_RECOVER;
}
dump:
if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
return DATAOUT_CANNOT_RECOVER;
return (recovery) ? iscsit_recover_dataout_sequence(cmd,
be32_to_cpu(hdr->offset), payload_length) :
(dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY : DATAOUT_NORMAL;
}
static int iscsit_dataout_pre_datapduinorder_no(
struct iscsit_cmd *cmd,
unsigned char *buf)
{
struct iscsi_pdu *pdu;
struct iscsi_data *hdr = (struct iscsi_data *) buf;
u32 payload_length = ntoh24(hdr->dlength);
pdu = iscsit_get_pdu_holder(cmd, be32_to_cpu(hdr->offset),
payload_length);
if (!pdu)
return DATAOUT_CANNOT_RECOVER;
cmd->pdu_ptr = pdu;
switch (pdu->status) {
case ISCSI_PDU_NOT_RECEIVED:
case ISCSI_PDU_CRC_FAILED:
case ISCSI_PDU_TIMED_OUT:
break;
case ISCSI_PDU_RECEIVED_OK:
pr_err("Command ITT: 0x%08x received already gotten"
" Offset: %u, Length: %u\n", cmd->init_task_tag,
be32_to_cpu(hdr->offset), payload_length);
return iscsit_dump_data_payload(cmd->conn, payload_length, 1);
default:
return DATAOUT_CANNOT_RECOVER;
}
return DATAOUT_NORMAL;
}
static int iscsit_dataout_update_r2t(struct iscsit_cmd *cmd, u32 offset, u32 length)
{
struct iscsi_r2t *r2t;
if (cmd->unsolicited_data)
return 0;
r2t = iscsit_get_r2t_for_eos(cmd, offset, length);
if (!r2t)
return -1;
spin_lock_bh(&cmd->r2t_lock);
r2t->seq_complete = 1;
cmd->outstanding_r2ts--;
spin_unlock_bh(&cmd->r2t_lock);
return 0;
}
static int iscsit_dataout_update_datapduinorder_no(
struct iscsit_cmd *cmd,
u32 data_sn,
int f_bit)
{
int ret = 0;
struct iscsi_pdu *pdu = cmd->pdu_ptr;
pdu->data_sn = data_sn;
switch (pdu->status) {
case ISCSI_PDU_NOT_RECEIVED:
pdu->status = ISCSI_PDU_RECEIVED_OK;
break;
case ISCSI_PDU_CRC_FAILED:
pdu->status = ISCSI_PDU_RECEIVED_OK;
break;
case ISCSI_PDU_TIMED_OUT:
pdu->status = ISCSI_PDU_RECEIVED_OK;
break;
default:
return DATAOUT_CANNOT_RECOVER;
}
if (f_bit) {
ret = iscsit_dataout_datapduinorder_no_fbit(cmd, pdu);
if (ret == DATAOUT_CANNOT_RECOVER)
return ret;
}
return DATAOUT_NORMAL;
}
static int iscsit_dataout_post_crc_passed(
struct iscsit_cmd *cmd,
unsigned char *buf)
{
int ret, send_r2t = 0;
struct iscsit_conn *conn = cmd->conn;
struct iscsi_seq *seq = NULL;
struct iscsi_data *hdr = (struct iscsi_data *) buf;
u32 payload_length = ntoh24(hdr->dlength);
if (cmd->unsolicited_data) {
if ((cmd->first_burst_len + payload_length) ==
conn->sess->sess_ops->FirstBurstLength) {
if (iscsit_dataout_update_r2t(cmd, be32_to_cpu(hdr->offset),
payload_length) < 0)
return DATAOUT_CANNOT_RECOVER;
send_r2t = 1;
}
if (!conn->sess->sess_ops->DataPDUInOrder) {
ret = iscsit_dataout_update_datapduinorder_no(cmd,
be32_to_cpu(hdr->datasn),
(hdr->flags & ISCSI_FLAG_CMD_FINAL));
if (ret == DATAOUT_CANNOT_RECOVER)
return ret;
}
cmd->first_burst_len += payload_length;
if (conn->sess->sess_ops->DataSequenceInOrder)
cmd->data_sn++;
else {
seq = cmd->seq_ptr;
seq->data_sn++;
seq->offset += payload_length;
}
if (send_r2t) {
if (seq)
seq->status = DATAOUT_SEQUENCE_COMPLETE;
cmd->first_burst_len = 0;
cmd->unsolicited_data = 0;
}
} else {
if (conn->sess->sess_ops->DataSequenceInOrder) {
if ((cmd->next_burst_len + payload_length) ==
conn->sess->sess_ops->MaxBurstLength) {
if (iscsit_dataout_update_r2t(cmd,
be32_to_cpu(hdr->offset),
payload_length) < 0)
return DATAOUT_CANNOT_RECOVER;
send_r2t = 1;
}
if (!conn->sess->sess_ops->DataPDUInOrder) {
ret = iscsit_dataout_update_datapduinorder_no(
cmd, be32_to_cpu(hdr->datasn),
(hdr->flags & ISCSI_FLAG_CMD_FINAL));
if (ret == DATAOUT_CANNOT_RECOVER)
return ret;
}
cmd->next_burst_len += payload_length;
cmd->data_sn++;
if (send_r2t)
cmd->next_burst_len = 0;
} else {
seq = cmd->seq_ptr;
if ((seq->next_burst_len + payload_length) ==
seq->xfer_len) {
if (iscsit_dataout_update_r2t(cmd,
be32_to_cpu(hdr->offset),
payload_length) < 0)
return DATAOUT_CANNOT_RECOVER;
send_r2t = 1;
}
if (!conn->sess->sess_ops->DataPDUInOrder) {
ret = iscsit_dataout_update_datapduinorder_no(
cmd, be32_to_cpu(hdr->datasn),
(hdr->flags & ISCSI_FLAG_CMD_FINAL));
if (ret == DATAOUT_CANNOT_RECOVER)
return ret;
}
seq->data_sn++;
seq->offset += payload_length;
seq->next_burst_len += payload_length;
if (send_r2t) {
seq->next_burst_len = 0;
seq->status = DATAOUT_SEQUENCE_COMPLETE;
}
}
}
if (send_r2t && conn->sess->sess_ops->DataSequenceInOrder)
cmd->data_sn = 0;
cmd->write_data_done += payload_length;
if (cmd->write_data_done == cmd->se_cmd.data_length)
return DATAOUT_SEND_TO_TRANSPORT;
else if (send_r2t)
return DATAOUT_SEND_R2T;
else
return DATAOUT_NORMAL;
}
static int iscsit_dataout_post_crc_failed(
struct iscsit_cmd *cmd,
unsigned char *buf)
{
struct iscsit_conn *conn = cmd->conn;
struct iscsi_pdu *pdu;
struct iscsi_data *hdr = (struct iscsi_data *) buf;
u32 payload_length = ntoh24(hdr->dlength);
if (conn->sess->sess_ops->DataPDUInOrder)
goto recover;
/*
* The rest of this function is only called when DataPDUInOrder=No.
*/
pdu = cmd->pdu_ptr;
switch (pdu->status) {
case ISCSI_PDU_NOT_RECEIVED:
pdu->status = ISCSI_PDU_CRC_FAILED;
break;
case ISCSI_PDU_CRC_FAILED:
break;
case ISCSI_PDU_TIMED_OUT:
pdu->status = ISCSI_PDU_CRC_FAILED;
break;
default:
return DATAOUT_CANNOT_RECOVER;
}
recover:
return iscsit_recover_dataout_sequence(cmd, be32_to_cpu(hdr->offset),
payload_length);
}
/*
* Called from iscsit_handle_data_out() before DataOUT Payload is received
* and CRC computed.
*/
int iscsit_check_pre_dataout(
struct iscsit_cmd *cmd,
unsigned char *buf)
{
int ret;
struct iscsit_conn *conn = cmd->conn;
ret = iscsit_dataout_within_command_recovery_check(cmd, buf);
if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
(ret == DATAOUT_CANNOT_RECOVER))
return ret;
ret = iscsit_dataout_check_datasn(cmd, buf);
if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
(ret == DATAOUT_CANNOT_RECOVER))
return ret;
if (cmd->unsolicited_data) {
ret = iscsit_dataout_check_unsolicited_sequence(cmd, buf);
if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
(ret == DATAOUT_CANNOT_RECOVER))
return ret;
} else {
ret = iscsit_dataout_check_sequence(cmd, buf);
if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
(ret == DATAOUT_CANNOT_RECOVER))
return ret;
}
return (conn->sess->sess_ops->DataPDUInOrder) ?
iscsit_dataout_pre_datapduinorder_yes(cmd, buf) :
iscsit_dataout_pre_datapduinorder_no(cmd, buf);
}
/*
* Called from iscsit_handle_data_out() after DataOUT Payload is received
* and CRC computed.
*/
int iscsit_check_post_dataout(
struct iscsit_cmd *cmd,
unsigned char *buf,
u8 data_crc_failed)
{
struct iscsit_conn *conn = cmd->conn;
cmd->dataout_timeout_retries = 0;
if (!data_crc_failed)
return iscsit_dataout_post_crc_passed(cmd, buf);
else {
if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
pr_err("Unable to recover from DataOUT CRC"
" failure while ERL=0, closing session.\n");
iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
buf);
return DATAOUT_CANNOT_RECOVER;
}
iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR, buf);
return iscsit_dataout_post_crc_failed(cmd, buf);
}
}
void iscsit_handle_time2retain_timeout(struct timer_list *t)
{
struct iscsit_session *sess = from_timer(sess, t, time2retain_timer);
struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
spin_lock_bh(&se_tpg->session_lock);
if (sess->time2retain_timer_flags & ISCSI_TF_STOP) {
spin_unlock_bh(&se_tpg->session_lock);
return;
}
if (atomic_read(&sess->session_reinstatement)) {
pr_err("Exiting Time2Retain handler because"
" session_reinstatement=1\n");
spin_unlock_bh(&se_tpg->session_lock);
return;
}
sess->time2retain_timer_flags |= ISCSI_TF_EXPIRED;
pr_err("Time2Retain timer expired for SID: %u, cleaning up"
" iSCSI session.\n", sess->sid);
iscsit_fill_cxn_timeout_err_stats(sess);
spin_unlock_bh(&se_tpg->session_lock);
iscsit_close_session(sess, false);
}
void iscsit_start_time2retain_handler(struct iscsit_session *sess)
{
int tpg_active;
/*
 * Only start the Time2Retain timer when the associated TPG is still in
 * an ACTIVE (i.e. not disabled or shutdown) state.
*/
spin_lock(&sess->tpg->tpg_state_lock);
tpg_active = (sess->tpg->tpg_state == TPG_STATE_ACTIVE);
spin_unlock(&sess->tpg->tpg_state_lock);
if (!tpg_active)
return;
if (sess->time2retain_timer_flags & ISCSI_TF_RUNNING)
return;
pr_debug("Starting Time2Retain timer for %u seconds on"
" SID: %u\n", sess->sess_ops->DefaultTime2Retain, sess->sid);
sess->time2retain_timer_flags &= ~ISCSI_TF_STOP;
sess->time2retain_timer_flags |= ISCSI_TF_RUNNING;
mod_timer(&sess->time2retain_timer,
jiffies + sess->sess_ops->DefaultTime2Retain * HZ);
}
int iscsit_stop_time2retain_timer(struct iscsit_session *sess)
{
struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
lockdep_assert_held(&se_tpg->session_lock);
if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)
return -1;
if (!(sess->time2retain_timer_flags & ISCSI_TF_RUNNING))
return 0;
sess->time2retain_timer_flags |= ISCSI_TF_STOP;
spin_unlock(&se_tpg->session_lock);
del_timer_sync(&sess->time2retain_timer);
spin_lock(&se_tpg->session_lock);
sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING;
pr_debug("Stopped Time2Retain Timer for SID: %u\n",
sess->sid);
return 0;
}
void iscsit_connection_reinstatement_rcfr(struct iscsit_conn *conn)
{
spin_lock_bh(&conn->state_lock);
if (atomic_read(&conn->connection_exit)) {
spin_unlock_bh(&conn->state_lock);
goto sleep;
}
if (atomic_read(&conn->transport_failed)) {
spin_unlock_bh(&conn->state_lock);
goto sleep;
}
spin_unlock_bh(&conn->state_lock);
if (conn->tx_thread && conn->tx_thread_active)
send_sig(SIGINT, conn->tx_thread, 1);
if (conn->rx_thread && conn->rx_thread_active)
send_sig(SIGINT, conn->rx_thread, 1);
sleep:
wait_for_completion(&conn->conn_wait_rcfr_comp);
complete(&conn->conn_post_wait_comp);
}
void iscsit_cause_connection_reinstatement(struct iscsit_conn *conn, int sleep)
{
spin_lock_bh(&conn->state_lock);
if (atomic_read(&conn->connection_exit)) {
spin_unlock_bh(&conn->state_lock);
return;
}
if (atomic_read(&conn->transport_failed)) {
spin_unlock_bh(&conn->state_lock);
return;
}
if (atomic_read(&conn->connection_reinstatement)) {
spin_unlock_bh(&conn->state_lock);
return;
}
if (conn->tx_thread && conn->tx_thread_active)
send_sig(SIGINT, conn->tx_thread, 1);
if (conn->rx_thread && conn->rx_thread_active)
send_sig(SIGINT, conn->rx_thread, 1);
atomic_set(&conn->connection_reinstatement, 1);
if (!sleep) {
spin_unlock_bh(&conn->state_lock);
return;
}
atomic_set(&conn->sleep_on_conn_wait_comp, 1);
spin_unlock_bh(&conn->state_lock);
wait_for_completion(&conn->conn_wait_comp);
complete(&conn->conn_post_wait_comp);
}
EXPORT_SYMBOL(iscsit_cause_connection_reinstatement);
void iscsit_fall_back_to_erl0(struct iscsit_session *sess)
{
pr_debug("Falling back to ErrorRecoveryLevel=0 for SID:"
" %u\n", sess->sid);
atomic_set(&sess->session_fall_back_to_erl0, 1);
}
static void iscsit_handle_connection_cleanup(struct iscsit_conn *conn)
{
struct iscsit_session *sess = conn->sess;
if ((sess->sess_ops->ErrorRecoveryLevel == 2) &&
!atomic_read(&sess->session_reinstatement) &&
!atomic_read(&sess->session_fall_back_to_erl0))
iscsit_connection_recovery_transport_reset(conn);
else {
pr_debug("Performing cleanup for failed iSCSI"
" Connection ID: %hu from %s\n", conn->cid,
sess->sess_ops->InitiatorName);
iscsit_close_connection(conn);
}
}
void iscsit_take_action_for_connection_exit(struct iscsit_conn *conn, bool *conn_freed)
{
*conn_freed = false;
spin_lock_bh(&conn->state_lock);
if (atomic_read(&conn->connection_exit)) {
spin_unlock_bh(&conn->state_lock);
return;
}
atomic_set(&conn->connection_exit, 1);
if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
spin_unlock_bh(&conn->state_lock);
iscsit_close_connection(conn);
*conn_freed = true;
return;
}
if (conn->conn_state == TARG_CONN_STATE_CLEANUP_WAIT) {
spin_unlock_bh(&conn->state_lock);
return;
}
pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
spin_unlock_bh(&conn->state_lock);
iscsit_handle_connection_cleanup(conn);
*conn_freed = true;
}
|
linux-master
|
drivers/target/iscsi/iscsi_target_erl0.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file houses the main functions for the iSCSI CHAP support
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_nego.h"
#include "iscsi_target_auth.h"
static char *chap_get_digest_name(const int digest_type)
{
switch (digest_type) {
case CHAP_DIGEST_MD5:
return "md5";
case CHAP_DIGEST_SHA1:
return "sha1";
case CHAP_DIGEST_SHA256:
return "sha256";
case CHAP_DIGEST_SHA3_256:
return "sha3-256";
default:
return NULL;
}
}
static int chap_gen_challenge(
struct iscsit_conn *conn,
int caller,
char *c_str,
unsigned int *c_len)
{
int ret;
unsigned char *challenge_asciihex;
struct iscsi_chap *chap = conn->auth_protocol;
challenge_asciihex = kzalloc(chap->challenge_len * 2 + 1, GFP_KERNEL);
if (!challenge_asciihex)
return -ENOMEM;
memset(chap->challenge, 0, MAX_CHAP_CHALLENGE_LEN);
ret = get_random_bytes_wait(chap->challenge, chap->challenge_len);
if (unlikely(ret))
goto out;
bin2hex(challenge_asciihex, chap->challenge,
chap->challenge_len);
/*
* Set CHAP_C, and copy the generated challenge into c_str.
*/
*c_len += sprintf(c_str + *c_len, "CHAP_C=0x%s", challenge_asciihex);
*c_len += 1;
pr_debug("[%s] Sending CHAP_C=0x%s\n\n", (caller) ? "server" : "client",
challenge_asciihex);
out:
kfree(challenge_asciihex);
return ret;
}
static int chap_test_algorithm(const char *name)
{
struct crypto_shash *tfm;
tfm = crypto_alloc_shash(name, 0, 0);
if (IS_ERR(tfm))
return -1;
crypto_free_shash(tfm);
return 0;
}
static int chap_check_algorithm(const char *a_str)
{
char *tmp, *orig, *token, *digest_name;
long digest_type;
int r = CHAP_DIGEST_UNKNOWN;
tmp = kstrdup(a_str, GFP_KERNEL);
if (!tmp) {
pr_err("Memory allocation failed for CHAP_A temporary buffer\n");
return CHAP_DIGEST_UNKNOWN;
}
orig = tmp;
token = strsep(&tmp, "=");
if (!token)
goto out;
if (strcmp(token, "CHAP_A")) {
pr_err("Unable to locate CHAP_A key\n");
goto out;
}
while (token) {
token = strsep(&tmp, ",");
if (!token)
goto out;
if (kstrtol(token, 10, &digest_type))
continue;
digest_name = chap_get_digest_name(digest_type);
if (!digest_name)
continue;
pr_debug("Selected %s Algorithm\n", digest_name);
if (chap_test_algorithm(digest_name) < 0) {
pr_err("failed to allocate %s algo\n", digest_name);
} else {
r = digest_type;
goto out;
}
}
out:
kfree(orig);
return r;
}
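/*
 * Editor's note (illustration only): the initiator's CHAP_A value is a
 * comma-separated preference list, e.g. "CHAP_A=5,6,7" (MD5, SHA1 and SHA256
 * respectively, assuming the usual CHAP_DIGEST_* numbering).
 * chap_check_algorithm() walks the list left to right and returns the first
 * algorithm whose digest can actually be allocated via chap_test_algorithm(),
 * or CHAP_DIGEST_UNKNOWN if none can.
 */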
static void chap_close(struct iscsit_conn *conn)
{
kfree(conn->auth_protocol);
conn->auth_protocol = NULL;
}
static struct iscsi_chap *chap_server_open(
struct iscsit_conn *conn,
struct iscsi_node_auth *auth,
const char *a_str,
char *aic_str,
unsigned int *aic_len)
{
int digest_type;
struct iscsi_chap *chap;
if (!(auth->naf_flags & NAF_USERID_SET) ||
!(auth->naf_flags & NAF_PASSWORD_SET)) {
pr_err("CHAP user or password not set for"
" Initiator ACL\n");
return NULL;
}
conn->auth_protocol = kzalloc(sizeof(struct iscsi_chap), GFP_KERNEL);
if (!conn->auth_protocol)
return NULL;
chap = conn->auth_protocol;
digest_type = chap_check_algorithm(a_str);
switch (digest_type) {
case CHAP_DIGEST_MD5:
chap->digest_size = MD5_SIGNATURE_SIZE;
break;
case CHAP_DIGEST_SHA1:
chap->digest_size = SHA1_SIGNATURE_SIZE;
break;
case CHAP_DIGEST_SHA256:
chap->digest_size = SHA256_SIGNATURE_SIZE;
break;
case CHAP_DIGEST_SHA3_256:
chap->digest_size = SHA3_256_SIGNATURE_SIZE;
break;
case CHAP_DIGEST_UNKNOWN:
default:
pr_err("Unsupported CHAP_A value\n");
chap_close(conn);
return NULL;
}
chap->digest_name = chap_get_digest_name(digest_type);
/* Tie the challenge length to the digest size */
chap->challenge_len = chap->digest_size;
pr_debug("[server] Got CHAP_A=%d\n", digest_type);
*aic_len = sprintf(aic_str, "CHAP_A=%d", digest_type);
*aic_len += 1;
pr_debug("[server] Sending CHAP_A=%d\n", digest_type);
/*
* Set Identifier.
*/
chap->id = conn->tpg->tpg_chap_id++;
*aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id);
*aic_len += 1;
pr_debug("[server] Sending CHAP_I=%d\n", chap->id);
/*
* Generate Challenge.
*/
if (chap_gen_challenge(conn, 1, aic_str, aic_len) < 0) {
chap_close(conn);
return NULL;
}
return chap;
}
static const char base64_lookup_table[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
static int chap_base64_decode(u8 *dst, const char *src, size_t len)
{
int i, bits = 0, ac = 0;
const char *p;
u8 *cp = dst;
for (i = 0; i < len; i++) {
if (src[i] == '=')
return cp - dst;
p = strchr(base64_lookup_table, src[i]);
if (p == NULL || src[i] == 0)
return -2;
ac <<= 6;
ac += (p - base64_lookup_table);
bits += 6;
if (bits >= 8) {
*cp++ = (ac >> (bits - 8)) & 0xff;
ac &= ~(BIT(16) - BIT(bits - 8));
bits -= 8;
}
}
if (ac)
return -1;
return cp - dst;
}
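/*
 * Editor's sketch (not part of the original driver): the BASE64 branches
 * below use chap_base64_decode() as shown here - decode the ASCII value into
 * a scratch buffer and require the decoded length to match the expected
 * digest size.  The function and variable names are hypothetical.
 */
static int __maybe_unused example_decode_chap_r_base64(struct iscsi_chap *chap,
							const char *chap_r,
							u8 *digest_out)
{
	int decoded;

	decoded = chap_base64_decode(digest_out, chap_r, strlen(chap_r));
	if (decoded != chap->digest_size) {
		pr_err("Malformed BASE64 CHAP_R (decoded %d bytes)\n", decoded);
		return -EINVAL;
	}
	return 0;
}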
static int chap_server_compute_hash(
struct iscsit_conn *conn,
struct iscsi_node_auth *auth,
char *nr_in_ptr,
char *nr_out_ptr,
unsigned int *nr_out_len)
{
unsigned long id;
unsigned char id_as_uchar;
unsigned char type;
unsigned char identifier[10], *initiatorchg = NULL;
unsigned char *initiatorchg_binhex = NULL;
unsigned char *digest = NULL;
unsigned char *response = NULL;
unsigned char *client_digest = NULL;
unsigned char *server_digest = NULL;
unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
size_t compare_len;
struct iscsi_chap *chap = conn->auth_protocol;
struct crypto_shash *tfm = NULL;
struct shash_desc *desc = NULL;
int auth_ret = -1, ret, initiatorchg_len;
digest = kzalloc(chap->digest_size, GFP_KERNEL);
if (!digest) {
pr_err("Unable to allocate the digest buffer\n");
goto out;
}
response = kzalloc(chap->digest_size * 2 + 2, GFP_KERNEL);
if (!response) {
pr_err("Unable to allocate the response buffer\n");
goto out;
}
client_digest = kzalloc(chap->digest_size, GFP_KERNEL);
if (!client_digest) {
pr_err("Unable to allocate the client_digest buffer\n");
goto out;
}
server_digest = kzalloc(chap->digest_size, GFP_KERNEL);
if (!server_digest) {
pr_err("Unable to allocate the server_digest buffer\n");
goto out;
}
memset(identifier, 0, 10);
memset(chap_n, 0, MAX_CHAP_N_SIZE);
memset(chap_r, 0, MAX_RESPONSE_LENGTH);
initiatorchg = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
if (!initiatorchg) {
pr_err("Unable to allocate challenge buffer\n");
goto out;
}
initiatorchg_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
if (!initiatorchg_binhex) {
pr_err("Unable to allocate initiatorchg_binhex buffer\n");
goto out;
}
/*
* Extract CHAP_N.
*/
if (extract_param(nr_in_ptr, "CHAP_N", MAX_CHAP_N_SIZE, chap_n,
&type) < 0) {
pr_err("Could not find CHAP_N.\n");
goto out;
}
if (type == HEX) {
pr_err("Could not find CHAP_N.\n");
goto out;
}
/* Include the terminating NULL in the compare */
compare_len = strlen(auth->userid) + 1;
if (strncmp(chap_n, auth->userid, compare_len) != 0) {
pr_err("CHAP_N values do not match!\n");
goto out;
}
pr_debug("[server] Got CHAP_N=%s\n", chap_n);
/*
* Extract CHAP_R.
*/
if (extract_param(nr_in_ptr, "CHAP_R", MAX_RESPONSE_LENGTH, chap_r,
&type) < 0) {
pr_err("Could not find CHAP_R.\n");
goto out;
}
switch (type) {
case HEX:
if (strlen(chap_r) != chap->digest_size * 2) {
pr_err("Malformed CHAP_R\n");
goto out;
}
if (hex2bin(client_digest, chap_r, chap->digest_size) < 0) {
pr_err("Malformed CHAP_R: invalid HEX\n");
goto out;
}
break;
case BASE64:
if (chap_base64_decode(client_digest, chap_r, strlen(chap_r)) !=
chap->digest_size) {
pr_err("Malformed CHAP_R: invalid BASE64\n");
goto out;
}
break;
default:
pr_err("Could not find CHAP_R\n");
goto out;
}
pr_debug("[server] Got CHAP_R=%s\n", chap_r);
tfm = crypto_alloc_shash(chap->digest_name, 0, 0);
if (IS_ERR(tfm)) {
tfm = NULL;
pr_err("Unable to allocate struct crypto_shash\n");
goto out;
}
desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
if (!desc) {
pr_err("Unable to allocate struct shash_desc\n");
goto out;
}
desc->tfm = tfm;
ret = crypto_shash_init(desc);
if (ret < 0) {
pr_err("crypto_shash_init() failed\n");
goto out;
}
ret = crypto_shash_update(desc, &chap->id, 1);
if (ret < 0) {
pr_err("crypto_shash_update() failed for id\n");
goto out;
}
ret = crypto_shash_update(desc, (char *)&auth->password,
strlen(auth->password));
if (ret < 0) {
pr_err("crypto_shash_update() failed for password\n");
goto out;
}
ret = crypto_shash_finup(desc, chap->challenge,
chap->challenge_len, server_digest);
if (ret < 0) {
pr_err("crypto_shash_finup() failed for challenge\n");
goto out;
}
bin2hex(response, server_digest, chap->digest_size);
pr_debug("[server] %s Server Digest: %s\n",
chap->digest_name, response);
if (memcmp(server_digest, client_digest, chap->digest_size) != 0) {
pr_debug("[server] %s Digests do not match!\n\n",
chap->digest_name);
goto out;
} else
pr_debug("[server] %s Digests match, CHAP connection"
" successful.\n\n", chap->digest_name);
/*
* One way authentication has succeeded, return now if mutual
* authentication is not enabled.
*/
if (!auth->authenticate_target) {
auth_ret = 0;
goto out;
}
/*
* Get CHAP_I.
*/
ret = extract_param(nr_in_ptr, "CHAP_I", 10, identifier, &type);
if (ret == -ENOENT) {
pr_debug("Could not find CHAP_I. Initiator uses One way authentication.\n");
auth_ret = 0;
goto out;
}
if (ret < 0) {
pr_err("Could not find CHAP_I.\n");
goto out;
}
if (type == HEX)
ret = kstrtoul(&identifier[2], 0, &id);
else
ret = kstrtoul(identifier, 0, &id);
if (ret < 0) {
pr_err("kstrtoul() failed for CHAP identifier: %d\n", ret);
goto out;
}
if (id > 255) {
pr_err("chap identifier: %lu greater than 255\n", id);
goto out;
}
/*
 * RFC 1994 says the Identifier is no more than an octet (8 bits).
*/
pr_debug("[server] Got CHAP_I=%lu\n", id);
/*
* Get CHAP_C.
*/
if (extract_param(nr_in_ptr, "CHAP_C", CHAP_CHALLENGE_STR_LEN,
initiatorchg, &type) < 0) {
pr_err("Could not find CHAP_C.\n");
goto out;
}
switch (type) {
case HEX:
initiatorchg_len = DIV_ROUND_UP(strlen(initiatorchg), 2);
if (!initiatorchg_len) {
pr_err("Unable to convert incoming challenge\n");
goto out;
}
if (initiatorchg_len > 1024) {
pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
goto out;
}
if (hex2bin(initiatorchg_binhex, initiatorchg,
initiatorchg_len) < 0) {
pr_err("Malformed CHAP_C: invalid HEX\n");
goto out;
}
break;
case BASE64:
initiatorchg_len = chap_base64_decode(initiatorchg_binhex,
initiatorchg,
strlen(initiatorchg));
if (initiatorchg_len < 0) {
pr_err("Malformed CHAP_C: invalid BASE64\n");
goto out;
}
if (!initiatorchg_len) {
pr_err("Unable to convert incoming challenge\n");
goto out;
}
if (initiatorchg_len > 1024) {
pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
goto out;
}
break;
default:
pr_err("Could not find CHAP_C.\n");
goto out;
}
pr_debug("[server] Got CHAP_C=%s\n", initiatorchg);
/*
* During mutual authentication, the CHAP_C generated by the
* initiator must not match the original CHAP_C generated by
* the target.
*/
if (initiatorchg_len == chap->challenge_len &&
!memcmp(initiatorchg_binhex, chap->challenge,
initiatorchg_len)) {
pr_err("initiator CHAP_C matches target CHAP_C, failing"
" login attempt\n");
goto out;
}
/*
* Generate CHAP_N and CHAP_R for mutual authentication.
*/
ret = crypto_shash_init(desc);
if (ret < 0) {
pr_err("crypto_shash_init() failed\n");
goto out;
}
/* To handle both endiannesses */
id_as_uchar = id;
ret = crypto_shash_update(desc, &id_as_uchar, 1);
if (ret < 0) {
pr_err("crypto_shash_update() failed for id\n");
goto out;
}
ret = crypto_shash_update(desc, auth->password_mutual,
strlen(auth->password_mutual));
if (ret < 0) {
pr_err("crypto_shash_update() failed for"
" password_mutual\n");
goto out;
}
/*
* Finalize the digest over the initiator's binary challenge.
*/
ret = crypto_shash_finup(desc, initiatorchg_binhex, initiatorchg_len,
digest);
if (ret < 0) {
pr_err("crypto_shash_finup() failed for ma challenge\n");
goto out;
}
/*
* Generate CHAP_N and CHAP_R.
*/
*nr_out_len = sprintf(nr_out_ptr, "CHAP_N=%s", auth->userid_mutual);
*nr_out_len += 1;
pr_debug("[server] Sending CHAP_N=%s\n", auth->userid_mutual);
/*
* Convert the response digest from binary to ASCII hex.
*/
bin2hex(response, digest, chap->digest_size);
*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
response);
*nr_out_len += 1;
pr_debug("[server] Sending CHAP_R=0x%s\n", response);
auth_ret = 0;
out:
kfree_sensitive(desc);
if (tfm)
crypto_free_shash(tfm);
kfree(initiatorchg);
kfree(initiatorchg_binhex);
kfree(digest);
kfree(response);
kfree(server_digest);
kfree(client_digest);
return auth_ret;
}
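/*
 * CHAP state machine entry point called from the security negotiation
 * code.  Returns 0 to continue the exchange, 1 when one-way (and, if
 * enabled, mutual) authentication has succeeded, and 2 on any failure.
 */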
u32 chap_main_loop(
struct iscsit_conn *conn,
struct iscsi_node_auth *auth,
char *in_text,
char *out_text,
int *in_len,
int *out_len)
{
struct iscsi_chap *chap = conn->auth_protocol;
if (!chap) {
chap = chap_server_open(conn, auth, in_text, out_text, out_len);
if (!chap)
return 2;
chap->chap_state = CHAP_STAGE_SERVER_AIC;
return 0;
} else if (chap->chap_state == CHAP_STAGE_SERVER_AIC) {
convert_null_to_semi(in_text, *in_len);
if (chap_server_compute_hash(conn, auth, in_text, out_text,
out_len) < 0) {
chap_close(conn);
return 2;
}
if (auth->authenticate_target)
chap->chap_state = CHAP_STAGE_SERVER_NR;
else
*out_len = 0;
chap_close(conn);
return 1;
}
return 2;
}
|
linux-master
|
drivers/target/iscsi/iscsi_target_auth.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains the login functions used by the iSCSI Target driver.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <crypto/hash.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/idr.h>
#include <linux/tcp.h> /* TCP_NODELAY */
#include <net/ip.h>
#include <net/ipv6.h> /* ipv6_addr_v4mapped() */
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_target_core.h>
#include <target/iscsi/iscsi_target_stat.h>
#include "iscsi_target_device.h"
#include "iscsi_target_nego.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_login.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
#include "iscsi_target_parameters.h"
#include <target/iscsi/iscsi_transport.h>
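/*
 * Allocate the struct iscsi_login along with its request and response
 * key=value buffers for a new connection, and link it to the conn.
 */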
static struct iscsi_login *iscsi_login_init_conn(struct iscsit_conn *conn)
{
struct iscsi_login *login;
login = kzalloc(sizeof(struct iscsi_login), GFP_KERNEL);
if (!login) {
pr_err("Unable to allocate memory for struct iscsi_login.\n");
return NULL;
}
conn->login = login;
login->conn = conn;
login->first_request = 1;
login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
if (!login->req_buf) {
pr_err("Unable to allocate memory for response buffer.\n");
goto out_login;
}
login->rsp_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
if (!login->rsp_buf) {
pr_err("Unable to allocate memory for request buffer.\n");
goto out_req_buf;
}
conn->conn_login = login;
return login;
out_req_buf:
kfree(login->req_buf);
out_login:
kfree(login);
return NULL;
}
/*
* Used by iscsi_target_nego.c:iscsi_target_locate_portal() to setup
* per struct iscsit_conn libcrypto contexts for crc32c and crc32-intel
*/
int iscsi_login_setup_crypto(struct iscsit_conn *conn)
{
struct crypto_ahash *tfm;
/*
* Set up the CRC32C algorithm for the RX and TX libcrypto contexts,
* which will default to crc32c_intel.ko on cpu_has_xmm4_2, or fall back
* to software 1x8-byte slicing from crc32c.ko
*/
tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm)) {
pr_err("crypto_alloc_ahash() failed\n");
return -ENOMEM;
}
conn->conn_rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
if (!conn->conn_rx_hash) {
pr_err("ahash_request_alloc() failed for conn_rx_hash\n");
crypto_free_ahash(tfm);
return -ENOMEM;
}
ahash_request_set_callback(conn->conn_rx_hash, 0, NULL, NULL);
conn->conn_tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
if (!conn->conn_tx_hash) {
pr_err("ahash_request_alloc() failed for conn_tx_hash\n");
ahash_request_free(conn->conn_rx_hash);
conn->conn_rx_hash = NULL;
crypto_free_ahash(tfm);
return -ENOMEM;
}
ahash_request_set_callback(conn->conn_tx_hash, 0, NULL, NULL);
return 0;
}
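/*
 * Reject initiators advertising a pre-RFC iSCSI protocol revision;
 * only VersionMin/VersionMax of 0x00 is accepted.
 */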
static int iscsi_login_check_initiator_version(
struct iscsit_conn *conn,
u8 version_max,
u8 version_min)
{
if ((version_max != 0x00) || (version_min != 0x00)) {
pr_err("Unsupported iSCSI IETF Pre-RFC Revision,"
" version Min/Max 0x%02x/0x%02x, rejecting login.\n",
version_min, version_max);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_NO_VERSION);
return -1;
}
return 0;
}
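/*
 * Scan the TPG session list for an active session with the same ISID,
 * InitiatorName and SessionType.  If one exists, mark it for
 * reinstatement and stop it before the new leading connection proceeds.
 */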
int iscsi_check_for_session_reinstatement(struct iscsit_conn *conn)
{
int sessiontype;
struct iscsi_param *initiatorname_param = NULL, *sessiontype_param = NULL;
struct iscsi_portal_group *tpg = conn->tpg;
struct iscsit_session *sess = NULL, *sess_p = NULL;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
struct se_session *se_sess, *se_sess_tmp;
initiatorname_param = iscsi_find_param_from_key(
INITIATORNAME, conn->param_list);
sessiontype_param = iscsi_find_param_from_key(
SESSIONTYPE, conn->param_list);
if (!initiatorname_param || !sessiontype_param) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_MISSING_FIELDS);
return -1;
}
sessiontype = (strncmp(sessiontype_param->value, NORMAL, 6)) ? 1 : 0;
spin_lock_bh(&se_tpg->session_lock);
list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
sess_list) {
sess_p = se_sess->fabric_sess_ptr;
spin_lock(&sess_p->conn_lock);
if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
atomic_read(&sess_p->session_logout) ||
atomic_read(&sess_p->session_close) ||
(sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess_p->conn_lock);
continue;
}
if (!memcmp(sess_p->isid, conn->sess->isid, 6) &&
(!strcmp(sess_p->sess_ops->InitiatorName,
initiatorname_param->value) &&
(sess_p->sess_ops->SessionType == sessiontype))) {
atomic_set(&sess_p->session_reinstatement, 1);
atomic_set(&sess_p->session_fall_back_to_erl0, 1);
atomic_set(&sess_p->session_close, 1);
spin_unlock(&sess_p->conn_lock);
iscsit_inc_session_usage_count(sess_p);
iscsit_stop_time2retain_timer(sess_p);
sess = sess_p;
break;
}
spin_unlock(&sess_p->conn_lock);
}
spin_unlock_bh(&se_tpg->session_lock);
/*
* If the Time2Retain handler has expired, the session is already gone.
*/
if (!sess)
return 0;
pr_debug("%s iSCSI Session SID %u is still active for %s,"
" performing session reinstatement.\n", (sessiontype) ?
"Discovery" : "Normal", sess->sid,
sess->sess_ops->InitiatorName);
spin_lock_bh(&sess->conn_lock);
if (sess->session_state == TARG_SESS_STATE_FAILED) {
spin_unlock_bh(&sess->conn_lock);
iscsit_dec_session_usage_count(sess);
return 0;
}
spin_unlock_bh(&sess->conn_lock);
iscsit_stop_session(sess, 1, 1);
iscsit_dec_session_usage_count(sess);
return 0;
}
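/*
 * Bind the connection to its session, take the CID from the login PDU,
 * seed a random StatSN and assign a global authentication ID.
 */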
static int iscsi_login_set_conn_values(
struct iscsit_session *sess,
struct iscsit_conn *conn,
__be16 cid)
{
int ret;
conn->sess = sess;
conn->cid = be16_to_cpu(cid);
/*
* Generate a random Status sequence number (statsn) for the new
* iSCSI connection.
*/
ret = get_random_bytes_wait(&conn->stat_sn, sizeof(u32));
if (unlikely(ret))
return ret;
mutex_lock(&auth_id_lock);
conn->auth_id = iscsit_global->auth_id++;
mutex_unlock(&auth_id_lock);
return 0;
}
__printf(2, 3) int iscsi_change_param_sprintf(
struct iscsit_conn *conn,
const char *fmt, ...)
{
va_list args;
unsigned char buf[64];
memset(buf, 0, sizeof buf);
va_start(args, fmt);
vsnprintf(buf, sizeof buf, fmt, args);
va_end(args);
if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
return -1;
}
return 0;
}
EXPORT_SYMBOL(iscsi_change_param_sprintf);
/*
* This is the leading connection of a new session,
* or session reinstatement.
*/
static int iscsi_login_zero_tsih_s1(
struct iscsit_conn *conn,
unsigned char *buf)
{
struct iscsit_session *sess = NULL;
struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
int ret;
sess = kzalloc(sizeof(struct iscsit_session), GFP_KERNEL);
if (!sess) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Could not allocate memory for session\n");
return -ENOMEM;
}
if (iscsi_login_set_conn_values(sess, conn, pdu->cid))
goto free_sess;
sess->init_task_tag = pdu->itt;
memcpy(&sess->isid, pdu->isid, 6);
sess->exp_cmd_sn = be32_to_cpu(pdu->cmdsn);
INIT_LIST_HEAD(&sess->sess_conn_list);
INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list);
INIT_LIST_HEAD(&sess->cr_active_list);
INIT_LIST_HEAD(&sess->cr_inactive_list);
init_completion(&sess->async_msg_comp);
init_completion(&sess->reinstatement_comp);
init_completion(&sess->session_wait_comp);
init_completion(&sess->session_waiting_on_uc_comp);
mutex_init(&sess->cmdsn_mutex);
spin_lock_init(&sess->conn_lock);
spin_lock_init(&sess->cr_a_lock);
spin_lock_init(&sess->cr_i_lock);
spin_lock_init(&sess->session_usage_lock);
spin_lock_init(&sess->ttt_lock);
timer_setup(&sess->time2retain_timer,
iscsit_handle_time2retain_timeout, 0);
ret = ida_alloc(&sess_ida, GFP_KERNEL);
if (ret < 0) {
pr_err("Session ID allocation failed %d\n", ret);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
goto free_sess;
}
sess->session_index = ret;
sess->creation_time = get_jiffies_64();
/*
* The FFP CmdSN window values will be allocated from the TPG's
* Initiator Node's ACL once the login has been successfully completed.
*/
atomic_set(&sess->max_cmd_sn, be32_to_cpu(pdu->cmdsn));
sess->sess_ops = kzalloc(sizeof(struct iscsi_sess_ops), GFP_KERNEL);
if (!sess->sess_ops) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Unable to allocate memory for"
" struct iscsi_sess_ops.\n");
goto free_id;
}
sess->se_sess = transport_alloc_session(TARGET_PROT_NORMAL);
if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
goto free_ops;
}
return 0;
free_ops:
kfree(sess->sess_ops);
free_id:
ida_free(&sess_ida, sess->session_index);
free_sess:
kfree(sess);
conn->sess = NULL;
return -ENOMEM;
}
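/*
 * Second stage for a leading (TSIH=0) connection: assign a new TSIH,
 * build the list of keys to negotiate and apply the discovery and
 * iSER specific parameter adjustments.
 */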
static int iscsi_login_zero_tsih_s2(
struct iscsit_conn *conn)
{
struct iscsi_node_attrib *na;
struct iscsit_session *sess = conn->sess;
struct iscsi_param *param;
bool iser = false;
sess->tpg = conn->tpg;
/*
* Assign a new TPG Session Handle. Note this is protected with
* struct iscsi_portal_group->np_login_sem from iscsit_access_np().
*/
sess->tsih = ++sess->tpg->ntsih;
if (!sess->tsih)
sess->tsih = ++sess->tpg->ntsih;
/*
* Create the default params from user-defined values.
*/
if (iscsi_copy_param_list(&conn->param_list,
conn->tpg->param_list, 1) < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
return -1;
}
if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
iser = true;
iscsi_set_keys_to_negotiate(conn->param_list, iser);
if (sess->sess_ops->SessionType)
return iscsi_set_keys_irrelevant_for_discovery(
conn->param_list);
na = iscsit_tpg_get_node_attrib(sess);
/*
* If the ACL allows non-authorized access in a TPG with CHAP,
* then set AuthMethod to None.
*/
param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
if (param && !strstr(param->value, NONE)) {
if (!iscsi_conn_auth_required(conn))
if (iscsi_change_param_sprintf(conn, "AuthMethod=%s",
NONE))
return -1;
}
/*
* Need to send TargetPortalGroupTag back in first login response
* on any iSCSI connection where the Initiator provides TargetName.
* See 5.3.1. Login Phase Start
*
* In our case, we have already located the struct iscsi_tiqn at this point.
*/
if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt))
return -1;
/*
* Workaround for Initiators that have broken connection recovery logic.
*
* "We would really like to get rid of this." Linux-iSCSI.org team
*/
if (iscsi_change_param_sprintf(conn, "ErrorRecoveryLevel=%d", na->default_erl))
return -1;
/*
* Set RDMAExtensions=Yes by default for iSER enabled network portals
*/
if (iser) {
struct iscsi_param *param;
unsigned long mrdsl, off;
int rc;
if (iscsi_change_param_sprintf(conn, "RDMAExtensions=Yes"))
return -1;
/*
* Make MaxRecvDataSegmentLength PAGE_SIZE aligned for
* Immediate Data + Unsolicited Data-OUT if necessary.
*/
param = iscsi_find_param_from_key("MaxRecvDataSegmentLength",
conn->param_list);
if (!param) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
return -1;
}
rc = kstrtoul(param->value, 0, &mrdsl);
if (rc < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
return -1;
}
off = mrdsl % PAGE_SIZE;
if (!off)
goto check_prot;
if (mrdsl < PAGE_SIZE)
mrdsl = PAGE_SIZE;
else
mrdsl -= off;
pr_warn("Aligning ISER MaxRecvDataSegmentLength: %lu down"
" to PAGE_SIZE\n", mrdsl);
if (iscsi_change_param_sprintf(conn, "MaxRecvDataSegmentLength=%lu\n", mrdsl))
return -1;
/*
* ISER currently requires that ImmediateData + Unsolicited
* Data be disabled when protection / signature MRs are enabled.
*/
check_prot:
if (sess->se_sess->sup_prot_ops &
(TARGET_PROT_DOUT_STRIP | TARGET_PROT_DOUT_PASS |
TARGET_PROT_DOUT_INSERT)) {
if (iscsi_change_param_sprintf(conn, "ImmediateData=No"))
return -1;
if (iscsi_change_param_sprintf(conn, "InitialR2T=Yes"))
return -1;
pr_debug("Forcing ImmediateData=No + InitialR2T=Yes for"
" T10-PI enabled ISER session\n");
}
}
return 0;
}
static int iscsi_login_non_zero_tsih_s1(
struct iscsit_conn *conn,
unsigned char *buf)
{
struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
return iscsi_login_set_conn_values(NULL, conn, pdu->cid);
}
/*
* Add a new connection to an existing session.
*/
static int iscsi_login_non_zero_tsih_s2(
struct iscsit_conn *conn,
unsigned char *buf)
{
struct iscsi_portal_group *tpg = conn->tpg;
struct iscsit_session *sess = NULL, *sess_p = NULL;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
struct se_session *se_sess, *se_sess_tmp;
struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
bool iser = false;
spin_lock_bh(&se_tpg->session_lock);
list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
sess_list) {
sess_p = (struct iscsit_session *)se_sess->fabric_sess_ptr;
if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
atomic_read(&sess_p->session_logout) ||
atomic_read(&sess_p->session_close) ||
(sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
continue;
if (!memcmp(sess_p->isid, pdu->isid, 6) &&
(sess_p->tsih == be16_to_cpu(pdu->tsih))) {
iscsit_inc_session_usage_count(sess_p);
iscsit_stop_time2retain_timer(sess_p);
sess = sess_p;
break;
}
}
spin_unlock_bh(&se_tpg->session_lock);
/*
* If the Time2Retain handler has expired, the session is already gone.
*/
if (!sess) {
pr_err("Initiator attempting to add a connection to"
" a non-existent session, rejecting iSCSI Login.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_NO_SESSION);
return -1;
}
/*
* Stop the Time2Retain timer if this is a failed session; we restart
* the timer if the login is not successful.
*/
spin_lock_bh(&sess->conn_lock);
if (sess->session_state == TARG_SESS_STATE_FAILED)
atomic_set(&sess->session_continuation, 1);
spin_unlock_bh(&sess->conn_lock);
if (iscsi_login_set_conn_values(sess, conn, pdu->cid) < 0 ||
iscsi_copy_param_list(&conn->param_list,
conn->tpg->param_list, 0) < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
return -1;
}
if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
iser = true;
iscsi_set_keys_to_negotiate(conn->param_list, iser);
/*
* Need to send TargetPortalGroupTag back in first login response
* on any iSCSI connection where the Initiator provides TargetName.
* See 5.3.1. Login Phase Start
*
* In our case, we have already located the struct iscsi_tiqn at this point.
*/
if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt))
return -1;
return 0;
}
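/*
 * Post-authentication handling for a non-leading connection: perform
 * connection reinstatement for an existing CID, discard ERL=2 recovery
 * commands acknowledged by ExpStatSN and enforce MaxConnections.
 */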
int iscsi_login_post_auth_non_zero_tsih(
struct iscsit_conn *conn,
u16 cid,
u32 exp_statsn)
{
struct iscsit_conn *conn_ptr = NULL;
struct iscsi_conn_recovery *cr = NULL;
struct iscsit_session *sess = conn->sess;
/*
* By following item 5 in the login table, if we have found
* an existing ISID and a valid/existing TSIH and an existing
* CID we do connection reinstatement. Currently we do not
* support it, so we send back a non-zero status class to the
* initiator and release the new connection.
*/
conn_ptr = iscsit_get_conn_from_cid_rcfr(sess, cid);
if (conn_ptr) {
pr_err("Connection exists with CID %hu for %s,"
" performing connection reinstatement.\n",
conn_ptr->cid, sess->sess_ops->InitiatorName);
iscsit_connection_reinstatement_rcfr(conn_ptr);
iscsit_dec_conn_usage_count(conn_ptr);
}
/*
* Check for any connection recovery entries containing CID.
* We use the original ExpStatSN sent in the first login request
* to acknowledge commands for the failed connection.
*
* Also note that an explicit logout may have already been sent,
* but the response may not be sent due to additional connection
* loss.
*/
if (sess->sess_ops->ErrorRecoveryLevel == 2) {
cr = iscsit_get_inactive_connection_recovery_entry(
sess, cid);
if (cr) {
pr_debug("Performing implicit logout"
" for connection recovery on CID: %hu\n",
conn->cid);
iscsit_discard_cr_cmds_by_expstatsn(cr, exp_statsn);
}
}
/*
* Otherwise we follow item 4 from the login table: we have
* found an existing ISID, a valid/existing TSIH and a new
* CID, so we go ahead and add a new connection to the
* session.
*/
pr_debug("Adding CID %hu to existing session for %s.\n",
cid, sess->sess_ops->InitiatorName);
if ((atomic_read(&sess->nconn) + 1) > sess->sess_ops->MaxConnections) {
pr_err("Adding additional connection to this session"
" would exceed MaxConnections %d, login failed.\n",
sess->sess_ops->MaxConnections);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_ISID_ERROR);
return -1;
}
return 0;
}
static void iscsi_post_login_start_timers(struct iscsit_conn *conn)
{
struct iscsit_session *sess = conn->sess;
/*
* FIXME: Unsolicited NopIN support for ISER
*/
if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
return;
if (!sess->sess_ops->SessionType)
iscsit_start_nopin_timer(conn);
}
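/*
 * Reserve a thread-set bitmap slot and start the per-connection TX and
 * RX kthreads, unwinding both on failure.
 */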
int iscsit_start_kthreads(struct iscsit_conn *conn)
{
int ret = 0;
spin_lock(&iscsit_global->ts_bitmap_lock);
conn->bitmap_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
ISCSIT_BITMAP_BITS, get_order(1));
spin_unlock(&iscsit_global->ts_bitmap_lock);
if (conn->bitmap_id < 0) {
pr_err("bitmap_find_free_region() failed for"
" iscsit_start_kthreads()\n");
return -ENOMEM;
}
conn->tx_thread = kthread_run(iscsi_target_tx_thread, conn,
"%s", ISCSI_TX_THREAD_NAME);
if (IS_ERR(conn->tx_thread)) {
pr_err("Unable to start iscsi_target_tx_thread\n");
ret = PTR_ERR(conn->tx_thread);
goto out_bitmap;
}
conn->tx_thread_active = true;
conn->rx_thread = kthread_run(iscsi_target_rx_thread, conn,
"%s", ISCSI_RX_THREAD_NAME);
if (IS_ERR(conn->rx_thread)) {
pr_err("Unable to start iscsi_target_rx_thread\n");
ret = PTR_ERR(conn->rx_thread);
goto out_tx;
}
conn->rx_thread_active = true;
return 0;
out_tx:
send_sig(SIGINT, conn->tx_thread, 1);
kthread_stop(conn->tx_thread);
conn->tx_thread_active = false;
out_bitmap:
spin_lock(&iscsit_global->ts_bitmap_lock);
bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
get_order(1));
spin_unlock(&iscsit_global->ts_bitmap_lock);
return ret;
}
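/*
 * Complete the move to the logged-in state: apply the negotiated
 * connection/session parameters, register a new session for a leading
 * connection, start the post-login timers and wake the RX thread.
 */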
void iscsi_post_login_handler(
struct iscsi_np *np,
struct iscsit_conn *conn,
u8 zero_tsih)
{
int stop_timer = 0;
struct iscsit_session *sess = conn->sess;
struct se_session *se_sess = sess->se_sess;
struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
iscsit_inc_conn_usage_count(conn);
iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_SUCCESS,
ISCSI_LOGIN_STATUS_ACCEPT);
pr_debug("Moving to TARG_CONN_STATE_LOGGED_IN.\n");
conn->conn_state = TARG_CONN_STATE_LOGGED_IN;
iscsi_set_connection_parameters(conn->conn_ops, conn->param_list);
/*
* SCSI Initiator -> SCSI Target Port Mapping
*/
if (!zero_tsih) {
iscsi_set_session_parameters(sess->sess_ops,
conn->param_list, 0);
iscsi_release_param_list(conn->param_list);
conn->param_list = NULL;
spin_lock_bh(&sess->conn_lock);
atomic_set(&sess->session_continuation, 0);
if (sess->session_state == TARG_SESS_STATE_FAILED) {
pr_debug("Moving to"
" TARG_SESS_STATE_LOGGED_IN.\n");
sess->session_state = TARG_SESS_STATE_LOGGED_IN;
stop_timer = 1;
}
pr_debug("iSCSI Login successful on CID: %hu from %pISpc to"
" %pISpc,%hu\n", conn->cid, &conn->login_sockaddr,
&conn->local_sockaddr, tpg->tpgt);
list_add_tail(&conn->conn_list, &sess->sess_conn_list);
atomic_inc(&sess->nconn);
pr_debug("Incremented iSCSI Connection count to %d"
" from node: %s\n", atomic_read(&sess->nconn),
sess->sess_ops->InitiatorName);
spin_unlock_bh(&sess->conn_lock);
iscsi_post_login_start_timers(conn);
/*
* Determine CPU mask to ensure connection's RX and TX kthreads
* are scheduled on the same CPU.
*/
iscsit_thread_get_cpumask(conn);
conn->conn_rx_reset_cpumask = 1;
conn->conn_tx_reset_cpumask = 1;
/*
* Wakeup the sleeping iscsi_target_rx_thread() now that
* iscsit_conn is in TARG_CONN_STATE_LOGGED_IN state.
*/
complete(&conn->rx_login_comp);
iscsit_dec_conn_usage_count(conn);
if (stop_timer) {
spin_lock_bh(&se_tpg->session_lock);
iscsit_stop_time2retain_timer(sess);
spin_unlock_bh(&se_tpg->session_lock);
}
iscsit_dec_session_usage_count(sess);
return;
}
iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
iscsi_release_param_list(conn->param_list);
conn->param_list = NULL;
iscsit_determine_maxcmdsn(sess);
spin_lock_bh(&se_tpg->session_lock);
__transport_register_session(&sess->tpg->tpg_se_tpg,
se_sess->se_node_acl, se_sess, sess);
pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n");
sess->session_state = TARG_SESS_STATE_LOGGED_IN;
pr_debug("iSCSI Login successful on CID: %hu from %pISpc to %pISpc,%hu\n",
conn->cid, &conn->login_sockaddr, &conn->local_sockaddr,
tpg->tpgt);
spin_lock_bh(&sess->conn_lock);
list_add_tail(&conn->conn_list, &sess->sess_conn_list);
atomic_inc(&sess->nconn);
pr_debug("Incremented iSCSI Connection count to %d from node:"
" %s\n", atomic_read(&sess->nconn),
sess->sess_ops->InitiatorName);
spin_unlock_bh(&sess->conn_lock);
sess->sid = tpg->sid++;
if (!sess->sid)
sess->sid = tpg->sid++;
pr_debug("Established iSCSI session from node: %s\n",
sess->sess_ops->InitiatorName);
tpg->nsessions++;
if (tpg->tpg_tiqn)
tpg->tpg_tiqn->tiqn_nsessions++;
pr_debug("Incremented number of active iSCSI sessions to %u on"
" iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
spin_unlock_bh(&se_tpg->session_lock);
iscsi_post_login_start_timers(conn);
/*
* Determine CPU mask to ensure connection's RX and TX kthreads
* are scheduled on the same CPU.
*/
iscsit_thread_get_cpumask(conn);
conn->conn_rx_reset_cpumask = 1;
conn->conn_tx_reset_cpumask = 1;
/*
* Wakeup the sleeping iscsi_target_rx_thread() now that
* iscsit_conn is in TARG_CONN_STATE_LOGGED_IN state.
*/
complete(&conn->rx_login_comp);
iscsit_dec_conn_usage_count(conn);
}
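/*
 * Create, bind and listen on the network portal socket for the
 * requested transport (TCP or SCTP).
 */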
int iscsit_setup_np(
struct iscsi_np *np,
struct sockaddr_storage *sockaddr)
{
struct socket *sock = NULL;
int backlog = ISCSIT_TCP_BACKLOG, ret, len;
switch (np->np_network_transport) {
case ISCSI_TCP:
np->np_ip_proto = IPPROTO_TCP;
np->np_sock_type = SOCK_STREAM;
break;
case ISCSI_SCTP_TCP:
np->np_ip_proto = IPPROTO_SCTP;
np->np_sock_type = SOCK_STREAM;
break;
case ISCSI_SCTP_UDP:
np->np_ip_proto = IPPROTO_SCTP;
np->np_sock_type = SOCK_SEQPACKET;
break;
default:
pr_err("Unsupported network_transport: %d\n",
np->np_network_transport);
return -EINVAL;
}
ret = sock_create(sockaddr->ss_family, np->np_sock_type,
np->np_ip_proto, &sock);
if (ret < 0) {
pr_err("sock_create() failed.\n");
return ret;
}
np->np_socket = sock;
/*
* Set up np->np_sockaddr from the sockaddr passed in from
* the iscsi_target_configfs.c code.
*/
memcpy(&np->np_sockaddr, sockaddr,
sizeof(struct sockaddr_storage));
if (sockaddr->ss_family == AF_INET6)
len = sizeof(struct sockaddr_in6);
else
len = sizeof(struct sockaddr_in);
/*
* Set SO_REUSEADDR, and disable Nagle Algorithm with TCP_NODELAY.
*/
if (np->np_network_transport == ISCSI_TCP)
tcp_sock_set_nodelay(sock->sk);
sock_set_reuseaddr(sock->sk);
ip_sock_set_freebind(sock->sk);
ret = kernel_bind(sock, (struct sockaddr *)&np->np_sockaddr, len);
if (ret < 0) {
pr_err("kernel_bind() failed: %d\n", ret);
goto fail;
}
ret = kernel_listen(sock, backlog);
if (ret != 0) {
pr_err("kernel_listen() failed: %d\n", ret);
goto fail;
}
return 0;
fail:
np->np_socket = NULL;
sock_release(sock);
return ret;
}
int iscsi_target_setup_login_socket(
struct iscsi_np *np,
struct sockaddr_storage *sockaddr)
{
struct iscsit_transport *t;
int rc;
t = iscsit_get_transport(np->np_network_transport);
if (!t)
return -EINVAL;
rc = t->iscsit_setup_np(np, sockaddr);
if (rc < 0) {
iscsit_put_transport(t);
return rc;
}
np->np_transport = t;
np->enabled = true;
return 0;
}
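/*
 * Accept an incoming connection on the network portal and record the
 * login and local sockaddrs, unwrapping IPv4-mapped IPv6 addresses.
 */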
int iscsit_accept_np(struct iscsi_np *np, struct iscsit_conn *conn)
{
struct socket *new_sock, *sock = np->np_socket;
struct sockaddr_in sock_in;
struct sockaddr_in6 sock_in6;
int rc;
rc = kernel_accept(sock, &new_sock, 0);
if (rc < 0)
return rc;
conn->sock = new_sock;
conn->login_family = np->np_sockaddr.ss_family;
if (np->np_sockaddr.ss_family == AF_INET6) {
memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
rc = conn->sock->ops->getname(conn->sock,
(struct sockaddr *)&sock_in6, 1);
if (rc >= 0) {
if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr)) {
memcpy(&conn->login_sockaddr, &sock_in6, sizeof(sock_in6));
} else {
/* Pretend to be an ipv4 socket */
sock_in.sin_family = AF_INET;
sock_in.sin_port = sock_in6.sin6_port;
memcpy(&sock_in.sin_addr, &sock_in6.sin6_addr.s6_addr32[3], 4);
memcpy(&conn->login_sockaddr, &sock_in, sizeof(sock_in));
}
}
rc = conn->sock->ops->getname(conn->sock,
(struct sockaddr *)&sock_in6, 0);
if (rc >= 0) {
if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr)) {
memcpy(&conn->local_sockaddr, &sock_in6, sizeof(sock_in6));
} else {
/* Pretend to be an ipv4 socket */
sock_in.sin_family = AF_INET;
sock_in.sin_port = sock_in6.sin6_port;
memcpy(&sock_in.sin_addr, &sock_in6.sin6_addr.s6_addr32[3], 4);
memcpy(&conn->local_sockaddr, &sock_in, sizeof(sock_in));
}
}
} else {
memset(&sock_in, 0, sizeof(struct sockaddr_in));
rc = conn->sock->ops->getname(conn->sock,
(struct sockaddr *)&sock_in, 1);
if (rc >= 0)
memcpy(&conn->login_sockaddr, &sock_in, sizeof(sock_in));
rc = conn->sock->ops->getname(conn->sock,
(struct sockaddr *)&sock_in, 0);
if (rc >= 0)
memcpy(&conn->local_sockaddr, &sock_in, sizeof(sock_in));
}
return 0;
}
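/*
 * Receive a Login Request PDU header plus its key=value payload, and
 * capture the leading request's ISID, CID, TSIH and sequence numbers.
 */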
int iscsit_get_login_rx(struct iscsit_conn *conn, struct iscsi_login *login)
{
struct iscsi_login_req *login_req;
u32 padding = 0, payload_length;
if (iscsi_login_rx_data(conn, login->req, ISCSI_HDR_LEN) < 0)
return -1;
login_req = (struct iscsi_login_req *)login->req;
payload_length = ntoh24(login_req->dlength);
padding = ((-payload_length) & 3);
pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
" CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
login_req->flags, login_req->itt, login_req->cmdsn,
login_req->exp_statsn, login_req->cid, payload_length);
/*
* Setup the initial iscsi_login values from the leading
* login request PDU.
*/
if (login->first_request) {
login_req = (struct iscsi_login_req *)login->req;
login->leading_connection = (!login_req->tsih) ? 1 : 0;
login->current_stage = ISCSI_LOGIN_CURRENT_STAGE(login_req->flags);
login->version_min = login_req->min_version;
login->version_max = login_req->max_version;
memcpy(login->isid, login_req->isid, 6);
login->cmd_sn = be32_to_cpu(login_req->cmdsn);
login->init_task_tag = login_req->itt;
login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
login->cid = be16_to_cpu(login_req->cid);
login->tsih = be16_to_cpu(login_req->tsih);
}
if (iscsi_target_check_login_request(conn, login) < 0)
return -1;
memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
if (iscsi_login_rx_data(conn, login->req_buf,
payload_length + padding) < 0)
return -1;
return 0;
}
int iscsit_put_login_tx(struct iscsit_conn *conn, struct iscsi_login *login,
u32 length)
{
if (iscsi_login_tx_data(conn, login->rsp, login->rsp_buf, length) < 0)
return -1;
return 0;
}
static int
iscsit_conn_set_transport(struct iscsit_conn *conn, struct iscsit_transport *t)
{
int rc;
if (!t->owner) {
conn->conn_transport = t;
return 0;
}
rc = try_module_get(t->owner);
if (!rc) {
pr_err("try_module_get() failed for %s\n", t->name);
return -EINVAL;
}
conn->conn_transport = t;
return 0;
}
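/*
 * Allocate and initialize a struct iscsit_conn for a newly accepted
 * socket: lists, locks, completions, timers, cpumasks and conn_ops.
 */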
static struct iscsit_conn *iscsit_alloc_conn(struct iscsi_np *np)
{
struct iscsit_conn *conn;
conn = kzalloc(sizeof(struct iscsit_conn), GFP_KERNEL);
if (!conn) {
pr_err("Could not allocate memory for new connection\n");
return NULL;
}
pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
conn->conn_state = TARG_CONN_STATE_FREE;
init_waitqueue_head(&conn->queues_wq);
INIT_LIST_HEAD(&conn->conn_list);
INIT_LIST_HEAD(&conn->conn_cmd_list);
INIT_LIST_HEAD(&conn->immed_queue_list);
INIT_LIST_HEAD(&conn->response_queue_list);
init_completion(&conn->conn_post_wait_comp);
init_completion(&conn->conn_wait_comp);
init_completion(&conn->conn_wait_rcfr_comp);
init_completion(&conn->conn_waiting_on_uc_comp);
init_completion(&conn->conn_logout_comp);
init_completion(&conn->rx_half_close_comp);
init_completion(&conn->tx_half_close_comp);
init_completion(&conn->rx_login_comp);
spin_lock_init(&conn->cmd_lock);
spin_lock_init(&conn->conn_usage_lock);
spin_lock_init(&conn->immed_queue_lock);
spin_lock_init(&conn->nopin_timer_lock);
spin_lock_init(&conn->response_queue_lock);
spin_lock_init(&conn->state_lock);
spin_lock_init(&conn->login_worker_lock);
spin_lock_init(&conn->login_timer_lock);
timer_setup(&conn->nopin_response_timer,
iscsit_handle_nopin_response_timeout, 0);
timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0);
timer_setup(&conn->login_timer, iscsit_login_timeout, 0);
if (iscsit_conn_set_transport(conn, np->np_transport) < 0)
goto free_conn;
conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
if (!conn->conn_ops) {
pr_err("Unable to allocate memory for struct iscsi_conn_ops.\n");
goto put_transport;
}
if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
pr_err("Unable to allocate conn->conn_cpumask\n");
goto free_conn_ops;
}
if (!zalloc_cpumask_var(&conn->allowed_cpumask, GFP_KERNEL)) {
pr_err("Unable to allocate conn->allowed_cpumask\n");
goto free_conn_cpumask;
}
conn->cmd_cnt = target_alloc_cmd_counter();
if (!conn->cmd_cnt)
goto free_conn_allowed_cpumask;
return conn;
free_conn_allowed_cpumask:
free_cpumask_var(conn->allowed_cpumask);
free_conn_cpumask:
free_cpumask_var(conn->conn_cpumask);
free_conn_ops:
kfree(conn->conn_ops);
put_transport:
iscsit_put_transport(conn->conn_transport);
free_conn:
kfree(conn);
return NULL;
}
void iscsit_free_conn(struct iscsit_conn *conn)
{
target_free_cmd_counter(conn->cmd_cnt);
free_cpumask_var(conn->allowed_cpumask);
free_cpumask_var(conn->conn_cpumask);
kfree(conn->conn_ops);
iscsit_put_transport(conn->conn_transport);
kfree(conn);
}
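/*
 * Tear down a connection whose login failed, releasing any partially
 * created session state and restarting Time2Retain when required.
 */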
void iscsi_target_login_sess_out(struct iscsit_conn *conn,
bool zero_tsih, bool new_sess)
{
if (!new_sess)
goto old_sess_out;
pr_err("iSCSI Login negotiation failed.\n");
iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_INIT_ERR);
if (!zero_tsih || !conn->sess)
goto old_sess_out;
transport_free_session(conn->sess->se_sess);
ida_free(&sess_ida, conn->sess->session_index);
kfree(conn->sess->sess_ops);
kfree(conn->sess);
conn->sess = NULL;
old_sess_out:
/*
* If login negotiation fails check if the Time2Retain timer
* needs to be restarted.
*/
if (!zero_tsih && conn->sess) {
spin_lock_bh(&conn->sess->conn_lock);
if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
struct se_portal_group *se_tpg =
&conn->tpg->tpg_se_tpg;
atomic_set(&conn->sess->session_continuation, 0);
spin_unlock_bh(&conn->sess->conn_lock);
spin_lock_bh(&se_tpg->session_lock);
iscsit_start_time2retain_handler(conn->sess);
spin_unlock_bh(&se_tpg->session_lock);
} else
spin_unlock_bh(&conn->sess->conn_lock);
iscsit_dec_session_usage_count(conn->sess);
}
ahash_request_free(conn->conn_tx_hash);
if (conn->conn_rx_hash) {
struct crypto_ahash *tfm;
tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);
ahash_request_free(conn->conn_rx_hash);
crypto_free_ahash(tfm);
}
if (conn->param_list) {
iscsi_release_param_list(conn->param_list);
conn->param_list = NULL;
}
iscsi_target_nego_release(conn);
if (conn->sock) {
sock_release(conn->sock);
conn->sock = NULL;
}
if (conn->conn_transport->iscsit_wait_conn)
conn->conn_transport->iscsit_wait_conn(conn);
if (conn->conn_transport->iscsit_free_conn)
conn->conn_transport->iscsit_free_conn(conn);
iscsit_free_conn(conn);
}
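/*
 * One pass of the login thread: accept a connection, read the leading
 * Login Request, run the zero/non-zero TSIH setup paths and drive
 * parameter negotiation.  Returns 1 when another accept is expected.
 */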
static int __iscsi_target_login_thread(struct iscsi_np *np)
{
u8 *buffer, zero_tsih = 0;
int ret = 0, rc;
struct iscsit_conn *conn = NULL;
struct iscsi_login *login;
struct iscsi_portal_group *tpg = NULL;
struct iscsi_login_req *pdu;
struct iscsi_tpg_np *tpg_np;
bool new_sess = false;
flush_signals(current);
spin_lock_bh(&np->np_thread_lock);
if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
spin_unlock_bh(&np->np_thread_lock);
complete(&np->np_restart_comp);
return 1;
} else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
spin_unlock_bh(&np->np_thread_lock);
goto exit;
} else {
np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
}
spin_unlock_bh(&np->np_thread_lock);
conn = iscsit_alloc_conn(np);
if (!conn) {
/* Get another socket */
return 1;
}
rc = np->np_transport->iscsit_accept_np(np, conn);
if (rc == -ENOSYS) {
complete(&np->np_restart_comp);
iscsit_free_conn(conn);
goto exit;
} else if (rc < 0) {
spin_lock_bh(&np->np_thread_lock);
if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
spin_unlock_bh(&np->np_thread_lock);
complete(&np->np_restart_comp);
iscsit_free_conn(conn);
/* Get another socket */
return 1;
}
spin_unlock_bh(&np->np_thread_lock);
iscsit_free_conn(conn);
return 1;
}
/*
* Perform the remaining iSCSI connection initialization items.
*/
login = iscsi_login_init_conn(conn);
if (!login) {
goto new_sess_out;
}
iscsit_start_login_timer(conn, current);
pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
conn->conn_state = TARG_CONN_STATE_XPT_UP;
/*
* This will process the first login request + payload.
*/
rc = np->np_transport->iscsit_get_login_rx(conn, login);
if (rc == 1)
return 1;
else if (rc < 0)
goto new_sess_out;
buffer = &login->req[0];
pdu = (struct iscsi_login_req *)buffer;
/*
* Used by iscsit_tx_login_rsp() for Login Response PDUs
* when Status-Class != 0.
*/
conn->login_itt = pdu->itt;
spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
spin_unlock_bh(&np->np_thread_lock);
pr_err("iSCSI Network Portal on %pISpc currently not"
" active.\n", &np->np_sockaddr);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
goto new_sess_out;
}
spin_unlock_bh(&np->np_thread_lock);
conn->network_transport = np->np_network_transport;
pr_debug("Received iSCSI login request from %pISpc on %s Network"
" Portal %pISpc\n", &conn->login_sockaddr, np->np_transport->name,
&conn->local_sockaddr);
pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n");
conn->conn_state = TARG_CONN_STATE_IN_LOGIN;
if (iscsi_login_check_initiator_version(conn, pdu->max_version,
pdu->min_version) < 0)
goto new_sess_out;
zero_tsih = (pdu->tsih == 0x0000);
if (zero_tsih) {
/*
* This is the leading connection of a new session.
* We wait until after authentication to check for
* session reinstatement.
*/
if (iscsi_login_zero_tsih_s1(conn, buffer) < 0)
goto new_sess_out;
} else {
/*
* Add a new connection to an existing session.
* We check for a non-existent session in
* iscsi_login_non_zero_tsih_s2() below based
* on ISID/TSIH, but wait until after authentication
* to check for connection reinstatement, etc.
*/
if (iscsi_login_non_zero_tsih_s1(conn, buffer) < 0)
goto new_sess_out;
}
/*
* SessionType: Discovery
*
* Locates Default Portal
*
* SessionType: Normal
*
* Locates Target Portal from NP -> Target IQN
*/
rc = iscsi_target_locate_portal(np, conn, login);
if (rc < 0) {
tpg = conn->tpg;
goto new_sess_out;
}
login->zero_tsih = zero_tsih;
if (conn->sess)
conn->sess->se_sess->sup_prot_ops =
conn->conn_transport->iscsit_get_sup_prot_ops(conn);
tpg = conn->tpg;
if (!tpg) {
pr_err("Unable to locate struct iscsit_conn->tpg\n");
goto new_sess_out;
}
if (zero_tsih) {
if (iscsi_login_zero_tsih_s2(conn) < 0)
goto new_sess_out;
} else {
if (iscsi_login_non_zero_tsih_s2(conn, buffer) < 0)
goto old_sess_out;
}
if (conn->conn_transport->iscsit_validate_params) {
ret = conn->conn_transport->iscsit_validate_params(conn);
if (ret < 0) {
if (zero_tsih)
goto new_sess_out;
else
goto old_sess_out;
}
}
ret = iscsi_target_start_negotiation(login, conn);
if (ret < 0)
goto new_sess_out;
if (ret == 1) {
tpg_np = conn->tpg_np;
iscsi_post_login_handler(np, conn, zero_tsih);
iscsit_deaccess_np(np, tpg, tpg_np);
}
tpg = NULL;
tpg_np = NULL;
/* Get another socket */
return 1;
new_sess_out:
new_sess = true;
old_sess_out:
iscsit_stop_login_timer(conn);
tpg_np = conn->tpg_np;
iscsi_target_login_sess_out(conn, zero_tsih, new_sess);
new_sess = false;
if (tpg) {
iscsit_deaccess_np(np, tpg, tpg_np);
tpg = NULL;
tpg_np = NULL;
}
return 1;
exit:
spin_lock_bh(&np->np_thread_lock);
np->np_thread_state = ISCSI_NP_THREAD_EXIT;
spin_unlock_bh(&np->np_thread_lock);
return 0;
}
int iscsi_target_login_thread(void *arg)
{
struct iscsi_np *np = arg;
int ret;
allow_signal(SIGINT);
while (1) {
ret = __iscsi_target_login_thread(np);
/*
* We break and exit here unless another sock_accept() call
* is expected.
*/
if (ret != 1)
break;
}
while (!kthread_should_stop()) {
msleep(100);
}
return 0;
}
|
linux-master
|
drivers/target/iscsi/iscsi_target_login.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains the iSCSI Target specific Task Management functions.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <asm/unaligned.h>
#include <scsi/scsi_proto.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_device.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_tmr.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
u8 iscsit_tmr_abort_task(
struct iscsit_cmd *cmd,
unsigned char *buf)
{
struct iscsit_cmd *ref_cmd;
struct iscsit_conn *conn = cmd->conn;
struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
ref_cmd = iscsit_find_cmd_from_itt(conn, hdr->rtt);
if (!ref_cmd) {
pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
" %hu.\n", hdr->rtt, conn->cid);
return (iscsi_sna_gte(be32_to_cpu(hdr->refcmdsn), conn->sess->exp_cmd_sn) &&
iscsi_sna_lte(be32_to_cpu(hdr->refcmdsn), (u32) atomic_read(&conn->sess->max_cmd_sn))) ?
ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
}
if (ref_cmd->cmd_sn != be32_to_cpu(hdr->refcmdsn)) {
pr_err("RefCmdSN 0x%08x does not equal"
" task's CmdSN 0x%08x. Rejecting ABORT_TASK.\n",
hdr->refcmdsn, ref_cmd->cmd_sn);
return ISCSI_TMF_RSP_REJECTED;
}
se_tmr->ref_task_tag = (__force u32)hdr->rtt;
tmr_req->ref_cmd = ref_cmd;
tmr_req->exp_data_sn = be32_to_cpu(hdr->exp_datasn);
return ISCSI_TMF_RSP_COMPLETE;
}
/*
* Called from iscsit_handle_task_mgt_cmd().
*/
int iscsit_tmr_task_warm_reset(
struct iscsit_conn *conn,
struct iscsi_tmr_req *tmr_req,
unsigned char *buf)
{
struct iscsit_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
if (!na->tmr_warm_reset) {
pr_err("TMR Opcode TARGET_WARM_RESET authorization"
" failed for Initiator Node: %s\n",
sess->se_sess->se_node_acl->initiatorname);
return -1;
}
/*
* Do the real work in transport_generic_do_tmr().
*/
return 0;
}
int iscsit_tmr_task_cold_reset(
struct iscsit_conn *conn,
struct iscsi_tmr_req *tmr_req,
unsigned char *buf)
{
struct iscsit_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
if (!na->tmr_cold_reset) {
pr_err("TMR Opcode TARGET_COLD_RESET authorization"
" failed for Initiator Node: %s\n",
sess->se_sess->se_node_acl->initiatorname);
return -1;
}
/*
* Do the real work in transport_generic_do_tmr().
*/
return 0;
}
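/*
 * Handle a TASK_REASSIGN TMR (ERL=2 only): locate the command in the
 * connection recovery list and verify that its negotiated data segment
 * lengths and LUN still match before allowing reallegiance.
 */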
u8 iscsit_tmr_task_reassign(
struct iscsit_cmd *cmd,
unsigned char *buf)
{
struct iscsit_cmd *ref_cmd = NULL;
struct iscsit_conn *conn = cmd->conn;
struct iscsi_conn_recovery *cr = NULL;
struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
u64 ret, ref_lun;
pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x,"
" RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n",
hdr->itt, hdr->rtt, hdr->exp_datasn, conn->cid);
if (conn->sess->sess_ops->ErrorRecoveryLevel != 2) {
pr_err("TMR TASK_REASSIGN not supported in ERL<2,"
" ignoring request.\n");
return ISCSI_TMF_RSP_NOT_SUPPORTED;
}
ret = iscsit_find_cmd_for_recovery(conn->sess, &ref_cmd, &cr, hdr->rtt);
if (ret == -2) {
pr_err("Command ITT: 0x%08x is still alligent to CID:"
" %hu\n", ref_cmd->init_task_tag, cr->cid);
return ISCSI_TMF_RSP_TASK_ALLEGIANT;
} else if (ret == -1) {
pr_err("Unable to locate RefTaskTag: 0x%08x in"
" connection recovery command list.\n", hdr->rtt);
return ISCSI_TMF_RSP_NO_TASK;
}
/*
* Temporary check to prevent connection recovery for
* connections with a differing Max*DataSegmentLength.
*/
if (cr->maxrecvdatasegmentlength !=
conn->conn_ops->MaxRecvDataSegmentLength) {
pr_err("Unable to perform connection recovery for"
" differing MaxRecvDataSegmentLength, rejecting"
" TMR TASK_REASSIGN.\n");
return ISCSI_TMF_RSP_REJECTED;
}
if (cr->maxxmitdatasegmentlength !=
conn->conn_ops->MaxXmitDataSegmentLength) {
pr_err("Unable to perform connection recovery for"
" differing MaxXmitDataSegmentLength, rejecting"
" TMR TASK_REASSIGN.\n");
return ISCSI_TMF_RSP_REJECTED;
}
ref_lun = scsilun_to_int(&hdr->lun);
if (ref_lun != ref_cmd->se_cmd.orig_fe_lun) {
pr_err("Unable to perform connection recovery for"
" differing ref_lun: %llu ref_cmd orig_fe_lun: %llu\n",
ref_lun, ref_cmd->se_cmd.orig_fe_lun);
return ISCSI_TMF_RSP_REJECTED;
}
se_tmr->ref_task_tag = (__force u32)hdr->rtt;
tmr_req->ref_cmd = ref_cmd;
tmr_req->exp_data_sn = be32_to_cpu(hdr->exp_datasn);
tmr_req->conn_recovery = cr;
tmr_req->task_reassign = 1;
/*
* Command can now be reassigned to a new connection.
* The task management response must be sent before the
* reassignment actually happens. See iscsi_tmr_post_handler().
*/
return ISCSI_TMF_RSP_COMPLETE;
}
static void iscsit_task_reassign_remove_cmd(
struct iscsit_cmd *cmd,
struct iscsi_conn_recovery *cr,
struct iscsit_session *sess)
{
int ret;
spin_lock(&cr->conn_recovery_cmd_lock);
ret = iscsit_remove_cmd_from_connection_recovery(cmd, sess);
spin_unlock(&cr->conn_recovery_cmd_lock);
if (!ret) {
pr_debug("iSCSI connection recovery successful for CID:"
" %hu on SID: %u\n", cr->cid, sess->sid);
iscsit_remove_active_connection_recovery_entry(cr, sess);
}
}
static int iscsit_task_reassign_complete_nop_out(
struct iscsi_tmr_req *tmr_req,
struct iscsit_conn *conn)
{
struct iscsit_cmd *cmd = tmr_req->ref_cmd;
struct iscsi_conn_recovery *cr;
if (!cmd->cr) {
pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
" is NULL!\n", cmd->init_task_tag);
return -1;
}
cr = cmd->cr;
/*
* Reset the StatSN so a new one for this command's new connection
* will be assigned.
* Reset the ExpStatSN as well so we may receive Status SNACKs.
*/
cmd->stat_sn = cmd->exp_stat_sn = 0;
iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
cmd->i_state = ISTATE_SEND_NOPIN;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
return 0;
}
static int iscsit_task_reassign_complete_write(
struct iscsit_cmd *cmd,
struct iscsi_tmr_req *tmr_req)
{
int no_build_r2ts = 0;
u32 length = 0, offset = 0;
struct iscsit_conn *conn = cmd->conn;
struct se_cmd *se_cmd = &cmd->se_cmd;
/*
* The Initiator must not send an R2T SNACK with a BegRun less than
* the TMR TASK_REASSIGN's ExpDataSN.
*/
if (!tmr_req->exp_data_sn) {
cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK;
cmd->acked_data_sn = 0;
} else {
cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
}
/*
* The TMR TASK_REASSIGN's ExpDataSN contains the next R2TSN the
* Initiator is expecting. The Target controls all WRITE operations,
* so if we have received all DataOUT we can safely ignore the Initiator.
*/
if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
if (!(cmd->se_cmd.transport_state & CMD_T_SENT)) {
pr_debug("WRITE ITT: 0x%08x: t_state: %d"
" never sent to transport\n",
cmd->init_task_tag, cmd->se_cmd.t_state);
target_execute_cmd(se_cmd);
return 0;
}
cmd->i_state = ISTATE_SEND_STATUS;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
return 0;
}
/*
* Special case to deal with DataSequenceInOrder=No and Non-Immediate
* Unsolicited DataOut.
*/
if (cmd->unsolicited_data) {
cmd->unsolicited_data = 0;
offset = cmd->next_burst_len = cmd->write_data_done;
if ((conn->sess->sess_ops->FirstBurstLength - offset) >=
cmd->se_cmd.data_length) {
no_build_r2ts = 1;
length = (cmd->se_cmd.data_length - offset);
} else
length = (conn->sess->sess_ops->FirstBurstLength - offset);
spin_lock_bh(&cmd->r2t_lock);
if (iscsit_add_r2t_to_list(cmd, offset, length, 0, 0) < 0) {
spin_unlock_bh(&cmd->r2t_lock);
return -1;
}
cmd->outstanding_r2ts++;
spin_unlock_bh(&cmd->r2t_lock);
if (no_build_r2ts)
return 0;
}
/*
* iscsit_build_r2ts_for_cmd() can handle the rest from here.
*/
return conn->conn_transport->iscsit_get_dataout(conn, cmd, true);
}
static int iscsit_task_reassign_complete_read(
struct iscsit_cmd *cmd,
struct iscsi_tmr_req *tmr_req)
{
struct iscsit_conn *conn = cmd->conn;
struct iscsi_datain_req *dr;
struct se_cmd *se_cmd = &cmd->se_cmd;
/*
* The Initiator must not send a Data SNACK with a BegRun less than
* the TMR TASK_REASSIGN's ExpDataSN.
*/
if (!tmr_req->exp_data_sn) {
cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK;
cmd->acked_data_sn = 0;
} else {
cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
}
if (!(cmd->se_cmd.transport_state & CMD_T_SENT)) {
pr_debug("READ ITT: 0x%08x: t_state: %d never sent to"
" transport\n", cmd->init_task_tag,
cmd->se_cmd.t_state);
transport_handle_cdb_direct(se_cmd);
return 0;
}
if (!(se_cmd->transport_state & CMD_T_COMPLETE)) {
pr_err("READ ITT: 0x%08x: t_state: %d, never returned"
" from transport\n", cmd->init_task_tag,
cmd->se_cmd.t_state);
return -1;
}
dr = iscsit_allocate_datain_req();
if (!dr)
return -1;
/*
* The TMR TASK_REASSIGN's ExpDataSN contains the next DataSN the
* Initiator is expecting.
*/
dr->data_sn = dr->begrun = tmr_req->exp_data_sn;
dr->runlength = 0;
dr->generate_recovery_values = 1;
dr->recovery = DATAIN_CONNECTION_RECOVERY;
iscsit_attach_datain_req(cmd, dr);
cmd->i_state = ISTATE_SEND_DATAIN;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
return 0;
}
static int iscsit_task_reassign_complete_none(
struct iscsit_cmd *cmd,
struct iscsi_tmr_req *tmr_req)
{
struct iscsit_conn *conn = cmd->conn;
cmd->i_state = ISTATE_SEND_STATUS;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
return 0;
}
static int iscsit_task_reassign_complete_scsi_cmnd(
struct iscsi_tmr_req *tmr_req,
struct iscsit_conn *conn)
{
struct iscsit_cmd *cmd = tmr_req->ref_cmd;
struct iscsi_conn_recovery *cr;
if (!cmd->cr) {
pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
" is NULL!\n", cmd->init_task_tag);
return -1;
}
cr = cmd->cr;
/*
* Reset the StatSN so a new one for this command's new connection
* will be assigned.
* Reset the ExpStatSN as well so we may receive Status SNACKs.
*/
cmd->stat_sn = cmd->exp_stat_sn = 0;
iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
if (cmd->se_cmd.se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
cmd->i_state = ISTATE_SEND_STATUS;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
return 0;
}
switch (cmd->data_direction) {
case DMA_TO_DEVICE:
return iscsit_task_reassign_complete_write(cmd, tmr_req);
case DMA_FROM_DEVICE:
return iscsit_task_reassign_complete_read(cmd, tmr_req);
case DMA_NONE:
return iscsit_task_reassign_complete_none(cmd, tmr_req);
default:
pr_err("Unknown cmd->data_direction: 0x%02x\n",
cmd->data_direction);
return -1;
}
return 0;
}
static int iscsit_task_reassign_complete(
struct iscsi_tmr_req *tmr_req,
struct iscsit_conn *conn)
{
struct iscsit_cmd *cmd;
int ret = 0;
if (!tmr_req->ref_cmd) {
pr_err("TMR Request is missing a RefCmd struct iscsit_cmd.\n");
return -1;
}
cmd = tmr_req->ref_cmd;
cmd->conn = conn;
switch (cmd->iscsi_opcode) {
case ISCSI_OP_NOOP_OUT:
ret = iscsit_task_reassign_complete_nop_out(tmr_req, conn);
break;
case ISCSI_OP_SCSI_CMD:
ret = iscsit_task_reassign_complete_scsi_cmnd(tmr_req, conn);
break;
default:
pr_err("Illegal iSCSI Opcode 0x%02x during"
" command reallegiance\n", cmd->iscsi_opcode);
return -1;
}
if (ret != 0)
return ret;
pr_debug("Completed connection reallegiance for Opcode: 0x%02x,"
" ITT: 0x%08x to CID: %hu.\n", cmd->iscsi_opcode,
cmd->init_task_tag, conn->cid);
return 0;
}
/*
* Handles special after-the-fact actions related to TMRs.
* Right now the only one it is really needed for is
* connection recovery related TASK_REASSIGN.
*/
int iscsit_tmr_post_handler(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
if (tmr_req->task_reassign &&
(se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
return iscsit_task_reassign_complete(tmr_req, conn);
return 0;
}
EXPORT_SYMBOL(iscsit_tmr_post_handler);
/*
* Nothing to do here, but leave it for good measure. :-)
*/
static int iscsit_task_reassign_prepare_read(
struct iscsi_tmr_req *tmr_req,
struct iscsit_conn *conn)
{
return 0;
}
static void iscsit_task_reassign_prepare_unsolicited_dataout(
struct iscsit_cmd *cmd,
struct iscsit_conn *conn)
{
int i, j;
struct iscsi_pdu *pdu = NULL;
struct iscsi_seq *seq = NULL;
if (conn->sess->sess_ops->DataSequenceInOrder) {
cmd->data_sn = 0;
if (cmd->immediate_data)
cmd->r2t_offset += (cmd->first_burst_len -
cmd->seq_start_offset);
if (conn->sess->sess_ops->DataPDUInOrder) {
cmd->write_data_done -= (cmd->immediate_data) ?
(cmd->first_burst_len -
cmd->seq_start_offset) :
cmd->first_burst_len;
cmd->first_burst_len = 0;
return;
}
for (i = 0; i < cmd->pdu_count; i++) {
pdu = &cmd->pdu_list[i];
if (pdu->status != ISCSI_PDU_RECEIVED_OK)
continue;
if ((pdu->offset >= cmd->seq_start_offset) &&
((pdu->offset + pdu->length) <=
cmd->seq_end_offset)) {
cmd->first_burst_len -= pdu->length;
cmd->write_data_done -= pdu->length;
pdu->status = ISCSI_PDU_NOT_RECEIVED;
}
}
} else {
for (i = 0; i < cmd->seq_count; i++) {
seq = &cmd->seq_list[i];
if (seq->type != SEQTYPE_UNSOLICITED)
continue;
cmd->write_data_done -=
(seq->offset - seq->orig_offset);
cmd->first_burst_len = 0;
seq->data_sn = 0;
seq->offset = seq->orig_offset;
seq->next_burst_len = 0;
seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
if (conn->sess->sess_ops->DataPDUInOrder)
continue;
for (j = 0; j < seq->pdu_count; j++) {
pdu = &cmd->pdu_list[j+seq->pdu_start];
if (pdu->status != ISCSI_PDU_RECEIVED_OK)
continue;
pdu->status = ISCSI_PDU_NOT_RECEIVED;
}
}
}
}
static int iscsit_task_reassign_prepare_write(
struct iscsi_tmr_req *tmr_req,
struct iscsit_conn *conn)
{
struct iscsit_cmd *cmd = tmr_req->ref_cmd;
struct iscsi_pdu *pdu = NULL;
struct iscsi_r2t *r2t = NULL, *r2t_tmp;
int first_incomplete_r2t = 1, i = 0;
/*
* The command was in the process of receiving Unsolicited DataOUT when
* the connection failed.
*/
if (cmd->unsolicited_data)
iscsit_task_reassign_prepare_unsolicited_dataout(cmd, conn);
/*
* The Initiator is requesting R2Ts starting from zero, skip
* checking acknowledged R2Ts and start checking struct iscsi_r2ts
* greater than zero.
*/
if (!tmr_req->exp_data_sn)
goto drop_unacknowledged_r2ts;
/*
* We now check that the PDUs in DataOUT sequences below
* the TMR TASK_REASSIGN ExpDataSN (R2TSN the Initiator is
* expecting next) have all the DataOUT they require to complete
* the DataOUT sequence. First scan from R2TSN 0 to TMR
* TASK_REASSIGN ExpDataSN-1.
*
* If we have not received all DataOUT in question, we must
* make sure to make the appropriate changes to values in
* struct iscsit_cmd (and elsewhere depending on session parameters)
* so iscsit_build_r2ts_for_cmd() in iscsit_task_reassign_complete_write()
* will resend a new R2T for the DataOUT sequences in question.
*/
spin_lock_bh(&cmd->r2t_lock);
if (list_empty(&cmd->cmd_r2t_list)) {
spin_unlock_bh(&cmd->r2t_lock);
return -1;
}
list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
if (r2t->r2t_sn >= tmr_req->exp_data_sn)
continue;
/*
* Safely ignore Recovery R2Ts and R2Ts that have completed
* DataOUT sequences.
*/
if (r2t->seq_complete)
continue;
if (r2t->recovery_r2t)
continue;
/*
* DataSequenceInOrder=Yes:
*
* Taking into account the iSCSI implementation requirement of
* MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and
* DataSequenceInOrder=Yes, we must take into consideration
* the following:
*
* DataSequenceInOrder=No:
*
* Taking into account that the Initiator controls the (possibly
* random) PDU Order in (possibly random) Sequence Order of
* DataOUT the target requests with R2Ts, we must take into
* consideration the following:
*
* DataPDUInOrder=Yes for DataSequenceInOrder=[Yes,No]:
*
* While processing non-complete R2T DataOUT sequence requests
* the Target will re-request only the total sequence length
* minus current received offset. This is because we must
* assume the initiator will continue sending DataOUT from the
* last PDU before the connection failed.
*
* DataPDUInOrder=No for DataSequenceInOrder=[Yes,No]:
*
* While processing non-complete R2T DataOUT sequence requests
* the Target will re-request the entire DataOUT sequence if
* any single PDU is missing from the sequence. This is because
* we have no logical method to determine the next PDU offset,
* and we must assume the Initiator will be sending any random
* PDU offset in the current sequence after TASK_REASSIGN
* has completed.
*/
if (conn->sess->sess_ops->DataSequenceInOrder) {
if (!first_incomplete_r2t) {
cmd->r2t_offset -= r2t->xfer_len;
goto next;
}
if (conn->sess->sess_ops->DataPDUInOrder) {
cmd->data_sn = 0;
cmd->r2t_offset -= (r2t->xfer_len -
cmd->next_burst_len);
first_incomplete_r2t = 0;
goto next;
}
cmd->data_sn = 0;
cmd->r2t_offset -= r2t->xfer_len;
for (i = 0; i < cmd->pdu_count; i++) {
pdu = &cmd->pdu_list[i];
if (pdu->status != ISCSI_PDU_RECEIVED_OK)
continue;
if ((pdu->offset >= r2t->offset) &&
(pdu->offset < (r2t->offset +
r2t->xfer_len))) {
cmd->next_burst_len -= pdu->length;
cmd->write_data_done -= pdu->length;
pdu->status = ISCSI_PDU_NOT_RECEIVED;
}
}
first_incomplete_r2t = 0;
} else {
struct iscsi_seq *seq;
seq = iscsit_get_seq_holder(cmd, r2t->offset,
r2t->xfer_len);
if (!seq) {
spin_unlock_bh(&cmd->r2t_lock);
return -1;
}
cmd->write_data_done -=
(seq->offset - seq->orig_offset);
seq->data_sn = 0;
seq->offset = seq->orig_offset;
seq->next_burst_len = 0;
seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
cmd->seq_send_order--;
if (conn->sess->sess_ops->DataPDUInOrder)
goto next;
for (i = 0; i < seq->pdu_count; i++) {
pdu = &cmd->pdu_list[i+seq->pdu_start];
if (pdu->status != ISCSI_PDU_RECEIVED_OK)
continue;
pdu->status = ISCSI_PDU_NOT_RECEIVED;
}
}
next:
cmd->outstanding_r2ts--;
}
spin_unlock_bh(&cmd->r2t_lock);
/*
* We now drop all unacknowledged R2Ts, i.e. ExpDataSN from TMR
* TASK_REASSIGN to the last R2T in the list. We are also careful
* to check that the Initiator is not requesting R2Ts for DataOUT
* sequences it has already completed.
*
* Free each R2T in question and adjust values in struct iscsit_cmd
* accordingly so iscsit_build_r2ts_for_cmd() do the rest of
* the work after the TMR TASK_REASSIGN Response is sent.
*/
drop_unacknowledged_r2ts:
cmd->cmd_flags &= ~ICF_SENT_LAST_R2T;
cmd->r2t_sn = tmr_req->exp_data_sn;
spin_lock_bh(&cmd->r2t_lock);
list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list) {
/*
* Skip up to the R2T Sequence number provided by the
* iSCSI TASK_REASSIGN TMR
*/
if (r2t->r2t_sn < tmr_req->exp_data_sn)
continue;
if (r2t->seq_complete) {
pr_err("Initiator is requesting R2Ts from"
" R2TSN: 0x%08x, but R2TSN: 0x%08x, Offset: %u,"
" Length: %u is already complete."
" BAD INITIATOR ERL=2 IMPLEMENTATION!\n",
tmr_req->exp_data_sn, r2t->r2t_sn,
r2t->offset, r2t->xfer_len);
spin_unlock_bh(&cmd->r2t_lock);
return -1;
}
if (r2t->recovery_r2t) {
iscsit_free_r2t(r2t, cmd);
continue;
}
/* DataSequenceInOrder=Yes:
*
* Taking into account the iSCSI implementation requirement of
* MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and
* DataSequenceInOrder=Yes, it's safe to subtract the R2T's
* entire transfer length from the command's R2T offset marker.
*
* DataSequenceInOrder=No:
*
* We subtract the difference between the struct iscsi_seq
* current offset and original offset from cmd->write_data_done
* to account for DataOUT PDUs already received. Then reset
* the current offset to the original and zero out the current
* burst length, to make sure we re-request the entire DataOUT
* sequence.
*/
if (conn->sess->sess_ops->DataSequenceInOrder)
cmd->r2t_offset -= r2t->xfer_len;
else
cmd->seq_send_order--;
cmd->outstanding_r2ts--;
iscsit_free_r2t(r2t, cmd);
}
spin_unlock_bh(&cmd->r2t_lock);
return 0;
}
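/*
 * Illustrative sketch, not part of the original file: a minimal model
 * of the re-request rule described in the comments above.  The helper
 * name and parameters are hypothetical; the driver itself expresses
 * this through the r2t_offset, next_burst_len and pdu_list adjustments
 * in iscsit_task_reassign_prepare_write().
 */
#if 0
static u32 dataout_rerequest_len(u32 seq_len, u32 received_off,
bool data_pdu_in_order)
{
/* DataPDUInOrder=Yes: resume from the last contiguous offset. */
if (data_pdu_in_order)
return seq_len - received_off;
/* DataPDUInOrder=No: no safe resume point, re-request everything. */
return seq_len;
}
#endif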
/*
* Performs sanity checks TMR TASK_REASSIGN's ExpDataSN for
* a given struct iscsit_cmd.
*/
int iscsit_check_task_reassign_expdatasn(
struct iscsi_tmr_req *tmr_req,
struct iscsit_conn *conn)
{
struct iscsit_cmd *ref_cmd = tmr_req->ref_cmd;
if (ref_cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD)
return 0;
if (ref_cmd->se_cmd.se_cmd_flags & SCF_SENT_CHECK_CONDITION)
return 0;
if (ref_cmd->data_direction == DMA_NONE)
return 0;
/*
* For READs the TMR TASK_REASSIGNs ExpDataSN contains the next DataSN
* of DataIN the Initiator is expecting.
*
* Also check that the Initiator is not re-requesting DataIN that has
* already been acknowledged with a DataAck SNACK.
*/
if (ref_cmd->data_direction == DMA_FROM_DEVICE) {
if (tmr_req->exp_data_sn > ref_cmd->data_sn) {
pr_err("Received ExpDataSN: 0x%08x for READ"
" in TMR TASK_REASSIGN greater than command's"
" DataSN: 0x%08x.\n", tmr_req->exp_data_sn,
ref_cmd->data_sn);
return -1;
}
if ((ref_cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
(tmr_req->exp_data_sn <= ref_cmd->acked_data_sn)) {
pr_err("Received ExpDataSN: 0x%08x for READ"
" in TMR TASK_REASSIGN for previously"
" acknowledged DataIN: 0x%08x,"
" protocol error\n", tmr_req->exp_data_sn,
ref_cmd->acked_data_sn);
return -1;
}
return iscsit_task_reassign_prepare_read(tmr_req, conn);
}
/*
* For WRITEs the TMR TASK_REASSIGNs ExpDataSN contains the next R2TSN
* for R2Ts the Initiator is expecting.
*
* Do the magic in iscsit_task_reassign_prepare_write().
*/
if (ref_cmd->data_direction == DMA_TO_DEVICE) {
if (tmr_req->exp_data_sn > ref_cmd->r2t_sn) {
pr_err("Received ExpDataSN: 0x%08x for WRITE"
" in TMR TASK_REASSIGN greater than command's"
" R2TSN: 0x%08x.\n", tmr_req->exp_data_sn,
ref_cmd->r2t_sn);
return -1;
}
return iscsit_task_reassign_prepare_write(tmr_req, conn);
}
pr_err("Unknown iSCSI data_direction: 0x%02x\n",
ref_cmd->data_direction);
return -1;
}
|
linux-master
|
drivers/target/iscsi/iscsi_target_tmr.c
|
// SPDX-License-Identifier: GPL-2.0
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/module.h>
#include <target/iscsi/iscsi_transport.h>
static LIST_HEAD(g_transport_list);
static DEFINE_MUTEX(transport_mutex);
struct iscsit_transport *iscsit_get_transport(int type)
{
struct iscsit_transport *t;
mutex_lock(&transport_mutex);
list_for_each_entry(t, &g_transport_list, t_node) {
if (t->transport_type == type) {
if (t->owner && !try_module_get(t->owner)) {
t = NULL;
}
mutex_unlock(&transport_mutex);
return t;
}
}
mutex_unlock(&transport_mutex);
return NULL;
}
void iscsit_put_transport(struct iscsit_transport *t)
{
module_put(t->owner);
}
void iscsit_register_transport(struct iscsit_transport *t)
{
INIT_LIST_HEAD(&t->t_node);
mutex_lock(&transport_mutex);
list_add_tail(&t->t_node, &g_transport_list);
mutex_unlock(&transport_mutex);
pr_debug("Registered iSCSI transport: %s\n", t->name);
}
EXPORT_SYMBOL(iscsit_register_transport);
void iscsit_unregister_transport(struct iscsit_transport *t)
{
mutex_lock(&transport_mutex);
list_del(&t->t_node);
mutex_unlock(&transport_mutex);
pr_debug("Unregistered iSCSI transport: %s\n", t->name);
}
EXPORT_SYMBOL(iscsit_unregister_transport);
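/*
 * Illustrative sketch, not part of the original file: how a transport
 * driver might plug into the registry above.  The field names mirror
 * the iscsi_target_transport definition in iscsi_target.c; the
 * "example_" identifiers and the reuse of ISCSI_TCP as the transport
 * type are hypothetical.
 */
#if 0
static struct iscsit_transport example_transport = {
.name = "example",
.transport_type = ISCSI_TCP, /* hypothetical: reuse the TCP type */
.owner = THIS_MODULE,
/* ...remaining iscsit_* callbacks supplied by the driver... */
};

static int __init example_transport_init(void)
{
/* Adds the transport to g_transport_list under transport_mutex. */
iscsit_register_transport(&example_transport);
return 0;
}

static void __exit example_transport_exit(void)
{
iscsit_unregister_transport(&example_transport);
}

module_init(example_transport_init);
module_exit(example_transport_exit);
#endif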
|
linux-master
|
drivers/target/iscsi/iscsi_target_transport.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains iSCSI Target Portal Group related functions.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <linux/slab.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_erl0.h"
#include "iscsi_target_login.h"
#include "iscsi_target_nodeattrib.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
#include "iscsi_target_parameters.h"
#include <target/iscsi/iscsi_transport.h>
struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u16 tpgt)
{
struct iscsi_portal_group *tpg;
tpg = kzalloc(sizeof(struct iscsi_portal_group), GFP_KERNEL);
if (!tpg) {
pr_err("Unable to allocate struct iscsi_portal_group\n");
return NULL;
}
tpg->tpgt = tpgt;
tpg->tpg_state = TPG_STATE_FREE;
tpg->tpg_tiqn = tiqn;
INIT_LIST_HEAD(&tpg->tpg_gnp_list);
INIT_LIST_HEAD(&tpg->tpg_list);
mutex_init(&tpg->tpg_access_lock);
sema_init(&tpg->np_login_sem, 1);
spin_lock_init(&tpg->tpg_state_lock);
spin_lock_init(&tpg->tpg_np_lock);
return tpg;
}
static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *);
int iscsit_load_discovery_tpg(void)
{
struct iscsi_param *param;
struct iscsi_portal_group *tpg;
int ret;
tpg = iscsit_alloc_portal_group(NULL, 1);
if (!tpg) {
pr_err("Unable to allocate struct iscsi_portal_group\n");
return -1;
}
/*
* Save iscsi_ops pointer for special case discovery TPG that
* doesn't exist as se_wwn->wwn_group within configfs.
*/
tpg->tpg_se_tpg.se_tpg_tfo = &iscsi_ops;
ret = core_tpg_register(NULL, &tpg->tpg_se_tpg, -1);
if (ret < 0) {
kfree(tpg);
return -1;
}
tpg->sid = 1; /* First Assigned LIO Session ID */
iscsit_set_default_tpg_attribs(tpg);
if (iscsi_create_default_params(&tpg->param_list) < 0)
goto out;
/*
* By default we disable authentication for discovery sessions,
* this can be changed with:
*
* /sys/kernel/config/target/iscsi/discovery_auth/enforce_discovery_auth
*/
param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
if (!param)
goto free_pl_out;
if (iscsi_update_param_value(param, "CHAP,None") < 0)
goto free_pl_out;
tpg->tpg_attrib.authentication = 0;
spin_lock(&tpg->tpg_state_lock);
tpg->tpg_state = TPG_STATE_ACTIVE;
spin_unlock(&tpg->tpg_state_lock);
iscsit_global->discovery_tpg = tpg;
pr_debug("CORE[0] - Allocated Discovery TPG\n");
return 0;
free_pl_out:
iscsi_release_param_list(tpg->param_list);
out:
if (tpg->sid == 1)
core_tpg_deregister(&tpg->tpg_se_tpg);
kfree(tpg);
return -1;
}
void iscsit_release_discovery_tpg(void)
{
struct iscsi_portal_group *tpg = iscsit_global->discovery_tpg;
if (!tpg)
return;
iscsi_release_param_list(tpg->param_list);
core_tpg_deregister(&tpg->tpg_se_tpg);
kfree(tpg);
iscsit_global->discovery_tpg = NULL;
}
struct iscsi_portal_group *iscsit_get_tpg_from_np(
struct iscsi_tiqn *tiqn,
struct iscsi_np *np,
struct iscsi_tpg_np **tpg_np_out)
{
struct iscsi_portal_group *tpg = NULL;
struct iscsi_tpg_np *tpg_np;
spin_lock(&tiqn->tiqn_tpg_lock);
list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
spin_lock(&tpg->tpg_state_lock);
if (tpg->tpg_state != TPG_STATE_ACTIVE) {
spin_unlock(&tpg->tpg_state_lock);
continue;
}
spin_unlock(&tpg->tpg_state_lock);
spin_lock(&tpg->tpg_np_lock);
list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
if (tpg_np->tpg_np == np) {
*tpg_np_out = tpg_np;
kref_get(&tpg_np->tpg_np_kref);
spin_unlock(&tpg->tpg_np_lock);
spin_unlock(&tiqn->tiqn_tpg_lock);
return tpg;
}
}
spin_unlock(&tpg->tpg_np_lock);
}
spin_unlock(&tiqn->tiqn_tpg_lock);
return NULL;
}
int iscsit_get_tpg(
struct iscsi_portal_group *tpg)
{
return mutex_lock_interruptible(&tpg->tpg_access_lock);
}
void iscsit_put_tpg(struct iscsi_portal_group *tpg)
{
mutex_unlock(&tpg->tpg_access_lock);
}
static void iscsit_clear_tpg_np_login_thread(
struct iscsi_tpg_np *tpg_np,
struct iscsi_portal_group *tpg,
bool shutdown)
{
if (!tpg_np->tpg_np) {
pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
return;
}
if (shutdown)
tpg_np->tpg_np->enabled = false;
iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown);
}
static void iscsit_clear_tpg_np_login_threads(
struct iscsi_portal_group *tpg,
bool shutdown)
{
struct iscsi_tpg_np *tpg_np;
spin_lock(&tpg->tpg_np_lock);
list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
if (!tpg_np->tpg_np) {
pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
continue;
}
spin_unlock(&tpg->tpg_np_lock);
iscsit_clear_tpg_np_login_thread(tpg_np, tpg, shutdown);
spin_lock(&tpg->tpg_np_lock);
}
spin_unlock(&tpg->tpg_np_lock);
}
void iscsit_tpg_dump_params(struct iscsi_portal_group *tpg)
{
iscsi_print_params(tpg->param_list);
}
static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
a->authentication = TA_AUTHENTICATION;
a->login_timeout = TA_LOGIN_TIMEOUT;
a->default_cmdsn_depth = TA_DEFAULT_CMDSN_DEPTH;
a->generate_node_acls = TA_GENERATE_NODE_ACLS;
a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS;
a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT;
a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY;
a->default_erl = TA_DEFAULT_ERL;
a->t10_pi = TA_DEFAULT_T10_PI;
a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE;
a->tpg_enabled_sendtargets = TA_DEFAULT_TPG_ENABLED_SENDTARGETS;
a->login_keys_workaround = TA_DEFAULT_LOGIN_KEYS_WORKAROUND;
}
int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
{
if (tpg->tpg_state != TPG_STATE_FREE) {
pr_err("Unable to add iSCSI Target Portal Group: %d"
" while not in TPG_STATE_FREE state.\n", tpg->tpgt);
return -EEXIST;
}
iscsit_set_default_tpg_attribs(tpg);
if (iscsi_create_default_params(&tpg->param_list) < 0)
goto err_out;
tpg->tpg_attrib.tpg = tpg;
spin_lock(&tpg->tpg_state_lock);
tpg->tpg_state = TPG_STATE_INACTIVE;
spin_unlock(&tpg->tpg_state_lock);
spin_lock(&tiqn->tiqn_tpg_lock);
list_add_tail(&tpg->tpg_list, &tiqn->tiqn_tpg_list);
tiqn->tiqn_ntpgs++;
pr_debug("CORE[%s]_TPG[%hu] - Added iSCSI Target Portal Group\n",
tiqn->tiqn, tpg->tpgt);
spin_unlock(&tiqn->tiqn_tpg_lock);
return 0;
err_out:
if (tpg->param_list) {
iscsi_release_param_list(tpg->param_list);
tpg->param_list = NULL;
}
return -ENOMEM;
}
int iscsit_tpg_del_portal_group(
struct iscsi_tiqn *tiqn,
struct iscsi_portal_group *tpg,
int force)
{
u8 old_state = tpg->tpg_state;
spin_lock(&tpg->tpg_state_lock);
tpg->tpg_state = TPG_STATE_INACTIVE;
spin_unlock(&tpg->tpg_state_lock);
if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
pr_err("Unable to delete iSCSI Target Portal Group:"
" %hu while active sessions exist, and force=0\n",
tpg->tpgt);
tpg->tpg_state = old_state;
return -EPERM;
}
if (tpg->param_list) {
iscsi_release_param_list(tpg->param_list);
tpg->param_list = NULL;
}
core_tpg_deregister(&tpg->tpg_se_tpg);
spin_lock(&tpg->tpg_state_lock);
tpg->tpg_state = TPG_STATE_FREE;
spin_unlock(&tpg->tpg_state_lock);
spin_lock(&tiqn->tiqn_tpg_lock);
tiqn->tiqn_ntpgs--;
list_del(&tpg->tpg_list);
spin_unlock(&tiqn->tiqn_tpg_lock);
pr_debug("CORE[%s]_TPG[%hu] - Deleted iSCSI Target Portal Group\n",
tiqn->tiqn, tpg->tpgt);
kfree(tpg);
return 0;
}
int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
{
struct iscsi_param *param;
struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
int ret;
if (tpg->tpg_state == TPG_STATE_ACTIVE) {
pr_err("iSCSI target portal group: %hu is already"
" active, ignoring request.\n", tpg->tpgt);
return -EINVAL;
}
/*
* Make sure that AuthMethod does not contain None as an option
* unless explicitly disabled. Set the default to CHAP if authentication
* is enforced (the default), and remove the NONE option.
*/
param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
if (!param)
return -EINVAL;
if (tpg->tpg_attrib.authentication) {
if (!strcmp(param->value, NONE)) {
ret = iscsi_update_param_value(param, CHAP);
if (ret)
goto err;
}
ret = iscsit_ta_authentication(tpg, 1);
if (ret < 0)
goto err;
}
spin_lock(&tpg->tpg_state_lock);
tpg->tpg_state = TPG_STATE_ACTIVE;
spin_unlock(&tpg->tpg_state_lock);
spin_lock(&tiqn->tiqn_tpg_lock);
tiqn->tiqn_active_tpgs++;
pr_debug("iSCSI_TPG[%hu] - Enabled iSCSI Target Portal Group\n",
tpg->tpgt);
spin_unlock(&tiqn->tiqn_tpg_lock);
return 0;
err:
return ret;
}
int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force)
{
struct iscsi_tiqn *tiqn;
u8 old_state = tpg->tpg_state;
spin_lock(&tpg->tpg_state_lock);
if (tpg->tpg_state == TPG_STATE_INACTIVE) {
pr_err("iSCSI Target Portal Group: %hu is already"
" inactive, ignoring request.\n", tpg->tpgt);
spin_unlock(&tpg->tpg_state_lock);
return -EINVAL;
}
tpg->tpg_state = TPG_STATE_INACTIVE;
spin_unlock(&tpg->tpg_state_lock);
iscsit_clear_tpg_np_login_threads(tpg, false);
if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
spin_lock(&tpg->tpg_state_lock);
tpg->tpg_state = old_state;
spin_unlock(&tpg->tpg_state_lock);
pr_err("Unable to disable iSCSI Target Portal Group:"
" %hu while active sessions exist, and force=0\n",
tpg->tpgt);
return -EPERM;
}
tiqn = tpg->tpg_tiqn;
if (!tiqn || (tpg == iscsit_global->discovery_tpg))
return 0;
spin_lock(&tiqn->tiqn_tpg_lock);
tiqn->tiqn_active_tpgs--;
pr_debug("iSCSI_TPG[%hu] - Disabled iSCSI Target Portal Group\n",
tpg->tpgt);
spin_unlock(&tiqn->tiqn_tpg_lock);
return 0;
}
struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(
struct iscsit_session *sess)
{
struct se_session *se_sess = sess->se_sess;
struct se_node_acl *se_nacl = se_sess->se_node_acl;
struct iscsi_node_acl *acl = to_iscsi_nacl(se_nacl);
return &acl->node_attrib;
}
struct iscsi_tpg_np *iscsit_tpg_locate_child_np(
struct iscsi_tpg_np *tpg_np,
int network_transport)
{
struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp;
spin_lock(&tpg_np->tpg_np_parent_lock);
list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp,
&tpg_np->tpg_np_parent_list, tpg_np_child_list) {
if (tpg_np_child->tpg_np->np_network_transport ==
network_transport) {
spin_unlock(&tpg_np->tpg_np_parent_lock);
return tpg_np_child;
}
}
spin_unlock(&tpg_np->tpg_np_parent_lock);
return NULL;
}
static bool iscsit_tpg_check_network_portal(
struct iscsi_tiqn *tiqn,
struct sockaddr_storage *sockaddr,
int network_transport)
{
struct iscsi_portal_group *tpg;
struct iscsi_tpg_np *tpg_np;
struct iscsi_np *np;
bool match = false;
spin_lock(&tiqn->tiqn_tpg_lock);
list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
spin_lock(&tpg->tpg_np_lock);
list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
np = tpg_np->tpg_np;
match = iscsit_check_np_match(sockaddr, np,
network_transport);
if (match)
break;
}
spin_unlock(&tpg->tpg_np_lock);
if (match)
break;
}
spin_unlock(&tiqn->tiqn_tpg_lock);
return match;
}
struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
struct iscsi_portal_group *tpg,
struct sockaddr_storage *sockaddr,
struct iscsi_tpg_np *tpg_np_parent,
int network_transport)
{
struct iscsi_np *np;
struct iscsi_tpg_np *tpg_np;
if (!tpg_np_parent) {
if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr,
network_transport)) {
pr_err("Network Portal: %pISc already exists on a"
" different TPG on %s\n", sockaddr,
tpg->tpg_tiqn->tiqn);
return ERR_PTR(-EEXIST);
}
}
tpg_np = kzalloc(sizeof(struct iscsi_tpg_np), GFP_KERNEL);
if (!tpg_np) {
pr_err("Unable to allocate memory for"
" struct iscsi_tpg_np.\n");
return ERR_PTR(-ENOMEM);
}
np = iscsit_add_np(sockaddr, network_transport);
if (IS_ERR(np)) {
kfree(tpg_np);
return ERR_CAST(np);
}
INIT_LIST_HEAD(&tpg_np->tpg_np_list);
INIT_LIST_HEAD(&tpg_np->tpg_np_child_list);
INIT_LIST_HEAD(&tpg_np->tpg_np_parent_list);
spin_lock_init(&tpg_np->tpg_np_parent_lock);
init_completion(&tpg_np->tpg_np_comp);
kref_init(&tpg_np->tpg_np_kref);
tpg_np->tpg_np = np;
tpg_np->tpg = tpg;
spin_lock(&tpg->tpg_np_lock);
list_add_tail(&tpg_np->tpg_np_list, &tpg->tpg_gnp_list);
tpg->num_tpg_nps++;
if (tpg->tpg_tiqn)
tpg->tpg_tiqn->tiqn_num_tpg_nps++;
spin_unlock(&tpg->tpg_np_lock);
if (tpg_np_parent) {
tpg_np->tpg_np_parent = tpg_np_parent;
spin_lock(&tpg_np_parent->tpg_np_parent_lock);
list_add_tail(&tpg_np->tpg_np_child_list,
&tpg_np_parent->tpg_np_parent_list);
spin_unlock(&tpg_np_parent->tpg_np_parent_lock);
}
pr_debug("CORE[%s] - Added Network Portal: %pISpc,%hu on %s\n",
tpg->tpg_tiqn->tiqn, &np->np_sockaddr, tpg->tpgt,
np->np_transport->name);
return tpg_np;
}
static int iscsit_tpg_release_np(
struct iscsi_tpg_np *tpg_np,
struct iscsi_portal_group *tpg,
struct iscsi_np *np)
{
iscsit_clear_tpg_np_login_thread(tpg_np, tpg, true);
pr_debug("CORE[%s] - Removed Network Portal: %pISpc,%hu on %s\n",
tpg->tpg_tiqn->tiqn, &np->np_sockaddr, tpg->tpgt,
np->np_transport->name);
tpg_np->tpg_np = NULL;
tpg_np->tpg = NULL;
kfree(tpg_np);
/*
* iscsit_del_np() will shutdown struct iscsi_np when last TPG reference is released.
*/
return iscsit_del_np(np);
}
int iscsit_tpg_del_network_portal(
struct iscsi_portal_group *tpg,
struct iscsi_tpg_np *tpg_np)
{
struct iscsi_np *np;
struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp;
int ret = 0;
np = tpg_np->tpg_np;
if (!np) {
pr_err("Unable to locate struct iscsi_np from"
" struct iscsi_tpg_np\n");
return -EINVAL;
}
if (!tpg_np->tpg_np_parent) {
/*
* We are the parent tpg network portal. Release all of the
* child tpg_np's (e.g. the non-ISCSI_TCP ones) on our parent
* list first.
*/
list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp,
&tpg_np->tpg_np_parent_list,
tpg_np_child_list) {
ret = iscsit_tpg_del_network_portal(tpg, tpg_np_child);
if (ret < 0)
pr_err("iscsit_tpg_del_network_portal()"
" failed: %d\n", ret);
}
} else {
/*
* We are not the parent ISCSI_TCP tpg network portal. Release
* our own network portals from the child list.
*/
spin_lock(&tpg_np->tpg_np_parent->tpg_np_parent_lock);
list_del(&tpg_np->tpg_np_child_list);
spin_unlock(&tpg_np->tpg_np_parent->tpg_np_parent_lock);
}
spin_lock(&tpg->tpg_np_lock);
list_del(&tpg_np->tpg_np_list);
tpg->num_tpg_nps--;
if (tpg->tpg_tiqn)
tpg->tpg_tiqn->tiqn_num_tpg_nps--;
spin_unlock(&tpg->tpg_np_lock);
return iscsit_tpg_release_np(tpg_np, tpg, np);
}
int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
{
unsigned char buf1[256], buf2[256], *none = NULL;
int len;
struct iscsi_param *param;
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if ((authentication != 1) && (authentication != 0)) {
pr_err("Illegal value for authentication parameter:"
" %u, ignoring request.\n", authentication);
return -EINVAL;
}
memset(buf1, 0, sizeof(buf1));
memset(buf2, 0, sizeof(buf2));
param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
if (!param)
return -EINVAL;
if (authentication) {
snprintf(buf1, sizeof(buf1), "%s", param->value);
none = strstr(buf1, NONE);
if (!none)
goto out;
if (!strncmp(none + 4, ",", 1)) {
if (!strcmp(buf1, none))
sprintf(buf2, "%s", none+5);
else {
none--;
*none = '\0';
len = sprintf(buf2, "%s", buf1);
none += 5;
sprintf(buf2 + len, "%s", none);
}
} else {
none--;
*none = '\0';
sprintf(buf2, "%s", buf1);
}
if (iscsi_update_param_value(param, buf2) < 0)
return -EINVAL;
} else {
snprintf(buf1, sizeof(buf1), "%s", param->value);
none = strstr(buf1, NONE);
if (none)
goto out;
strlcat(buf1, "," NONE, sizeof(buf1));
if (iscsi_update_param_value(param, buf1) < 0)
return -EINVAL;
}
out:
a->authentication = authentication;
pr_debug("%s iSCSI Authentication Methods for TPG: %hu.\n",
a->authentication ? "Enforcing" : "Disabling", tpg->tpgt);
return 0;
}
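/*
 * Worked examples of the rewrite above (NONE expands to "None"):
 *
 *   authentication=1:  "CHAP,None" -> "CHAP"
 *                      "None,CHAP" -> "CHAP"
 *   authentication=0:  "CHAP"      -> "CHAP,None"
 *
 * i.e. enforcing authentication strips the None token from the
 * AuthMethod value list, and disabling it appends the token if missing.
 */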
int iscsit_ta_login_timeout(
struct iscsi_portal_group *tpg,
u32 login_timeout)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if (login_timeout > TA_LOGIN_TIMEOUT_MAX) {
pr_err("Requested Login Timeout %u larger than maximum"
" %u\n", login_timeout, TA_LOGIN_TIMEOUT_MAX);
return -EINVAL;
} else if (login_timeout < TA_LOGIN_TIMEOUT_MIN) {
pr_err("Requested Logout Timeout %u smaller than"
" minimum %u\n", login_timeout, TA_LOGIN_TIMEOUT_MIN);
return -EINVAL;
}
a->login_timeout = login_timeout;
pr_debug("Set Logout Timeout to %u for Target Portal Group"
" %hu\n", a->login_timeout, tpg->tpgt);
return 0;
}
int iscsit_ta_generate_node_acls(
struct iscsi_portal_group *tpg,
u32 flag)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if ((flag != 0) && (flag != 1)) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
a->generate_node_acls = flag;
pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n",
tpg->tpgt, (a->generate_node_acls) ? "Enabled" : "Disabled");
if (flag == 1 && a->cache_dynamic_acls == 0) {
pr_debug("Explicitly setting cache_dynamic_acls=1 when "
"generate_node_acls=1\n");
a->cache_dynamic_acls = 1;
}
return 0;
}
int iscsit_ta_default_cmdsn_depth(
struct iscsi_portal_group *tpg,
u32 tcq_depth)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if (tcq_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
pr_err("Requested Default Queue Depth: %u larger"
" than maximum %u\n", tcq_depth,
TA_DEFAULT_CMDSN_DEPTH_MAX);
return -EINVAL;
} else if (tcq_depth < TA_DEFAULT_CMDSN_DEPTH_MIN) {
pr_err("Requested Default Queue Depth: %u smaller"
" than minimum %u\n", tcq_depth,
TA_DEFAULT_CMDSN_DEPTH_MIN);
return -EINVAL;
}
a->default_cmdsn_depth = tcq_depth;
pr_debug("iSCSI_TPG[%hu] - Set Default CmdSN TCQ Depth to %u\n",
tpg->tpgt, a->default_cmdsn_depth);
return 0;
}
int iscsit_ta_cache_dynamic_acls(
struct iscsi_portal_group *tpg,
u32 flag)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if ((flag != 0) && (flag != 1)) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
if (a->generate_node_acls == 1 && flag == 0) {
pr_debug("Skipping cache_dynamic_acls=0 when"
" generate_node_acls=1\n");
return 0;
}
a->cache_dynamic_acls = flag;
pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group"
" ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ?
"Enabled" : "Disabled");
return 0;
}
int iscsit_ta_demo_mode_write_protect(
struct iscsi_portal_group *tpg,
u32 flag)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if ((flag != 0) && (flag != 1)) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
a->demo_mode_write_protect = flag;
pr_debug("iSCSI_TPG[%hu] - Demo Mode Write Protect bit: %s\n",
tpg->tpgt, (a->demo_mode_write_protect) ? "ON" : "OFF");
return 0;
}
int iscsit_ta_prod_mode_write_protect(
struct iscsi_portal_group *tpg,
u32 flag)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if ((flag != 0) && (flag != 1)) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
a->prod_mode_write_protect = flag;
pr_debug("iSCSI_TPG[%hu] - Production Mode Write Protect bit:"
" %s\n", tpg->tpgt, (a->prod_mode_write_protect) ?
"ON" : "OFF");
return 0;
}
int iscsit_ta_demo_mode_discovery(
struct iscsi_portal_group *tpg,
u32 flag)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if ((flag != 0) && (flag != 1)) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
a->demo_mode_discovery = flag;
pr_debug("iSCSI_TPG[%hu] - Demo Mode Discovery bit:"
" %s\n", tpg->tpgt, (a->demo_mode_discovery) ?
"ON" : "OFF");
return 0;
}
int iscsit_ta_default_erl(
struct iscsi_portal_group *tpg,
u32 default_erl)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if ((default_erl != 0) && (default_erl != 1) && (default_erl != 2)) {
pr_err("Illegal value for default_erl: %u\n", default_erl);
return -EINVAL;
}
a->default_erl = default_erl;
pr_debug("iSCSI_TPG[%hu] - DefaultERL: %u\n", tpg->tpgt, a->default_erl);
return 0;
}
int iscsit_ta_t10_pi(
struct iscsi_portal_group *tpg,
u32 flag)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if ((flag != 0) && (flag != 1)) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
a->t10_pi = flag;
pr_debug("iSCSI_TPG[%hu] - T10 Protection information bit:"
" %s\n", tpg->tpgt, (a->t10_pi) ?
"ON" : "OFF");
return 0;
}
int iscsit_ta_fabric_prot_type(
struct iscsi_portal_group *tpg,
u32 prot_type)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if ((prot_type != 0) && (prot_type != 1) && (prot_type != 3)) {
pr_err("Illegal value for fabric_prot_type: %u\n", prot_type);
return -EINVAL;
}
a->fabric_prot_type = prot_type;
pr_debug("iSCSI_TPG[%hu] - T10 Fabric Protection Type: %u\n",
tpg->tpgt, prot_type);
return 0;
}
int iscsit_ta_tpg_enabled_sendtargets(
struct iscsi_portal_group *tpg,
u32 flag)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if ((flag != 0) && (flag != 1)) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
a->tpg_enabled_sendtargets = flag;
pr_debug("iSCSI_TPG[%hu] - TPG enabled bit required for SendTargets:"
" %s\n", tpg->tpgt, (a->tpg_enabled_sendtargets) ? "ON" : "OFF");
return 0;
}
int iscsit_ta_login_keys_workaround(
struct iscsi_portal_group *tpg,
u32 flag)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if ((flag != 0) && (flag != 1)) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
a->login_keys_workaround = flag;
pr_debug("iSCSI_TPG[%hu] - TPG enabled bit for login keys workaround: %s ",
tpg->tpgt, (a->login_keys_workaround) ? "ON" : "OFF");
return 0;
}
|
linux-master
|
drivers/target/iscsi/iscsi_target_tpg.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains main functions related to the iSCSI Target Core Driver.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <crypto/hash.h>
#include <linux/string.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/idr.h>
#include <linux/delay.h>
#include <linux/sched/signal.h>
#include <asm/unaligned.h>
#include <linux/inet.h>
#include <net/ipv6.h>
#include <scsi/scsi_proto.h>
#include <scsi/iscsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_login.h"
#include "iscsi_target_tmr.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
#include "iscsi_target_device.h"
#include <target/iscsi/iscsi_target_stat.h>
#include <target/iscsi/iscsi_transport.h>
static LIST_HEAD(g_tiqn_list);
static LIST_HEAD(g_np_list);
static DEFINE_SPINLOCK(tiqn_lock);
static DEFINE_MUTEX(np_lock);
static struct idr tiqn_idr;
DEFINE_IDA(sess_ida);
struct mutex auth_id_lock;
struct iscsit_global *iscsit_global;
struct kmem_cache *lio_qr_cache;
struct kmem_cache *lio_dr_cache;
struct kmem_cache *lio_ooo_cache;
struct kmem_cache *lio_r2t_cache;
static int iscsit_handle_immediate_data(struct iscsit_cmd *,
struct iscsi_scsi_req *, u32);
struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
{
struct iscsi_tiqn *tiqn = NULL;
spin_lock(&tiqn_lock);
list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
if (!strcmp(tiqn->tiqn, buf)) {
spin_lock(&tiqn->tiqn_state_lock);
if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
tiqn->tiqn_access_count++;
spin_unlock(&tiqn->tiqn_state_lock);
spin_unlock(&tiqn_lock);
return tiqn;
}
spin_unlock(&tiqn->tiqn_state_lock);
}
}
spin_unlock(&tiqn_lock);
return NULL;
}
static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
{
spin_lock(&tiqn->tiqn_state_lock);
if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
spin_unlock(&tiqn->tiqn_state_lock);
return 0;
}
spin_unlock(&tiqn->tiqn_state_lock);
return -1;
}
void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
{
spin_lock(&tiqn->tiqn_state_lock);
tiqn->tiqn_access_count--;
spin_unlock(&tiqn->tiqn_state_lock);
}
/*
* Note that IQN formatting is expected to be done in userspace, and
* no explicit IQN format checks are done here.
*/
struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
{
struct iscsi_tiqn *tiqn = NULL;
int ret;
if (strlen(buf) >= ISCSI_IQN_LEN) {
pr_err("Target IQN exceeds %d bytes\n",
ISCSI_IQN_LEN);
return ERR_PTR(-EINVAL);
}
tiqn = kzalloc(sizeof(*tiqn), GFP_KERNEL);
if (!tiqn)
return ERR_PTR(-ENOMEM);
sprintf(tiqn->tiqn, "%s", buf);
INIT_LIST_HEAD(&tiqn->tiqn_list);
INIT_LIST_HEAD(&tiqn->tiqn_tpg_list);
spin_lock_init(&tiqn->tiqn_state_lock);
spin_lock_init(&tiqn->tiqn_tpg_lock);
spin_lock_init(&tiqn->sess_err_stats.lock);
spin_lock_init(&tiqn->login_stats.lock);
spin_lock_init(&tiqn->logout_stats.lock);
tiqn->tiqn_state = TIQN_STATE_ACTIVE;
idr_preload(GFP_KERNEL);
spin_lock(&tiqn_lock);
ret = idr_alloc(&tiqn_idr, NULL, 0, 0, GFP_NOWAIT);
if (ret < 0) {
pr_err("idr_alloc() failed for tiqn->tiqn_index\n");
spin_unlock(&tiqn_lock);
idr_preload_end();
kfree(tiqn);
return ERR_PTR(ret);
}
tiqn->tiqn_index = ret;
list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
spin_unlock(&tiqn_lock);
idr_preload_end();
pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
return tiqn;
}
static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn)
{
/*
* Wait for accesses to said struct iscsi_tiqn to end.
*/
spin_lock(&tiqn->tiqn_state_lock);
while (tiqn->tiqn_access_count != 0) {
spin_unlock(&tiqn->tiqn_state_lock);
msleep(10);
spin_lock(&tiqn->tiqn_state_lock);
}
spin_unlock(&tiqn->tiqn_state_lock);
}
void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
{
/*
* iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN
* while holding tiqn->tiqn_state_lock. This means that all subsequent
* attempts to access this struct iscsi_tiqn will fail from both transport
* fabric and control code paths.
*/
if (iscsit_set_tiqn_shutdown(tiqn) < 0) {
pr_err("iscsit_set_tiqn_shutdown() failed\n");
return;
}
iscsit_wait_for_tiqn(tiqn);
spin_lock(&tiqn_lock);
list_del(&tiqn->tiqn_list);
idr_remove(&tiqn_idr, tiqn->tiqn_index);
spin_unlock(&tiqn_lock);
pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
tiqn->tiqn);
kfree(tiqn);
}
int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
{
int ret;
/*
* Determine if the network portal is accepting storage traffic.
*/
spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
spin_unlock_bh(&np->np_thread_lock);
return -1;
}
spin_unlock_bh(&np->np_thread_lock);
/*
* Determine if the portal group is accepting storage traffic.
*/
spin_lock_bh(&tpg->tpg_state_lock);
if (tpg->tpg_state != TPG_STATE_ACTIVE) {
spin_unlock_bh(&tpg->tpg_state_lock);
return -1;
}
spin_unlock_bh(&tpg->tpg_state_lock);
/*
* Here we serialize access across the TIQN+TPG Tuple.
*/
ret = down_interruptible(&tpg->np_login_sem);
if (ret != 0)
return -1;
spin_lock_bh(&tpg->tpg_state_lock);
if (tpg->tpg_state != TPG_STATE_ACTIVE) {
spin_unlock_bh(&tpg->tpg_state_lock);
up(&tpg->np_login_sem);
return -1;
}
spin_unlock_bh(&tpg->tpg_state_lock);
return 0;
}
void iscsit_login_kref_put(struct kref *kref)
{
struct iscsi_tpg_np *tpg_np = container_of(kref,
struct iscsi_tpg_np, tpg_np_kref);
complete(&tpg_np->tpg_np_comp);
}
int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg,
struct iscsi_tpg_np *tpg_np)
{
struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
up(&tpg->np_login_sem);
if (tpg_np)
kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
if (tiqn)
iscsit_put_tiqn_for_login(tiqn);
return 0;
}
bool iscsit_check_np_match(
struct sockaddr_storage *sockaddr,
struct iscsi_np *np,
int network_transport)
{
struct sockaddr_in *sock_in, *sock_in_e;
struct sockaddr_in6 *sock_in6, *sock_in6_e;
bool ip_match = false;
u16 port, port_e;
if (sockaddr->ss_family == AF_INET6) {
sock_in6 = (struct sockaddr_in6 *)sockaddr;
sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
if (!memcmp(&sock_in6->sin6_addr.in6_u,
&sock_in6_e->sin6_addr.in6_u,
sizeof(struct in6_addr)))
ip_match = true;
port = ntohs(sock_in6->sin6_port);
port_e = ntohs(sock_in6_e->sin6_port);
} else {
sock_in = (struct sockaddr_in *)sockaddr;
sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
if (sock_in->sin_addr.s_addr == sock_in_e->sin_addr.s_addr)
ip_match = true;
port = ntohs(sock_in->sin_port);
port_e = ntohs(sock_in_e->sin_port);
}
if (ip_match && (port_e == port) &&
(np->np_network_transport == network_transport))
return true;
return false;
}
static struct iscsi_np *iscsit_get_np(
struct sockaddr_storage *sockaddr,
int network_transport)
{
struct iscsi_np *np;
bool match;
lockdep_assert_held(&np_lock);
list_for_each_entry(np, &g_np_list, np_list) {
spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
spin_unlock_bh(&np->np_thread_lock);
continue;
}
match = iscsit_check_np_match(sockaddr, np, network_transport);
if (match) {
/*
* Increment the np_exports reference count now to
* prevent iscsit_del_np() below from being called
* while iscsi_tpg_add_network_portal() is called.
*/
np->np_exports++;
spin_unlock_bh(&np->np_thread_lock);
return np;
}
spin_unlock_bh(&np->np_thread_lock);
}
return NULL;
}
struct iscsi_np *iscsit_add_np(
struct sockaddr_storage *sockaddr,
int network_transport)
{
struct iscsi_np *np;
int ret;
mutex_lock(&np_lock);
/*
* Locate the existing struct iscsi_np if already active.
*/
np = iscsit_get_np(sockaddr, network_transport);
if (np) {
mutex_unlock(&np_lock);
return np;
}
np = kzalloc(sizeof(*np), GFP_KERNEL);
if (!np) {
mutex_unlock(&np_lock);
return ERR_PTR(-ENOMEM);
}
np->np_flags |= NPF_IP_NETWORK;
np->np_network_transport = network_transport;
spin_lock_init(&np->np_thread_lock);
init_completion(&np->np_restart_comp);
INIT_LIST_HEAD(&np->np_list);
ret = iscsi_target_setup_login_socket(np, sockaddr);
if (ret != 0) {
kfree(np);
mutex_unlock(&np_lock);
return ERR_PTR(ret);
}
np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np");
if (IS_ERR(np->np_thread)) {
pr_err("Unable to create kthread: iscsi_np\n");
ret = PTR_ERR(np->np_thread);
kfree(np);
mutex_unlock(&np_lock);
return ERR_PTR(ret);
}
/*
* Increment the np_exports reference count now to prevent
* iscsit_del_np() below from being run while a new call to
* iscsi_tpg_add_network_portal() for a matching iscsi_np is
* active. We don't need to hold np->np_thread_lock at this
* point because iscsi_np has not been added to g_np_list yet.
*/
np->np_exports = 1;
np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
list_add_tail(&np->np_list, &g_np_list);
mutex_unlock(&np_lock);
pr_debug("CORE[0] - Added Network Portal: %pISpc on %s\n",
&np->np_sockaddr, np->np_transport->name);
return np;
}
int iscsit_reset_np_thread(
struct iscsi_np *np,
struct iscsi_tpg_np *tpg_np,
struct iscsi_portal_group *tpg,
bool shutdown)
{
spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
spin_unlock_bh(&np->np_thread_lock);
return 0;
}
np->np_thread_state = ISCSI_NP_THREAD_RESET;
atomic_inc(&np->np_reset_count);
if (np->np_thread) {
spin_unlock_bh(&np->np_thread_lock);
send_sig(SIGINT, np->np_thread, 1);
wait_for_completion(&np->np_restart_comp);
spin_lock_bh(&np->np_thread_lock);
}
spin_unlock_bh(&np->np_thread_lock);
if (tpg_np && shutdown) {
kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
wait_for_completion(&tpg_np->tpg_np_comp);
}
return 0;
}
static void iscsit_free_np(struct iscsi_np *np)
{
if (np->np_socket)
sock_release(np->np_socket);
}
int iscsit_del_np(struct iscsi_np *np)
{
spin_lock_bh(&np->np_thread_lock);
np->np_exports--;
if (np->np_exports) {
np->enabled = true;
spin_unlock_bh(&np->np_thread_lock);
return 0;
}
np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN;
spin_unlock_bh(&np->np_thread_lock);
if (np->np_thread) {
/*
* We need to send the signal to wakeup Linux/Net
* which may be sleeping in sock_accept()..
*/
send_sig(SIGINT, np->np_thread, 1);
kthread_stop(np->np_thread);
np->np_thread = NULL;
}
np->np_transport->iscsit_free_np(np);
mutex_lock(&np_lock);
list_del(&np->np_list);
mutex_unlock(&np_lock);
pr_debug("CORE[0] - Removed Network Portal: %pISpc on %s\n",
&np->np_sockaddr, np->np_transport->name);
iscsit_put_transport(np->np_transport);
kfree(np);
return 0;
}
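/*
 * Summary of the struct iscsi_np lifecycle above: each TPG export takes
 * an np_exports reference (incremented in iscsit_get_np() for an
 * existing portal, set to 1 in iscsit_add_np() for a new one), and
 * iscsit_del_np() drops one reference, only stopping the login thread,
 * calling np->np_transport->iscsit_free_np() and freeing the np once
 * the count reaches zero.
 */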
static void iscsit_get_rx_pdu(struct iscsit_conn *);
int iscsit_queue_rsp(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
{
return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
}
EXPORT_SYMBOL(iscsit_queue_rsp);
void iscsit_aborted_task(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
{
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
__iscsit_free_cmd(cmd, true);
}
EXPORT_SYMBOL(iscsit_aborted_task);
static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
u32, u32, const void *, void *);
static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *);
static int
iscsit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
const void *data_buf, u32 data_buf_len)
{
struct iscsi_hdr *hdr = (struct iscsi_hdr *)cmd->pdu;
struct kvec *iov;
u32 niov = 0, tx_size = ISCSI_HDR_LEN;
int ret;
iov = &cmd->iov_misc[0];
iov[niov].iov_base = cmd->pdu;
iov[niov++].iov_len = ISCSI_HDR_LEN;
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
ISCSI_HDR_LEN, 0, NULL,
header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
pr_debug("Attaching CRC32C HeaderDigest"
" to opcode 0x%x 0x%08x\n",
hdr->opcode, *header_digest);
}
if (data_buf_len) {
u32 padding = ((-data_buf_len) & 3);
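/*
 * ((-len) & 3) rounds the payload up to the next 4-byte boundary:
 * e.g. data_buf_len 5 needs 3 pad bytes, data_buf_len 8 needs none.
 */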
iov[niov].iov_base = (void *)data_buf;
iov[niov++].iov_len = data_buf_len;
tx_size += data_buf_len;
if (padding != 0) {
iov[niov].iov_base = &cmd->pad_bytes;
iov[niov++].iov_len = padding;
tx_size += padding;
pr_debug("Attaching %u additional"
" padding bytes.\n", padding);
}
if (conn->conn_ops->DataDigest) {
iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
data_buf, data_buf_len,
padding, &cmd->pad_bytes,
&cmd->data_crc);
iov[niov].iov_base = &cmd->data_crc;
iov[niov++].iov_len = ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
pr_debug("Attached DataDigest for %u"
" bytes opcode 0x%x, CRC 0x%08x\n",
data_buf_len, hdr->opcode, cmd->data_crc);
}
}
cmd->iov_misc_count = niov;
cmd->tx_size = tx_size;
ret = iscsit_send_tx_data(cmd, conn, 1);
if (ret < 0) {
iscsit_tx_thread_wait_for_tcp(conn);
return ret;
}
return 0;
}
static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec,
u32 data_offset, u32 data_length);
static void iscsit_unmap_iovec(struct iscsit_cmd *);
static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsit_cmd *,
u32, u32, u32, u8 *);
static int
iscsit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
const struct iscsi_datain *datain)
{
struct kvec *iov;
u32 iov_count = 0, tx_size = 0;
int ret, iov_ret;
iov = &cmd->iov_data[0];
iov[iov_count].iov_base = cmd->pdu;
iov[iov_count++].iov_len = ISCSI_HDR_LEN;
tx_size += ISCSI_HDR_LEN;
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
ISCSI_HDR_LEN, 0, NULL,
header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
pr_debug("Attaching CRC32 HeaderDigest for DataIN PDU 0x%08x\n",
*header_digest);
}
iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[iov_count],
cmd->orig_iov_data_count - (iov_count + 2),
datain->offset, datain->length);
if (iov_ret < 0)
return -1;
iov_count += iov_ret;
tx_size += datain->length;
cmd->padding = ((-datain->length) & 3);
if (cmd->padding) {
iov[iov_count].iov_base = cmd->pad_bytes;
iov[iov_count++].iov_len = cmd->padding;
tx_size += cmd->padding;
pr_debug("Attaching %u padding bytes\n", cmd->padding);
}
if (conn->conn_ops->DataDigest) {
cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash,
cmd, datain->offset,
datain->length,
cmd->padding,
cmd->pad_bytes);
iov[iov_count].iov_base = &cmd->data_crc;
iov[iov_count++].iov_len = ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
pr_debug("Attached CRC32C DataDigest %d bytes, crc 0x%08x\n",
datain->length + cmd->padding, cmd->data_crc);
}
cmd->iov_data_count = iov_count;
cmd->tx_size = tx_size;
ret = iscsit_fe_sendpage_sg(cmd, conn);
iscsit_unmap_iovec(cmd);
if (ret < 0) {
iscsit_tx_thread_wait_for_tcp(conn);
return ret;
}
return 0;
}
static int iscsit_xmit_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
struct iscsi_datain_req *dr, const void *buf,
u32 buf_len)
{
if (dr)
return iscsit_xmit_datain_pdu(conn, cmd, buf);
else
return iscsit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
}
static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsit_conn *conn)
{
return TARGET_PROT_NORMAL;
}
static struct iscsit_transport iscsi_target_transport = {
.name = "iSCSI/TCP",
.transport_type = ISCSI_TCP,
.rdma_shutdown = false,
.owner = NULL,
.iscsit_setup_np = iscsit_setup_np,
.iscsit_accept_np = iscsit_accept_np,
.iscsit_free_np = iscsit_free_np,
.iscsit_get_login_rx = iscsit_get_login_rx,
.iscsit_put_login_tx = iscsit_put_login_tx,
.iscsit_get_dataout = iscsit_build_r2ts_for_cmd,
.iscsit_immediate_queue = iscsit_immediate_queue,
.iscsit_response_queue = iscsit_response_queue,
.iscsit_queue_data_in = iscsit_queue_rsp,
.iscsit_queue_status = iscsit_queue_rsp,
.iscsit_aborted_task = iscsit_aborted_task,
.iscsit_xmit_pdu = iscsit_xmit_pdu,
.iscsit_get_rx_pdu = iscsit_get_rx_pdu,
.iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
};
static int __init iscsi_target_init_module(void)
{
int ret = 0, size;
pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
iscsit_global = kzalloc(sizeof(*iscsit_global), GFP_KERNEL);
if (!iscsit_global)
return -1;
spin_lock_init(&iscsit_global->ts_bitmap_lock);
mutex_init(&auth_id_lock);
idr_init(&tiqn_idr);
ret = target_register_template(&iscsi_ops);
if (ret)
goto out;
size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
iscsit_global->ts_bitmap = vzalloc(size);
if (!iscsit_global->ts_bitmap)
goto configfs_out;
if (!zalloc_cpumask_var(&iscsit_global->allowed_cpumask, GFP_KERNEL)) {
pr_err("Unable to allocate iscsit_global->allowed_cpumask\n");
goto bitmap_out;
}
cpumask_setall(iscsit_global->allowed_cpumask);
lio_qr_cache = kmem_cache_create("lio_qr_cache",
sizeof(struct iscsi_queue_req),
__alignof__(struct iscsi_queue_req), 0, NULL);
if (!lio_qr_cache) {
pr_err("Unable to kmem_cache_create() for"
" lio_qr_cache\n");
goto cpumask_out;
}
lio_dr_cache = kmem_cache_create("lio_dr_cache",
sizeof(struct iscsi_datain_req),
__alignof__(struct iscsi_datain_req), 0, NULL);
if (!lio_dr_cache) {
pr_err("Unable to kmem_cache_create() for"
" lio_dr_cache\n");
goto qr_out;
}
lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
sizeof(struct iscsi_ooo_cmdsn),
__alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
if (!lio_ooo_cache) {
pr_err("Unable to kmem_cache_create() for"
" lio_ooo_cache\n");
goto dr_out;
}
lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
0, NULL);
if (!lio_r2t_cache) {
pr_err("Unable to kmem_cache_create() for"
" lio_r2t_cache\n");
goto ooo_out;
}
iscsit_register_transport(&iscsi_target_transport);
if (iscsit_load_discovery_tpg() < 0)
goto r2t_out;
return ret;
r2t_out:
iscsit_unregister_transport(&iscsi_target_transport);
kmem_cache_destroy(lio_r2t_cache);
ooo_out:
kmem_cache_destroy(lio_ooo_cache);
dr_out:
kmem_cache_destroy(lio_dr_cache);
qr_out:
kmem_cache_destroy(lio_qr_cache);
cpumask_out:
free_cpumask_var(iscsit_global->allowed_cpumask);
bitmap_out:
vfree(iscsit_global->ts_bitmap);
configfs_out:
/* XXX: this probably wants to be its own unwind step. */
if (iscsit_global->discovery_tpg)
iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
target_unregister_template(&iscsi_ops);
out:
kfree(iscsit_global);
return -ENOMEM;
}
static void __exit iscsi_target_cleanup_module(void)
{
iscsit_release_discovery_tpg();
iscsit_unregister_transport(&iscsi_target_transport);
kmem_cache_destroy(lio_qr_cache);
kmem_cache_destroy(lio_dr_cache);
kmem_cache_destroy(lio_ooo_cache);
kmem_cache_destroy(lio_r2t_cache);
/*
* Shutdown discovery sessions and disable discovery TPG
*/
if (iscsit_global->discovery_tpg)
iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
target_unregister_template(&iscsi_ops);
free_cpumask_var(iscsit_global->allowed_cpumask);
vfree(iscsit_global->ts_bitmap);
kfree(iscsit_global);
}
int iscsit_add_reject(
struct iscsit_conn *conn,
u8 reason,
unsigned char *buf)
{
struct iscsit_cmd *cmd;
cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
return -1;
cmd->iscsi_opcode = ISCSI_OP_REJECT;
cmd->reject_reason = reason;
cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
iscsit_free_cmd(cmd, false);
return -1;
}
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
cmd->i_state = ISTATE_SEND_REJECT;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
return -1;
}
EXPORT_SYMBOL(iscsit_add_reject);
static int iscsit_add_reject_from_cmd(
struct iscsit_cmd *cmd,
u8 reason,
bool add_to_conn,
unsigned char *buf)
{
struct iscsit_conn *conn;
const bool do_put = cmd->se_cmd.se_tfo != NULL;
if (!cmd->conn) {
pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
cmd->init_task_tag);
return -1;
}
conn = cmd->conn;
cmd->iscsi_opcode = ISCSI_OP_REJECT;
cmd->reject_reason = reason;
cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
iscsit_free_cmd(cmd, false);
return -1;
}
if (add_to_conn) {
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
}
cmd->i_state = ISTATE_SEND_REJECT;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
/*
* Perform the kref_put now if se_cmd has already been setup by
* iscsit_setup_scsi_cmd()
*/
if (do_put) {
pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
target_put_sess_cmd(&cmd->se_cmd);
}
return -1;
}
static int iscsit_add_reject_cmd(struct iscsit_cmd *cmd, u8 reason,
unsigned char *buf)
{
return iscsit_add_reject_from_cmd(cmd, reason, true, buf);
}
int iscsit_reject_cmd(struct iscsit_cmd *cmd, u8 reason, unsigned char *buf)
{
return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
}
EXPORT_SYMBOL(iscsit_reject_cmd);
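/*
 * Note that the reject helpers above return -1 even after successfully
 * queueing the Reject, so PDU handlers can signal the error and queue
 * the response in a single "return iscsit_add_reject_cmd(...)" call.
 */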
/*
* Map some portion of the allocated scatterlist to an iovec, suitable for
* kernel sockets to copy data in/out.
*/
static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec,
u32 data_offset, u32 data_length)
{
u32 i = 0, orig_data_length = data_length;
struct scatterlist *sg;
unsigned int page_off;
/*
* We know each entry in t_data_sg contains a page.
*/
u32 ent = data_offset / PAGE_SIZE;
if (!data_length)
return 0;
if (ent >= cmd->se_cmd.t_data_nents) {
pr_err("Initial page entry out-of-bounds\n");
goto overflow;
}
sg = &cmd->se_cmd.t_data_sg[ent];
page_off = (data_offset % PAGE_SIZE);
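/*
 * Worked example: with 4 KiB pages a data_offset of 10240 selects
 * t_data_sg entry 2 (10240 / 4096) with page_off 2048 (10240 % 4096),
 * i.e. the first iovec starts half way into the third data page.
 */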
cmd->first_data_sg = sg;
cmd->first_data_sg_off = page_off;
while (data_length) {
u32 cur_len;
if (WARN_ON_ONCE(!sg || i >= nvec))
goto overflow;
cur_len = min_t(u32, data_length, sg->length - page_off);
iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
iov[i].iov_len = cur_len;
data_length -= cur_len;
page_off = 0;
sg = sg_next(sg);
i++;
}
cmd->kmapped_nents = i;
return i;
overflow:
pr_err("offset %d + length %d overflow; %d/%d; sg-list:\n",
data_offset, orig_data_length, i, nvec);
for_each_sg(cmd->se_cmd.t_data_sg, sg,
cmd->se_cmd.t_data_nents, i) {
pr_err("[%d] off %d len %d\n",
i, sg->offset, sg->length);
}
return -1;
}
static void iscsit_unmap_iovec(struct iscsit_cmd *cmd)
{
u32 i;
struct scatterlist *sg;
sg = cmd->first_data_sg;
for (i = 0; i < cmd->kmapped_nents; i++)
kunmap(sg_page(&sg[i]));
}
static void iscsit_ack_from_expstatsn(struct iscsit_conn *conn, u32 exp_statsn)
{
LIST_HEAD(ack_list);
struct iscsit_cmd *cmd, *cmd_p;
conn->exp_statsn = exp_statsn;
if (conn->sess->sess_ops->RDMAExtensions)
return;
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
spin_lock(&cmd->istate_lock);
if ((cmd->i_state == ISTATE_SENT_STATUS) &&
iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
cmd->i_state = ISTATE_REMOVE;
spin_unlock(&cmd->istate_lock);
list_move_tail(&cmd->i_conn_node, &ack_list);
continue;
}
spin_unlock(&cmd->istate_lock);
}
spin_unlock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
list_del_init(&cmd->i_conn_node);
iscsit_free_cmd(cmd, false);
}
}
static int iscsit_allocate_iovecs(struct iscsit_cmd *cmd)
{
u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE));
iov_count += ISCSI_IOV_DATA_BUFFER;
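/*
 * e.g. a 10000-byte data_length with 4 KiB pages yields 3 data iovecs
 * (DIV_ROUND_UP), plus ISCSI_IOV_DATA_BUFFER spare slots for the PDU
 * header, digests and pad bytes attached by the transmit paths above.
 */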
cmd->iov_data = kcalloc(iov_count, sizeof(*cmd->iov_data), GFP_KERNEL);
if (!cmd->iov_data)
return -ENOMEM;
cmd->orig_iov_data_count = iov_count;
return 0;
}
int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
unsigned char *buf)
{
int data_direction, payload_length;
struct iscsi_ecdb_ahdr *ecdb_ahdr;
struct iscsi_scsi_req *hdr;
int iscsi_task_attr;
unsigned char *cdb;
int sam_task_attr;
atomic_long_inc(&conn->sess->cmd_pdus);
hdr = (struct iscsi_scsi_req *) buf;
payload_length = ntoh24(hdr->dlength);
/* FIXME; Add checks for AdditionalHeaderSegment */
if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) &&
!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
" not set. Bad iSCSI Initiator.\n");
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_INVALID, buf);
}
if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
(hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
/*
* From RFC-3720 Section 10.3.1:
*
* "Either or both of R and W MAY be 1 when either the
* Expected Data Transfer Length and/or Bidirectional Read
* Expected Data Transfer Length are 0"
*
* For this case, go ahead and clear the unnecessary bits
* to avoid any confusion with ->data_direction.
*/
hdr->flags &= ~ISCSI_FLAG_CMD_READ;
hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
" set when Expected Data Transfer Length is 0 for"
" CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
}
if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
!(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
" MUST be set if Expected Data Transfer Length is not 0."
" Bad iSCSI Initiator\n");
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_INVALID, buf);
}
if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
(hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
pr_err("Bidirectional operations not supported!\n");
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_INVALID, buf);
}
if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
pr_err("Illegally set Immediate Bit in iSCSI Initiator"
" Scsi Command PDU.\n");
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_INVALID, buf);
}
if (payload_length && !conn->sess->sess_ops->ImmediateData) {
pr_err("ImmediateData=No but DataSegmentLength=%u,"
" protocol error.\n", payload_length);
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_PROTOCOL_ERROR, buf);
}
if ((be32_to_cpu(hdr->data_length) == payload_length) &&
(!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
pr_err("Expected Data Transfer Length and Length of"
" Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
" bit is not set protocol error\n");
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_PROTOCOL_ERROR, buf);
}
if (payload_length > be32_to_cpu(hdr->data_length)) {
pr_err("DataSegmentLength: %u is greater than"
" EDTL: %u, protocol error.\n", payload_length,
hdr->data_length);
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_PROTOCOL_ERROR, buf);
}
if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
pr_err("DataSegmentLength: %u is greater than"
" MaxXmitDataSegmentLength: %u, protocol error.\n",
payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_PROTOCOL_ERROR, buf);
}
if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
pr_err("DataSegmentLength: %u is greater than"
" FirstBurstLength: %u, protocol error.\n",
payload_length, conn->sess->sess_ops->FirstBurstLength);
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_INVALID, buf);
}
cdb = hdr->cdb;
if (hdr->hlength) {
ecdb_ahdr = (struct iscsi_ecdb_ahdr *) (hdr + 1);
if (ecdb_ahdr->ahstype != ISCSI_AHSTYPE_CDB) {
pr_err("Additional Header Segment type %d not supported!\n",
ecdb_ahdr->ahstype);
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_CMD_NOT_SUPPORTED, buf);
}
cdb = kmalloc(be16_to_cpu(ecdb_ahdr->ahslength) + 15,
GFP_KERNEL);
if (cdb == NULL)
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
memcpy(cdb, hdr->cdb, ISCSI_CDB_SIZE);
memcpy(cdb + ISCSI_CDB_SIZE, ecdb_ahdr->ecdb,
be16_to_cpu(ecdb_ahdr->ahslength) - 1);
}
data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
(hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
DMA_NONE;
cmd->data_direction = data_direction;
iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK;
/*
* Figure out the SAM Task Attribute for the incoming SCSI CDB
*/
if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
(iscsi_task_attr == ISCSI_ATTR_SIMPLE))
sam_task_attr = TCM_SIMPLE_TAG;
else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
sam_task_attr = TCM_ORDERED_TAG;
else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
sam_task_attr = TCM_HEAD_TAG;
else if (iscsi_task_attr == ISCSI_ATTR_ACA)
sam_task_attr = TCM_ACA_TAG;
else {
pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
" TCM_SIMPLE_TAG\n", iscsi_task_attr);
sam_task_attr = TCM_SIMPLE_TAG;
}
cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD;
cmd->i_state = ISTATE_NEW_CMD;
cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
cmd->immediate_data = (payload_length) ? 1 : 0;
cmd->unsolicited_data = ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) &&
(hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 1 : 0);
if (cmd->unsolicited_data)
cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
if (hdr->flags & ISCSI_FLAG_CMD_READ)
cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
else
cmd->targ_xfer_tag = 0xFFFFFFFF;
cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
cmd->first_burst_len = payload_length;
if (!conn->sess->sess_ops->RDMAExtensions &&
cmd->data_direction == DMA_FROM_DEVICE) {
struct iscsi_datain_req *dr;
dr = iscsit_allocate_datain_req();
if (!dr) {
if (cdb != hdr->cdb)
kfree(cdb);
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
}
iscsit_attach_datain_req(cmd, dr);
}
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
__target_init_cmd(&cmd->se_cmd, &iscsi_ops,
conn->sess->se_sess, be32_to_cpu(hdr->data_length),
cmd->data_direction, sam_task_attr,
cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun),
conn->cmd_cnt);
pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
conn->cid);
target_get_sess_cmd(&cmd->se_cmd, true);
cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, cdb,
GFP_KERNEL);
if (cdb != hdr->cdb)
kfree(cdb);
if (cmd->sense_reason) {
if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
}
goto attach_cmd;
}
cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd);
if (cmd->sense_reason)
goto attach_cmd;
cmd->sense_reason = target_cmd_parse_cdb(&cmd->se_cmd);
if (cmd->sense_reason)
goto attach_cmd;
if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
}
attach_cmd:
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
/*
* Check if we need to delay processing because of ALUA
* Active/NonOptimized primary access state..
*/
core_alua_check_nonop_delay(&cmd->se_cmd);
return 0;
}
EXPORT_SYMBOL(iscsit_setup_scsi_cmd);
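/*
 * Prepare to receive unsolicited Data-Out for a WRITE: set up the
 * Data-Out sequence values and start the Data-Out timer.
 */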
void iscsit_set_unsolicited_dataout(struct iscsit_cmd *cmd)
{
iscsit_set_dataout_sequence_values(cmd);
spin_lock_bh(&cmd->dataout_timeout_lock);
iscsit_start_dataout_timer(cmd, cmd->conn);
spin_unlock_bh(&cmd->dataout_timeout_lock);
}
EXPORT_SYMBOL(iscsit_set_unsolicited_dataout);
int iscsit_process_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
struct iscsi_scsi_req *hdr)
{
int cmdsn_ret = 0;
/*
* Check the CmdSN against ExpCmdSN/MaxCmdSN here if
* the Immediate Bit is not set, and no Immediate
* Data is attached.
*
* A PDU/CmdSN carrying Immediate Data can only
* be processed after the DataCRC has passed.
* If the DataCRC fails, the CmdSN MUST NOT
* be acknowledged. (See below)
*/
if (!cmd->immediate_data) {
cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
(unsigned char *)hdr, hdr->cmdsn);
if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
return -1;
else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
target_put_sess_cmd(&cmd->se_cmd);
return 0;
}
}
iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
/*
* If no Immediate Data is attached, it's OK to return now.
*/
if (!cmd->immediate_data) {
if (!cmd->sense_reason && cmd->unsolicited_data)
iscsit_set_unsolicited_dataout(cmd);
if (!cmd->sense_reason)
return 0;
target_put_sess_cmd(&cmd->se_cmd);
return 0;
}
/*
* Early CHECK_CONDITIONs with ImmediateData never make it to command
* execution. These exceptions are processed in CmdSN order using
* iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below.
*/
if (cmd->sense_reason)
return 1;
/*
* Call directly into transport_generic_new_cmd() to perform
* the backend memory allocation.
*/
cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
if (cmd->sense_reason)
return 1;
return 0;
}
EXPORT_SYMBOL(iscsit_process_scsi_cmd);
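/*
 * Receive (or, for unsupported WRITE opcodes, dump) the Immediate Data that
 * follows a SCSI Command PDU, then perform the deferred CmdSN check once the
 * DataCRC has passed.  Returns 0 on success and -1 if the connection cannot
 * be recovered.
 */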
static int
iscsit_get_immediate_data(struct iscsit_cmd *cmd, struct iscsi_scsi_req *hdr,
bool dump_payload)
{
int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
int rc;
/*
* Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
*/
if (dump_payload) {
u32 length = min(cmd->se_cmd.data_length - cmd->write_data_done,
cmd->first_burst_len);
pr_debug("Dumping min(%d - %d, %d) = %d bytes of immediate data\n",
cmd->se_cmd.data_length, cmd->write_data_done,
cmd->first_burst_len, length);
rc = iscsit_dump_data_payload(cmd->conn, length, 1);
pr_debug("Finished dumping immediate data\n");
if (rc < 0)
immed_ret = IMMEDIATE_DATA_CANNOT_RECOVER;
} else {
immed_ret = iscsit_handle_immediate_data(cmd, hdr,
cmd->first_burst_len);
}
if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
/*
* A PDU/CmdSN carrying Immediate Data passed
* DataCRC, check against ExpCmdSN/MaxCmdSN if
* Immediate Bit is not set.
*/
cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd,
(unsigned char *)hdr, hdr->cmdsn);
if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
return -1;
if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
target_put_sess_cmd(&cmd->se_cmd);
return 0;
} else if (cmd->unsolicited_data)
iscsit_set_unsolicited_dataout(cmd);
} else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
/*
* Immediate Data failed DataCRC and ERL>=1,
* silently drop this PDU and let the initiator
* plug the CmdSN gap.
*
* FIXME: Send Unsolicited NOPIN with reserved
* TTT here to help the initiator figure out
* the missing CmdSN, although they should be
* intelligent enough to determine the missing
* CmdSN and issue a retry to plug the sequence.
*/
cmd->i_state = ISTATE_REMOVE;
iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, cmd->i_state);
} else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
return -1;
return 0;
}
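/*
 * RX path entry point for SCSI Command PDUs over traditional iSCSI/TCP:
 * set up the command, allocate socket iovecs, run the common processing,
 * and receive any attached Immediate Data.
 */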
static int
iscsit_handle_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
unsigned char *buf)
{
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
int rc, immed_data;
bool dump_payload = false;
rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
if (rc < 0)
return 0;
	/*
	 * Allocate the iovecs needed for struct socket operations for
	 * traditional iSCSI block I/O.
	 */
if (iscsit_allocate_iovecs(cmd) < 0) {
return iscsit_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
}
immed_data = cmd->immediate_data;
rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
if (rc < 0)
return rc;
else if (rc > 0)
dump_payload = true;
if (!immed_data)
return 0;
return iscsit_get_immediate_data(cmd, hdr, dump_payload);
}
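/*
 * Compute the CRC32C DataDigest across the command's data scatterlist,
 * starting at first_data_sg/first_data_sg_off and covering data_length
 * bytes plus any pad bytes, using the connection's ahash request.
 */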
static u32 iscsit_do_crypto_hash_sg(
struct ahash_request *hash,
struct iscsit_cmd *cmd,
u32 data_offset,
u32 data_length,
u32 padding,
u8 *pad_bytes)
{
u32 data_crc;
struct scatterlist *sg;
unsigned int page_off;
crypto_ahash_init(hash);
sg = cmd->first_data_sg;
page_off = cmd->first_data_sg_off;
if (data_length && page_off) {
struct scatterlist first_sg;
u32 len = min_t(u32, data_length, sg->length - page_off);
sg_init_table(&first_sg, 1);
sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off);
ahash_request_set_crypt(hash, &first_sg, NULL, len);
crypto_ahash_update(hash);
data_length -= len;
sg = sg_next(sg);
}
while (data_length) {
u32 cur_len = min_t(u32, data_length, sg->length);
ahash_request_set_crypt(hash, sg, NULL, cur_len);
crypto_ahash_update(hash);
data_length -= cur_len;
/* iscsit_map_iovec has already checked for invalid sg pointers */
sg = sg_next(sg);
}
if (padding) {
struct scatterlist pad_sg;
sg_init_one(&pad_sg, pad_bytes, padding);
ahash_request_set_crypt(hash, &pad_sg, (u8 *)&data_crc,
padding);
crypto_ahash_finup(hash);
} else {
ahash_request_set_crypt(hash, NULL, (u8 *)&data_crc, 0);
crypto_ahash_final(hash);
}
return data_crc;
}
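/*
 * Compute a CRC32C digest over a linear buffer plus optional pad bytes
 * using a two-entry scatterlist and a single ahash digest operation.
 */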
static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
const void *buf, u32 payload_length, u32 padding,
const void *pad_bytes, void *data_crc)
{
struct scatterlist sg[2];
sg_init_table(sg, ARRAY_SIZE(sg));
sg_set_buf(sg, buf, payload_length);
if (padding)
sg_set_buf(sg + 1, pad_bytes, padding);
ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
crypto_ahash_digest(hash);
}
int
__iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf,
struct iscsit_cmd *cmd, u32 payload_length,
bool *success)
{
struct iscsi_data *hdr = buf;
struct se_cmd *se_cmd;
int rc;
/* iSCSI write */
atomic_long_add(payload_length, &conn->sess->rx_data_octets);
pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
hdr->itt, hdr->ttt, hdr->datasn, ntohl(hdr->offset),
payload_length, conn->cid);
if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
pr_err("Command ITT: 0x%08x received DataOUT after"
" last DataOUT received, dumping payload\n",
cmd->init_task_tag);
return iscsit_dump_data_payload(conn, payload_length, 1);
}
if (cmd->data_direction != DMA_TO_DEVICE) {
pr_err("Command ITT: 0x%08x received DataOUT for a"
" NON-WRITE command.\n", cmd->init_task_tag);
return iscsit_dump_data_payload(conn, payload_length, 1);
}
se_cmd = &cmd->se_cmd;
iscsit_mod_dataout_timer(cmd);
if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
pr_err("DataOut Offset: %u, Length %u greater than iSCSI Command EDTL %u, protocol error.\n",
be32_to_cpu(hdr->offset), payload_length,
cmd->se_cmd.data_length);
return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
}
if (cmd->unsolicited_data) {
int dump_unsolicited_data = 0;
if (conn->sess->sess_ops->InitialR2T) {
pr_err("Received unexpected unsolicited data"
" while InitialR2T=Yes, protocol error.\n");
transport_send_check_condition_and_sense(&cmd->se_cmd,
TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
return -1;
}
		/*
		 * Special case for dealing with Unsolicited DataOUT,
		 * Unsupported SAM WRITE Opcodes, and SE resource allocation
		 * failures.
		 */
/* Something's amiss if we're not in WRITE_PENDING state... */
WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING);
if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE))
dump_unsolicited_data = 1;
if (dump_unsolicited_data) {
/*
* Check if a delayed TASK_ABORTED status needs to
* be sent now if the ISCSI_FLAG_CMD_FINAL has been
* received with the unsolicited data out.
*/
if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
iscsit_stop_dataout_timer(cmd);
return iscsit_dump_data_payload(conn, payload_length, 1);
}
} else {
/*
* For the normal solicited data path:
*
* Check for a delayed TASK_ABORTED status and dump any
* incoming data out payload if one exists. Also, when the
* ISCSI_FLAG_CMD_FINAL is set to denote the end of the current
* data out sequence, we decrement outstanding_r2ts. Once
* outstanding_r2ts reaches zero, go ahead and send the delayed
* TASK_ABORTED status.
*/
if (se_cmd->transport_state & CMD_T_ABORTED) {
if (hdr->flags & ISCSI_FLAG_CMD_FINAL &&
--cmd->outstanding_r2ts < 1)
iscsit_stop_dataout_timer(cmd);
return iscsit_dump_data_payload(conn, payload_length, 1);
}
}
/*
* Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and
* within-command recovery checks before receiving the payload.
*/
rc = iscsit_check_pre_dataout(cmd, buf);
if (rc == DATAOUT_WITHIN_COMMAND_RECOVERY)
return 0;
else if (rc == DATAOUT_CANNOT_RECOVER)
return -1;
*success = true;
return 0;
}
EXPORT_SYMBOL(__iscsit_check_dataout_hdr);
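/*
 * Validate an incoming Data-Out header: reject oversized DataSegmentLengths,
 * look up the owning command from the ITT (dumping the payload if it cannot
 * be found), and defer the per-command checks to __iscsit_check_dataout_hdr().
 */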
int
iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf,
struct iscsit_cmd **out_cmd)
{
struct iscsi_data *hdr = buf;
struct iscsit_cmd *cmd;
u32 payload_length = ntoh24(hdr->dlength);
int rc;
bool success = false;
if (!payload_length) {
pr_warn_ratelimited("DataOUT payload is ZERO, ignoring.\n");
return 0;
}
if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
pr_err_ratelimited("DataSegmentLength: %u is greater than"
" MaxXmitDataSegmentLength: %u\n", payload_length,
conn->conn_ops->MaxXmitDataSegmentLength);
return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, buf);
}
cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, payload_length);
if (!cmd)
return 0;
rc = __iscsit_check_dataout_hdr(conn, buf, cmd, payload_length, &success);
if (success)
*out_cmd = cmd;
return rc;
}
EXPORT_SYMBOL(iscsit_check_dataout_hdr);
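/*
 * Receive a Data-Out payload from the socket: map the command's data
 * scatterlist into iovecs at the PDU offset, read the payload plus any
 * padding and DataDigest, and verify the CRC32C digest when DataDigest
 * is enabled.  Returns 1 on digest failure, 0 on success, -1 on error.
 */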
static int
iscsit_get_dataout(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
struct iscsi_data *hdr)
{
struct kvec *iov;
u32 checksum, iov_count = 0, padding = 0, rx_got = 0, rx_size = 0;
u32 payload_length;
int iov_ret, data_crc_failed = 0;
payload_length = min_t(u32, cmd->se_cmd.data_length,
ntoh24(hdr->dlength));
rx_size += payload_length;
iov = &cmd->iov_data[0];
iov_ret = iscsit_map_iovec(cmd, iov, cmd->orig_iov_data_count - 2,
be32_to_cpu(hdr->offset), payload_length);
if (iov_ret < 0)
return -1;
iov_count += iov_ret;
padding = ((-payload_length) & 3);
if (padding != 0) {
iov[iov_count].iov_base = cmd->pad_bytes;
iov[iov_count++].iov_len = padding;
rx_size += padding;
pr_debug("Receiving %u padding bytes.\n", padding);
}
if (conn->conn_ops->DataDigest) {
iov[iov_count].iov_base = &checksum;
iov[iov_count++].iov_len = ISCSI_CRC_LEN;
rx_size += ISCSI_CRC_LEN;
}
WARN_ON_ONCE(iov_count > cmd->orig_iov_data_count);
rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
iscsit_unmap_iovec(cmd);
if (rx_got != rx_size)
return -1;
if (conn->conn_ops->DataDigest) {
u32 data_crc;
data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
be32_to_cpu(hdr->offset),
payload_length, padding,
cmd->pad_bytes);
if (checksum != data_crc) {
pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
" DataSN: 0x%08x, CRC32C DataDigest 0x%08x"
" does not match computed 0x%08x\n",
hdr->itt, hdr->offset, payload_length,
hdr->datasn, checksum, data_crc);
data_crc_failed = 1;
} else {
pr_debug("Got CRC32C DataDigest 0x%08x for"
" %u bytes of Data Out\n", checksum,
payload_length);
}
}
return data_crc_failed;
}
int
iscsit_check_dataout_payload(struct iscsit_cmd *cmd, struct iscsi_data *hdr,
bool data_crc_failed)
{
struct iscsit_conn *conn = cmd->conn;
int rc, ooo_cmdsn;
/*
* Increment post receive data and CRC values or perform
* within-command recovery.
*/
rc = iscsit_check_post_dataout(cmd, (unsigned char *)hdr, data_crc_failed);
if ((rc == DATAOUT_NORMAL) || (rc == DATAOUT_WITHIN_COMMAND_RECOVERY))
return 0;
else if (rc == DATAOUT_SEND_R2T) {
iscsit_set_dataout_sequence_values(cmd);
conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
} else if (rc == DATAOUT_SEND_TO_TRANSPORT) {
/*
* Handle extra special case for out of order
* Unsolicited Data Out.
*/
spin_lock_bh(&cmd->istate_lock);
ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN);
cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
spin_unlock_bh(&cmd->istate_lock);
iscsit_stop_dataout_timer(cmd);
if (ooo_cmdsn)
return 0;
target_execute_cmd(&cmd->se_cmd);
return 0;
} else /* DATAOUT_CANNOT_RECOVER */
return -1;
return 0;
}
EXPORT_SYMBOL(iscsit_check_dataout_payload);
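/*
 * Traditional iSCSI/TCP RX path for Data-Out PDUs: header checks, payload
 * reception, and post-receive DataSN/recovery handling.
 */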
static int iscsit_handle_data_out(struct iscsit_conn *conn, unsigned char *buf)
{
struct iscsit_cmd *cmd = NULL;
struct iscsi_data *hdr = (struct iscsi_data *)buf;
int rc;
bool data_crc_failed = false;
rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
if (rc < 0)
return 0;
else if (!cmd)
return 0;
rc = iscsit_get_dataout(conn, cmd, hdr);
if (rc < 0)
return rc;
else if (rc > 0)
data_crc_failed = true;
return iscsit_check_dataout_payload(cmd, hdr, data_crc_failed);
}
int iscsit_setup_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
struct iscsi_nopout *hdr)
{
u32 payload_length = ntoh24(hdr->dlength);
if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
pr_err("NopOUT Flag's, Left Most Bit not set, protocol error.\n");
if (!cmd)
return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
(unsigned char *)hdr);
return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
(unsigned char *)hdr);
}
if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
" not set, protocol error.\n");
if (!cmd)
return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
(unsigned char *)hdr);
return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
(unsigned char *)hdr);
}
if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
pr_err("NOPOUT Ping Data DataSegmentLength: %u is"
" greater than MaxXmitDataSegmentLength: %u, protocol"
" error.\n", payload_length,
conn->conn_ops->MaxXmitDataSegmentLength);
if (!cmd)
return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
(unsigned char *)hdr);
return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
(unsigned char *)hdr);
}
pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x,"
" CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
hdr->itt == RESERVED_ITT ? "Response" : "Request",
hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
payload_length);
	/*
	 * This is not a response to an Unsolicited NopIN, which means
	 * it can either be a NOPOUT ping request (with a valid ITT),
	 * or a NOPOUT not requesting a NOPIN (with a reserved ITT).
	 * Either way, make sure we allocate a struct iscsit_cmd, as both
	 * can contain ping data.
	 */
if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT;
cmd->i_state = ISTATE_SEND_NOPIN;
cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
1 : 0);
conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
cmd->targ_xfer_tag = 0xFFFFFFFF;
cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
cmd->data_direction = DMA_NONE;
}
return 0;
}
EXPORT_SYMBOL(iscsit_setup_nop_out);
int iscsit_process_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
struct iscsi_nopout *hdr)
{
struct iscsit_cmd *cmd_p = NULL;
int cmdsn_ret = 0;
/*
* Initiator is expecting a NopIN ping reply..
*/
if (hdr->itt != RESERVED_ITT) {
if (!cmd)
return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
(unsigned char *)hdr);
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
iscsit_add_cmd_to_response_queue(cmd, conn,
cmd->i_state);
return 0;
}
cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
(unsigned char *)hdr, hdr->cmdsn);
if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
return 0;
if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
return -1;
return 0;
}
	/*
	 * This was a response to an unsolicited NOPIN ping.
	 */
if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
cmd_p = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt));
if (!cmd_p)
return -EINVAL;
iscsit_stop_nopin_response_timer(conn);
cmd_p->i_state = ISTATE_REMOVE;
iscsit_add_cmd_to_immediate_queue(cmd_p, conn, cmd_p->i_state);
iscsit_start_nopin_timer(conn);
return 0;
}
	/*
	 * Otherwise, the initiator is not expecting a NOPIN in response.
	 * Just ignore it for now.
	 */
if (cmd)
iscsit_free_cmd(cmd, false);
return 0;
}
EXPORT_SYMBOL(iscsit_process_nop_out);
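/*
 * RX path for NOP-Out PDUs over traditional iSCSI sockets: receive and
 * DataDigest-verify any ping data, attach it to cmd->buf_ptr, and hand
 * off to iscsit_process_nop_out().
 */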
static int iscsit_handle_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
unsigned char *buf)
{
unsigned char *ping_data = NULL;
struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
struct kvec *iov = NULL;
u32 payload_length = ntoh24(hdr->dlength);
int ret;
ret = iscsit_setup_nop_out(conn, cmd, hdr);
if (ret < 0)
return 0;
/*
* Handle NOP-OUT payload for traditional iSCSI sockets
*/
if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
u32 checksum, data_crc, padding = 0;
int niov = 0, rx_got, rx_size = payload_length;
ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
if (!ping_data) {
ret = -1;
goto out;
}
iov = &cmd->iov_misc[0];
iov[niov].iov_base = ping_data;
iov[niov++].iov_len = payload_length;
padding = ((-payload_length) & 3);
if (padding != 0) {
pr_debug("Receiving %u additional bytes"
" for padding.\n", padding);
iov[niov].iov_base = &cmd->pad_bytes;
iov[niov++].iov_len = padding;
rx_size += padding;
}
if (conn->conn_ops->DataDigest) {
iov[niov].iov_base = &checksum;
iov[niov++].iov_len = ISCSI_CRC_LEN;
rx_size += ISCSI_CRC_LEN;
}
WARN_ON_ONCE(niov > ARRAY_SIZE(cmd->iov_misc));
rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size);
if (rx_got != rx_size) {
ret = -1;
goto out;
}
if (conn->conn_ops->DataDigest) {
iscsit_do_crypto_hash_buf(conn->conn_rx_hash, ping_data,
payload_length, padding,
cmd->pad_bytes, &data_crc);
if (checksum != data_crc) {
pr_err("Ping data CRC32C DataDigest"
" 0x%08x does not match computed 0x%08x\n",
checksum, data_crc);
if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
pr_err("Unable to recover from"
" NOPOUT Ping DataCRC failure while in"
" ERL=0.\n");
ret = -1;
goto out;
} else {
/*
* Silently drop this PDU and let the
* initiator plug the CmdSN gap.
*/
pr_debug("Dropping NOPOUT"
" Command CmdSN: 0x%08x due to"
" DataCRC error.\n", hdr->cmdsn);
ret = 0;
goto out;
}
} else {
pr_debug("Got CRC32C DataDigest"
" 0x%08x for %u bytes of ping data.\n",
checksum, payload_length);
}
}
ping_data[payload_length] = '\0';
/*
* Attach ping data to struct iscsit_cmd->buf_ptr.
*/
cmd->buf_ptr = ping_data;
cmd->buf_ptr_size = payload_length;
pr_debug("Got %u bytes of NOPOUT ping"
" data.\n", payload_length);
pr_debug("Ping Data: \"%s\"\n", ping_data);
}
return iscsit_process_nop_out(conn, cmd, hdr);
out:
if (cmd)
iscsit_free_cmd(cmd, false);
kfree(ping_data);
return ret;
}
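/*
 * Map an iSCSI Task Management Function code onto the corresponding TCM
 * TMR function, or TMR_UNKNOWN if there is no equivalent.
 */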
static enum tcm_tmreq_table iscsit_convert_tmf(u8 iscsi_tmf)
{
switch (iscsi_tmf) {
case ISCSI_TM_FUNC_ABORT_TASK:
return TMR_ABORT_TASK;
case ISCSI_TM_FUNC_ABORT_TASK_SET:
return TMR_ABORT_TASK_SET;
case ISCSI_TM_FUNC_CLEAR_ACA:
return TMR_CLEAR_ACA;
case ISCSI_TM_FUNC_CLEAR_TASK_SET:
return TMR_CLEAR_TASK_SET;
case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
return TMR_LUN_RESET;
case ISCSI_TM_FUNC_TARGET_WARM_RESET:
return TMR_TARGET_WARM_RESET;
case ISCSI_TM_FUNC_TARGET_COLD_RESET:
return TMR_TARGET_COLD_RESET;
default:
return TMR_UNKNOWN;
}
}
int
iscsit_handle_task_mgt_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
unsigned char *buf)
{
struct se_tmr_req *se_tmr;
struct iscsi_tmr_req *tmr_req;
struct iscsi_tm *hdr;
int out_of_order_cmdsn = 0, ret;
u8 function, tcm_function = TMR_UNKNOWN;
hdr = (struct iscsi_tm *) buf;
hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
function = hdr->flags;
pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:"
" 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:"
" 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function,
hdr->rtt, hdr->refcmdsn, conn->cid);
if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
hdr->rtt != RESERVED_ITT)) {
pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n");
hdr->rtt = RESERVED_ITT;
}
if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) &&
!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
pr_err("Task Management Request TASK_REASSIGN not"
" issued as immediate command, bad iSCSI Initiator"
"implementation\n");
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_PROTOCOL_ERROR, buf);
}
if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
be32_to_cpu(hdr->refcmdsn) != ISCSI_RESERVED_TAG)
hdr->refcmdsn = cpu_to_be32(ISCSI_RESERVED_TAG);
cmd->data_direction = DMA_NONE;
cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL);
if (!cmd->tmr_req) {
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
buf);
}
__target_init_cmd(&cmd->se_cmd, &iscsi_ops,
conn->sess->se_sess, 0, DMA_NONE,
TCM_SIMPLE_TAG, cmd->sense_buffer + 2,
scsilun_to_int(&hdr->lun),
conn->cmd_cnt);
target_get_sess_cmd(&cmd->se_cmd, true);
/*
* TASK_REASSIGN for ERL=2 / connection stays inside of
* LIO-Target $FABRIC_MOD
*/
if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
tcm_function = iscsit_convert_tmf(function);
if (tcm_function == TMR_UNKNOWN) {
pr_err("Unknown iSCSI TMR Function:"
" 0x%02x\n", function);
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
}
}
ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function,
GFP_KERNEL);
if (ret < 0)
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC;
cmd->i_state = ISTATE_SEND_TASKMGTRSP;
cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
cmd->init_task_tag = hdr->itt;
cmd->targ_xfer_tag = 0xFFFFFFFF;
cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
se_tmr = cmd->se_cmd.se_tmr_req;
tmr_req = cmd->tmr_req;
/*
* Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN
*/
if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
ret = transport_lookup_tmr_lun(&cmd->se_cmd);
if (ret < 0) {
se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
goto attach;
}
}
switch (function) {
case ISCSI_TM_FUNC_ABORT_TASK:
se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
if (se_tmr->response)
goto attach;
break;
case ISCSI_TM_FUNC_ABORT_TASK_SET:
case ISCSI_TM_FUNC_CLEAR_ACA:
case ISCSI_TM_FUNC_CLEAR_TASK_SET:
case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
break;
case ISCSI_TM_FUNC_TARGET_WARM_RESET:
if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
goto attach;
}
break;
case ISCSI_TM_FUNC_TARGET_COLD_RESET:
if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
goto attach;
}
break;
case ISCSI_TM_FUNC_TASK_REASSIGN:
se_tmr->response = iscsit_tmr_task_reassign(cmd, buf);
/*
* Perform sanity checks on the ExpDataSN only if the
* TASK_REASSIGN was successful.
*/
if (se_tmr->response)
break;
if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_INVALID, buf);
break;
default:
pr_err("Unknown TMR function: 0x%02x, protocol"
" error.\n", function);
se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
goto attach;
}
if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
(se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
se_tmr->call_transport = 1;
attach:
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
out_of_order_cmdsn = 1;
} else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
target_put_sess_cmd(&cmd->se_cmd);
return 0;
} else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
return -1;
}
}
iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE))
return 0;
/*
* Found the referenced task, send to transport for processing.
*/
if (se_tmr->call_transport)
return transport_generic_handle_tmr(&cmd->se_cmd);
/*
* Could not find the referenced LUN, task, or Task Management
* command not authorized or supported. Change state and
* let the tx_thread send the response.
*
* For connection recovery, this is also the default action for
* TMR TASK_REASSIGN.
*/
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
target_put_sess_cmd(&cmd->se_cmd);
return 0;
}
EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
/* #warning FIXME: Support Text Command parameters besides SendTargets */
int
iscsit_setup_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
struct iscsi_text *hdr)
{
u32 payload_length = ntoh24(hdr->dlength);
if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
pr_err("Unable to accept text parameter length: %u"
"greater than MaxXmitDataSegmentLength %u.\n",
payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
(unsigned char *)hdr);
}
if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) ||
(hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) {
pr_err("Multi sequence text commands currently not supported\n");
return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED,
(unsigned char *)hdr);
}
pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
" ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
hdr->exp_statsn, payload_length);
cmd->iscsi_opcode = ISCSI_OP_TEXT;
cmd->i_state = ISTATE_SEND_TEXTRSP;
cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
cmd->targ_xfer_tag = 0xFFFFFFFF;
cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
cmd->data_direction = DMA_NONE;
kfree(cmd->text_in_ptr);
cmd->text_in_ptr = NULL;
return 0;
}
EXPORT_SYMBOL(iscsit_setup_text_cmd);
int
iscsit_process_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
struct iscsi_text *hdr)
{
unsigned char *text_in = cmd->text_in_ptr, *text_ptr;
int cmdsn_ret;
if (!text_in) {
cmd->targ_xfer_tag = be32_to_cpu(hdr->ttt);
if (cmd->targ_xfer_tag == 0xFFFFFFFF) {
pr_err("Unable to locate text_in buffer for sendtargets"
" discovery\n");
goto reject;
}
goto empty_sendtargets;
}
if (strncmp("SendTargets=", text_in, 12) != 0) {
pr_err("Received Text Data that is not"
" SendTargets, cannot continue.\n");
goto reject;
}
/* '=' confirmed in strncmp */
text_ptr = strchr(text_in, '=');
BUG_ON(!text_ptr);
if (!strncmp("=All", text_ptr, 5)) {
cmd->cmd_flags |= ICF_SENDTARGETS_ALL;
} else if (!strncmp("=iqn.", text_ptr, 5) ||
!strncmp("=eui.", text_ptr, 5)) {
cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE;
} else {
pr_err("Unable to locate valid SendTargets%s value\n",
text_ptr);
goto reject;
}
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
empty_sendtargets:
iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
(unsigned char *)hdr, hdr->cmdsn);
if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
return -1;
return 0;
}
return iscsit_execute_cmd(cmd, 0);
reject:
return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
(unsigned char *)hdr);
}
EXPORT_SYMBOL(iscsit_process_text_cmd);
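/*
 * RX path for Text Request PDUs: receive the key=value payload (currently
 * SendTargets only), verify the DataDigest if enabled, and hand off to
 * iscsit_process_text_cmd().
 */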
static int
iscsit_handle_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
unsigned char *buf)
{
struct iscsi_text *hdr = (struct iscsi_text *)buf;
char *text_in = NULL;
u32 payload_length = ntoh24(hdr->dlength);
int rx_size, rc;
rc = iscsit_setup_text_cmd(conn, cmd, hdr);
if (rc < 0)
return 0;
rx_size = payload_length;
if (payload_length) {
u32 checksum = 0, data_crc = 0;
u32 padding = 0;
int niov = 0, rx_got;
struct kvec iov[2];
rx_size = ALIGN(payload_length, 4);
text_in = kzalloc(rx_size, GFP_KERNEL);
if (!text_in)
goto reject;
cmd->text_in_ptr = text_in;
memset(iov, 0, sizeof(iov));
iov[niov].iov_base = text_in;
iov[niov++].iov_len = rx_size;
padding = rx_size - payload_length;
if (padding)
pr_debug("Receiving %u additional bytes"
" for padding.\n", padding);
if (conn->conn_ops->DataDigest) {
iov[niov].iov_base = &checksum;
iov[niov++].iov_len = ISCSI_CRC_LEN;
rx_size += ISCSI_CRC_LEN;
}
WARN_ON_ONCE(niov > ARRAY_SIZE(iov));
rx_got = rx_data(conn, &iov[0], niov, rx_size);
if (rx_got != rx_size)
goto reject;
if (conn->conn_ops->DataDigest) {
iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
text_in, rx_size, 0, NULL,
&data_crc);
if (checksum != data_crc) {
pr_err("Text data CRC32C DataDigest"
" 0x%08x does not match computed"
" 0x%08x\n", checksum, data_crc);
if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
pr_err("Unable to recover from"
" Text Data digest failure while in"
" ERL=0.\n");
goto reject;
} else {
/*
* Silently drop this PDU and let the
* initiator plug the CmdSN gap.
*/
pr_debug("Dropping Text"
" Command CmdSN: 0x%08x due to"
" DataCRC error.\n", hdr->cmdsn);
kfree(text_in);
return 0;
}
} else {
pr_debug("Got CRC32C DataDigest"
" 0x%08x for %u bytes of text data.\n",
checksum, payload_length);
}
}
text_in[payload_length - 1] = '\0';
pr_debug("Successfully read %d bytes of text"
" data.\n", payload_length);
}
return iscsit_process_text_cmd(conn, cmd, hdr);
reject:
kfree(cmd->text_in_ptr);
cmd->text_in_ptr = NULL;
return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
}
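/*
 * Handle a Logout Request with the CLOSESESSION reason code: mark the
 * session for logout, move every logged-in connection to
 * TARG_CONN_STATE_IN_LOGOUT, and queue the Logout Response.
 */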
int iscsit_logout_closesession(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
struct iscsit_conn *conn_p;
struct iscsit_session *sess = conn->sess;
pr_debug("Received logout request CLOSESESSION on CID: %hu"
" for SID: %u.\n", conn->cid, conn->sess->sid);
atomic_set(&sess->session_logout, 1);
atomic_set(&conn->conn_logout_remove, 1);
conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION;
iscsit_inc_conn_usage_count(conn);
iscsit_inc_session_usage_count(sess);
spin_lock_bh(&sess->conn_lock);
list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) {
if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN)
continue;
pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT;
}
spin_unlock_bh(&sess->conn_lock);
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
return 0;
}
int iscsit_logout_closeconnection(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
struct iscsit_conn *l_conn;
struct iscsit_session *sess = conn->sess;
pr_debug("Received logout request CLOSECONNECTION for CID:"
" %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
/*
* A Logout Request with a CLOSECONNECTION reason code for a CID
* can arrive on a connection with a differing CID.
*/
if (conn->cid == cmd->logout_cid) {
spin_lock_bh(&conn->state_lock);
pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
atomic_set(&conn->conn_logout_remove, 1);
conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION;
iscsit_inc_conn_usage_count(conn);
spin_unlock_bh(&conn->state_lock);
} else {
		/*
		 * Handle all differing-CID CLOSECONNECTION requests in
		 * iscsit_logout_post_handler_diffcid() so as to give enough
		 * time for any non-immediate command's CmdSN to be
		 * acknowledged on the connection in question.
		 *
		 * Here we simply make sure the CID is still around.
		 */
l_conn = iscsit_get_conn_from_cid(sess,
cmd->logout_cid);
if (!l_conn) {
cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
iscsit_add_cmd_to_response_queue(cmd, conn,
cmd->i_state);
return 0;
}
iscsit_dec_conn_usage_count(l_conn);
}
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
return 0;
}
int iscsit_logout_removeconnforrecovery(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
struct iscsit_session *sess = conn->sess;
pr_debug("Received explicit REMOVECONNFORRECOVERY logout for"
" CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
if (sess->sess_ops->ErrorRecoveryLevel != 2) {
pr_err("Received Logout Request REMOVECONNFORRECOVERY"
" while ERL!=2.\n");
cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
return 0;
}
if (conn->cid == cmd->logout_cid) {
pr_err("Received Logout Request REMOVECONNFORRECOVERY"
" with CID: %hu on CID: %hu, implementation error.\n",
cmd->logout_cid, conn->cid);
cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
return 0;
}
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
return 0;
}
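/*
 * RX path for Logout Request PDUs: record the reason code, queue the
 * command, and return 1 when the RX thread must sleep until the TX thread
 * has sent the Logout Response.
 */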
int
iscsit_handle_logout_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
unsigned char *buf)
{
int cmdsn_ret, logout_remove = 0;
u8 reason_code = 0;
struct iscsi_logout *hdr;
struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn);
hdr = (struct iscsi_logout *) buf;
reason_code = (hdr->flags & 0x7f);
if (tiqn) {
spin_lock(&tiqn->logout_stats.lock);
if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION)
tiqn->logout_stats.normal_logouts++;
else
tiqn->logout_stats.abnormal_logouts++;
spin_unlock(&tiqn->logout_stats.lock);
}
pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x"
" ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n",
hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code,
hdr->cid, conn->cid);
if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
pr_err("Received logout request on connection that"
" is not in logged in state, ignoring request.\n");
iscsit_free_cmd(cmd, false);
return 0;
}
cmd->iscsi_opcode = ISCSI_OP_LOGOUT;
cmd->i_state = ISTATE_SEND_LOGOUTRSP;
cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
cmd->targ_xfer_tag = 0xFFFFFFFF;
cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
cmd->logout_cid = be16_to_cpu(hdr->cid);
cmd->logout_reason = reason_code;
cmd->data_direction = DMA_NONE;
/*
* We need to sleep in these cases (by returning 1) until the Logout
* Response gets sent in the tx thread.
*/
if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) ||
((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) &&
be16_to_cpu(hdr->cid) == conn->cid))
logout_remove = 1;
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY)
iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
/*
* Immediate commands are executed, well, immediately.
* Non-Immediate Logout Commands are executed in CmdSN order.
*/
if (cmd->immediate_cmd) {
int ret = iscsit_execute_cmd(cmd, 0);
if (ret < 0)
return ret;
} else {
cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
logout_remove = 0;
else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
return -1;
}
return logout_remove;
}
EXPORT_SYMBOL(iscsit_handle_logout_cmd);
int iscsit_handle_snack(
struct iscsit_conn *conn,
unsigned char *buf)
{
struct iscsi_snack *hdr;
hdr = (struct iscsi_snack *) buf;
hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:"
" 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x,"
" CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags,
hdr->begrun, hdr->runlength, conn->cid);
if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
pr_err("Initiator sent SNACK request while in"
" ErrorRecoveryLevel=0.\n");
return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
buf);
}
/*
* SNACK_DATA and SNACK_R2T are both 0, so check which function to
* call from inside iscsi_send_recovery_datain_or_r2t().
*/
switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) {
case 0:
return iscsit_handle_recovery_datain_or_r2t(conn, buf,
hdr->itt,
be32_to_cpu(hdr->ttt),
be32_to_cpu(hdr->begrun),
be32_to_cpu(hdr->runlength));
case ISCSI_FLAG_SNACK_TYPE_STATUS:
return iscsit_handle_status_snack(conn, hdr->itt,
be32_to_cpu(hdr->ttt),
be32_to_cpu(hdr->begrun), be32_to_cpu(hdr->runlength));
case ISCSI_FLAG_SNACK_TYPE_DATA_ACK:
return iscsit_handle_data_ack(conn, be32_to_cpu(hdr->ttt),
be32_to_cpu(hdr->begrun),
be32_to_cpu(hdr->runlength));
case ISCSI_FLAG_SNACK_TYPE_RDATA:
/* FIXME: Support R-Data SNACK */
pr_err("R-Data SNACK Not Supported.\n");
return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
buf);
default:
pr_err("Unknown SNACK type 0x%02x, protocol"
" error.\n", hdr->flags & 0x0f);
return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
buf);
}
return 0;
}
EXPORT_SYMBOL(iscsit_handle_snack);
static void iscsit_rx_thread_wait_for_tcp(struct iscsit_conn *conn)
{
if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
(conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
wait_for_completion_interruptible_timeout(
&conn->rx_half_close_comp,
ISCSI_RX_THREAD_TCP_TIMEOUT * HZ);
}
}
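/*
 * Receive the Immediate Data segment of a WRITE: map the data scatterlist,
 * absorb any bytes beyond the CDB-derived buffer into an overflow buffer,
 * read padding and DataDigest, and verify the CRC32C digest when enabled.
 */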
static int iscsit_handle_immediate_data(
struct iscsit_cmd *cmd,
struct iscsi_scsi_req *hdr,
u32 length)
{
int iov_ret, rx_got = 0, rx_size = 0;
u32 checksum, iov_count = 0, padding = 0;
struct iscsit_conn *conn = cmd->conn;
struct kvec *iov;
void *overflow_buf = NULL;
BUG_ON(cmd->write_data_done > cmd->se_cmd.data_length);
rx_size = min(cmd->se_cmd.data_length - cmd->write_data_done, length);
iov_ret = iscsit_map_iovec(cmd, cmd->iov_data,
cmd->orig_iov_data_count - 2,
cmd->write_data_done, rx_size);
if (iov_ret < 0)
return IMMEDIATE_DATA_CANNOT_RECOVER;
iov_count = iov_ret;
iov = &cmd->iov_data[0];
if (rx_size < length) {
/*
* Special case: length of immediate data exceeds the data
* buffer size derived from the CDB.
*/
overflow_buf = kmalloc(length - rx_size, GFP_KERNEL);
if (!overflow_buf) {
iscsit_unmap_iovec(cmd);
return IMMEDIATE_DATA_CANNOT_RECOVER;
}
cmd->overflow_buf = overflow_buf;
iov[iov_count].iov_base = overflow_buf;
iov[iov_count].iov_len = length - rx_size;
iov_count++;
rx_size = length;
}
padding = ((-length) & 3);
if (padding != 0) {
iov[iov_count].iov_base = cmd->pad_bytes;
iov[iov_count++].iov_len = padding;
rx_size += padding;
}
if (conn->conn_ops->DataDigest) {
iov[iov_count].iov_base = &checksum;
iov[iov_count++].iov_len = ISCSI_CRC_LEN;
rx_size += ISCSI_CRC_LEN;
}
WARN_ON_ONCE(iov_count > cmd->orig_iov_data_count);
rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
iscsit_unmap_iovec(cmd);
if (rx_got != rx_size) {
iscsit_rx_thread_wait_for_tcp(conn);
return IMMEDIATE_DATA_CANNOT_RECOVER;
}
if (conn->conn_ops->DataDigest) {
u32 data_crc;
data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
cmd->write_data_done, length, padding,
cmd->pad_bytes);
if (checksum != data_crc) {
pr_err("ImmediateData CRC32C DataDigest 0x%08x"
" does not match computed 0x%08x\n", checksum,
data_crc);
if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
pr_err("Unable to recover from"
" Immediate Data digest failure while"
" in ERL=0.\n");
iscsit_reject_cmd(cmd,
ISCSI_REASON_DATA_DIGEST_ERROR,
(unsigned char *)hdr);
return IMMEDIATE_DATA_CANNOT_RECOVER;
} else {
iscsit_reject_cmd(cmd,
ISCSI_REASON_DATA_DIGEST_ERROR,
(unsigned char *)hdr);
return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
}
} else {
pr_debug("Got CRC32C DataDigest 0x%08x for"
" %u bytes of Immediate Data\n", checksum,
length);
}
}
cmd->write_data_done += length;
if (cmd->write_data_done == cmd->se_cmd.data_length) {
spin_lock_bh(&cmd->istate_lock);
cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
spin_unlock_bh(&cmd->istate_lock);
}
return IMMEDIATE_DATA_NORMAL_OPERATION;
}
/*
 * #warning FIXME: iscsit_build_conn_drop_async_message() only sends out on
 * connections with an active network interface.
 */
static void iscsit_build_conn_drop_async_message(struct iscsit_conn *conn)
{
struct iscsit_cmd *cmd;
struct iscsit_conn *conn_p;
bool found = false;
lockdep_assert_held(&conn->sess->conn_lock);
	/*
	 * Only send an Asynchronous Message on connections whose network
	 * interface is still functional.
	 */
list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
iscsit_inc_conn_usage_count(conn_p);
found = true;
break;
}
}
if (!found)
return;
cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
if (!cmd) {
iscsit_dec_conn_usage_count(conn_p);
return;
}
cmd->logout_cid = conn->cid;
cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
cmd->i_state = ISTATE_SEND_ASYNCMSG;
spin_lock_bh(&conn_p->cmd_lock);
list_add_tail(&cmd->i_conn_node, &conn_p->conn_cmd_list);
spin_unlock_bh(&conn_p->cmd_lock);
iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state);
iscsit_dec_conn_usage_count(conn_p);
}
static int iscsit_send_conn_drop_async_message(
struct iscsit_cmd *cmd,
struct iscsit_conn *conn)
{
struct iscsi_async *hdr;
cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
hdr = (struct iscsi_async *) cmd->pdu;
hdr->opcode = ISCSI_OP_ASYNC_EVENT;
hdr->flags = ISCSI_FLAG_CMD_FINAL;
cmd->init_task_tag = RESERVED_ITT;
cmd->targ_xfer_tag = 0xFFFFFFFF;
put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]);
cmd->stat_sn = conn->stat_sn++;
hdr->statsn = cpu_to_be32(cmd->stat_sn);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
hdr->async_event = ISCSI_ASYNC_MSG_DROPPING_CONNECTION;
hdr->param1 = cpu_to_be16(cmd->logout_cid);
hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain);
pr_debug("Sending Connection Dropped Async Message StatSN:"
" 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn,
cmd->logout_cid, conn->cid);
return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
}
static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *conn)
{
if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
(conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
wait_for_completion_interruptible_timeout(
&conn->tx_half_close_comp,
ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
}
}
void
iscsit_build_datain_pdu(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
struct iscsi_datain *datain, struct iscsi_data_rsp *hdr,
bool set_statsn)
{
hdr->opcode = ISCSI_OP_SCSI_DATA_IN;
hdr->flags = datain->flags;
if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
}
}
hton24(hdr->dlength, datain->length);
if (hdr->flags & ISCSI_FLAG_DATA_ACK)
int_to_scsilun(cmd->se_cmd.orig_fe_lun,
(struct scsi_lun *)&hdr->lun);
else
put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
hdr->itt = cmd->init_task_tag;
if (hdr->flags & ISCSI_FLAG_DATA_ACK)
hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
else
hdr->ttt = cpu_to_be32(0xFFFFFFFF);
if (set_statsn)
hdr->statsn = cpu_to_be32(cmd->stat_sn);
else
hdr->statsn = cpu_to_be32(0xFFFFFFFF);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
hdr->datasn = cpu_to_be32(datain->data_sn);
hdr->offset = cpu_to_be32(datain->offset);
pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
ntohl(hdr->offset), datain->length, conn->cid);
}
EXPORT_SYMBOL(iscsit_build_datain_pdu);
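/*
 * Build and transmit the next Data-In PDU for a READ.  Returns 0 while more
 * Data-In remains, 1 or 2 once the final Data-In of the sequence has been
 * sent (2 when sense data must still be sent), and a negative value on error.
 */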
static int iscsit_send_datain(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0];
struct iscsi_datain datain;
struct iscsi_datain_req *dr;
int eodr = 0, ret;
bool set_statsn = false;
memset(&datain, 0, sizeof(struct iscsi_datain));
dr = iscsit_get_datain_values(cmd, &datain);
if (!dr) {
pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n",
cmd->init_task_tag);
return -1;
}
/*
* Be paranoid and double check the logic for now.
*/
if ((datain.offset + datain.length) > cmd->se_cmd.data_length) {
pr_err("Command ITT: 0x%08x, datain.offset: %u and"
" datain.length: %u exceeds cmd->data_length: %u\n",
cmd->init_task_tag, datain.offset, datain.length,
cmd->se_cmd.data_length);
return -1;
}
atomic_long_add(datain.length, &conn->sess->tx_data_octets);
	/*
	 * Special case for successful execution with both DATAIN
	 * and Sense Data.
	 */
if ((datain.flags & ISCSI_FLAG_DATA_STATUS) &&
(cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
datain.flags &= ~ISCSI_FLAG_DATA_STATUS;
else {
if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) ||
(dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) {
iscsit_increment_maxcmdsn(cmd, conn->sess);
cmd->stat_sn = conn->stat_sn++;
set_statsn = true;
} else if (dr->dr_complete ==
DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY)
set_statsn = true;
}
iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn);
ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, dr, &datain, 0);
if (ret < 0)
return ret;
if (dr->dr_complete) {
eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
2 : 1;
iscsit_free_datain_req(cmd, dr);
}
return eodr;
}
int
iscsit_build_logout_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
struct iscsi_logout_rsp *hdr)
{
struct iscsit_conn *logout_conn = NULL;
struct iscsi_conn_recovery *cr = NULL;
struct iscsit_session *sess = conn->sess;
	/*
	 * The actual shutting down of Sessions and/or Connections
	 * for CLOSESESSION and CLOSECONNECTION Logout Requests
	 * is done in iscsit_logout_post_handler().
	 */
switch (cmd->logout_reason) {
case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
pr_debug("iSCSI session logout successful, setting"
" logout response to ISCSI_LOGOUT_SUCCESS.\n");
cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
break;
case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND)
break;
		/*
		 * For CLOSECONNECTION logout requests carrying
		 * a matching logout CID -> local CID, the reference
		 * for the local CID will have been incremented in
		 * iscsit_logout_closeconnection().
		 *
		 * For CLOSECONNECTION logout requests carrying
		 * a different CID than the connection it arrived
		 * on, the connection responding to cmd->logout_cid
		 * is stopped in iscsit_logout_post_handler_diffcid().
		 */
pr_debug("iSCSI CID: %hu logout on CID: %hu"
" successful.\n", cmd->logout_cid, conn->cid);
cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
break;
case ISCSI_LOGOUT_REASON_RECOVERY:
if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) ||
(cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED))
break;
/*
* If the connection is still active from our point of view
* force connection recovery to occur.
*/
logout_conn = iscsit_get_conn_from_cid_rcfr(sess,
cmd->logout_cid);
if (logout_conn) {
iscsit_connection_reinstatement_rcfr(logout_conn);
iscsit_dec_conn_usage_count(logout_conn);
}
cr = iscsit_get_inactive_connection_recovery_entry(
conn->sess, cmd->logout_cid);
if (!cr) {
pr_err("Unable to locate CID: %hu for"
" REMOVECONNFORRECOVERY Logout Request.\n",
cmd->logout_cid);
cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
break;
}
iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn);
pr_debug("iSCSI REMOVECONNFORRECOVERY logout"
" for recovery for CID: %hu on CID: %hu successful.\n",
cmd->logout_cid, conn->cid);
cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
break;
default:
pr_err("Unknown cmd->logout_reason: 0x%02x\n",
cmd->logout_reason);
return -1;
}
hdr->opcode = ISCSI_OP_LOGOUT_RSP;
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
hdr->response = cmd->logout_response;
hdr->itt = cmd->init_task_tag;
cmd->stat_sn = conn->stat_sn++;
hdr->statsn = cpu_to_be32(cmd->stat_sn);
iscsit_increment_maxcmdsn(cmd, conn->sess);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
pr_debug("Built Logout Response ITT: 0x%08x StatSN:"
" 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
cmd->init_task_tag, cmd->stat_sn, hdr->response,
cmd->logout_cid, conn->cid);
return 0;
}
EXPORT_SYMBOL(iscsit_build_logout_rsp);
static int
iscsit_send_logout(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
int rc;
rc = iscsit_build_logout_rsp(cmd, conn,
(struct iscsi_logout_rsp *)&cmd->pdu[0]);
if (rc < 0)
return rc;
return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
}
void
iscsit_build_nopin_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
struct iscsi_nopin *hdr, bool nopout_response)
{
hdr->opcode = ISCSI_OP_NOOP_IN;
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
hton24(hdr->dlength, cmd->buf_ptr_size);
if (nopout_response)
put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
hdr->itt = cmd->init_task_tag;
hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
cmd->stat_sn = (nopout_response) ? conn->stat_sn++ :
conn->stat_sn;
hdr->statsn = cpu_to_be32(cmd->stat_sn);
if (nopout_response)
iscsit_increment_maxcmdsn(cmd, conn->sess);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
" StatSN: 0x%08x, Length %u\n", (nopout_response) ?
"Solicited" : "Unsolicited", cmd->init_task_tag,
cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
}
EXPORT_SYMBOL(iscsit_build_nopin_rsp);
/*
* Unsolicited NOPIN, either requesting a response or not.
*/
static int iscsit_send_unsolicited_nopin(
struct iscsit_cmd *cmd,
struct iscsit_conn *conn,
int want_response)
{
struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
int ret;
iscsit_build_nopin_rsp(cmd, conn, hdr, false);
pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
" 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
if (ret < 0)
return ret;
spin_lock_bh(&cmd->istate_lock);
cmd->i_state = want_response ?
ISTATE_SENT_NOPIN_WANT_RESPONSE : ISTATE_SENT_STATUS;
spin_unlock_bh(&cmd->istate_lock);
return 0;
}
static int
iscsit_send_nopin(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
iscsit_build_nopin_rsp(cmd, conn, hdr, true);
/*
* NOPOUT Ping Data is attached to struct iscsit_cmd->buf_ptr.
* NOPOUT DataSegmentLength is at struct iscsit_cmd->buf_ptr_size.
*/
pr_debug("Echoing back %u bytes of ping data.\n", cmd->buf_ptr_size);
return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
cmd->buf_ptr,
cmd->buf_ptr_size);
}
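/*
 * Pull the next unsent R2T from the command's list, fill in and transmit
 * the R2T PDU, and (re)start the Data-Out timer.
 */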
static int iscsit_send_r2t(
struct iscsit_cmd *cmd,
struct iscsit_conn *conn)
{
struct iscsi_r2t *r2t;
struct iscsi_r2t_rsp *hdr;
int ret;
r2t = iscsit_get_r2t_from_list(cmd);
if (!r2t)
return -1;
hdr = (struct iscsi_r2t_rsp *) cmd->pdu;
memset(hdr, 0, ISCSI_HDR_LEN);
hdr->opcode = ISCSI_OP_R2T;
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
int_to_scsilun(cmd->se_cmd.orig_fe_lun,
(struct scsi_lun *)&hdr->lun);
hdr->itt = cmd->init_task_tag;
if (conn->conn_transport->iscsit_get_r2t_ttt)
conn->conn_transport->iscsit_get_r2t_ttt(conn, cmd, r2t);
else
r2t->targ_xfer_tag = session_get_next_ttt(conn->sess);
hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag);
hdr->statsn = cpu_to_be32(conn->stat_sn);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
hdr->r2tsn = cpu_to_be32(r2t->r2t_sn);
hdr->data_offset = cpu_to_be32(r2t->offset);
hdr->data_length = cpu_to_be32(r2t->xfer_len);
pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:"
" 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n",
(!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag,
r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn,
r2t->offset, r2t->xfer_len, conn->cid);
spin_lock_bh(&cmd->r2t_lock);
r2t->sent_r2t = 1;
spin_unlock_bh(&cmd->r2t_lock);
ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
if (ret < 0) {
return ret;
}
spin_lock_bh(&cmd->dataout_timeout_lock);
iscsit_start_dataout_timer(cmd, conn);
spin_unlock_bh(&cmd->dataout_timeout_lock);
return 0;
}
/*
* @recovery: If called from iscsi_task_reassign_complete_write() for
* connection recovery.
*/
int iscsit_build_r2ts_for_cmd(
struct iscsit_conn *conn,
struct iscsit_cmd *cmd,
bool recovery)
{
int first_r2t = 1;
u32 offset = 0, xfer_len = 0;
spin_lock_bh(&cmd->r2t_lock);
if (cmd->cmd_flags & ICF_SENT_LAST_R2T) {
spin_unlock_bh(&cmd->r2t_lock);
return 0;
}
if (conn->sess->sess_ops->DataSequenceInOrder &&
!recovery)
cmd->r2t_offset = max(cmd->r2t_offset, cmd->write_data_done);
while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) {
if (conn->sess->sess_ops->DataSequenceInOrder) {
offset = cmd->r2t_offset;
if (first_r2t && recovery) {
int new_data_end = offset +
conn->sess->sess_ops->MaxBurstLength -
cmd->next_burst_len;
if (new_data_end > cmd->se_cmd.data_length)
xfer_len = cmd->se_cmd.data_length - offset;
else
xfer_len =
conn->sess->sess_ops->MaxBurstLength -
cmd->next_burst_len;
} else {
int new_data_end = offset +
conn->sess->sess_ops->MaxBurstLength;
if (new_data_end > cmd->se_cmd.data_length)
xfer_len = cmd->se_cmd.data_length - offset;
else
xfer_len = conn->sess->sess_ops->MaxBurstLength;
}
if ((s32)xfer_len < 0) {
cmd->cmd_flags |= ICF_SENT_LAST_R2T;
break;
}
cmd->r2t_offset += xfer_len;
if (cmd->r2t_offset == cmd->se_cmd.data_length)
cmd->cmd_flags |= ICF_SENT_LAST_R2T;
} else {
struct iscsi_seq *seq;
seq = iscsit_get_seq_holder_for_r2t(cmd);
if (!seq) {
spin_unlock_bh(&cmd->r2t_lock);
return -1;
}
offset = seq->offset;
xfer_len = seq->xfer_len;
if (cmd->seq_send_order == cmd->seq_count)
cmd->cmd_flags |= ICF_SENT_LAST_R2T;
}
cmd->outstanding_r2ts++;
first_r2t = 0;
if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) {
spin_unlock_bh(&cmd->r2t_lock);
return -1;
}
if (cmd->cmd_flags & ICF_SENT_LAST_R2T)
break;
}
spin_unlock_bh(&cmd->r2t_lock);
return 0;
}
EXPORT_SYMBOL(iscsit_build_r2ts_for_cmd);
void iscsit_build_rsp_pdu(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
bool inc_stat_sn, struct iscsi_scsi_rsp *hdr)
{
if (inc_stat_sn)
cmd->stat_sn = conn->stat_sn++;
atomic_long_inc(&conn->sess->rsp_pdus);
memset(hdr, 0, ISCSI_HDR_LEN);
hdr->opcode = ISCSI_OP_SCSI_CMD_RSP;
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
}
hdr->response = cmd->iscsi_response;
hdr->cmd_status = cmd->se_cmd.scsi_status;
hdr->itt = cmd->init_task_tag;
hdr->statsn = cpu_to_be32(cmd->stat_sn);
iscsit_increment_maxcmdsn(cmd, conn->sess);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
pr_debug("Built SCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
" Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
cmd->init_task_tag, cmd->stat_sn, cmd->se_cmd.scsi_status,
cmd->se_cmd.scsi_status, conn->cid);
}
EXPORT_SYMBOL(iscsit_build_rsp_pdu);
static int iscsit_send_response(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0];
bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS);
void *data_buf = NULL;
u32 padding = 0, data_buf_len = 0;
iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr);
/*
* Attach SENSE DATA payload to iSCSI Response PDU
*/
if (cmd->se_cmd.sense_buffer &&
((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
(cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
put_unaligned_be16(cmd->se_cmd.scsi_sense_length, cmd->sense_buffer);
cmd->se_cmd.scsi_sense_length += sizeof (__be16);
padding = -(cmd->se_cmd.scsi_sense_length) & 3;
hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
data_buf = cmd->sense_buffer;
data_buf_len = cmd->se_cmd.scsi_sense_length + padding;
if (padding) {
memset(cmd->sense_buffer +
cmd->se_cmd.scsi_sense_length, 0, padding);
pr_debug("Adding %u bytes of padding to"
" SENSE.\n", padding);
}
pr_debug("Attaching SENSE DATA: %u bytes to iSCSI"
" Response PDU\n",
cmd->se_cmd.scsi_sense_length);
}
return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, data_buf,
data_buf_len);
}
static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
{
switch (se_tmr->response) {
case TMR_FUNCTION_COMPLETE:
return ISCSI_TMF_RSP_COMPLETE;
case TMR_TASK_DOES_NOT_EXIST:
return ISCSI_TMF_RSP_NO_TASK;
case TMR_LUN_DOES_NOT_EXIST:
return ISCSI_TMF_RSP_NO_LUN;
case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
return ISCSI_TMF_RSP_NOT_SUPPORTED;
case TMR_FUNCTION_REJECTED:
default:
return ISCSI_TMF_RSP_REJECTED;
}
}
void
iscsit_build_task_mgt_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
struct iscsi_tm_rsp *hdr)
{
struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
hdr->flags = ISCSI_FLAG_CMD_FINAL;
hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
hdr->itt = cmd->init_task_tag;
cmd->stat_sn = conn->stat_sn++;
hdr->statsn = cpu_to_be32(cmd->stat_sn);
iscsit_increment_maxcmdsn(cmd, conn->sess);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
pr_debug("Built Task Management Response ITT: 0x%08x,"
" StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);
}
EXPORT_SYMBOL(iscsit_build_task_mgt_rsp);
static int
iscsit_send_task_mgt_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0];
iscsit_build_task_mgt_rsp(cmd, conn, hdr);
return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
}
#define SENDTARGETS_BUF_LIMIT 32768U
static int
iscsit_build_sendtargets_response(struct iscsit_cmd *cmd,
enum iscsit_transport_type network_transport,
int skip_bytes, bool *completed)
{
char *payload = NULL;
struct iscsit_conn *conn = cmd->conn;
struct iscsi_portal_group *tpg;
struct iscsi_tiqn *tiqn;
struct iscsi_tpg_np *tpg_np;
int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
int target_name_printed;
unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
bool active;
buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength,
SENDTARGETS_BUF_LIMIT);
payload = kzalloc(buffer_len, GFP_KERNEL);
if (!payload)
return -ENOMEM;
/*
* Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE
* explicit case..
*/
if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) {
text_ptr = strchr(text_in, '=');
if (!text_ptr) {
pr_err("Unable to locate '=' string in text_in:"
" %s\n", text_in);
kfree(payload);
return -EINVAL;
}
/*
* Skip over '=' character..
*/
text_ptr += 1;
}
spin_lock(&tiqn_lock);
list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) &&
strcmp(tiqn->tiqn, text_ptr)) {
continue;
}
target_name_printed = 0;
spin_lock(&tiqn->tiqn_tpg_lock);
list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
/* If demo_mode_discovery=0 and generate_node_acls=0
* (demo mode disabled) do not return
* TargetName+TargetAddress unless a NodeACL exists.
*/
if ((tpg->tpg_attrib.generate_node_acls == 0) &&
(tpg->tpg_attrib.demo_mode_discovery == 0) &&
(!target_tpg_has_node_acl(&tpg->tpg_se_tpg,
cmd->conn->sess->sess_ops->InitiatorName))) {
continue;
}
spin_lock(&tpg->tpg_state_lock);
active = (tpg->tpg_state == TPG_STATE_ACTIVE);
spin_unlock(&tpg->tpg_state_lock);
if (!active && tpg->tpg_attrib.tpg_enabled_sendtargets)
continue;
spin_lock(&tpg->tpg_np_lock);
list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
tpg_np_list) {
struct iscsi_np *np = tpg_np->tpg_np;
struct sockaddr_storage *sockaddr;
if (np->np_network_transport != network_transport)
continue;
if (!target_name_printed) {
len = sprintf(buf, "TargetName=%s",
tiqn->tiqn);
len += 1;
if ((len + payload_len) > buffer_len) {
spin_unlock(&tpg->tpg_np_lock);
spin_unlock(&tiqn->tiqn_tpg_lock);
end_of_buf = 1;
goto eob;
}
if (skip_bytes && len <= skip_bytes) {
skip_bytes -= len;
} else {
memcpy(payload + payload_len, buf, len);
payload_len += len;
target_name_printed = 1;
if (len > skip_bytes)
skip_bytes = 0;
}
}
if (inet_addr_is_any((struct sockaddr *)&np->np_sockaddr))
sockaddr = &conn->local_sockaddr;
else
sockaddr = &np->np_sockaddr;
len = sprintf(buf, "TargetAddress="
"%pISpc,%hu",
sockaddr,
tpg->tpgt);
len += 1;
if ((len + payload_len) > buffer_len) {
spin_unlock(&tpg->tpg_np_lock);
spin_unlock(&tiqn->tiqn_tpg_lock);
end_of_buf = 1;
goto eob;
}
if (skip_bytes && len <= skip_bytes) {
skip_bytes -= len;
} else {
memcpy(payload + payload_len, buf, len);
payload_len += len;
if (len > skip_bytes)
skip_bytes = 0;
}
}
spin_unlock(&tpg->tpg_np_lock);
}
spin_unlock(&tiqn->tiqn_tpg_lock);
eob:
if (end_of_buf) {
*completed = false;
break;
}
if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
break;
}
spin_unlock(&tiqn_lock);
cmd->buf_ptr = payload;
return payload_len;
}
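/*
 * Illustrative sketch, not part of the upstream driver: the payload built
 * above is a sequence of NUL-terminated key=value strings, e.g.
 * "TargetName=iqn.2003-01.org.example:disk1\0"
 * "TargetAddress=192.168.1.10:3260,1\0", with the "len += 1" lines
 * accounting for each terminating NUL and skip_bytes letting a follow-on
 * Text Response resume mid-listing.  The IQN and address below are made up.
 */
static int example_sendtargets_entry(char *payload, int buffer_len)
{
	int len = 0;

	/* snprintf() returns the length without the NUL it wrote, so add 1
	 * to step past it, exactly like the "len += 1" bookkeeping above. */
	len += snprintf(payload + len, buffer_len - len,
			"TargetName=iqn.2003-01.org.example:disk1") + 1;
	len += snprintf(payload + len, buffer_len - len,
			"TargetAddress=192.168.1.10:3260,1") + 1;
	return len;	/* bytes consumed, assuming the buffer is large enough */
}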
int
iscsit_build_text_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
struct iscsi_text_rsp *hdr,
enum iscsit_transport_type network_transport)
{
int text_length, padding;
bool completed = true;
text_length = iscsit_build_sendtargets_response(cmd, network_transport,
cmd->read_data_done,
&completed);
if (text_length < 0)
return text_length;
if (completed) {
hdr->flags = ISCSI_FLAG_CMD_FINAL;
} else {
hdr->flags = ISCSI_FLAG_TEXT_CONTINUE;
cmd->read_data_done += text_length;
if (cmd->targ_xfer_tag == 0xFFFFFFFF)
cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
}
hdr->opcode = ISCSI_OP_TEXT_RSP;
padding = ((-text_length) & 3);
hton24(hdr->dlength, text_length);
hdr->itt = cmd->init_task_tag;
hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
cmd->stat_sn = conn->stat_sn++;
hdr->statsn = cpu_to_be32(cmd->stat_sn);
iscsit_increment_maxcmdsn(cmd, conn->sess);
/*
* Reset maxcmdsn_inc in multi-part text payload exchanges to
* correctly increment MaxCmdSN for each response answering a
* non immediate text request with a valid CmdSN.
*/
cmd->maxcmdsn_inc = 0;
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x,"
" Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag,
cmd->targ_xfer_tag, cmd->stat_sn, text_length, conn->cid,
!!(hdr->flags & ISCSI_FLAG_CMD_FINAL),
!!(hdr->flags & ISCSI_FLAG_TEXT_CONTINUE));
return text_length + padding;
}
EXPORT_SYMBOL(iscsit_build_text_rsp);
static int iscsit_send_text_rsp(
struct iscsit_cmd *cmd,
struct iscsit_conn *conn)
{
struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu;
int text_length;
text_length = iscsit_build_text_rsp(cmd, conn, hdr,
conn->conn_transport->transport_type);
if (text_length < 0)
return text_length;
return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
cmd->buf_ptr,
text_length);
}
void
iscsit_build_reject(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
struct iscsi_reject *hdr)
{
hdr->opcode = ISCSI_OP_REJECT;
hdr->reason = cmd->reject_reason;
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
hton24(hdr->dlength, ISCSI_HDR_LEN);
hdr->ffffffff = cpu_to_be32(0xffffffff);
cmd->stat_sn = conn->stat_sn++;
hdr->statsn = cpu_to_be32(cmd->stat_sn);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
}
EXPORT_SYMBOL(iscsit_build_reject);
static int iscsit_send_reject(
struct iscsit_cmd *cmd,
struct iscsit_conn *conn)
{
struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
iscsit_build_reject(cmd, conn, hdr);
pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x,"
" CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid);
return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
cmd->buf_ptr,
ISCSI_HDR_LEN);
}
void iscsit_thread_get_cpumask(struct iscsit_conn *conn)
{
int ord, cpu;
cpumask_var_t conn_allowed_cpumask;
/*
* bitmap_id is assigned from iscsit_global->ts_bitmap from
* within iscsit_start_kthreads()
*
* Here we use bitmap_id to determine which CPU that this
* iSCSI connection's RX/TX threads will be scheduled to
* execute upon.
*/
if (!zalloc_cpumask_var(&conn_allowed_cpumask, GFP_KERNEL)) {
ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
for_each_online_cpu(cpu) {
if (ord-- == 0) {
cpumask_set_cpu(cpu, conn->conn_cpumask);
return;
}
}
} else {
cpumask_and(conn_allowed_cpumask, iscsit_global->allowed_cpumask,
cpu_online_mask);
cpumask_clear(conn->conn_cpumask);
ord = conn->bitmap_id % cpumask_weight(conn_allowed_cpumask);
for_each_cpu(cpu, conn_allowed_cpumask) {
if (ord-- == 0) {
cpumask_set_cpu(cpu, conn->conn_cpumask);
free_cpumask_var(conn_allowed_cpumask);
return;
}
}
free_cpumask_var(conn_allowed_cpumask);
}
/*
* This should never be reached..
*/
dump_stack();
cpumask_setall(conn->conn_cpumask);
}
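/*
 * Illustrative sketch, not part of the upstream driver: stripped of the
 * cpumask plumbing, the selection above just spreads connections
 * round-robin by taking bitmap_id modulo the number of usable CPUs and
 * picking the Nth one:
 */
static int example_pick_conn_cpu(int bitmap_id, const int *usable_cpus, int nr_usable)
{
	return usable_cpus[bitmap_id % nr_usable];	/* Nth allowed CPU */
}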
static void iscsit_thread_reschedule(struct iscsit_conn *conn)
{
/*
* If iscsit_global->allowed_cpumask has been modified, reschedule the iSCSI
* connection's RX/TX threads and update conn->allowed_cpumask.
*/
if (!cpumask_equal(iscsit_global->allowed_cpumask,
conn->allowed_cpumask)) {
iscsit_thread_get_cpumask(conn);
conn->conn_tx_reset_cpumask = 1;
conn->conn_rx_reset_cpumask = 1;
cpumask_copy(conn->allowed_cpumask,
iscsit_global->allowed_cpumask);
}
}
void iscsit_thread_check_cpumask(
struct iscsit_conn *conn,
struct task_struct *p,
int mode)
{
/*
* The TX and RX threads may call iscsit_thread_check_cpumask()
* at the same time. The RX thread might be faster and return from
* iscsit_thread_reschedule() with conn_rx_reset_cpumask set to 0.
* Then the TX thread sets it back to 1.
* The next time the RX thread loops, it sees conn_rx_reset_cpumask
* set to 1, calls set_cpus_allowed_ptr() again and resets it to 0.
*/
iscsit_thread_reschedule(conn);
/*
* mode == 1 signals iscsi_target_tx_thread() usage.
* mode == 0 signals iscsi_target_rx_thread() usage.
*/
if (mode == 1) {
if (!conn->conn_tx_reset_cpumask)
return;
} else {
if (!conn->conn_rx_reset_cpumask)
return;
}
/*
* Update the CPU mask for this single kthread so that
* both TX and RX kthreads are scheduled to run on the
* same CPU.
*/
set_cpus_allowed_ptr(p, conn->conn_cpumask);
if (mode == 1)
conn->conn_tx_reset_cpumask = 0;
else
conn->conn_rx_reset_cpumask = 0;
}
EXPORT_SYMBOL(iscsit_thread_check_cpumask);
int
iscsit_immediate_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
{
int ret;
switch (state) {
case ISTATE_SEND_R2T:
ret = iscsit_send_r2t(cmd, conn);
if (ret < 0)
goto err;
break;
case ISTATE_REMOVE:
spin_lock_bh(&conn->cmd_lock);
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, false);
break;
case ISTATE_SEND_NOPIN_WANT_RESPONSE:
iscsit_mod_nopin_response_timer(conn);
ret = iscsit_send_unsolicited_nopin(cmd, conn, 1);
if (ret < 0)
goto err;
break;
case ISTATE_SEND_NOPIN_NO_RESPONSE:
ret = iscsit_send_unsolicited_nopin(cmd, conn, 0);
if (ret < 0)
goto err;
break;
default:
pr_err("Unknown Opcode: 0x%02x ITT:"
" 0x%08x, i_state: %d on CID: %hu\n",
cmd->iscsi_opcode, cmd->init_task_tag, state,
conn->cid);
goto err;
}
return 0;
err:
return -1;
}
EXPORT_SYMBOL(iscsit_immediate_queue);
static int
iscsit_handle_immediate_queue(struct iscsit_conn *conn)
{
struct iscsit_transport *t = conn->conn_transport;
struct iscsi_queue_req *qr;
struct iscsit_cmd *cmd;
u8 state;
int ret;
while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) {
atomic_set(&conn->check_immediate_queue, 0);
cmd = qr->cmd;
state = qr->state;
kmem_cache_free(lio_qr_cache, qr);
ret = t->iscsit_immediate_queue(conn, cmd, state);
if (ret < 0)
return ret;
}
return 0;
}
int
iscsit_response_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
{
int ret;
check_rsp_state:
switch (state) {
case ISTATE_SEND_DATAIN:
ret = iscsit_send_datain(cmd, conn);
if (ret < 0)
goto err;
else if (!ret)
/* more drs */
goto check_rsp_state;
else if (ret == 1) {
/* all done */
spin_lock_bh(&cmd->istate_lock);
cmd->i_state = ISTATE_SENT_STATUS;
spin_unlock_bh(&cmd->istate_lock);
if (atomic_read(&conn->check_immediate_queue))
return 1;
return 0;
} else if (ret == 2) {
/* Still must send status,
SCF_TRANSPORT_TASK_SENSE was set */
spin_lock_bh(&cmd->istate_lock);
cmd->i_state = ISTATE_SEND_STATUS;
spin_unlock_bh(&cmd->istate_lock);
state = ISTATE_SEND_STATUS;
goto check_rsp_state;
}
break;
case ISTATE_SEND_STATUS:
case ISTATE_SEND_STATUS_RECOVERY:
ret = iscsit_send_response(cmd, conn);
break;
case ISTATE_SEND_LOGOUTRSP:
ret = iscsit_send_logout(cmd, conn);
break;
case ISTATE_SEND_ASYNCMSG:
ret = iscsit_send_conn_drop_async_message(
cmd, conn);
break;
case ISTATE_SEND_NOPIN:
ret = iscsit_send_nopin(cmd, conn);
break;
case ISTATE_SEND_REJECT:
ret = iscsit_send_reject(cmd, conn);
break;
case ISTATE_SEND_TASKMGTRSP:
ret = iscsit_send_task_mgt_rsp(cmd, conn);
if (ret != 0)
break;
ret = iscsit_tmr_post_handler(cmd, conn);
if (ret != 0)
iscsit_fall_back_to_erl0(conn->sess);
break;
case ISTATE_SEND_TEXTRSP:
ret = iscsit_send_text_rsp(cmd, conn);
break;
default:
pr_err("Unknown Opcode: 0x%02x ITT:"
" 0x%08x, i_state: %d on CID: %hu\n",
cmd->iscsi_opcode, cmd->init_task_tag,
state, conn->cid);
goto err;
}
if (ret < 0)
goto err;
switch (state) {
case ISTATE_SEND_LOGOUTRSP:
if (!iscsit_logout_post_handler(cmd, conn))
return -ECONNRESET;
fallthrough;
case ISTATE_SEND_STATUS:
case ISTATE_SEND_ASYNCMSG:
case ISTATE_SEND_NOPIN:
case ISTATE_SEND_STATUS_RECOVERY:
case ISTATE_SEND_TEXTRSP:
case ISTATE_SEND_TASKMGTRSP:
case ISTATE_SEND_REJECT:
spin_lock_bh(&cmd->istate_lock);
cmd->i_state = ISTATE_SENT_STATUS;
spin_unlock_bh(&cmd->istate_lock);
break;
default:
pr_err("Unknown Opcode: 0x%02x ITT:"
" 0x%08x, i_state: %d on CID: %hu\n",
cmd->iscsi_opcode, cmd->init_task_tag,
cmd->i_state, conn->cid);
goto err;
}
if (atomic_read(&conn->check_immediate_queue))
return 1;
return 0;
err:
return -1;
}
EXPORT_SYMBOL(iscsit_response_queue);
static int iscsit_handle_response_queue(struct iscsit_conn *conn)
{
struct iscsit_transport *t = conn->conn_transport;
struct iscsi_queue_req *qr;
struct iscsit_cmd *cmd;
u8 state;
int ret;
while ((qr = iscsit_get_cmd_from_response_queue(conn))) {
cmd = qr->cmd;
state = qr->state;
kmem_cache_free(lio_qr_cache, qr);
ret = t->iscsit_response_queue(conn, cmd, state);
if (ret == 1 || ret < 0)
return ret;
}
return 0;
}
int iscsi_target_tx_thread(void *arg)
{
int ret = 0;
struct iscsit_conn *conn = arg;
bool conn_freed = false;
/*
* Allow ourselves to be interrupted by SIGINT so that a
* connection recovery / failure event can be triggered externally.
*/
allow_signal(SIGINT);
while (!kthread_should_stop()) {
/*
* Ensure that both TX and RX per connection kthreads
* are scheduled to run on the same CPU.
*/
iscsit_thread_check_cpumask(conn, current, 1);
wait_event_interruptible(conn->queues_wq,
!iscsit_conn_all_queues_empty(conn));
if (signal_pending(current))
goto transport_err;
get_immediate:
ret = iscsit_handle_immediate_queue(conn);
if (ret < 0)
goto transport_err;
ret = iscsit_handle_response_queue(conn);
if (ret == 1) {
goto get_immediate;
} else if (ret == -ECONNRESET) {
conn_freed = true;
goto out;
} else if (ret < 0) {
goto transport_err;
}
}
transport_err:
/*
* Avoid the normal connection failure code-path if this connection
* is still within LOGIN mode, and iscsi_np process context is
* responsible for cleaning up the early connection failure.
*/
if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
iscsit_take_action_for_connection_exit(conn, &conn_freed);
out:
if (!conn_freed) {
while (!kthread_should_stop()) {
msleep(100);
}
}
return 0;
}
static int iscsi_target_rx_opcode(struct iscsit_conn *conn, unsigned char *buf)
{
struct iscsi_hdr *hdr = (struct iscsi_hdr *)buf;
struct iscsit_cmd *cmd;
int ret = 0;
switch (hdr->opcode & ISCSI_OPCODE_MASK) {
case ISCSI_OP_SCSI_CMD:
cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;
ret = iscsit_handle_scsi_cmd(conn, cmd, buf);
break;
case ISCSI_OP_SCSI_DATA_OUT:
ret = iscsit_handle_data_out(conn, buf);
break;
case ISCSI_OP_NOOP_OUT:
cmd = NULL;
if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;
}
ret = iscsit_handle_nop_out(conn, cmd, buf);
break;
case ISCSI_OP_SCSI_TMFUNC:
cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;
ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
break;
case ISCSI_OP_TEXT:
if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
if (!cmd)
goto reject;
} else {
cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;
}
ret = iscsit_handle_text_cmd(conn, cmd, buf);
break;
case ISCSI_OP_LOGOUT:
cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;
ret = iscsit_handle_logout_cmd(conn, cmd, buf);
if (ret > 0)
wait_for_completion_timeout(&conn->conn_logout_comp,
SECONDS_FOR_LOGOUT_COMP * HZ);
break;
case ISCSI_OP_SNACK:
ret = iscsit_handle_snack(conn, buf);
break;
default:
pr_err("Got unknown iSCSI OpCode: 0x%02x\n", hdr->opcode);
if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
pr_err("Cannot recover from unknown"
" opcode while ERL=0, closing iSCSI connection.\n");
return -1;
}
pr_err("Unable to recover from unknown opcode while OFMarker=No,"
" closing iSCSI connection.\n");
ret = -1;
break;
}
return ret;
reject:
return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
}
static bool iscsi_target_check_conn_state(struct iscsit_conn *conn)
{
bool ret;
spin_lock_bh(&conn->state_lock);
ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN);
spin_unlock_bh(&conn->state_lock);
return ret;
}
static void iscsit_get_rx_pdu(struct iscsit_conn *conn)
{
int ret;
u8 *buffer, *tmp_buf, opcode;
u32 checksum = 0, digest = 0;
struct iscsi_hdr *hdr;
struct kvec iov;
buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
if (!buffer)
return;
while (!kthread_should_stop()) {
/*
* Ensure that both TX and RX per connection kthreads
* are scheduled to run on the same CPU.
*/
iscsit_thread_check_cpumask(conn, current, 0);
memset(&iov, 0, sizeof(struct kvec));
iov.iov_base = buffer;
iov.iov_len = ISCSI_HDR_LEN;
ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
if (ret != ISCSI_HDR_LEN) {
iscsit_rx_thread_wait_for_tcp(conn);
break;
}
hdr = (struct iscsi_hdr *) buffer;
if (hdr->hlength) {
iov.iov_len = hdr->hlength * 4;
tmp_buf = krealloc(buffer,
ISCSI_HDR_LEN + iov.iov_len,
GFP_KERNEL);
if (!tmp_buf)
break;
buffer = tmp_buf;
iov.iov_base = &buffer[ISCSI_HDR_LEN];
ret = rx_data(conn, &iov, 1, iov.iov_len);
if (ret != iov.iov_len) {
iscsit_rx_thread_wait_for_tcp(conn);
break;
}
}
if (conn->conn_ops->HeaderDigest) {
iov.iov_base = &digest;
iov.iov_len = ISCSI_CRC_LEN;
ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
if (ret != ISCSI_CRC_LEN) {
iscsit_rx_thread_wait_for_tcp(conn);
break;
}
iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
ISCSI_HDR_LEN, 0, NULL,
&checksum);
if (digest != checksum) {
pr_err("HeaderDigest CRC32C failed,"
" received 0x%08x, computed 0x%08x\n",
digest, checksum);
/*
* Set the PDU to 0xff so it will intentionally
* hit default in the switch below.
*/
memset(buffer, 0xff, ISCSI_HDR_LEN);
atomic_long_inc(&conn->sess->conn_digest_errors);
} else {
pr_debug("Got HeaderDigest CRC32C"
" 0x%08x\n", checksum);
}
}
if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
break;
opcode = buffer[0] & ISCSI_OPCODE_MASK;
if (conn->sess->sess_ops->SessionType &&
((!(opcode & ISCSI_OP_TEXT)) ||
(!(opcode & ISCSI_OP_LOGOUT)))) {
pr_err("Received illegal iSCSI Opcode: 0x%02x"
" while in Discovery Session, rejecting.\n", opcode);
iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
buffer);
break;
}
ret = iscsi_target_rx_opcode(conn, buffer);
if (ret < 0)
break;
}
kfree(buffer);
}
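/*
 * Illustrative sketch, not part of the upstream driver: per PDU, the RX
 * loop above reads a fixed 48-byte basic header segment, then hlength * 4
 * bytes of additional header segments if present, then a 4-byte CRC32C
 * HeaderDigest if one was negotiated:
 */
static unsigned int example_rx_header_bytes(unsigned char hlength, bool header_digest)
{
	unsigned int total = ISCSI_HDR_LEN;		/* 48-byte BHS */

	total += (unsigned int)hlength * 4;		/* optional AHS */
	if (header_digest)
		total += ISCSI_CRC_LEN;			/* CRC32C over the BHS */
	return total;
}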
int iscsi_target_rx_thread(void *arg)
{
int rc;
struct iscsit_conn *conn = arg;
bool conn_freed = false;
/*
* Allow ourselves to be interrupted by SIGINT so that a
* connection recovery / failure event can be triggered externally.
*/
allow_signal(SIGINT);
/*
* Wait for iscsi_post_login_handler() to complete before allowing
* incoming iscsi/tcp socket I/O, and/or failing the connection.
*/
rc = wait_for_completion_interruptible(&conn->rx_login_comp);
if (rc < 0 || iscsi_target_check_conn_state(conn))
goto out;
if (!conn->conn_transport->iscsit_get_rx_pdu)
return 0;
conn->conn_transport->iscsit_get_rx_pdu(conn);
if (!signal_pending(current))
atomic_set(&conn->transport_failed, 1);
iscsit_take_action_for_connection_exit(conn, &conn_freed);
out:
if (!conn_freed) {
while (!kthread_should_stop()) {
msleep(100);
}
}
return 0;
}
static void iscsit_release_commands_from_conn(struct iscsit_conn *conn)
{
LIST_HEAD(tmp_list);
struct iscsit_cmd *cmd = NULL, *cmd_tmp = NULL;
struct iscsit_session *sess = conn->sess;
/*
* We expect this function to only ever be called from either RX or TX
* thread context via iscsit_close_connection() once the other context
* has been reset -> returned sleeping pre-handler state.
*/
spin_lock_bh(&conn->cmd_lock);
list_splice_init(&conn->conn_cmd_list, &tmp_list);
list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
struct se_cmd *se_cmd = &cmd->se_cmd;
if (!se_cmd->se_tfo)
continue;
spin_lock_irq(&se_cmd->t_state_lock);
if (se_cmd->transport_state & CMD_T_ABORTED) {
if (!(se_cmd->transport_state & CMD_T_TAS))
/*
* LIO's abort path owns the cleanup for this,
* so put it back on the list and let
* aborted_task handle it.
*/
list_move_tail(&cmd->i_conn_node,
&conn->conn_cmd_list);
} else {
se_cmd->transport_state |= CMD_T_FABRIC_STOP;
}
if (cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
/*
* We never submitted the cmd to LIO core, so we have
* to tell LIO to perform the completion process.
*/
spin_unlock_irq(&se_cmd->t_state_lock);
target_complete_cmd(&cmd->se_cmd, SAM_STAT_TASK_ABORTED);
continue;
}
spin_unlock_irq(&se_cmd->t_state_lock);
}
spin_unlock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
list_del_init(&cmd->i_conn_node);
iscsit_increment_maxcmdsn(cmd, sess);
iscsit_free_cmd(cmd, true);
}
/*
* Wait on commands that were cleaned up via the aborted_task path.
* LLDs that implement iscsit_wait_conn will already have waited for
* commands.
*/
if (!conn->conn_transport->iscsit_wait_conn) {
target_stop_cmd_counter(conn->cmd_cnt);
target_wait_for_cmds(conn->cmd_cnt);
}
}
static void iscsit_stop_timers_for_cmds(
struct iscsit_conn *conn)
{
struct iscsit_cmd *cmd;
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
if (cmd->data_direction == DMA_TO_DEVICE)
iscsit_stop_dataout_timer(cmd);
}
spin_unlock_bh(&conn->cmd_lock);
}
int iscsit_close_connection(
struct iscsit_conn *conn)
{
int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT);
struct iscsit_session *sess = conn->sess;
pr_debug("Closing iSCSI connection CID %hu on SID:"
" %u\n", conn->cid, sess->sid);
/*
* Always up conn_logout_comp for the traditional TCP and HW_OFFLOAD
* case just in case the RX Thread in iscsi_target_rx_opcode() is
* sleeping and the logout response never got sent because the
* connection failed.
*
* However for iser-target, isert_wait4logout() is using conn_logout_comp
* to signal logout response TX interrupt completion. Go ahead and skip
* this for iser since isert_rx_opcode() does not wait on logout failure,
* and to avoid iscsit_conn pointer dereference in iser-target code.
*/
if (!conn->conn_transport->rdma_shutdown)
complete(&conn->conn_logout_comp);
if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
if (conn->tx_thread &&
cmpxchg(&conn->tx_thread_active, true, false)) {
send_sig(SIGINT, conn->tx_thread, 1);
kthread_stop(conn->tx_thread);
}
} else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) {
if (conn->rx_thread &&
cmpxchg(&conn->rx_thread_active, true, false)) {
send_sig(SIGINT, conn->rx_thread, 1);
kthread_stop(conn->rx_thread);
}
}
spin_lock(&iscsit_global->ts_bitmap_lock);
bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
get_order(1));
spin_unlock(&iscsit_global->ts_bitmap_lock);
iscsit_stop_timers_for_cmds(conn);
iscsit_stop_nopin_response_timer(conn);
iscsit_stop_nopin_timer(conn);
if (conn->conn_transport->iscsit_wait_conn)
conn->conn_transport->iscsit_wait_conn(conn);
/*
* During Connection recovery drop unacknowledged out of order
* commands for this connection, and prepare the other commands
* for reallegiance.
*
* During normal operation clear the out of order commands (but
* do not free the struct iscsi_ooo_cmdsn's) and release all
* struct iscsit_cmds.
*/
if (atomic_read(&conn->connection_recovery)) {
iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn);
iscsit_prepare_cmds_for_reallegiance(conn);
} else {
iscsit_clear_ooo_cmdsns_for_conn(conn);
iscsit_release_commands_from_conn(conn);
}
iscsit_free_queue_reqs_for_conn(conn);
/*
* Handle decrementing session or connection usage count if
* a logout response was not able to be sent because the
* connection failed. Fall back to Session Recovery here.
*/
if (atomic_read(&conn->conn_logout_remove)) {
if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
iscsit_dec_conn_usage_count(conn);
iscsit_dec_session_usage_count(sess);
}
if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION)
iscsit_dec_conn_usage_count(conn);
atomic_set(&conn->conn_logout_remove, 0);
atomic_set(&sess->session_reinstatement, 0);
atomic_set(&sess->session_fall_back_to_erl0, 1);
}
spin_lock_bh(&sess->conn_lock);
list_del(&conn->conn_list);
/*
* Attempt to let the Initiator know this connection failed by
* sending a Connection Dropped Async Message on another
* active connection.
*/
if (atomic_read(&conn->connection_recovery))
iscsit_build_conn_drop_async_message(conn);
spin_unlock_bh(&sess->conn_lock);
/*
* If connection reinstatement is being performed on this connection,
* up the connection reinstatement semaphore that is being blocked on
* in iscsit_cause_connection_reinstatement().
*/
spin_lock_bh(&conn->state_lock);
if (atomic_read(&conn->sleep_on_conn_wait_comp)) {
spin_unlock_bh(&conn->state_lock);
complete(&conn->conn_wait_comp);
wait_for_completion(&conn->conn_post_wait_comp);
spin_lock_bh(&conn->state_lock);
}
/*
* If connection reinstatement is being performed on this connection
* by receiving a REMOVECONNFORRECOVERY logout request, up the
* connection wait rcfr semaphore that is being blocked on
* in iscsit_connection_reinstatement_rcfr().
*/
if (atomic_read(&conn->connection_wait_rcfr)) {
spin_unlock_bh(&conn->state_lock);
complete(&conn->conn_wait_rcfr_comp);
wait_for_completion(&conn->conn_post_wait_comp);
spin_lock_bh(&conn->state_lock);
}
atomic_set(&conn->connection_reinstatement, 1);
spin_unlock_bh(&conn->state_lock);
/*
* If any other processes are accessing this connection pointer we
* must wait until they have completed.
*/
iscsit_check_conn_usage_count(conn);
ahash_request_free(conn->conn_tx_hash);
if (conn->conn_rx_hash) {
struct crypto_ahash *tfm;
tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);
ahash_request_free(conn->conn_rx_hash);
crypto_free_ahash(tfm);
}
if (conn->sock)
sock_release(conn->sock);
if (conn->conn_transport->iscsit_free_conn)
conn->conn_transport->iscsit_free_conn(conn);
pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
conn->conn_state = TARG_CONN_STATE_FREE;
iscsit_free_conn(conn);
spin_lock_bh(&sess->conn_lock);
atomic_dec(&sess->nconn);
pr_debug("Decremented iSCSI connection count to %d from node:"
" %s\n", atomic_read(&sess->nconn),
sess->sess_ops->InitiatorName);
/*
* Make sure that if one connection fails in a non ERL=2 iSCSI
* Session that they all fail.
*/
if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout &&
!atomic_read(&sess->session_logout))
atomic_set(&sess->session_fall_back_to_erl0, 1);
/*
* If this was not the last connection in the session, and we are
* performing session reinstatement or falling back to ERL=0, call
* iscsit_stop_session() without sleeping to shutdown the other
* active connections.
*/
if (atomic_read(&sess->nconn)) {
if (!atomic_read(&sess->session_reinstatement) &&
!atomic_read(&sess->session_fall_back_to_erl0)) {
spin_unlock_bh(&sess->conn_lock);
return 0;
}
if (!atomic_read(&sess->session_stop_active)) {
atomic_set(&sess->session_stop_active, 1);
spin_unlock_bh(&sess->conn_lock);
iscsit_stop_session(sess, 0, 0);
return 0;
}
spin_unlock_bh(&sess->conn_lock);
return 0;
}
/*
* If this was the last connection in the session and one of the
* following is occurring:
*
* Session Reinstatement is not being performed and the session is falling
* back to ERL=0: call iscsit_close_session().
*
* Session Logout was requested. iscsit_close_session() will be called
* elsewhere.
*
* Session Continuation is not being performed, start the Time2Retain
* handler and check if sleep_on_sess_wait_sem is active.
*/
if (!atomic_read(&sess->session_reinstatement) &&
atomic_read(&sess->session_fall_back_to_erl0)) {
spin_unlock_bh(&sess->conn_lock);
complete_all(&sess->session_wait_comp);
iscsit_close_session(sess, true);
return 0;
} else if (atomic_read(&sess->session_logout)) {
pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
sess->session_state = TARG_SESS_STATE_FREE;
if (atomic_read(&sess->session_close)) {
spin_unlock_bh(&sess->conn_lock);
complete_all(&sess->session_wait_comp);
iscsit_close_session(sess, true);
} else {
spin_unlock_bh(&sess->conn_lock);
}
return 0;
} else {
pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
sess->session_state = TARG_SESS_STATE_FAILED;
if (!atomic_read(&sess->session_continuation))
iscsit_start_time2retain_handler(sess);
if (atomic_read(&sess->session_close)) {
spin_unlock_bh(&sess->conn_lock);
complete_all(&sess->session_wait_comp);
iscsit_close_session(sess, true);
} else {
spin_unlock_bh(&sess->conn_lock);
}
return 0;
}
}
/*
* If the iSCSI Session for the iSCSI Initiator Node exists,
* forcefully shutdown the iSCSI NEXUS.
*/
int iscsit_close_session(struct iscsit_session *sess, bool can_sleep)
{
struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
if (atomic_read(&sess->nconn)) {
pr_err("%d connection(s) still exist for iSCSI session"
" to %s\n", atomic_read(&sess->nconn),
sess->sess_ops->InitiatorName);
BUG();
}
spin_lock_bh(&se_tpg->session_lock);
atomic_set(&sess->session_logout, 1);
atomic_set(&sess->session_reinstatement, 1);
iscsit_stop_time2retain_timer(sess);
spin_unlock_bh(&se_tpg->session_lock);
if (sess->sess_ops->ErrorRecoveryLevel == 2)
iscsit_free_connection_recovery_entries(sess);
/*
* transport_deregister_session_configfs() will clear the
* struct se_node_acl->nacl_sess pointer now as a iscsi_np process context
* can be setting it again with __transport_register_session() in
* iscsi_post_login_handler() again after the iscsit_stop_session()
* completes in iscsi_np context.
*/
transport_deregister_session_configfs(sess->se_sess);
/*
* If any other processes are accessing this session pointer we must
* wait until they have completed. If we are in an interrupt (the
* time2retain handler) and there is an active session usage count, we
* restart the timer and exit.
*/
if (iscsit_check_session_usage_count(sess, can_sleep)) {
atomic_set(&sess->session_logout, 0);
iscsit_start_time2retain_handler(sess);
return 0;
}
transport_deregister_session(sess->se_sess);
iscsit_free_all_ooo_cmdsns(sess);
spin_lock_bh(&se_tpg->session_lock);
pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
sess->session_state = TARG_SESS_STATE_FREE;
pr_debug("Released iSCSI session from node: %s\n",
sess->sess_ops->InitiatorName);
tpg->nsessions--;
if (tpg->tpg_tiqn)
tpg->tpg_tiqn->tiqn_nsessions--;
pr_debug("Decremented number of active iSCSI Sessions on"
" iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);
ida_free(&sess_ida, sess->session_index);
kfree(sess->sess_ops);
sess->sess_ops = NULL;
spin_unlock_bh(&se_tpg->session_lock);
kfree(sess);
return 0;
}
static void iscsit_logout_post_handler_closesession(
struct iscsit_conn *conn)
{
struct iscsit_session *sess = conn->sess;
int sleep = 1;
/*
* Traditional iscsi/tcp will invoke this logic from TX thread
* context during session logout, so clear tx_thread_active and
* sleep if iscsit_close_connection() has not already occurred.
*
* Since iser-target invokes this logic from its own workqueue,
* always sleep waiting for RX/TX thread shutdown to complete
* within iscsit_close_connection().
*/
if (!conn->conn_transport->rdma_shutdown) {
sleep = cmpxchg(&conn->tx_thread_active, true, false);
if (!sleep)
return;
}
atomic_set(&conn->conn_logout_remove, 0);
complete(&conn->conn_logout_comp);
iscsit_dec_conn_usage_count(conn);
atomic_set(&sess->session_close, 1);
iscsit_stop_session(sess, sleep, sleep);
iscsit_dec_session_usage_count(sess);
}
static void iscsit_logout_post_handler_samecid(
struct iscsit_conn *conn)
{
int sleep = 1;
if (!conn->conn_transport->rdma_shutdown) {
sleep = cmpxchg(&conn->tx_thread_active, true, false);
if (!sleep)
return;
}
atomic_set(&conn->conn_logout_remove, 0);
complete(&conn->conn_logout_comp);
iscsit_cause_connection_reinstatement(conn, sleep);
iscsit_dec_conn_usage_count(conn);
}
static void iscsit_logout_post_handler_diffcid(
struct iscsit_conn *conn,
u16 cid)
{
struct iscsit_conn *l_conn;
struct iscsit_session *sess = conn->sess;
bool conn_found = false;
if (!sess)
return;
spin_lock_bh(&sess->conn_lock);
list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
if (l_conn->cid == cid) {
iscsit_inc_conn_usage_count(l_conn);
conn_found = true;
break;
}
}
spin_unlock_bh(&sess->conn_lock);
if (!conn_found)
return;
if (l_conn->sock)
l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN);
spin_lock_bh(&l_conn->state_lock);
pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
spin_unlock_bh(&l_conn->state_lock);
iscsit_cause_connection_reinstatement(l_conn, 1);
iscsit_dec_conn_usage_count(l_conn);
}
/*
* Return of 0 causes the TX thread to restart.
*/
int iscsit_logout_post_handler(
struct iscsit_cmd *cmd,
struct iscsit_conn *conn)
{
int ret = 0;
switch (cmd->logout_reason) {
case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
switch (cmd->logout_response) {
case ISCSI_LOGOUT_SUCCESS:
case ISCSI_LOGOUT_CLEANUP_FAILED:
default:
iscsit_logout_post_handler_closesession(conn);
break;
}
break;
case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
if (conn->cid == cmd->logout_cid) {
switch (cmd->logout_response) {
case ISCSI_LOGOUT_SUCCESS:
case ISCSI_LOGOUT_CLEANUP_FAILED:
default:
iscsit_logout_post_handler_samecid(conn);
break;
}
} else {
switch (cmd->logout_response) {
case ISCSI_LOGOUT_SUCCESS:
iscsit_logout_post_handler_diffcid(conn,
cmd->logout_cid);
break;
case ISCSI_LOGOUT_CID_NOT_FOUND:
case ISCSI_LOGOUT_CLEANUP_FAILED:
default:
break;
}
ret = 1;
}
break;
case ISCSI_LOGOUT_REASON_RECOVERY:
switch (cmd->logout_response) {
case ISCSI_LOGOUT_SUCCESS:
case ISCSI_LOGOUT_CID_NOT_FOUND:
case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED:
case ISCSI_LOGOUT_CLEANUP_FAILED:
default:
break;
}
ret = 1;
break;
default:
break;
}
return ret;
}
EXPORT_SYMBOL(iscsit_logout_post_handler);
void iscsit_fail_session(struct iscsit_session *sess)
{
struct iscsit_conn *conn;
spin_lock_bh(&sess->conn_lock);
list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
}
spin_unlock_bh(&sess->conn_lock);
pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
sess->session_state = TARG_SESS_STATE_FAILED;
}
void iscsit_stop_session(
struct iscsit_session *sess,
int session_sleep,
int connection_sleep)
{
u16 conn_count = atomic_read(&sess->nconn);
struct iscsit_conn *conn, *conn_tmp = NULL;
int is_last;
spin_lock_bh(&sess->conn_lock);
if (connection_sleep) {
list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
conn_list) {
if (conn_count == 0)
break;
if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
is_last = 1;
} else {
iscsit_inc_conn_usage_count(conn_tmp);
is_last = 0;
}
iscsit_inc_conn_usage_count(conn);
spin_unlock_bh(&sess->conn_lock);
iscsit_cause_connection_reinstatement(conn, 1);
spin_lock_bh(&sess->conn_lock);
iscsit_dec_conn_usage_count(conn);
if (is_last == 0)
iscsit_dec_conn_usage_count(conn_tmp);
conn_count--;
}
} else {
list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
iscsit_cause_connection_reinstatement(conn, 0);
}
if (session_sleep && atomic_read(&sess->nconn)) {
spin_unlock_bh(&sess->conn_lock);
wait_for_completion(&sess->session_wait_comp);
} else
spin_unlock_bh(&sess->conn_lock);
}
int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
{
struct iscsit_session *sess;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
struct se_session *se_sess, *se_sess_tmp;
LIST_HEAD(free_list);
int session_count = 0;
spin_lock_bh(&se_tpg->session_lock);
if (tpg->nsessions && !force) {
spin_unlock_bh(&se_tpg->session_lock);
return -1;
}
list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
sess_list) {
sess = (struct iscsit_session *)se_sess->fabric_sess_ptr;
spin_lock(&sess->conn_lock);
if (atomic_read(&sess->session_fall_back_to_erl0) ||
atomic_read(&sess->session_logout) ||
atomic_read(&sess->session_close) ||
(sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess->conn_lock);
continue;
}
iscsit_inc_session_usage_count(sess);
atomic_set(&sess->session_reinstatement, 1);
atomic_set(&sess->session_fall_back_to_erl0, 1);
atomic_set(&sess->session_close, 1);
spin_unlock(&sess->conn_lock);
list_move_tail(&se_sess->sess_list, &free_list);
}
spin_unlock_bh(&se_tpg->session_lock);
list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
sess = (struct iscsit_session *)se_sess->fabric_sess_ptr;
list_del_init(&se_sess->sess_list);
iscsit_stop_session(sess, 1, 1);
iscsit_dec_session_usage_count(sess);
session_count++;
}
pr_debug("Released %d iSCSI Session(s) from Target Portal"
" Group: %hu\n", session_count, tpg->tpgt);
return 0;
}
MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure");
MODULE_VERSION("4.1.x");
MODULE_AUTHOR("[email protected]");
MODULE_LICENSE("GPL");
module_init(iscsi_target_init_module);
module_exit(iscsi_target_cleanup_module);
|
linux-master
|
drivers/target/iscsi/iscsi_target.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains the main functions related to Initiator Node Attributes.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <target/target_core_base.h>
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_device.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target_nodeattrib.h"
static inline char *iscsit_na_get_initiatorname(
struct iscsi_node_acl *nacl)
{
struct se_node_acl *se_nacl = &nacl->se_node_acl;
return &se_nacl->initiatorname[0];
}
void iscsit_set_default_node_attribues(
struct iscsi_node_acl *acl,
struct iscsi_portal_group *tpg)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
a->authentication = NA_AUTHENTICATION_INHERITED;
a->dataout_timeout = NA_DATAOUT_TIMEOUT;
a->dataout_timeout_retries = NA_DATAOUT_TIMEOUT_RETRIES;
a->nopin_timeout = NA_NOPIN_TIMEOUT;
a->nopin_response_timeout = NA_NOPIN_RESPONSE_TIMEOUT;
a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS;
a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS;
a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS;
a->default_erl = tpg->tpg_attrib.default_erl;
}
int iscsit_na_dataout_timeout(
struct iscsi_node_acl *acl,
u32 dataout_timeout)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
if (dataout_timeout > NA_DATAOUT_TIMEOUT_MAX) {
pr_err("Requested DataOut Timeout %u larger than"
" maximum %u\n", dataout_timeout,
NA_DATAOUT_TIMEOUT_MAX);
return -EINVAL;
} else if (dataout_timeout < NA_DATAOUT_TIMEOUT_MIX) {
pr_err("Requested DataOut Timeout %u smaller than"
" minimum %u\n", dataout_timeout,
NA_DATAOUT_TIMEOUT_MIX);
return -EINVAL;
}
a->dataout_timeout = dataout_timeout;
pr_debug("Set DataOut Timeout to %u for Initiator Node"
" %s\n", a->dataout_timeout, iscsit_na_get_initiatorname(acl));
return 0;
}
int iscsit_na_dataout_timeout_retries(
struct iscsi_node_acl *acl,
u32 dataout_timeout_retries)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
if (dataout_timeout_retries > NA_DATAOUT_TIMEOUT_RETRIES_MAX) {
pr_err("Requested DataOut Timeout Retries %u larger"
" than maximum %u", dataout_timeout_retries,
NA_DATAOUT_TIMEOUT_RETRIES_MAX);
return -EINVAL;
} else if (dataout_timeout_retries < NA_DATAOUT_TIMEOUT_RETRIES_MIN) {
pr_err("Requested DataOut Timeout Retries %u smaller"
" than minimum %u", dataout_timeout_retries,
NA_DATAOUT_TIMEOUT_RETRIES_MIN);
return -EINVAL;
}
a->dataout_timeout_retries = dataout_timeout_retries;
pr_debug("Set DataOut Timeout Retries to %u for"
" Initiator Node %s\n", a->dataout_timeout_retries,
iscsit_na_get_initiatorname(acl));
return 0;
}
int iscsit_na_nopin_timeout(
struct iscsi_node_acl *acl,
u32 nopin_timeout)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
struct iscsit_session *sess;
struct iscsit_conn *conn;
struct se_node_acl *se_nacl = &a->nacl->se_node_acl;
struct se_session *se_sess;
u32 orig_nopin_timeout = a->nopin_timeout;
if (nopin_timeout > NA_NOPIN_TIMEOUT_MAX) {
pr_err("Requested NopIn Timeout %u larger than maximum"
" %u\n", nopin_timeout, NA_NOPIN_TIMEOUT_MAX);
return -EINVAL;
} else if ((nopin_timeout < NA_NOPIN_TIMEOUT_MIN) &&
(nopin_timeout != 0)) {
pr_err("Requested NopIn Timeout %u smaller than"
" minimum %u and not 0\n", nopin_timeout,
NA_NOPIN_TIMEOUT_MIN);
return -EINVAL;
}
a->nopin_timeout = nopin_timeout;
pr_debug("Set NopIn Timeout to %u for Initiator"
" Node %s\n", a->nopin_timeout,
iscsit_na_get_initiatorname(acl));
/*
* Reenable disabled nopin_timeout timer for all iSCSI connections.
*/
if (!orig_nopin_timeout) {
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
spin_lock(&sess->conn_lock);
list_for_each_entry(conn, &sess->sess_conn_list,
conn_list) {
if (conn->conn_state !=
TARG_CONN_STATE_LOGGED_IN)
continue;
spin_lock(&conn->nopin_timer_lock);
__iscsit_start_nopin_timer(conn);
spin_unlock(&conn->nopin_timer_lock);
}
spin_unlock(&sess->conn_lock);
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
}
return 0;
}
int iscsit_na_nopin_response_timeout(
struct iscsi_node_acl *acl,
u32 nopin_response_timeout)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
if (nopin_response_timeout > NA_NOPIN_RESPONSE_TIMEOUT_MAX) {
pr_err("Requested NopIn Response Timeout %u larger"
" than maximum %u\n", nopin_response_timeout,
NA_NOPIN_RESPONSE_TIMEOUT_MAX);
return -EINVAL;
} else if (nopin_response_timeout < NA_NOPIN_RESPONSE_TIMEOUT_MIN) {
pr_err("Requested NopIn Response Timeout %u smaller"
" than minimum %u\n", nopin_response_timeout,
NA_NOPIN_RESPONSE_TIMEOUT_MIN);
return -EINVAL;
}
a->nopin_response_timeout = nopin_response_timeout;
pr_debug("Set NopIn Response Timeout to %u for"
" Initiator Node %s\n", a->nopin_timeout,
iscsit_na_get_initiatorname(acl));
return 0;
}
int iscsit_na_random_datain_pdu_offsets(
struct iscsi_node_acl *acl,
u32 random_datain_pdu_offsets)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
if (random_datain_pdu_offsets != 0 && random_datain_pdu_offsets != 1) {
pr_err("Requested Random DataIN PDU Offsets: %u not"
" 0 or 1\n", random_datain_pdu_offsets);
return -EINVAL;
}
a->random_datain_pdu_offsets = random_datain_pdu_offsets;
pr_debug("Set Random DataIN PDU Offsets to %u for"
" Initiator Node %s\n", a->random_datain_pdu_offsets,
iscsit_na_get_initiatorname(acl));
return 0;
}
int iscsit_na_random_datain_seq_offsets(
struct iscsi_node_acl *acl,
u32 random_datain_seq_offsets)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
if (random_datain_seq_offsets != 0 && random_datain_seq_offsets != 1) {
pr_err("Requested Random DataIN Sequence Offsets: %u"
" not 0 or 1\n", random_datain_seq_offsets);
return -EINVAL;
}
a->random_datain_seq_offsets = random_datain_seq_offsets;
pr_debug("Set Random DataIN Sequence Offsets to %u for"
" Initiator Node %s\n", a->random_datain_seq_offsets,
iscsit_na_get_initiatorname(acl));
return 0;
}
int iscsit_na_random_r2t_offsets(
struct iscsi_node_acl *acl,
u32 random_r2t_offsets)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
if (random_r2t_offsets != 0 && random_r2t_offsets != 1) {
pr_err("Requested Random R2T Offsets: %u not"
" 0 or 1\n", random_r2t_offsets);
return -EINVAL;
}
a->random_r2t_offsets = random_r2t_offsets;
pr_debug("Set Random R2T Offsets to %u for"
" Initiator Node %s\n", a->random_r2t_offsets,
iscsit_na_get_initiatorname(acl));
return 0;
}
int iscsit_na_default_erl(
struct iscsi_node_acl *acl,
u32 default_erl)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
if (default_erl != 0 && default_erl != 1 && default_erl != 2) {
pr_err("Requested default ERL: %u not 0, 1, or 2\n",
default_erl);
return -EINVAL;
}
a->default_erl = default_erl;
pr_debug("Set use ERL0 flag to %u for Initiator"
" Node %s\n", a->default_erl,
iscsit_na_get_initiatorname(acl));
return 0;
}
|
linux-master
|
drivers/target/iscsi/iscsi_target_nodeattrib.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains error recovery level two functions used by
* the iSCSI Target driver.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <linux/slab.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_datain_values.h"
#include "iscsi_target_util.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target.h"
/*
* FIXME: Does RData SNACK apply here as well?
*/
void iscsit_create_conn_recovery_datain_values(
struct iscsit_cmd *cmd,
__be32 exp_data_sn)
{
u32 data_sn = 0;
struct iscsit_conn *conn = cmd->conn;
cmd->next_burst_len = 0;
cmd->read_data_done = 0;
while (be32_to_cpu(exp_data_sn) > data_sn) {
if ((cmd->next_burst_len +
conn->conn_ops->MaxRecvDataSegmentLength) <
conn->sess->sess_ops->MaxBurstLength) {
cmd->read_data_done +=
conn->conn_ops->MaxRecvDataSegmentLength;
cmd->next_burst_len +=
conn->conn_ops->MaxRecvDataSegmentLength;
} else {
cmd->read_data_done +=
(conn->sess->sess_ops->MaxBurstLength -
cmd->next_burst_len);
cmd->next_burst_len = 0;
}
data_sn++;
}
}
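/*
 * Illustrative sketch, not part of the upstream driver: when MaxBurstLength
 * is a whole multiple of MaxRecvDataSegmentLength, the loop above reduces
 * to a closed form.  With 8k PDUs and a 64k burst, ExpDataSN = 10 yields
 * read_data_done = 64k + 2 * 8k = 80k and a 16k partial next burst.
 */
static unsigned int example_recovered_read_bytes(unsigned int exp_data_sn,
						 unsigned int mrdsl,
						 unsigned int mbl)
{
	unsigned int pdus_per_burst = mbl / mrdsl;	/* assumes mbl % mrdsl == 0 */
	unsigned int full_bursts = exp_data_sn / pdus_per_burst;
	unsigned int partial_pdus = exp_data_sn % pdus_per_burst;

	return full_bursts * mbl + partial_pdus * mrdsl;
}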
void iscsit_create_conn_recovery_dataout_values(
struct iscsit_cmd *cmd)
{
u32 write_data_done = 0;
struct iscsit_conn *conn = cmd->conn;
cmd->data_sn = 0;
cmd->next_burst_len = 0;
while (cmd->write_data_done > write_data_done) {
if ((write_data_done + conn->sess->sess_ops->MaxBurstLength) <=
cmd->write_data_done)
write_data_done += conn->sess->sess_ops->MaxBurstLength;
else
break;
}
cmd->write_data_done = write_data_done;
}
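/*
 * Illustrative sketch, not part of the upstream driver: the loop above is
 * just rounding the bytes already received down to a whole number of
 * MaxBurstLength bursts so DataOUT recovery restarts on a burst boundary,
 * e.g. 150k received with a 64k burst resumes from 128k.
 */
static unsigned int example_round_down_to_burst(unsigned int write_data_done,
						unsigned int max_burst_len)
{
	return (write_data_done / max_burst_len) * max_burst_len;
}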
static int iscsit_attach_active_connection_recovery_entry(
struct iscsit_session *sess,
struct iscsi_conn_recovery *cr)
{
spin_lock(&sess->cr_a_lock);
list_add_tail(&cr->cr_list, &sess->cr_active_list);
spin_unlock(&sess->cr_a_lock);
return 0;
}
static int iscsit_attach_inactive_connection_recovery_entry(
struct iscsit_session *sess,
struct iscsi_conn_recovery *cr)
{
spin_lock(&sess->cr_i_lock);
list_add_tail(&cr->cr_list, &sess->cr_inactive_list);
sess->conn_recovery_count++;
pr_debug("Incremented connection recovery count to %u for"
" SID: %u\n", sess->conn_recovery_count, sess->sid);
spin_unlock(&sess->cr_i_lock);
return 0;
}
struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
struct iscsit_session *sess,
u16 cid)
{
struct iscsi_conn_recovery *cr;
spin_lock(&sess->cr_i_lock);
list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
if (cr->cid == cid) {
spin_unlock(&sess->cr_i_lock);
return cr;
}
}
spin_unlock(&sess->cr_i_lock);
return NULL;
}
void iscsit_free_connection_recovery_entries(struct iscsit_session *sess)
{
struct iscsit_cmd *cmd, *cmd_tmp;
struct iscsi_conn_recovery *cr, *cr_tmp;
spin_lock(&sess->cr_a_lock);
list_for_each_entry_safe(cr, cr_tmp, &sess->cr_active_list, cr_list) {
list_del(&cr->cr_list);
spin_unlock(&sess->cr_a_lock);
spin_lock(&cr->conn_recovery_cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp,
&cr->conn_recovery_cmd_list, i_conn_node) {
list_del_init(&cmd->i_conn_node);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
spin_lock(&sess->cr_a_lock);
kfree(cr);
}
spin_unlock(&sess->cr_a_lock);
spin_lock(&sess->cr_i_lock);
list_for_each_entry_safe(cr, cr_tmp, &sess->cr_inactive_list, cr_list) {
list_del(&cr->cr_list);
spin_unlock(&sess->cr_i_lock);
spin_lock(&cr->conn_recovery_cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp,
&cr->conn_recovery_cmd_list, i_conn_node) {
list_del_init(&cmd->i_conn_node);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
spin_lock(&sess->cr_i_lock);
kfree(cr);
}
spin_unlock(&sess->cr_i_lock);
}
int iscsit_remove_active_connection_recovery_entry(
struct iscsi_conn_recovery *cr,
struct iscsit_session *sess)
{
spin_lock(&sess->cr_a_lock);
list_del(&cr->cr_list);
sess->conn_recovery_count--;
pr_debug("Decremented connection recovery count to %u for"
" SID: %u\n", sess->conn_recovery_count, sess->sid);
spin_unlock(&sess->cr_a_lock);
kfree(cr);
return 0;
}
static void iscsit_remove_inactive_connection_recovery_entry(
struct iscsi_conn_recovery *cr,
struct iscsit_session *sess)
{
spin_lock(&sess->cr_i_lock);
list_del(&cr->cr_list);
spin_unlock(&sess->cr_i_lock);
}
/*
* Called with cr->conn_recovery_cmd_lock held.
*/
int iscsit_remove_cmd_from_connection_recovery(
struct iscsit_cmd *cmd,
struct iscsit_session *sess)
{
struct iscsi_conn_recovery *cr;
if (!cmd->cr) {
pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
" is NULL!\n", cmd->init_task_tag);
BUG();
}
cr = cmd->cr;
list_del_init(&cmd->i_conn_node);
return --cr->cmd_count;
}
void iscsit_discard_cr_cmds_by_expstatsn(
struct iscsi_conn_recovery *cr,
u32 exp_statsn)
{
u32 dropped_count = 0;
struct iscsit_cmd *cmd, *cmd_tmp;
struct iscsit_session *sess = cr->sess;
spin_lock(&cr->conn_recovery_cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp,
&cr->conn_recovery_cmd_list, i_conn_node) {
if (((cmd->deferred_i_state != ISTATE_SENT_STATUS) &&
(cmd->deferred_i_state != ISTATE_REMOVE)) ||
(cmd->stat_sn >= exp_statsn)) {
continue;
}
dropped_count++;
pr_debug("Dropping Acknowledged ITT: 0x%08x, StatSN:"
" 0x%08x, CID: %hu.\n", cmd->init_task_tag,
cmd->stat_sn, cr->cid);
iscsit_remove_cmd_from_connection_recovery(cmd, sess);
spin_unlock(&cr->conn_recovery_cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
pr_debug("Dropped %u total acknowledged commands on"
" CID: %hu less than old ExpStatSN: 0x%08x\n",
dropped_count, cr->cid, exp_statsn);
if (!cr->cmd_count) {
pr_debug("No commands to be reassigned for failed"
" connection CID: %hu on SID: %u\n",
cr->cid, sess->sid);
iscsit_remove_inactive_connection_recovery_entry(cr, sess);
iscsit_attach_active_connection_recovery_entry(sess, cr);
pr_debug("iSCSI connection recovery successful for CID:"
" %hu on SID: %u\n", cr->cid, sess->sid);
iscsit_remove_active_connection_recovery_entry(cr, sess);
} else {
iscsit_remove_inactive_connection_recovery_entry(cr, sess);
iscsit_attach_active_connection_recovery_entry(sess, cr);
}
}
int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsit_conn *conn)
{
u32 dropped_count = 0;
struct iscsit_cmd *cmd, *cmd_tmp;
struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
struct iscsit_session *sess = conn->sess;
mutex_lock(&sess->cmdsn_mutex);
list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
&sess->sess_ooo_cmdsn_list, ooo_list) {
if (ooo_cmdsn->cid != conn->cid)
continue;
dropped_count++;
pr_debug("Dropping unacknowledged CmdSN:"
" 0x%08x during connection recovery on CID: %hu\n",
ooo_cmdsn->cmdsn, conn->cid);
iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
}
mutex_unlock(&sess->cmdsn_mutex);
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
continue;
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
}
spin_unlock_bh(&conn->cmd_lock);
pr_debug("Dropped %u total unacknowledged commands on CID:"
" %hu for ExpCmdSN: 0x%08x.\n", dropped_count, conn->cid,
sess->exp_cmd_sn);
return 0;
}
int iscsit_prepare_cmds_for_reallegiance(struct iscsit_conn *conn)
{
u32 cmd_count = 0;
struct iscsit_cmd *cmd, *cmd_tmp;
struct iscsi_conn_recovery *cr;
/*
* Allocate a struct iscsi_conn_recovery for this connection.
* Each struct iscsit_cmd contains a struct iscsi_conn_recovery pointer
* (struct iscsit_cmd->cr) so we need to allocate this before preparing the
* connection's command list for connection recovery.
*/
cr = kzalloc(sizeof(struct iscsi_conn_recovery), GFP_KERNEL);
if (!cr) {
pr_err("Unable to allocate memory for"
" struct iscsi_conn_recovery.\n");
return -1;
}
INIT_LIST_HEAD(&cr->cr_list);
INIT_LIST_HEAD(&cr->conn_recovery_cmd_list);
spin_lock_init(&cr->conn_recovery_cmd_lock);
/*
* Only perform connection recovery on ISCSI_OP_SCSI_CMD or
* ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call
* list_del_init(&cmd->i_conn_node); to release the command to the
* session pool and remove it from the connection's list.
*
* Also stop the DataOUT timer, which will be restarted after
* sending the TMR response.
*/
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) &&
(cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) {
pr_debug("Not performing reallegiance on"
" Opcode: 0x%02x, ITT: 0x%08x, CmdSN: 0x%08x,"
" CID: %hu\n", cmd->iscsi_opcode,
cmd->init_task_tag, cmd->cmd_sn, conn->cid);
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
continue;
}
/*
* Special case where commands greater than or equal to
* the session's ExpCmdSN are attached to the connection
* list but not to the out of order CmdSN list. The one
* obvious case is when a command with immediate data
* attached must only check the CmdSN against ExpCmdSN
* after the data is received. The special case below
* is when the connection fails before data is received,
* but also may apply to other PDUs, so it has been
* made generic here.
*/
if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
continue;
}
cmd_count++;
pr_debug("Preparing Opcode: 0x%02x, ITT: 0x%08x,"
" CmdSN: 0x%08x, StatSN: 0x%08x, CID: %hu for"
" reallegiance.\n", cmd->iscsi_opcode,
cmd->init_task_tag, cmd->cmd_sn, cmd->stat_sn,
conn->cid);
cmd->deferred_i_state = cmd->i_state;
cmd->i_state = ISTATE_IN_CONNECTION_RECOVERY;
if (cmd->data_direction == DMA_TO_DEVICE)
iscsit_stop_dataout_timer(cmd);
cmd->sess = conn->sess;
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_all_datain_reqs(cmd);
transport_wait_for_tasks(&cmd->se_cmd);
/*
* Add the struct iscsit_cmd to the connection recovery cmd list
*/
spin_lock(&cr->conn_recovery_cmd_lock);
list_add_tail(&cmd->i_conn_node, &cr->conn_recovery_cmd_list);
spin_unlock(&cr->conn_recovery_cmd_lock);
spin_lock_bh(&conn->cmd_lock);
cmd->cr = cr;
cmd->conn = NULL;
}
spin_unlock_bh(&conn->cmd_lock);
/*
* Fill in the various values in the preallocated struct iscsi_conn_recovery.
*/
cr->cid = conn->cid;
cr->cmd_count = cmd_count;
cr->maxrecvdatasegmentlength = conn->conn_ops->MaxRecvDataSegmentLength;
cr->maxxmitdatasegmentlength = conn->conn_ops->MaxXmitDataSegmentLength;
cr->sess = conn->sess;
iscsit_attach_inactive_connection_recovery_entry(conn->sess, cr);
return 0;
}
int iscsit_connection_recovery_transport_reset(struct iscsit_conn *conn)
{
atomic_set(&conn->connection_recovery, 1);
if (iscsit_close_connection(conn) < 0)
return -1;
return 0;
}
|
linux-master
|
drivers/target/iscsi/iscsi_target_erl2.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains main functions related to iSCSI Parameter negotiation.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <linux/slab.h>
#include <linux/uio.h> /* struct kvec */
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_util.h"
#include "iscsi_target_parameters.h"
int iscsi_login_rx_data(
struct iscsit_conn *conn,
char *buf,
int length)
{
int rx_got;
struct kvec iov;
memset(&iov, 0, sizeof(struct kvec));
iov.iov_len = length;
iov.iov_base = buf;
rx_got = rx_data(conn, &iov, 1, length);
if (rx_got != length) {
pr_err("rx_data returned %d, expecting %d.\n",
rx_got, length);
return -1;
}
return 0;
}
int iscsi_login_tx_data(
struct iscsit_conn *conn,
char *pdu_buf,
char *text_buf,
int text_length)
{
int length, tx_sent, iov_cnt = 1;
struct kvec iov[2];
length = (ISCSI_HDR_LEN + text_length);
memset(&iov[0], 0, 2 * sizeof(struct kvec));
iov[0].iov_len = ISCSI_HDR_LEN;
iov[0].iov_base = pdu_buf;
if (text_buf && text_length) {
iov[1].iov_len = text_length;
iov[1].iov_base = text_buf;
iov_cnt++;
}
tx_sent = tx_data(conn, &iov[0], iov_cnt, length);
if (tx_sent != length) {
pr_err("tx_data returned %d, expecting %d.\n",
tx_sent, length);
return -1;
}
return 0;
}
void iscsi_dump_conn_ops(struct iscsi_conn_ops *conn_ops)
{
pr_debug("HeaderDigest: %s\n", (conn_ops->HeaderDigest) ?
"CRC32C" : "None");
pr_debug("DataDigest: %s\n", (conn_ops->DataDigest) ?
"CRC32C" : "None");
pr_debug("MaxRecvDataSegmentLength: %u\n",
conn_ops->MaxRecvDataSegmentLength);
}
void iscsi_dump_sess_ops(struct iscsi_sess_ops *sess_ops)
{
pr_debug("InitiatorName: %s\n", sess_ops->InitiatorName);
pr_debug("InitiatorAlias: %s\n", sess_ops->InitiatorAlias);
pr_debug("TargetName: %s\n", sess_ops->TargetName);
pr_debug("TargetAlias: %s\n", sess_ops->TargetAlias);
pr_debug("TargetPortalGroupTag: %hu\n",
sess_ops->TargetPortalGroupTag);
pr_debug("MaxConnections: %hu\n", sess_ops->MaxConnections);
pr_debug("InitialR2T: %s\n",
(sess_ops->InitialR2T) ? "Yes" : "No");
pr_debug("ImmediateData: %s\n", (sess_ops->ImmediateData) ?
"Yes" : "No");
pr_debug("MaxBurstLength: %u\n", sess_ops->MaxBurstLength);
pr_debug("FirstBurstLength: %u\n", sess_ops->FirstBurstLength);
pr_debug("DefaultTime2Wait: %hu\n", sess_ops->DefaultTime2Wait);
pr_debug("DefaultTime2Retain: %hu\n",
sess_ops->DefaultTime2Retain);
pr_debug("MaxOutstandingR2T: %hu\n",
sess_ops->MaxOutstandingR2T);
pr_debug("DataPDUInOrder: %s\n",
(sess_ops->DataPDUInOrder) ? "Yes" : "No");
pr_debug("DataSequenceInOrder: %s\n",
(sess_ops->DataSequenceInOrder) ? "Yes" : "No");
pr_debug("ErrorRecoveryLevel: %hu\n",
sess_ops->ErrorRecoveryLevel);
pr_debug("SessionType: %s\n", (sess_ops->SessionType) ?
"Discovery" : "Normal");
}
void iscsi_print_params(struct iscsi_param_list *param_list)
{
struct iscsi_param *param;
list_for_each_entry(param, ¶m_list->param_list, p_list)
pr_debug("%s: %s\n", param->name, param->value);
}
static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *param_list,
char *name, char *value, u8 phase, u8 scope, u8 sender,
u16 type_range, u8 use)
{
struct iscsi_param *param = NULL;
param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);
if (!param) {
pr_err("Unable to allocate memory for parameter.\n");
goto out;
}
INIT_LIST_HEAD(¶m->p_list);
param->name = kstrdup(name, GFP_KERNEL);
if (!param->name) {
pr_err("Unable to allocate memory for parameter name.\n");
goto out;
}
param->value = kstrdup(value, GFP_KERNEL);
if (!param->value) {
pr_err("Unable to allocate memory for parameter value.\n");
goto out;
}
param->phase = phase;
param->scope = scope;
param->sender = sender;
param->use = use;
param->type_range = type_range;
switch (param->type_range) {
case TYPERANGE_BOOL_AND:
param->type = TYPE_BOOL_AND;
break;
case TYPERANGE_BOOL_OR:
param->type = TYPE_BOOL_OR;
break;
case TYPERANGE_0_TO_2:
case TYPERANGE_0_TO_3600:
case TYPERANGE_0_TO_32767:
case TYPERANGE_0_TO_65535:
case TYPERANGE_1_TO_65535:
case TYPERANGE_2_TO_3600:
case TYPERANGE_512_TO_16777215:
param->type = TYPE_NUMBER;
break;
case TYPERANGE_AUTH:
case TYPERANGE_DIGEST:
param->type = TYPE_VALUE_LIST | TYPE_STRING;
break;
case TYPERANGE_ISCSINAME:
case TYPERANGE_SESSIONTYPE:
case TYPERANGE_TARGETADDRESS:
case TYPERANGE_UTF8:
param->type = TYPE_STRING;
break;
default:
pr_err("Unknown type_range 0x%02x\n",
param->type_range);
goto out;
}
list_add_tail(¶m->p_list, ¶m_list->param_list);
return param;
out:
if (param) {
kfree(param->value);
kfree(param->name);
kfree(param);
}
return NULL;
}
/* #warning Add extension keys */
int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
{
struct iscsi_param *param = NULL;
struct iscsi_param_list *pl;
pl = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
if (!pl) {
pr_err("Unable to allocate memory for"
" struct iscsi_param_list.\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&pl->param_list);
INIT_LIST_HEAD(&pl->extra_response_list);
/*
	 * The format for setting the initial parameter definitions is:
*
* Parameter name:
* Initial value:
* Allowable phase:
* Scope:
* Allowable senders:
* Typerange:
* Use:
*/
param = iscsi_set_default_param(pl, AUTHMETHOD, INITIAL_AUTHMETHOD,
PHASE_SECURITY, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
TYPERANGE_AUTH, USE_INITIAL_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, HEADERDIGEST, INITIAL_HEADERDIGEST,
PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
TYPERANGE_DIGEST, USE_INITIAL_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, DATADIGEST, INITIAL_DATADIGEST,
PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
TYPERANGE_DIGEST, USE_INITIAL_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, MAXCONNECTIONS,
INITIAL_MAXCONNECTIONS, PHASE_OPERATIONAL,
SCOPE_SESSION_WIDE, SENDER_BOTH,
TYPERANGE_1_TO_65535, USE_LEADING_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, SENDTARGETS, INITIAL_SENDTARGETS,
PHASE_FFP0, SCOPE_SESSION_WIDE, SENDER_INITIATOR,
TYPERANGE_UTF8, 0);
if (!param)
goto out;
param = iscsi_set_default_param(pl, TARGETNAME, INITIAL_TARGETNAME,
PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_BOTH,
TYPERANGE_ISCSINAME, USE_ALL);
if (!param)
goto out;
param = iscsi_set_default_param(pl, INITIATORNAME,
INITIAL_INITIATORNAME, PHASE_DECLARATIVE,
SCOPE_SESSION_WIDE, SENDER_INITIATOR,
TYPERANGE_ISCSINAME, USE_INITIAL_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, TARGETALIAS, INITIAL_TARGETALIAS,
PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_TARGET,
TYPERANGE_UTF8, USE_ALL);
if (!param)
goto out;
param = iscsi_set_default_param(pl, INITIATORALIAS,
INITIAL_INITIATORALIAS, PHASE_DECLARATIVE,
SCOPE_SESSION_WIDE, SENDER_INITIATOR, TYPERANGE_UTF8,
USE_ALL);
if (!param)
goto out;
param = iscsi_set_default_param(pl, TARGETADDRESS,
INITIAL_TARGETADDRESS, PHASE_DECLARATIVE,
SCOPE_SESSION_WIDE, SENDER_TARGET,
TYPERANGE_TARGETADDRESS, USE_ALL);
if (!param)
goto out;
param = iscsi_set_default_param(pl, TARGETPORTALGROUPTAG,
INITIAL_TARGETPORTALGROUPTAG,
PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_TARGET,
TYPERANGE_0_TO_65535, USE_INITIAL_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, INITIALR2T, INITIAL_INITIALR2T,
PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
TYPERANGE_BOOL_OR, USE_LEADING_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, IMMEDIATEDATA,
INITIAL_IMMEDIATEDATA, PHASE_OPERATIONAL,
SCOPE_SESSION_WIDE, SENDER_BOTH, TYPERANGE_BOOL_AND,
USE_LEADING_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, MAXXMITDATASEGMENTLENGTH,
INITIAL_MAXXMITDATASEGMENTLENGTH,
PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
TYPERANGE_512_TO_16777215, USE_ALL);
if (!param)
goto out;
param = iscsi_set_default_param(pl, MAXRECVDATASEGMENTLENGTH,
INITIAL_MAXRECVDATASEGMENTLENGTH,
PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
TYPERANGE_512_TO_16777215, USE_ALL);
if (!param)
goto out;
param = iscsi_set_default_param(pl, MAXBURSTLENGTH,
INITIAL_MAXBURSTLENGTH, PHASE_OPERATIONAL,
SCOPE_SESSION_WIDE, SENDER_BOTH,
TYPERANGE_512_TO_16777215, USE_LEADING_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, FIRSTBURSTLENGTH,
INITIAL_FIRSTBURSTLENGTH,
PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
TYPERANGE_512_TO_16777215, USE_LEADING_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, DEFAULTTIME2WAIT,
INITIAL_DEFAULTTIME2WAIT,
PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
TYPERANGE_0_TO_3600, USE_LEADING_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, DEFAULTTIME2RETAIN,
INITIAL_DEFAULTTIME2RETAIN,
PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
TYPERANGE_0_TO_3600, USE_LEADING_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, MAXOUTSTANDINGR2T,
INITIAL_MAXOUTSTANDINGR2T,
PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
TYPERANGE_1_TO_65535, USE_LEADING_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, DATAPDUINORDER,
INITIAL_DATAPDUINORDER, PHASE_OPERATIONAL,
SCOPE_SESSION_WIDE, SENDER_BOTH, TYPERANGE_BOOL_OR,
USE_LEADING_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, DATASEQUENCEINORDER,
INITIAL_DATASEQUENCEINORDER,
PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
TYPERANGE_BOOL_OR, USE_LEADING_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, ERRORRECOVERYLEVEL,
INITIAL_ERRORRECOVERYLEVEL,
PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
TYPERANGE_0_TO_2, USE_LEADING_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, SESSIONTYPE, INITIAL_SESSIONTYPE,
PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_INITIATOR,
TYPERANGE_SESSIONTYPE, USE_LEADING_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, IFMARKER, INITIAL_IFMARKER,
PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
TYPERANGE_BOOL_AND, USE_INITIAL_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, OFMARKER, INITIAL_OFMARKER,
PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
TYPERANGE_BOOL_AND, USE_INITIAL_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, IFMARKINT, INITIAL_IFMARKINT,
PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
TYPERANGE_UTF8, USE_INITIAL_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, OFMARKINT, INITIAL_OFMARKINT,
PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
TYPERANGE_UTF8, USE_INITIAL_ONLY);
if (!param)
goto out;
/*
* Extra parameters for ISER from RFC-5046
*/
param = iscsi_set_default_param(pl, RDMAEXTENSIONS, INITIAL_RDMAEXTENSIONS,
PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
TYPERANGE_BOOL_AND, USE_LEADING_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, INITIATORRECVDATASEGMENTLENGTH,
INITIAL_INITIATORRECVDATASEGMENTLENGTH,
PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
TYPERANGE_512_TO_16777215, USE_ALL);
if (!param)
goto out;
param = iscsi_set_default_param(pl, TARGETRECVDATASEGMENTLENGTH,
INITIAL_TARGETRECVDATASEGMENTLENGTH,
PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
TYPERANGE_512_TO_16777215, USE_ALL);
if (!param)
goto out;
*param_list_ptr = pl;
return 0;
out:
iscsi_release_param_list(pl);
return -1;
}
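/*
 * Usage sketch with a hypothetical caller, not taken from the driver:
 * build the default parameter list, mark the keys to negotiate for a
 * plain TCP (non-iSER) login, then release the list again. The helper
 * name is illustrative only.
 */
static int __maybe_unused example_build_param_list(void)
{
	struct iscsi_param_list *pl = NULL;
	int ret;

	ret = iscsi_create_default_params(&pl);
	if (ret < 0)
		return ret;

	ret = iscsi_set_keys_to_negotiate(pl, false);

	iscsi_release_param_list(pl);
	return ret;
}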
int iscsi_set_keys_to_negotiate(
struct iscsi_param_list *param_list,
bool iser)
{
struct iscsi_param *param;
param_list->iser = iser;
list_for_each_entry(param, ¶m_list->param_list, p_list) {
param->state = 0;
if (!strcmp(param->name, AUTHMETHOD)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, HEADERDIGEST)) {
if (!iser)
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, DATADIGEST)) {
if (!iser)
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, MAXCONNECTIONS)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, TARGETNAME)) {
continue;
} else if (!strcmp(param->name, INITIATORNAME)) {
continue;
} else if (!strcmp(param->name, TARGETALIAS)) {
if (param->value)
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, INITIATORALIAS)) {
continue;
} else if (!strcmp(param->name, TARGETPORTALGROUPTAG)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, INITIALR2T)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, IMMEDIATEDATA)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
if (!iser)
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) {
continue;
} else if (!strcmp(param->name, MAXBURSTLENGTH)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, DEFAULTTIME2RETAIN)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, MAXOUTSTANDINGR2T)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, DATAPDUINORDER)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, DATASEQUENCEINORDER)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, ERRORRECOVERYLEVEL)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, SESSIONTYPE)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, IFMARKER)) {
SET_PSTATE_REJECT(param);
} else if (!strcmp(param->name, OFMARKER)) {
SET_PSTATE_REJECT(param);
} else if (!strcmp(param->name, IFMARKINT)) {
SET_PSTATE_REJECT(param);
} else if (!strcmp(param->name, OFMARKINT)) {
SET_PSTATE_REJECT(param);
} else if (!strcmp(param->name, RDMAEXTENSIONS)) {
if (iser)
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
if (iser)
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) {
if (iser)
SET_PSTATE_NEGOTIATE(param);
}
}
return 0;
}
int iscsi_set_keys_irrelevant_for_discovery(
struct iscsi_param_list *param_list)
{
struct iscsi_param *param;
list_for_each_entry(param, ¶m_list->param_list, p_list) {
if (!strcmp(param->name, MAXCONNECTIONS))
param->state &= ~PSTATE_NEGOTIATE;
else if (!strcmp(param->name, INITIALR2T))
param->state &= ~PSTATE_NEGOTIATE;
else if (!strcmp(param->name, IMMEDIATEDATA))
param->state &= ~PSTATE_NEGOTIATE;
else if (!strcmp(param->name, MAXBURSTLENGTH))
param->state &= ~PSTATE_NEGOTIATE;
else if (!strcmp(param->name, FIRSTBURSTLENGTH))
param->state &= ~PSTATE_NEGOTIATE;
else if (!strcmp(param->name, MAXOUTSTANDINGR2T))
param->state &= ~PSTATE_NEGOTIATE;
else if (!strcmp(param->name, DATAPDUINORDER))
param->state &= ~PSTATE_NEGOTIATE;
else if (!strcmp(param->name, DATASEQUENCEINORDER))
param->state &= ~PSTATE_NEGOTIATE;
else if (!strcmp(param->name, ERRORRECOVERYLEVEL))
param->state &= ~PSTATE_NEGOTIATE;
else if (!strcmp(param->name, DEFAULTTIME2WAIT))
param->state &= ~PSTATE_NEGOTIATE;
else if (!strcmp(param->name, DEFAULTTIME2RETAIN))
param->state &= ~PSTATE_NEGOTIATE;
else if (!strcmp(param->name, IFMARKER))
param->state &= ~PSTATE_NEGOTIATE;
else if (!strcmp(param->name, OFMARKER))
param->state &= ~PSTATE_NEGOTIATE;
else if (!strcmp(param->name, IFMARKINT))
param->state &= ~PSTATE_NEGOTIATE;
else if (!strcmp(param->name, OFMARKINT))
param->state &= ~PSTATE_NEGOTIATE;
else if (!strcmp(param->name, RDMAEXTENSIONS))
param->state &= ~PSTATE_NEGOTIATE;
else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH))
param->state &= ~PSTATE_NEGOTIATE;
else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH))
param->state &= ~PSTATE_NEGOTIATE;
}
return 0;
}
int iscsi_copy_param_list(
struct iscsi_param_list **dst_param_list,
struct iscsi_param_list *src_param_list,
int leading)
{
struct iscsi_param *param = NULL;
struct iscsi_param *new_param = NULL;
struct iscsi_param_list *param_list = NULL;
param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
if (!param_list) {
pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
return -ENOMEM;
}
INIT_LIST_HEAD(¶m_list->param_list);
INIT_LIST_HEAD(¶m_list->extra_response_list);
list_for_each_entry(param, &src_param_list->param_list, p_list) {
if (!leading && (param->scope & SCOPE_SESSION_WIDE)) {
if ((strcmp(param->name, "TargetName") != 0) &&
(strcmp(param->name, "InitiatorName") != 0) &&
(strcmp(param->name, "TargetPortalGroupTag") != 0))
continue;
}
new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);
if (!new_param) {
pr_err("Unable to allocate memory for struct iscsi_param.\n");
goto err_out;
}
new_param->name = kstrdup(param->name, GFP_KERNEL);
new_param->value = kstrdup(param->value, GFP_KERNEL);
if (!new_param->value || !new_param->name) {
kfree(new_param->value);
kfree(new_param->name);
kfree(new_param);
pr_err("Unable to allocate memory for parameter name/value.\n");
goto err_out;
}
new_param->set_param = param->set_param;
new_param->phase = param->phase;
new_param->scope = param->scope;
new_param->sender = param->sender;
new_param->type = param->type;
new_param->use = param->use;
new_param->type_range = param->type_range;
list_add_tail(&new_param->p_list, ¶m_list->param_list);
}
if (!list_empty(¶m_list->param_list)) {
*dst_param_list = param_list;
} else {
pr_err("No parameters allocated.\n");
goto err_out;
}
return 0;
err_out:
iscsi_release_param_list(param_list);
return -ENOMEM;
}
static void iscsi_release_extra_responses(struct iscsi_param_list *param_list)
{
struct iscsi_extra_response *er, *er_tmp;
list_for_each_entry_safe(er, er_tmp, ¶m_list->extra_response_list,
er_list) {
list_del(&er->er_list);
kfree(er);
}
}
void iscsi_release_param_list(struct iscsi_param_list *param_list)
{
struct iscsi_param *param, *param_tmp;
list_for_each_entry_safe(param, param_tmp, ¶m_list->param_list,
p_list) {
list_del(¶m->p_list);
kfree(param->name);
kfree(param->value);
kfree(param);
}
iscsi_release_extra_responses(param_list);
kfree(param_list);
}
struct iscsi_param *iscsi_find_param_from_key(
char *key,
struct iscsi_param_list *param_list)
{
struct iscsi_param *param;
if (!key || !param_list) {
pr_err("Key or parameter list pointer is NULL.\n");
return NULL;
}
list_for_each_entry(param, ¶m_list->param_list, p_list) {
if (!strcmp(key, param->name))
return param;
}
pr_err("Unable to locate key \"%s\".\n", key);
return NULL;
}
EXPORT_SYMBOL(iscsi_find_param_from_key);
int iscsi_extract_key_value(char *textbuf, char **key, char **value)
{
*value = strchr(textbuf, '=');
if (!*value) {
pr_err("Unable to locate \"=\" separator for key,"
" ignoring request.\n");
return -1;
}
*key = textbuf;
**value = '\0';
*value = *value + 1;
return 0;
}
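/*
 * Worked example with a hypothetical helper: iscsi_extract_key_value()
 * splits a writable "key=value" string in place by overwriting the '='
 * with a NUL. For the buffer below, key ends up pointing at
 * "HeaderDigest" and value at "CRC32C,None".
 */
static void __maybe_unused example_extract_key_value(void)
{
	char buf[] = "HeaderDigest=CRC32C,None";
	char *key, *value;

	if (!iscsi_extract_key_value(buf, &key, &value))
		pr_debug("key: %s value: %s\n", key, value);
}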
int iscsi_update_param_value(struct iscsi_param *param, char *value)
{
kfree(param->value);
param->value = kstrdup(value, GFP_KERNEL);
if (!param->value) {
pr_err("Unable to allocate memory for value.\n");
return -ENOMEM;
}
pr_debug("iSCSI Parameter updated to %s=%s\n",
param->name, param->value);
return 0;
}
static int iscsi_add_notunderstood_response(
char *key,
char *value,
struct iscsi_param_list *param_list)
{
struct iscsi_extra_response *extra_response;
if (strlen(value) > VALUE_MAXLEN) {
pr_err("Value for notunderstood key \"%s\" exceeds %d,"
" protocol error.\n", key, VALUE_MAXLEN);
return -1;
}
extra_response = kzalloc(sizeof(struct iscsi_extra_response), GFP_KERNEL);
if (!extra_response) {
pr_err("Unable to allocate memory for"
" struct iscsi_extra_response.\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&extra_response->er_list);
strscpy(extra_response->key, key, sizeof(extra_response->key));
strscpy(extra_response->value, NOTUNDERSTOOD,
sizeof(extra_response->value));
list_add_tail(&extra_response->er_list,
¶m_list->extra_response_list);
return 0;
}
static int iscsi_check_for_auth_key(char *key)
{
/*
* RFC 1994
*/
if (!strcmp(key, "CHAP_A") || !strcmp(key, "CHAP_I") ||
!strcmp(key, "CHAP_C") || !strcmp(key, "CHAP_N") ||
!strcmp(key, "CHAP_R"))
return 1;
/*
* RFC 2945
*/
if (!strcmp(key, "SRP_U") || !strcmp(key, "SRP_N") ||
!strcmp(key, "SRP_g") || !strcmp(key, "SRP_s") ||
!strcmp(key, "SRP_A") || !strcmp(key, "SRP_B") ||
!strcmp(key, "SRP_M") || !strcmp(key, "SRP_HM"))
return 1;
return 0;
}
static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param,
bool keys_workaround)
{
if (IS_TYPE_BOOL_AND(param)) {
if (!strcmp(param->value, NO))
SET_PSTATE_REPLY_OPTIONAL(param);
} else if (IS_TYPE_BOOL_OR(param)) {
if (!strcmp(param->value, YES))
SET_PSTATE_REPLY_OPTIONAL(param);
if (keys_workaround) {
/*
* Required for gPXE iSCSI boot client
*/
if (!strcmp(param->name, IMMEDIATEDATA))
SET_PSTATE_REPLY_OPTIONAL(param);
}
} else if (IS_TYPE_NUMBER(param)) {
if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
SET_PSTATE_REPLY_OPTIONAL(param);
if (keys_workaround) {
/*
* Required for Mellanox Flexboot PXE boot ROM
*/
if (!strcmp(param->name, FIRSTBURSTLENGTH))
SET_PSTATE_REPLY_OPTIONAL(param);
/*
* Required for gPXE iSCSI boot client
*/
if (!strcmp(param->name, MAXCONNECTIONS))
SET_PSTATE_REPLY_OPTIONAL(param);
}
} else if (IS_PHASE_DECLARATIVE(param))
SET_PSTATE_REPLY_OPTIONAL(param);
}
static int iscsi_check_boolean_value(struct iscsi_param *param, char *value)
{
if (strcmp(value, YES) && strcmp(value, NO)) {
pr_err("Illegal value for \"%s\", must be either"
" \"%s\" or \"%s\".\n", param->name, YES, NO);
return -1;
}
return 0;
}
static int iscsi_check_numerical_value(struct iscsi_param *param, char *value_ptr)
{
char *tmpptr;
int value = 0;
value = simple_strtoul(value_ptr, &tmpptr, 0);
if (IS_TYPERANGE_0_TO_2(param)) {
if ((value < 0) || (value > 2)) {
pr_err("Illegal value for \"%s\", must be"
" between 0 and 2.\n", param->name);
return -1;
}
return 0;
}
if (IS_TYPERANGE_0_TO_3600(param)) {
if ((value < 0) || (value > 3600)) {
pr_err("Illegal value for \"%s\", must be"
" between 0 and 3600.\n", param->name);
return -1;
}
return 0;
}
if (IS_TYPERANGE_0_TO_32767(param)) {
if ((value < 0) || (value > 32767)) {
pr_err("Illegal value for \"%s\", must be"
" between 0 and 32767.\n", param->name);
return -1;
}
return 0;
}
if (IS_TYPERANGE_0_TO_65535(param)) {
if ((value < 0) || (value > 65535)) {
pr_err("Illegal value for \"%s\", must be"
" between 0 and 65535.\n", param->name);
return -1;
}
return 0;
}
if (IS_TYPERANGE_1_TO_65535(param)) {
if ((value < 1) || (value > 65535)) {
pr_err("Illegal value for \"%s\", must be"
" between 1 and 65535.\n", param->name);
return -1;
}
return 0;
}
if (IS_TYPERANGE_2_TO_3600(param)) {
if ((value < 2) || (value > 3600)) {
pr_err("Illegal value for \"%s\", must be"
" between 2 and 3600.\n", param->name);
return -1;
}
return 0;
}
if (IS_TYPERANGE_512_TO_16777215(param)) {
if ((value < 512) || (value > 16777215)) {
pr_err("Illegal value for \"%s\", must be"
" between 512 and 16777215.\n", param->name);
return -1;
}
return 0;
}
return 0;
}
static int iscsi_check_string_or_list_value(struct iscsi_param *param, char *value)
{
if (IS_PSTATE_PROPOSER(param))
return 0;
if (IS_TYPERANGE_AUTH_PARAM(param)) {
if (strcmp(value, KRB5) && strcmp(value, SPKM1) &&
strcmp(value, SPKM2) && strcmp(value, SRP) &&
strcmp(value, CHAP) && strcmp(value, NONE)) {
pr_err("Illegal value for \"%s\", must be"
" \"%s\", \"%s\", \"%s\", \"%s\", \"%s\""
" or \"%s\".\n", param->name, KRB5,
SPKM1, SPKM2, SRP, CHAP, NONE);
return -1;
}
}
if (IS_TYPERANGE_DIGEST_PARAM(param)) {
if (strcmp(value, CRC32C) && strcmp(value, NONE)) {
pr_err("Illegal value for \"%s\", must be"
" \"%s\" or \"%s\".\n", param->name,
CRC32C, NONE);
return -1;
}
}
if (IS_TYPERANGE_SESSIONTYPE(param)) {
if (strcmp(value, DISCOVERY) && strcmp(value, NORMAL)) {
pr_err("Illegal value for \"%s\", must be"
" \"%s\" or \"%s\".\n", param->name,
DISCOVERY, NORMAL);
return -1;
}
}
return 0;
}
static char *iscsi_check_valuelist_for_support(
struct iscsi_param *param,
char *value)
{
char *tmp1 = NULL, *tmp2 = NULL;
char *acceptor_values = NULL, *proposer_values = NULL;
acceptor_values = param->value;
proposer_values = value;
do {
if (!proposer_values)
return NULL;
tmp1 = strchr(proposer_values, ',');
if (tmp1)
*tmp1 = '\0';
acceptor_values = param->value;
do {
if (!acceptor_values) {
if (tmp1)
*tmp1 = ',';
return NULL;
}
tmp2 = strchr(acceptor_values, ',');
if (tmp2)
*tmp2 = '\0';
if (!strcmp(acceptor_values, proposer_values)) {
if (tmp2)
*tmp2 = ',';
goto out;
}
if (tmp2)
*tmp2++ = ',';
acceptor_values = tmp2;
} while (acceptor_values);
if (tmp1)
*tmp1++ = ',';
proposer_values = tmp1;
} while (proposer_values);
out:
return proposer_values;
}
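/*
 * Worked example (illustrative): with an acceptor value list of
 * "CRC32C,None" and a proposer offer of "None,CRC32C", the scan above
 * returns a pointer to the first proposer token that also appears in
 * the acceptor list, in this case "None".
 */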
static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value,
struct iscsit_conn *conn)
{
u8 acceptor_boolean_value = 0, proposer_boolean_value = 0;
char *negotiated_value = NULL;
if (IS_PSTATE_ACCEPTOR(param)) {
pr_err("Received key \"%s\" twice, protocol error.\n",
param->name);
return -1;
}
if (IS_PSTATE_REJECT(param))
return 0;
if (IS_TYPE_BOOL_AND(param)) {
if (!strcmp(value, YES))
proposer_boolean_value = 1;
if (!strcmp(param->value, YES))
acceptor_boolean_value = 1;
		if (!acceptor_boolean_value || !proposer_boolean_value) {
if (iscsi_update_param_value(param, NO) < 0)
return -1;
if (!proposer_boolean_value)
SET_PSTATE_REPLY_OPTIONAL(param);
}
} else if (IS_TYPE_BOOL_OR(param)) {
if (!strcmp(value, YES))
proposer_boolean_value = 1;
if (!strcmp(param->value, YES))
acceptor_boolean_value = 1;
if (acceptor_boolean_value || proposer_boolean_value) {
if (iscsi_update_param_value(param, YES) < 0)
return -1;
if (proposer_boolean_value)
SET_PSTATE_REPLY_OPTIONAL(param);
}
} else if (IS_TYPE_NUMBER(param)) {
char *tmpptr, buf[11];
u32 acceptor_value = simple_strtoul(param->value, &tmpptr, 0);
u32 proposer_value = simple_strtoul(value, &tmpptr, 0);
memset(buf, 0, sizeof(buf));
if (!strcmp(param->name, MAXCONNECTIONS) ||
!strcmp(param->name, MAXBURSTLENGTH) ||
!strcmp(param->name, FIRSTBURSTLENGTH) ||
!strcmp(param->name, MAXOUTSTANDINGR2T) ||
!strcmp(param->name, DEFAULTTIME2RETAIN) ||
!strcmp(param->name, ERRORRECOVERYLEVEL)) {
if (proposer_value > acceptor_value) {
sprintf(buf, "%u", acceptor_value);
if (iscsi_update_param_value(param,
&buf[0]) < 0)
return -1;
} else {
if (iscsi_update_param_value(param, value) < 0)
return -1;
}
} else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
if (acceptor_value > proposer_value) {
sprintf(buf, "%u", acceptor_value);
if (iscsi_update_param_value(param,
&buf[0]) < 0)
return -1;
} else {
if (iscsi_update_param_value(param, value) < 0)
return -1;
}
} else {
if (iscsi_update_param_value(param, value) < 0)
return -1;
}
if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
struct iscsi_param *param_mxdsl;
unsigned long long tmp;
int rc;
rc = kstrtoull(param->value, 0, &tmp);
if (rc < 0)
return -1;
conn->conn_ops->MaxRecvDataSegmentLength = tmp;
			pr_debug("Saving ops->MaxRecvDataSegmentLength from"
				" the original initiator-provided value: %u\n",
conn->conn_ops->MaxRecvDataSegmentLength);
param_mxdsl = iscsi_find_param_from_key(
MAXXMITDATASEGMENTLENGTH,
conn->param_list);
if (!param_mxdsl)
return -1;
rc = iscsi_update_param_value(param,
param_mxdsl->value);
if (rc < 0)
return -1;
pr_debug("Updated %s to target MXDSL value: %s\n",
param->name, param->value);
}
} else if (IS_TYPE_VALUE_LIST(param)) {
negotiated_value = iscsi_check_valuelist_for_support(
param, value);
if (!negotiated_value) {
pr_err("Proposer's value list \"%s\" contains"
" no valid values from Acceptor's value list"
" \"%s\".\n", value, param->value);
return -1;
}
if (iscsi_update_param_value(param, negotiated_value) < 0)
return -1;
} else if (IS_PHASE_DECLARATIVE(param)) {
if (iscsi_update_param_value(param, value) < 0)
return -1;
SET_PSTATE_REPLY_OPTIONAL(param);
}
return 0;
}
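/*
 * Minimal sketch (hypothetical helper, not used by the driver): the
 * boolean negotiation above reduces to AND/OR of the two sides' values.
 * For example, ImmediateData (a BOOL_AND key) offered as "Yes" against
 * a local "No" settles to "No", while InitialR2T (a BOOL_OR key)
 * offered as "No" against a local "Yes" settles to "Yes".
 */
static inline bool example_negotiate_bool(bool acceptor, bool proposer,
					  bool is_and_key)
{
	return is_and_key ? (acceptor && proposer) : (acceptor || proposer);
}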
static int iscsi_check_proposer_state(struct iscsi_param *param, char *value)
{
if (IS_PSTATE_RESPONSE_GOT(param)) {
pr_err("Received key \"%s\" twice, protocol error.\n",
param->name);
return -1;
}
if (IS_TYPE_VALUE_LIST(param)) {
char *comma_ptr = NULL, *tmp_ptr = NULL;
comma_ptr = strchr(value, ',');
if (comma_ptr) {
pr_err("Illegal \",\" in response for \"%s\".\n",
param->name);
return -1;
}
tmp_ptr = iscsi_check_valuelist_for_support(param, value);
if (!tmp_ptr)
return -1;
}
if (iscsi_update_param_value(param, value) < 0)
return -1;
return 0;
}
static int iscsi_check_value(struct iscsi_param *param, char *value)
{
char *comma_ptr = NULL;
if (!strcmp(value, REJECT)) {
if (!strcmp(param->name, IFMARKINT) ||
!strcmp(param->name, OFMARKINT)) {
/*
* Reject is not fatal for [I,O]FMarkInt, and causes
* [I,O]FMarker to be reset to No. (See iSCSI v20 A.3.2)
*/
SET_PSTATE_REJECT(param);
return 0;
}
pr_err("Received %s=%s\n", param->name, value);
return -1;
}
if (!strcmp(value, IRRELEVANT)) {
pr_debug("Received %s=%s\n", param->name, value);
SET_PSTATE_IRRELEVANT(param);
return 0;
}
if (!strcmp(value, NOTUNDERSTOOD)) {
if (!IS_PSTATE_PROPOSER(param)) {
pr_err("Received illegal offer %s=%s\n",
param->name, value);
return -1;
}
/* #warning FIXME: Add check for X-ExtensionKey here */
pr_err("Standard iSCSI key \"%s\" cannot be answered"
" with \"%s\", protocol error.\n", param->name, value);
return -1;
}
do {
comma_ptr = NULL;
comma_ptr = strchr(value, ',');
if (comma_ptr && !IS_TYPE_VALUE_LIST(param)) {
pr_err("Detected value separator \",\", but"
" key \"%s\" does not allow a value list,"
" protocol error.\n", param->name);
return -1;
}
if (comma_ptr)
*comma_ptr = '\0';
if (strlen(value) > VALUE_MAXLEN) {
pr_err("Value for key \"%s\" exceeds %d,"
" protocol error.\n", param->name,
VALUE_MAXLEN);
return -1;
}
if (IS_TYPE_BOOL_AND(param) || IS_TYPE_BOOL_OR(param)) {
if (iscsi_check_boolean_value(param, value) < 0)
return -1;
} else if (IS_TYPE_NUMBER(param)) {
if (iscsi_check_numerical_value(param, value) < 0)
return -1;
} else if (IS_TYPE_STRING(param) || IS_TYPE_VALUE_LIST(param)) {
if (iscsi_check_string_or_list_value(param, value) < 0)
return -1;
} else {
			pr_err("Unknown parameter type 0x%02x\n", param->type);
return -1;
}
if (comma_ptr)
*comma_ptr++ = ',';
value = comma_ptr;
} while (value);
return 0;
}
static struct iscsi_param *__iscsi_check_key(
char *key,
int sender,
struct iscsi_param_list *param_list)
{
struct iscsi_param *param;
if (strlen(key) > KEY_MAXLEN) {
pr_err("Length of key name \"%s\" exceeds %d.\n",
key, KEY_MAXLEN);
return NULL;
}
param = iscsi_find_param_from_key(key, param_list);
if (!param)
return NULL;
if ((sender & SENDER_INITIATOR) && !IS_SENDER_INITIATOR(param)) {
pr_err("Key \"%s\" may not be sent to %s,"
" protocol error.\n", param->name,
(sender & SENDER_RECEIVER) ? "target" : "initiator");
return NULL;
}
if ((sender & SENDER_TARGET) && !IS_SENDER_TARGET(param)) {
pr_err("Key \"%s\" may not be sent to %s,"
" protocol error.\n", param->name,
(sender & SENDER_RECEIVER) ? "initiator" : "target");
return NULL;
}
return param;
}
static struct iscsi_param *iscsi_check_key(
char *key,
int phase,
int sender,
struct iscsi_param_list *param_list)
{
struct iscsi_param *param;
/*
* Key name length must not exceed 63 bytes. (See iSCSI v20 5.1)
*/
if (strlen(key) > KEY_MAXLEN) {
pr_err("Length of key name \"%s\" exceeds %d.\n",
key, KEY_MAXLEN);
return NULL;
}
param = iscsi_find_param_from_key(key, param_list);
if (!param)
return NULL;
if ((sender & SENDER_INITIATOR) && !IS_SENDER_INITIATOR(param)) {
pr_err("Key \"%s\" may not be sent to %s,"
" protocol error.\n", param->name,
(sender & SENDER_RECEIVER) ? "target" : "initiator");
return NULL;
}
if ((sender & SENDER_TARGET) && !IS_SENDER_TARGET(param)) {
pr_err("Key \"%s\" may not be sent to %s,"
" protocol error.\n", param->name,
(sender & SENDER_RECEIVER) ? "initiator" : "target");
return NULL;
}
if (IS_PSTATE_ACCEPTOR(param)) {
pr_err("Key \"%s\" received twice, protocol error.\n",
key);
return NULL;
}
if (!phase)
return param;
if (!(param->phase & phase)) {
char *phase_name;
switch (phase) {
case PHASE_SECURITY:
phase_name = "Security";
break;
case PHASE_OPERATIONAL:
phase_name = "Operational";
break;
default:
phase_name = "Unknown";
}
pr_err("Key \"%s\" may not be negotiated during %s phase.\n",
param->name, phase_name);
return NULL;
}
return param;
}
static int iscsi_enforce_integrity_rules(
u8 phase,
struct iscsi_param_list *param_list)
{
char *tmpptr;
u8 DataSequenceInOrder = 0;
u8 ErrorRecoveryLevel = 0, SessionType = 0;
u32 FirstBurstLength = 0, MaxBurstLength = 0;
struct iscsi_param *param = NULL;
list_for_each_entry(param, ¶m_list->param_list, p_list) {
if (!(param->phase & phase))
continue;
if (!strcmp(param->name, SESSIONTYPE))
if (!strcmp(param->value, NORMAL))
SessionType = 1;
if (!strcmp(param->name, ERRORRECOVERYLEVEL))
ErrorRecoveryLevel = simple_strtoul(param->value,
&tmpptr, 0);
if (!strcmp(param->name, DATASEQUENCEINORDER))
if (!strcmp(param->value, YES))
DataSequenceInOrder = 1;
if (!strcmp(param->name, MAXBURSTLENGTH))
MaxBurstLength = simple_strtoul(param->value,
&tmpptr, 0);
}
list_for_each_entry(param, ¶m_list->param_list, p_list) {
if (!(param->phase & phase))
continue;
if (!SessionType && !IS_PSTATE_ACCEPTOR(param))
continue;
if (!strcmp(param->name, MAXOUTSTANDINGR2T) &&
DataSequenceInOrder && (ErrorRecoveryLevel > 0)) {
if (strcmp(param->value, "1")) {
if (iscsi_update_param_value(param, "1") < 0)
return -1;
pr_debug("Reset \"%s\" to \"%s\".\n",
param->name, param->value);
}
}
if (!strcmp(param->name, MAXCONNECTIONS) && !SessionType) {
if (strcmp(param->value, "1")) {
if (iscsi_update_param_value(param, "1") < 0)
return -1;
pr_debug("Reset \"%s\" to \"%s\".\n",
param->name, param->value);
}
}
if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
FirstBurstLength = simple_strtoul(param->value,
&tmpptr, 0);
if (FirstBurstLength > MaxBurstLength) {
char tmpbuf[11];
memset(tmpbuf, 0, sizeof(tmpbuf));
sprintf(tmpbuf, "%u", MaxBurstLength);
if (iscsi_update_param_value(param, tmpbuf))
return -1;
pr_debug("Reset \"%s\" to \"%s\".\n",
param->name, param->value);
}
}
}
return 0;
}
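/*
 * Worked example (illustrative): for a normal session with
 * MaxBurstLength negotiated as 65536 and FirstBurstLength proposed as
 * 262144, the pass above rewrites FirstBurstLength to "65536", since
 * FirstBurstLength must not exceed MaxBurstLength.
 */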
int iscsi_decode_text_input(
u8 phase,
u8 sender,
char *textbuf,
u32 length,
struct iscsit_conn *conn)
{
struct iscsi_param_list *param_list = conn->param_list;
char *tmpbuf, *start = NULL, *end = NULL;
tmpbuf = kmemdup_nul(textbuf, length, GFP_KERNEL);
if (!tmpbuf) {
pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length);
return -ENOMEM;
}
start = tmpbuf;
end = (start + length);
while (start < end) {
char *key, *value;
struct iscsi_param *param;
if (iscsi_extract_key_value(start, &key, &value) < 0)
goto free_buffer;
pr_debug("Got key: %s=%s\n", key, value);
if (phase & PHASE_SECURITY) {
if (iscsi_check_for_auth_key(key) > 0) {
kfree(tmpbuf);
return 1;
}
}
param = iscsi_check_key(key, phase, sender, param_list);
if (!param) {
if (iscsi_add_notunderstood_response(key, value,
param_list) < 0)
goto free_buffer;
start += strlen(key) + strlen(value) + 2;
continue;
}
if (iscsi_check_value(param, value) < 0)
goto free_buffer;
start += strlen(key) + strlen(value) + 2;
if (IS_PSTATE_PROPOSER(param)) {
if (iscsi_check_proposer_state(param, value) < 0)
goto free_buffer;
SET_PSTATE_RESPONSE_GOT(param);
} else {
if (iscsi_check_acceptor_state(param, value, conn) < 0)
goto free_buffer;
SET_PSTATE_ACCEPTOR(param);
}
}
kfree(tmpbuf);
return 0;
free_buffer:
kfree(tmpbuf);
return -1;
}
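/*
 * Illustrative sketch (hypothetical caller and buffer, not taken from
 * the driver): login text is a sequence of NUL-terminated "key=value"
 * pairs packed back to back, which is why the parser above advances by
 * strlen(key) + strlen(value) + 2 per pair. A caller holding a valid
 * conn->param_list could feed it a two-pair buffer like this:
 */
static int __maybe_unused example_decode_text(struct iscsit_conn *conn)
{
	/* Each pair is terminated by '\0', exactly as on the wire. */
	char text[] = "HeaderDigest=None\0DataDigest=None";

	return iscsi_decode_text_input(PHASE_OPERATIONAL, SENDER_INITIATOR,
				       text, sizeof(text), conn);
}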
int iscsi_encode_text_output(
u8 phase,
u8 sender,
char *textbuf,
u32 *length,
struct iscsi_param_list *param_list,
bool keys_workaround)
{
char *output_buf = NULL;
struct iscsi_extra_response *er;
struct iscsi_param *param;
output_buf = textbuf + *length;
if (iscsi_enforce_integrity_rules(phase, param_list) < 0)
return -1;
list_for_each_entry(param, ¶m_list->param_list, p_list) {
if (!(param->sender & sender))
continue;
if (IS_PSTATE_ACCEPTOR(param) &&
!IS_PSTATE_RESPONSE_SENT(param) &&
!IS_PSTATE_REPLY_OPTIONAL(param) &&
(param->phase & phase)) {
*length += sprintf(output_buf, "%s=%s",
param->name, param->value);
*length += 1;
output_buf = textbuf + *length;
SET_PSTATE_RESPONSE_SENT(param);
pr_debug("Sending key: %s=%s\n",
param->name, param->value);
continue;
}
if (IS_PSTATE_NEGOTIATE(param) &&
!IS_PSTATE_ACCEPTOR(param) &&
!IS_PSTATE_PROPOSER(param) &&
(param->phase & phase)) {
*length += sprintf(output_buf, "%s=%s",
param->name, param->value);
*length += 1;
output_buf = textbuf + *length;
SET_PSTATE_PROPOSER(param);
iscsi_check_proposer_for_optional_reply(param,
keys_workaround);
pr_debug("Sending key: %s=%s\n",
param->name, param->value);
}
}
list_for_each_entry(er, ¶m_list->extra_response_list, er_list) {
*length += sprintf(output_buf, "%s=%s", er->key, er->value);
*length += 1;
output_buf = textbuf + *length;
pr_debug("Sending key: %s=%s\n", er->key, er->value);
}
iscsi_release_extra_responses(param_list);
return 0;
}
int iscsi_check_negotiated_keys(struct iscsi_param_list *param_list)
{
int ret = 0;
struct iscsi_param *param;
list_for_each_entry(param, ¶m_list->param_list, p_list) {
if (IS_PSTATE_NEGOTIATE(param) &&
IS_PSTATE_PROPOSER(param) &&
!IS_PSTATE_RESPONSE_GOT(param) &&
!IS_PSTATE_REPLY_OPTIONAL(param) &&
!IS_PHASE_DECLARATIVE(param)) {
pr_err("No response for proposed key \"%s\".\n",
param->name);
ret = -1;
}
}
return ret;
}
int iscsi_change_param_value(
char *keyvalue,
struct iscsi_param_list *param_list,
int check_key)
{
char *key = NULL, *value = NULL;
struct iscsi_param *param;
int sender = 0;
if (iscsi_extract_key_value(keyvalue, &key, &value) < 0)
return -1;
if (!check_key) {
param = __iscsi_check_key(keyvalue, sender, param_list);
if (!param)
return -1;
} else {
param = iscsi_check_key(keyvalue, 0, sender, param_list);
if (!param)
return -1;
param->set_param = 1;
if (iscsi_check_value(param, value) < 0) {
param->set_param = 0;
return -1;
}
param->set_param = 0;
}
if (iscsi_update_param_value(param, value) < 0)
return -1;
return 0;
}
void iscsi_set_connection_parameters(
struct iscsi_conn_ops *ops,
struct iscsi_param_list *param_list)
{
char *tmpptr;
struct iscsi_param *param;
pr_debug("---------------------------------------------------"
"---------------\n");
list_for_each_entry(param, ¶m_list->param_list, p_list) {
/*
* Special case to set MAXXMITDATASEGMENTLENGTH from the
* target requested MaxRecvDataSegmentLength, even though
* this key is not sent over the wire.
*/
if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) {
ops->MaxXmitDataSegmentLength =
simple_strtoul(param->value, &tmpptr, 0);
pr_debug("MaxXmitDataSegmentLength: %s\n",
param->value);
}
if (!IS_PSTATE_ACCEPTOR(param) && !IS_PSTATE_PROPOSER(param))
continue;
if (!strcmp(param->name, AUTHMETHOD)) {
pr_debug("AuthMethod: %s\n",
param->value);
} else if (!strcmp(param->name, HEADERDIGEST)) {
ops->HeaderDigest = !strcmp(param->value, CRC32C);
pr_debug("HeaderDigest: %s\n",
param->value);
} else if (!strcmp(param->name, DATADIGEST)) {
ops->DataDigest = !strcmp(param->value, CRC32C);
pr_debug("DataDigest: %s\n",
param->value);
} else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
/*
* At this point iscsi_check_acceptor_state() will have
* set ops->MaxRecvDataSegmentLength from the original
* initiator provided value.
*/
pr_debug("MaxRecvDataSegmentLength: %u\n",
ops->MaxRecvDataSegmentLength);
} else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
ops->InitiatorRecvDataSegmentLength =
simple_strtoul(param->value, &tmpptr, 0);
pr_debug("InitiatorRecvDataSegmentLength: %s\n",
param->value);
ops->MaxRecvDataSegmentLength =
ops->InitiatorRecvDataSegmentLength;
pr_debug("Set MRDSL from InitiatorRecvDataSegmentLength\n");
} else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) {
ops->TargetRecvDataSegmentLength =
simple_strtoul(param->value, &tmpptr, 0);
pr_debug("TargetRecvDataSegmentLength: %s\n",
param->value);
ops->MaxXmitDataSegmentLength =
ops->TargetRecvDataSegmentLength;
pr_debug("Set MXDSL from TargetRecvDataSegmentLength\n");
}
}
pr_debug("----------------------------------------------------"
"--------------\n");
}
void iscsi_set_session_parameters(
struct iscsi_sess_ops *ops,
struct iscsi_param_list *param_list,
int leading)
{
char *tmpptr;
struct iscsi_param *param;
pr_debug("----------------------------------------------------"
"--------------\n");
list_for_each_entry(param, ¶m_list->param_list, p_list) {
if (!IS_PSTATE_ACCEPTOR(param) && !IS_PSTATE_PROPOSER(param))
continue;
if (!strcmp(param->name, INITIATORNAME)) {
if (!param->value)
continue;
if (leading)
snprintf(ops->InitiatorName,
sizeof(ops->InitiatorName),
"%s", param->value);
pr_debug("InitiatorName: %s\n",
param->value);
} else if (!strcmp(param->name, INITIATORALIAS)) {
if (!param->value)
continue;
snprintf(ops->InitiatorAlias,
sizeof(ops->InitiatorAlias),
"%s", param->value);
pr_debug("InitiatorAlias: %s\n",
param->value);
} else if (!strcmp(param->name, TARGETNAME)) {
if (!param->value)
continue;
if (leading)
snprintf(ops->TargetName,
sizeof(ops->TargetName),
"%s", param->value);
pr_debug("TargetName: %s\n",
param->value);
} else if (!strcmp(param->name, TARGETALIAS)) {
if (!param->value)
continue;
snprintf(ops->TargetAlias, sizeof(ops->TargetAlias),
"%s", param->value);
pr_debug("TargetAlias: %s\n",
param->value);
} else if (!strcmp(param->name, TARGETPORTALGROUPTAG)) {
ops->TargetPortalGroupTag =
simple_strtoul(param->value, &tmpptr, 0);
pr_debug("TargetPortalGroupTag: %s\n",
param->value);
} else if (!strcmp(param->name, MAXCONNECTIONS)) {
ops->MaxConnections =
simple_strtoul(param->value, &tmpptr, 0);
pr_debug("MaxConnections: %s\n",
param->value);
} else if (!strcmp(param->name, INITIALR2T)) {
ops->InitialR2T = !strcmp(param->value, YES);
pr_debug("InitialR2T: %s\n",
param->value);
} else if (!strcmp(param->name, IMMEDIATEDATA)) {
ops->ImmediateData = !strcmp(param->value, YES);
pr_debug("ImmediateData: %s\n",
param->value);
} else if (!strcmp(param->name, MAXBURSTLENGTH)) {
ops->MaxBurstLength =
simple_strtoul(param->value, &tmpptr, 0);
pr_debug("MaxBurstLength: %s\n",
param->value);
} else if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
ops->FirstBurstLength =
simple_strtoul(param->value, &tmpptr, 0);
pr_debug("FirstBurstLength: %s\n",
param->value);
} else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
ops->DefaultTime2Wait =
simple_strtoul(param->value, &tmpptr, 0);
pr_debug("DefaultTime2Wait: %s\n",
param->value);
} else if (!strcmp(param->name, DEFAULTTIME2RETAIN)) {
ops->DefaultTime2Retain =
simple_strtoul(param->value, &tmpptr, 0);
pr_debug("DefaultTime2Retain: %s\n",
param->value);
} else if (!strcmp(param->name, MAXOUTSTANDINGR2T)) {
ops->MaxOutstandingR2T =
simple_strtoul(param->value, &tmpptr, 0);
pr_debug("MaxOutstandingR2T: %s\n",
param->value);
} else if (!strcmp(param->name, DATAPDUINORDER)) {
ops->DataPDUInOrder = !strcmp(param->value, YES);
pr_debug("DataPDUInOrder: %s\n",
param->value);
} else if (!strcmp(param->name, DATASEQUENCEINORDER)) {
ops->DataSequenceInOrder = !strcmp(param->value, YES);
pr_debug("DataSequenceInOrder: %s\n",
param->value);
} else if (!strcmp(param->name, ERRORRECOVERYLEVEL)) {
ops->ErrorRecoveryLevel =
simple_strtoul(param->value, &tmpptr, 0);
pr_debug("ErrorRecoveryLevel: %s\n",
param->value);
} else if (!strcmp(param->name, SESSIONTYPE)) {
ops->SessionType = !strcmp(param->value, DISCOVERY);
pr_debug("SessionType: %s\n",
param->value);
} else if (!strcmp(param->name, RDMAEXTENSIONS)) {
ops->RDMAExtensions = !strcmp(param->value, YES);
pr_debug("RDMAExtensions: %s\n",
param->value);
}
}
pr_debug("----------------------------------------------------"
"--------------\n");
}
|
linux-master
|
drivers/target/iscsi/iscsi_target_parameters.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains error recovery level one used by the iSCSI Target driver.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <linux/list.h>
#include <linux/slab.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_device.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target.h"
#define OFFLOAD_BUF_SIZE 32768U
/*
 * Used to dump excess DataIN payload for certain error recovery
 * situations. Receives at most OFFLOAD_BUF_SIZE bytes of DataIN per
 * rx_data() call.
*
* dump_padding_digest denotes if padding and data digests need
* to be dumped.
*/
int iscsit_dump_data_payload(
struct iscsit_conn *conn,
u32 buf_len,
int dump_padding_digest)
{
char *buf;
int ret = DATAOUT_WITHIN_COMMAND_RECOVERY, rx_got;
u32 length, offset = 0, size;
struct kvec iov;
if (conn->sess->sess_ops->RDMAExtensions)
return 0;
if (dump_padding_digest) {
buf_len = ALIGN(buf_len, 4);
if (conn->conn_ops->DataDigest)
buf_len += ISCSI_CRC_LEN;
}
length = min(buf_len, OFFLOAD_BUF_SIZE);
buf = kzalloc(length, GFP_ATOMIC);
if (!buf) {
pr_err("Unable to allocate %u bytes for offload"
" buffer.\n", length);
return -1;
}
memset(&iov, 0, sizeof(struct kvec));
while (offset < buf_len) {
size = min(buf_len - offset, length);
iov.iov_len = size;
iov.iov_base = buf;
rx_got = rx_data(conn, &iov, 1, size);
if (rx_got != size) {
ret = DATAOUT_CANNOT_RECOVER;
break;
}
offset += size;
}
kfree(buf);
return ret;
}
/*
 * Used for retransmitting R2Ts from an R2T SNACK request.
*/
static int iscsit_send_recovery_r2t_for_snack(
struct iscsit_cmd *cmd,
struct iscsi_r2t *r2t)
{
/*
* If the struct iscsi_r2t has not been sent yet, we can safely
	 * ignore retransmission of the R2TSN in question.
*/
spin_lock_bh(&cmd->r2t_lock);
if (!r2t->sent_r2t) {
spin_unlock_bh(&cmd->r2t_lock);
return 0;
}
r2t->sent_r2t = 0;
spin_unlock_bh(&cmd->r2t_lock);
iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
return 0;
}
static int iscsit_handle_r2t_snack(
struct iscsit_cmd *cmd,
unsigned char *buf,
u32 begrun,
u32 runlength)
{
u32 last_r2tsn;
struct iscsi_r2t *r2t;
/*
* Make sure the initiator is not requesting retransmission
* of R2TSNs already acknowledged by a TMR TASK_REASSIGN.
*/
if ((cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
(begrun <= cmd->acked_data_sn)) {
pr_err("ITT: 0x%08x, R2T SNACK requesting"
" retransmission of R2TSN: 0x%08x to 0x%08x but already"
" acked to R2TSN: 0x%08x by TMR TASK_REASSIGN,"
" protocol error.\n", cmd->init_task_tag, begrun,
(begrun + runlength), cmd->acked_data_sn);
return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
}
if (runlength) {
if ((begrun + runlength) > cmd->r2t_sn) {
pr_err("Command ITT: 0x%08x received R2T SNACK"
" with BegRun: 0x%08x, RunLength: 0x%08x, exceeds"
" current R2TSN: 0x%08x, protocol error.\n",
cmd->init_task_tag, begrun, runlength, cmd->r2t_sn);
return iscsit_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_INVALID, buf);
}
last_r2tsn = (begrun + runlength);
} else
last_r2tsn = cmd->r2t_sn;
while (begrun < last_r2tsn) {
r2t = iscsit_get_holder_for_r2tsn(cmd, begrun);
if (!r2t)
return -1;
if (iscsit_send_recovery_r2t_for_snack(cmd, r2t) < 0)
return -1;
begrun++;
}
return 0;
}
/*
* Generates Offsets and NextBurstLength based on Begrun and Runlength
* carried in a Data SNACK or ExpDataSN in TMR TASK_REASSIGN.
*
* For DataSequenceInOrder=Yes and DataPDUInOrder=[Yes,No] only.
*
* FIXME: How is this handled for a RData SNACK?
*/
int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
struct iscsit_cmd *cmd,
struct iscsi_datain_req *dr)
{
u32 data_sn = 0, data_sn_count = 0;
u32 pdu_start = 0, seq_no = 0;
u32 begrun = dr->begrun;
struct iscsit_conn *conn = cmd->conn;
while (begrun > data_sn++) {
data_sn_count++;
if ((dr->next_burst_len +
conn->conn_ops->MaxRecvDataSegmentLength) <
conn->sess->sess_ops->MaxBurstLength) {
dr->read_data_done +=
conn->conn_ops->MaxRecvDataSegmentLength;
dr->next_burst_len +=
conn->conn_ops->MaxRecvDataSegmentLength;
} else {
dr->read_data_done +=
(conn->sess->sess_ops->MaxBurstLength -
dr->next_burst_len);
dr->next_burst_len = 0;
pdu_start += data_sn_count;
data_sn_count = 0;
seq_no++;
}
}
if (!conn->sess->sess_ops->DataPDUInOrder) {
cmd->seq_no = seq_no;
cmd->pdu_start = pdu_start;
cmd->pdu_send_order = data_sn_count;
}
return 0;
}
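/*
 * Worked example (illustrative): with MaxRecvDataSegmentLength = 8192
 * and MaxBurstLength = 65536, a BegRun of 10 walks ten full PDUs, so
 * dr->read_data_done becomes 81920, dr->next_burst_len ends at 16384,
 * and one complete burst boundary is crossed (seq_no = 1, pdu_start = 8).
 */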
/*
* Generates Offsets and NextBurstLength based on Begrun and Runlength
* carried in a Data SNACK or ExpDataSN in TMR TASK_REASSIGN.
*
* For DataSequenceInOrder=No and DataPDUInOrder=[Yes,No] only.
*
* FIXME: How is this handled for a RData SNACK?
*/
int iscsit_create_recovery_datain_values_datasequenceinorder_no(
struct iscsit_cmd *cmd,
struct iscsi_datain_req *dr)
{
int found_seq = 0, i;
u32 data_sn, read_data_done = 0, seq_send_order = 0;
u32 begrun = dr->begrun;
u32 runlength = dr->runlength;
struct iscsit_conn *conn = cmd->conn;
struct iscsi_seq *first_seq = NULL, *seq = NULL;
if (!cmd->seq_list) {
pr_err("struct iscsit_cmd->seq_list is NULL!\n");
return -1;
}
/*
* Calculate read_data_done for all sequences containing a
* first_datasn and last_datasn less than the BegRun.
*
* Locate the struct iscsi_seq the BegRun lies within and calculate
	 * NextBurstLength up to the DataSN based on MaxRecvDataSegmentLength.
*
* Also use struct iscsi_seq->seq_send_order to determine where to start.
*/
for (i = 0; i < cmd->seq_count; i++) {
seq = &cmd->seq_list[i];
if (!seq->seq_send_order)
first_seq = seq;
/*
* No data has been transferred for this DataIN sequence, so the
* seq->first_datasn and seq->last_datasn have not been set.
*/
if (!seq->sent) {
pr_err("Ignoring non-sent sequence 0x%08x ->"
" 0x%08x\n\n", seq->first_datasn,
seq->last_datasn);
continue;
}
/*
		 * This DataIN sequence precedes the received BegRun, so add the
* total xfer_len of the sequence to read_data_done and reset
* seq->pdu_send_order.
*/
if ((seq->first_datasn < begrun) &&
(seq->last_datasn < begrun)) {
pr_err("Pre BegRun sequence 0x%08x ->"
" 0x%08x\n", seq->first_datasn,
seq->last_datasn);
read_data_done += cmd->seq_list[i].xfer_len;
seq->next_burst_len = seq->pdu_send_order = 0;
continue;
}
/*
* The BegRun lies within this DataIN sequence.
*/
if ((seq->first_datasn <= begrun) &&
(seq->last_datasn >= begrun)) {
pr_err("Found sequence begrun: 0x%08x in"
" 0x%08x -> 0x%08x\n", begrun,
seq->first_datasn, seq->last_datasn);
seq_send_order = seq->seq_send_order;
data_sn = seq->first_datasn;
seq->next_burst_len = seq->pdu_send_order = 0;
found_seq = 1;
/*
* For DataPDUInOrder=Yes, while the first DataSN of
* the sequence is less than the received BegRun, add
* the MaxRecvDataSegmentLength to read_data_done and
* to the sequence's next_burst_len;
*
* For DataPDUInOrder=No, while the first DataSN of the
* sequence is less than the received BegRun, find the
* struct iscsi_pdu of the DataSN in question and add the
* MaxRecvDataSegmentLength to read_data_done and to the
* sequence's next_burst_len;
*/
if (conn->sess->sess_ops->DataPDUInOrder) {
while (data_sn < begrun) {
seq->pdu_send_order++;
read_data_done +=
conn->conn_ops->MaxRecvDataSegmentLength;
seq->next_burst_len +=
conn->conn_ops->MaxRecvDataSegmentLength;
data_sn++;
}
} else {
int j;
struct iscsi_pdu *pdu;
while (data_sn < begrun) {
seq->pdu_send_order++;
for (j = 0; j < seq->pdu_count; j++) {
pdu = &cmd->pdu_list[
seq->pdu_start + j];
if (pdu->data_sn == data_sn) {
read_data_done +=
pdu->length;
seq->next_burst_len +=
pdu->length;
}
}
data_sn++;
}
}
continue;
}
/*
* This DataIN sequence is larger than the received BegRun,
* reset seq->pdu_send_order and continue.
*/
if ((seq->first_datasn > begrun) ||
(seq->last_datasn > begrun)) {
pr_err("Post BegRun sequence 0x%08x -> 0x%08x\n",
seq->first_datasn, seq->last_datasn);
seq->next_burst_len = seq->pdu_send_order = 0;
continue;
}
}
if (!found_seq) {
if (!begrun) {
if (!first_seq) {
pr_err("ITT: 0x%08x, Begrun: 0x%08x"
" but first_seq is NULL\n",
cmd->init_task_tag, begrun);
return -1;
}
seq_send_order = first_seq->seq_send_order;
seq->next_burst_len = seq->pdu_send_order = 0;
goto done;
}
pr_err("Unable to locate struct iscsi_seq for ITT: 0x%08x,"
" BegRun: 0x%08x, RunLength: 0x%08x while"
" DataSequenceInOrder=No and DataPDUInOrder=%s.\n",
cmd->init_task_tag, begrun, runlength,
(conn->sess->sess_ops->DataPDUInOrder) ? "Yes" : "No");
return -1;
}
done:
dr->read_data_done = read_data_done;
dr->seq_send_order = seq_send_order;
return 0;
}
static int iscsit_handle_recovery_datain(
struct iscsit_cmd *cmd,
unsigned char *buf,
u32 begrun,
u32 runlength)
{
struct iscsit_conn *conn = cmd->conn;
struct iscsi_datain_req *dr;
struct se_cmd *se_cmd = &cmd->se_cmd;
if (!(se_cmd->transport_state & CMD_T_COMPLETE)) {
pr_err("Ignoring ITT: 0x%08x Data SNACK\n",
cmd->init_task_tag);
return 0;
}
/*
* Make sure the initiator is not requesting retransmission
* of DataSNs already acknowledged by a Data ACK SNACK.
*/
if ((cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
(begrun <= cmd->acked_data_sn)) {
pr_err("ITT: 0x%08x, Data SNACK requesting"
" retransmission of DataSN: 0x%08x to 0x%08x but"
" already acked to DataSN: 0x%08x by Data ACK SNACK,"
" protocol error.\n", cmd->init_task_tag, begrun,
(begrun + runlength), cmd->acked_data_sn);
return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
}
/*
* Make sure BegRun and RunLength in the Data SNACK are sane.
* Note: (cmd->data_sn - 1) will carry the maximum DataSN sent.
*/
if ((begrun + runlength) > (cmd->data_sn - 1)) {
pr_err("Initiator requesting BegRun: 0x%08x, RunLength"
": 0x%08x greater than maximum DataSN: 0x%08x.\n",
begrun, runlength, (cmd->data_sn - 1));
return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID,
buf);
}
dr = iscsit_allocate_datain_req();
if (!dr)
return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_NO_RESOURCES,
buf);
dr->data_sn = dr->begrun = begrun;
dr->runlength = runlength;
dr->generate_recovery_values = 1;
dr->recovery = DATAIN_WITHIN_COMMAND_RECOVERY;
iscsit_attach_datain_req(cmd, dr);
cmd->i_state = ISTATE_SEND_DATAIN;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
return 0;
}
int iscsit_handle_recovery_datain_or_r2t(
struct iscsit_conn *conn,
unsigned char *buf,
itt_t init_task_tag,
u32 targ_xfer_tag,
u32 begrun,
u32 runlength)
{
struct iscsit_cmd *cmd;
cmd = iscsit_find_cmd_from_itt(conn, init_task_tag);
if (!cmd)
return 0;
/*
* FIXME: This will not work for bidi commands.
*/
switch (cmd->data_direction) {
case DMA_TO_DEVICE:
return iscsit_handle_r2t_snack(cmd, buf, begrun, runlength);
case DMA_FROM_DEVICE:
return iscsit_handle_recovery_datain(cmd, buf, begrun,
runlength);
default:
pr_err("Unknown cmd->data_direction: 0x%02x\n",
cmd->data_direction);
return -1;
}
return 0;
}
/* #warning FIXME: Status SNACK needs to be dependent on OPCODE!!! */
int iscsit_handle_status_snack(
struct iscsit_conn *conn,
itt_t init_task_tag,
u32 targ_xfer_tag,
u32 begrun,
u32 runlength)
{
struct iscsit_cmd *cmd = NULL;
u32 last_statsn;
int found_cmd;
if (!begrun) {
begrun = conn->exp_statsn;
} else if (conn->exp_statsn > begrun) {
pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:"
" 0x%08x but already got ExpStatSN: 0x%08x on CID:"
" %hu.\n", begrun, runlength, conn->exp_statsn,
conn->cid);
return 0;
}
last_statsn = (!runlength) ? conn->stat_sn : (begrun + runlength);
while (begrun < last_statsn) {
found_cmd = 0;
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
if (cmd->stat_sn == begrun) {
found_cmd = 1;
break;
}
}
spin_unlock_bh(&conn->cmd_lock);
if (!found_cmd) {
			pr_err("Unable to find StatSN: 0x%08x for"
				" a Status SNACK, assuming this was a"
				" proactive SNACK for an untransmitted"
				" StatSN, ignoring.\n", begrun);
begrun++;
continue;
}
spin_lock_bh(&cmd->istate_lock);
if (cmd->i_state == ISTATE_SEND_DATAIN) {
spin_unlock_bh(&cmd->istate_lock);
			pr_err("Ignoring Status SNACK for BegRun:"
				" 0x%08x, RunLength: 0x%08x, assuming this was"
				" a proactive SNACK for an untransmitted"
				" StatSN\n", begrun, runlength);
begrun++;
continue;
}
spin_unlock_bh(&cmd->istate_lock);
cmd->i_state = ISTATE_SEND_STATUS_RECOVERY;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
begrun++;
}
return 0;
}
int iscsit_handle_data_ack(
struct iscsit_conn *conn,
u32 targ_xfer_tag,
u32 begrun,
u32 runlength)
{
struct iscsit_cmd *cmd = NULL;
cmd = iscsit_find_cmd_from_ttt(conn, targ_xfer_tag);
if (!cmd) {
pr_err("Data ACK SNACK for TTT: 0x%08x is"
" invalid.\n", targ_xfer_tag);
return -1;
}
if (begrun <= cmd->acked_data_sn) {
pr_err("ITT: 0x%08x Data ACK SNACK BegRUN: 0x%08x is"
" less than the already acked DataSN: 0x%08x.\n",
cmd->init_task_tag, begrun, cmd->acked_data_sn);
return -1;
}
/*
* For Data ACK SNACK, BegRun is the next expected DataSN.
* (see iSCSI v19: 10.16.6)
*/
cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
cmd->acked_data_sn = (begrun - 1);
pr_debug("Received Data ACK SNACK for ITT: 0x%08x,"
" updated acked DataSN to 0x%08x.\n",
cmd->init_task_tag, cmd->acked_data_sn);
return 0;
}
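/*
 * Worked example (illustrative): a Data ACK SNACK carrying BegRun
 * 0x00000014 declares 0x14 as the next expected DataSN, so the code
 * above records acked_data_sn = 0x13, i.e. every DataSN up to and
 * including 0x13 is now acknowledged.
 */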
static int iscsit_send_recovery_r2t(
struct iscsit_cmd *cmd,
u32 offset,
u32 xfer_len)
{
int ret;
spin_lock_bh(&cmd->r2t_lock);
ret = iscsit_add_r2t_to_list(cmd, offset, xfer_len, 1, 0);
spin_unlock_bh(&cmd->r2t_lock);
return ret;
}
int iscsit_dataout_datapduinorder_no_fbit(
struct iscsit_cmd *cmd,
struct iscsi_pdu *pdu)
{
int i, send_recovery_r2t = 0, recovery = 0;
u32 length = 0, offset = 0, pdu_count = 0, xfer_len = 0;
struct iscsit_conn *conn = cmd->conn;
struct iscsi_pdu *first_pdu = NULL;
/*
	 * Get a struct iscsi_pdu pointer to the first PDU, and the total PDU count
* of the DataOUT sequence.
*/
if (conn->sess->sess_ops->DataSequenceInOrder) {
for (i = 0; i < cmd->pdu_count; i++) {
if (cmd->pdu_list[i].seq_no == pdu->seq_no) {
if (!first_pdu)
first_pdu = &cmd->pdu_list[i];
xfer_len += cmd->pdu_list[i].length;
pdu_count++;
} else if (pdu_count)
break;
}
} else {
struct iscsi_seq *seq = cmd->seq_ptr;
first_pdu = &cmd->pdu_list[seq->pdu_start];
pdu_count = seq->pdu_count;
}
if (!first_pdu || !pdu_count)
return DATAOUT_CANNOT_RECOVER;
/*
	 * Loop through the ending DataOUT sequence checking each struct iscsi_pdu.
	 * The logic below batches not-received PDUs so that a contiguous gap
	 * can be covered by a single recovery R2T.
*/
for (i = 0; i < pdu_count; i++) {
if (first_pdu[i].status == ISCSI_PDU_RECEIVED_OK) {
if (!send_recovery_r2t)
continue;
if (iscsit_send_recovery_r2t(cmd, offset, length) < 0)
return DATAOUT_CANNOT_RECOVER;
send_recovery_r2t = length = offset = 0;
continue;
}
/*
* Set recovery = 1 for any missing, CRC failed, or timed
* out PDUs to let the DataOUT logic know that this sequence
* has not been completed yet.
*
* Also, only send a Recovery R2T for ISCSI_PDU_NOT_RECEIVED.
* We assume if the PDU either failed CRC or timed out
* that a Recovery R2T has already been sent.
*/
recovery = 1;
if (first_pdu[i].status != ISCSI_PDU_NOT_RECEIVED)
continue;
if (!offset)
offset = first_pdu[i].offset;
length += first_pdu[i].length;
send_recovery_r2t = 1;
}
if (send_recovery_r2t)
if (iscsit_send_recovery_r2t(cmd, offset, length) < 0)
return DATAOUT_CANNOT_RECOVER;
return (!recovery) ? DATAOUT_NORMAL : DATAOUT_WITHIN_COMMAND_RECOVERY;
}
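/*
 * Recompute the offset/length that a recovery R2T must re-request for the
 * affected DataOUT sequence, rewinding the per-sequence counters (DataSN,
 * burst lengths, write_data_done) and marking any PDUs inside the range
 * as not received so they will be retransferred.
 */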
static int iscsit_recalculate_dataout_values(
struct iscsit_cmd *cmd,
u32 pdu_offset,
u32 pdu_length,
u32 *r2t_offset,
u32 *r2t_length)
{
int i;
struct iscsit_conn *conn = cmd->conn;
struct iscsi_pdu *pdu = NULL;
if (conn->sess->sess_ops->DataSequenceInOrder) {
cmd->data_sn = 0;
if (conn->sess->sess_ops->DataPDUInOrder) {
*r2t_offset = cmd->write_data_done;
*r2t_length = (cmd->seq_end_offset -
cmd->write_data_done);
return 0;
}
*r2t_offset = cmd->seq_start_offset;
*r2t_length = (cmd->seq_end_offset - cmd->seq_start_offset);
for (i = 0; i < cmd->pdu_count; i++) {
pdu = &cmd->pdu_list[i];
if (pdu->status != ISCSI_PDU_RECEIVED_OK)
continue;
if ((pdu->offset >= cmd->seq_start_offset) &&
((pdu->offset + pdu->length) <=
cmd->seq_end_offset)) {
if (!cmd->unsolicited_data)
cmd->next_burst_len -= pdu->length;
else
cmd->first_burst_len -= pdu->length;
cmd->write_data_done -= pdu->length;
pdu->status = ISCSI_PDU_NOT_RECEIVED;
}
}
} else {
struct iscsi_seq *seq = NULL;
seq = iscsit_get_seq_holder(cmd, pdu_offset, pdu_length);
if (!seq)
return -1;
*r2t_offset = seq->orig_offset;
*r2t_length = seq->xfer_len;
cmd->write_data_done -= (seq->offset - seq->orig_offset);
if (cmd->immediate_data)
cmd->first_burst_len = cmd->write_data_done;
seq->data_sn = 0;
seq->offset = seq->orig_offset;
seq->next_burst_len = 0;
seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
if (conn->sess->sess_ops->DataPDUInOrder)
return 0;
for (i = 0; i < seq->pdu_count; i++) {
pdu = &cmd->pdu_list[i+seq->pdu_start];
if (pdu->status != ISCSI_PDU_RECEIVED_OK)
continue;
pdu->status = ISCSI_PDU_NOT_RECEIVED;
}
}
return 0;
}
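/*
 * Flag the command as being within command recovery, recalculate the
 * offset/length that still needs to be transferred, and send a recovery
 * R2T requesting it.
 */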
int iscsit_recover_dataout_sequence(
struct iscsit_cmd *cmd,
u32 pdu_offset,
u32 pdu_length)
{
u32 r2t_length = 0, r2t_offset = 0;
spin_lock_bh(&cmd->istate_lock);
cmd->cmd_flags |= ICF_WITHIN_COMMAND_RECOVERY;
spin_unlock_bh(&cmd->istate_lock);
if (iscsit_recalculate_dataout_values(cmd, pdu_offset, pdu_length,
&r2t_offset, &r2t_length) < 0)
return DATAOUT_CANNOT_RECOVER;
iscsit_send_recovery_r2t(cmd, r2t_offset, r2t_length);
return DATAOUT_WITHIN_COMMAND_RECOVERY;
}
static struct iscsi_ooo_cmdsn *iscsit_allocate_ooo_cmdsn(void)
{
struct iscsi_ooo_cmdsn *ooo_cmdsn = NULL;
ooo_cmdsn = kmem_cache_zalloc(lio_ooo_cache, GFP_ATOMIC);
if (!ooo_cmdsn) {
pr_err("Unable to allocate memory for"
" struct iscsi_ooo_cmdsn.\n");
return NULL;
}
INIT_LIST_HEAD(&ooo_cmdsn->ooo_list);
return ooo_cmdsn;
}
static int iscsit_attach_ooo_cmdsn(
struct iscsit_session *sess,
struct iscsi_ooo_cmdsn *ooo_cmdsn)
{
struct iscsi_ooo_cmdsn *ooo_tail, *ooo_tmp;
lockdep_assert_held(&sess->cmdsn_mutex);
/*
* We attach the struct iscsi_ooo_cmdsn entry to the out of order
* list in increasing CmdSN order.
* This allows iscsi_execute_ooo_cmdsns() to detect any
* additional CmdSN holes while performing delayed execution.
*/
if (list_empty(&sess->sess_ooo_cmdsn_list))
list_add_tail(&ooo_cmdsn->ooo_list,
&sess->sess_ooo_cmdsn_list);
else {
ooo_tail = list_entry(sess->sess_ooo_cmdsn_list.prev,
typeof(*ooo_tail), ooo_list);
/*
* CmdSN is greater than the tail of the list.
*/
if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn))
list_add_tail(&ooo_cmdsn->ooo_list,
&sess->sess_ooo_cmdsn_list);
else {
/*
* CmdSN is either lower than the head, or somewhere
* in the middle.
*/
list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
ooo_list) {
if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn))
continue;
/* Insert before this entry */
list_add(&ooo_cmdsn->ooo_list,
ooo_tmp->ooo_list.prev);
break;
}
}
}
return 0;
}
/*
* Removes a struct iscsi_ooo_cmdsn from a session's list,
* called with struct iscsit_session->cmdsn_mutex held.
*/
void iscsit_remove_ooo_cmdsn(
struct iscsit_session *sess,
struct iscsi_ooo_cmdsn *ooo_cmdsn)
{
list_del(&ooo_cmdsn->ooo_list);
kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
}
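/*
 * Clear the command pointer of every out-of-order CmdSN entry belonging to
 * the given connection, so iscsit_execute_ooo_cmdsns() simply advances
 * ExpCmdSN past those holes instead of executing a stale command.
 */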
void iscsit_clear_ooo_cmdsns_for_conn(struct iscsit_conn *conn)
{
struct iscsi_ooo_cmdsn *ooo_cmdsn;
struct iscsit_session *sess = conn->sess;
mutex_lock(&sess->cmdsn_mutex);
list_for_each_entry(ooo_cmdsn, &sess->sess_ooo_cmdsn_list, ooo_list) {
if (ooo_cmdsn->cid != conn->cid)
continue;
ooo_cmdsn->cmd = NULL;
}
mutex_unlock(&sess->cmdsn_mutex);
}
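/*
 * Walk the session's out-of-order CmdSN list and execute any deferred
 * commands whose CmdSN now matches ExpCmdSN, advancing ExpCmdSN for each
 * hole that gets filled. Returns the number of commands executed, or -1
 * on execution failure. Called with sess->cmdsn_mutex held.
 */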
int iscsit_execute_ooo_cmdsns(struct iscsit_session *sess)
{
int ooo_count = 0;
struct iscsit_cmd *cmd = NULL;
struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
lockdep_assert_held(&sess->cmdsn_mutex);
list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
&sess->sess_ooo_cmdsn_list, ooo_list) {
if (ooo_cmdsn->cmdsn != sess->exp_cmd_sn)
continue;
if (!ooo_cmdsn->cmd) {
sess->exp_cmd_sn++;
iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
continue;
}
cmd = ooo_cmdsn->cmd;
cmd->i_state = cmd->deferred_i_state;
ooo_count++;
sess->exp_cmd_sn++;
pr_debug("Executing out of order CmdSN: 0x%08x,"
" incremented ExpCmdSN to 0x%08x.\n",
cmd->cmd_sn, sess->exp_cmd_sn);
iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
if (iscsit_execute_cmd(cmd, 1) < 0)
return -1;
}
return ooo_count;
}
/*
* Called either:
*
* 1. With sess->cmdsn_mutex held from iscsi_execute_ooo_cmdsns()
* or iscsi_check_received_cmdsn().
* 2. With no locks held directly from iscsi_handle_XXX_pdu() functions
* for immediate commands.
*/
int iscsit_execute_cmd(struct iscsit_cmd *cmd, int ooo)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct iscsit_conn *conn = cmd->conn;
int lr = 0;
spin_lock_bh(&cmd->istate_lock);
if (ooo)
cmd->cmd_flags &= ~ICF_OOO_CMDSN;
switch (cmd->iscsi_opcode) {
case ISCSI_OP_SCSI_CMD:
/*
* Go ahead and send the CHECK_CONDITION status for
* any SCSI CDB exceptions that may have occurred.
*/
if (cmd->sense_reason) {
if (cmd->sense_reason == TCM_RESERVATION_CONFLICT) {
cmd->i_state = ISTATE_SEND_STATUS;
spin_unlock_bh(&cmd->istate_lock);
iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
cmd->i_state);
return 0;
}
spin_unlock_bh(&cmd->istate_lock);
if (cmd->se_cmd.transport_state & CMD_T_ABORTED)
return 0;
return transport_send_check_condition_and_sense(se_cmd,
cmd->sense_reason, 0);
}
/*
* Special case for delayed CmdSN with Immediate
* Data and/or Unsolicited Data Out attached.
*/
if (cmd->immediate_data) {
if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
spin_unlock_bh(&cmd->istate_lock);
target_execute_cmd(&cmd->se_cmd);
return 0;
}
spin_unlock_bh(&cmd->istate_lock);
if (!(cmd->cmd_flags &
ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
if (cmd->se_cmd.transport_state & CMD_T_ABORTED)
return 0;
iscsit_set_dataout_sequence_values(cmd);
conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
}
return 0;
}
/*
* The default handler.
*/
spin_unlock_bh(&cmd->istate_lock);
if ((cmd->data_direction == DMA_TO_DEVICE) &&
!(cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
if (cmd->se_cmd.transport_state & CMD_T_ABORTED)
return 0;
iscsit_set_unsolicited_dataout(cmd);
}
return transport_handle_cdb_direct(&cmd->se_cmd);
case ISCSI_OP_NOOP_OUT:
case ISCSI_OP_TEXT:
spin_unlock_bh(&cmd->istate_lock);
iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
break;
case ISCSI_OP_SCSI_TMFUNC:
if (cmd->se_cmd.se_tmr_req->response) {
spin_unlock_bh(&cmd->istate_lock);
iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
cmd->i_state);
return 0;
}
spin_unlock_bh(&cmd->istate_lock);
return transport_generic_handle_tmr(&cmd->se_cmd);
case ISCSI_OP_LOGOUT:
spin_unlock_bh(&cmd->istate_lock);
switch (cmd->logout_reason) {
case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
lr = iscsit_logout_closesession(cmd, cmd->conn);
break;
case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
lr = iscsit_logout_closeconnection(cmd, cmd->conn);
break;
case ISCSI_LOGOUT_REASON_RECOVERY:
lr = iscsit_logout_removeconnforrecovery(cmd, cmd->conn);
break;
default:
pr_err("Unknown iSCSI Logout Request Code:"
" 0x%02x\n", cmd->logout_reason);
return -1;
}
return lr;
default:
spin_unlock_bh(&cmd->istate_lock);
pr_err("Cannot perform out of order execution for"
" unknown iSCSI Opcode: 0x%02x\n", cmd->iscsi_opcode);
return -1;
}
return 0;
}
void iscsit_free_all_ooo_cmdsns(struct iscsit_session *sess)
{
struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
mutex_lock(&sess->cmdsn_mutex);
list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
&sess->sess_ooo_cmdsn_list, ooo_list) {
list_del(&ooo_cmdsn->ooo_list);
kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
}
mutex_unlock(&sess->cmdsn_mutex);
}
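/*
 * Defer a command that arrived with a CmdSN higher than ExpCmdSN: stash its
 * intended state, mark it ICF_OOO_CMDSN, and attach an iscsi_ooo_cmdsn
 * entry to the session list in CmdSN order so the command can be executed
 * once the hole is filled.
 */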
int iscsit_handle_ooo_cmdsn(
struct iscsit_session *sess,
struct iscsit_cmd *cmd,
u32 cmdsn)
{
int batch = 0;
struct iscsi_ooo_cmdsn *ooo_cmdsn = NULL, *ooo_tail = NULL;
cmd->deferred_i_state = cmd->i_state;
cmd->i_state = ISTATE_DEFERRED_CMD;
cmd->cmd_flags |= ICF_OOO_CMDSN;
if (list_empty(&sess->sess_ooo_cmdsn_list))
batch = 1;
else {
ooo_tail = list_entry(sess->sess_ooo_cmdsn_list.prev,
typeof(*ooo_tail), ooo_list);
if (ooo_tail->cmdsn != (cmdsn - 1))
batch = 1;
}
ooo_cmdsn = iscsit_allocate_ooo_cmdsn();
if (!ooo_cmdsn)
return -ENOMEM;
ooo_cmdsn->cmd = cmd;
ooo_cmdsn->batch_count = (batch) ?
(cmdsn - sess->exp_cmd_sn) : 1;
ooo_cmdsn->cid = cmd->conn->cid;
ooo_cmdsn->exp_cmdsn = sess->exp_cmd_sn;
ooo_cmdsn->cmdsn = cmdsn;
if (iscsit_attach_ooo_cmdsn(sess, ooo_cmdsn) < 0) {
kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
return -ENOMEM;
}
return 0;
}
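/*
 * Work out which DataOUT window to re-request after a timeout: either the
 * unsolicited data window (FirstBurstLength capped to the transfer length)
 * or the first outstanding R2T that has been sent but not yet completed.
 */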
static int iscsit_set_dataout_timeout_values(
struct iscsit_cmd *cmd,
u32 *offset,
u32 *length)
{
struct iscsit_conn *conn = cmd->conn;
struct iscsi_r2t *r2t;
if (cmd->unsolicited_data) {
*offset = 0;
*length = (conn->sess->sess_ops->FirstBurstLength >
cmd->se_cmd.data_length) ?
cmd->se_cmd.data_length :
conn->sess->sess_ops->FirstBurstLength;
return 0;
}
spin_lock_bh(&cmd->r2t_lock);
if (list_empty(&cmd->cmd_r2t_list)) {
pr_err("cmd->cmd_r2t_list is empty!\n");
spin_unlock_bh(&cmd->r2t_lock);
return -1;
}
list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
if (r2t->sent_r2t && !r2t->recovery_r2t && !r2t->seq_complete) {
*offset = r2t->offset;
*length = r2t->xfer_len;
spin_unlock_bh(&cmd->r2t_lock);
return 0;
}
}
spin_unlock_bh(&cmd->r2t_lock);
pr_err("Unable to locate any incomplete DataOUT"
" sequences for ITT: 0x%08x.\n", cmd->init_task_tag);
return -1;
}
/*
* NOTE: Called from interrupt (timer) context.
*/
void iscsit_handle_dataout_timeout(struct timer_list *t)
{
u32 pdu_length = 0, pdu_offset = 0;
u32 r2t_length = 0, r2t_offset = 0;
struct iscsit_cmd *cmd = from_timer(cmd, t, dataout_timer);
struct iscsit_conn *conn = cmd->conn;
struct iscsit_session *sess = NULL;
struct iscsi_node_attrib *na;
iscsit_inc_conn_usage_count(conn);
spin_lock_bh(&cmd->dataout_timeout_lock);
if (cmd->dataout_timer_flags & ISCSI_TF_STOP) {
spin_unlock_bh(&cmd->dataout_timeout_lock);
iscsit_dec_conn_usage_count(conn);
return;
}
cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING;
sess = conn->sess;
na = iscsit_tpg_get_node_attrib(sess);
if (!sess->sess_ops->ErrorRecoveryLevel) {
pr_err("Unable to recover from DataOut timeout while"
" in ERL=0, closing iSCSI connection for I_T Nexus"
" %s,i,0x%6phN,%s,t,0x%02x\n",
sess->sess_ops->InitiatorName, sess->isid,
sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
goto failure;
}
if (++cmd->dataout_timeout_retries == na->dataout_timeout_retries) {
pr_err("Command ITT: 0x%08x exceeded max retries"
" for DataOUT timeout %u, closing iSCSI connection for"
" I_T Nexus %s,i,0x%6phN,%s,t,0x%02x\n",
cmd->init_task_tag, na->dataout_timeout_retries,
sess->sess_ops->InitiatorName, sess->isid,
sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
goto failure;
}
cmd->cmd_flags |= ICF_WITHIN_COMMAND_RECOVERY;
if (conn->sess->sess_ops->DataSequenceInOrder) {
if (conn->sess->sess_ops->DataPDUInOrder) {
pdu_offset = cmd->write_data_done;
if ((pdu_offset + (conn->sess->sess_ops->MaxBurstLength -
cmd->next_burst_len)) > cmd->se_cmd.data_length)
pdu_length = (cmd->se_cmd.data_length -
cmd->write_data_done);
else
pdu_length = (conn->sess->sess_ops->MaxBurstLength -
cmd->next_burst_len);
} else {
pdu_offset = cmd->seq_start_offset;
pdu_length = (cmd->seq_end_offset -
cmd->seq_start_offset);
}
} else {
if (iscsit_set_dataout_timeout_values(cmd, &pdu_offset,
&pdu_length) < 0)
goto failure;
}
if (iscsit_recalculate_dataout_values(cmd, pdu_offset, pdu_length,
&r2t_offset, &r2t_length) < 0)
goto failure;
pr_debug("Command ITT: 0x%08x timed out waiting for"
" completion of %sDataOUT Sequence Offset: %u, Length: %u\n",
cmd->init_task_tag, (cmd->unsolicited_data) ? "Unsolicited " :
"", r2t_offset, r2t_length);
if (iscsit_send_recovery_r2t(cmd, r2t_offset, r2t_length) < 0)
goto failure;
iscsit_start_dataout_timer(cmd, conn);
spin_unlock_bh(&cmd->dataout_timeout_lock);
iscsit_dec_conn_usage_count(conn);
return;
failure:
spin_unlock_bh(&cmd->dataout_timeout_lock);
iscsit_fill_cxn_timeout_err_stats(sess);
iscsit_cause_connection_reinstatement(conn, 0);
iscsit_dec_conn_usage_count(conn);
}
void iscsit_mod_dataout_timer(struct iscsit_cmd *cmd)
{
struct iscsit_conn *conn = cmd->conn;
struct iscsit_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
spin_lock_bh(&cmd->dataout_timeout_lock);
if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
spin_unlock_bh(&cmd->dataout_timeout_lock);
return;
}
mod_timer(&cmd->dataout_timer,
(get_jiffies_64() + na->dataout_timeout * HZ));
pr_debug("Updated DataOUT timer for ITT: 0x%08x",
cmd->init_task_tag);
spin_unlock_bh(&cmd->dataout_timeout_lock);
}
void iscsit_start_dataout_timer(
struct iscsit_cmd *cmd,
struct iscsit_conn *conn)
{
struct iscsit_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
lockdep_assert_held(&cmd->dataout_timeout_lock);
if (cmd->dataout_timer_flags & ISCSI_TF_RUNNING)
return;
pr_debug("Starting DataOUT timer for ITT: 0x%08x on"
" CID: %hu.\n", cmd->init_task_tag, conn->cid);
cmd->dataout_timer_flags &= ~ISCSI_TF_STOP;
cmd->dataout_timer_flags |= ISCSI_TF_RUNNING;
mod_timer(&cmd->dataout_timer, jiffies + na->dataout_timeout * HZ);
}
void iscsit_stop_dataout_timer(struct iscsit_cmd *cmd)
{
spin_lock_bh(&cmd->dataout_timeout_lock);
if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
spin_unlock_bh(&cmd->dataout_timeout_lock);
return;
}
cmd->dataout_timer_flags |= ISCSI_TF_STOP;
spin_unlock_bh(&cmd->dataout_timeout_lock);
del_timer_sync(&cmd->dataout_timer);
spin_lock_bh(&cmd->dataout_timeout_lock);
cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING;
pr_debug("Stopped DataOUT Timer for ITT: 0x%08x\n",
cmd->init_task_tag);
spin_unlock_bh(&cmd->dataout_timeout_lock);
}
EXPORT_SYMBOL(iscsit_stop_dataout_timer);
|
linux-master
|
drivers/target/iscsi/iscsi_target_erl1.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains the iSCSI Virtual Device and Disk Transport
* agnostic functions.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_device.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
void iscsit_determine_maxcmdsn(struct iscsit_session *sess)
{
struct se_node_acl *se_nacl;
/*
* This is a discovery session, the single queue slot was already
* assigned in iscsi_login_zero_tsih(). Since only Logout and
* Text Opcodes are allowed during discovery we do not have to worry
* about the HBA's queue depth here.
*/
if (sess->sess_ops->SessionType)
return;
se_nacl = sess->se_sess->se_node_acl;
/*
* This is a normal session, set the Session's CmdSN window to the
* struct se_node_acl->queue_depth. The value in struct se_node_acl->queue_depth
* has already been validated as a legal value in
* core_set_queue_depth_for_node().
*/
sess->cmdsn_window = se_nacl->queue_depth;
atomic_add(se_nacl->queue_depth - 1, &sess->max_cmd_sn);
}
void iscsit_increment_maxcmdsn(struct iscsit_cmd *cmd, struct iscsit_session *sess)
{
u32 max_cmd_sn;
if (cmd->immediate_cmd || cmd->maxcmdsn_inc)
return;
cmd->maxcmdsn_inc = 1;
max_cmd_sn = atomic_inc_return(&sess->max_cmd_sn);
pr_debug("Updated MaxCmdSN to 0x%08x\n", max_cmd_sn);
}
EXPORT_SYMBOL(iscsit_increment_maxcmdsn);
|
linux-master
|
drivers/target/iscsi/iscsi_target_device.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains the configfs implementation for iSCSI Target mode
* from the LIO-Target Project.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <[email protected]>
*
****************************************************************************/
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/export.h>
#include <linux/inet.h>
#include <linux/module.h>
#include <net/ipv6.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_device.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_nodeattrib.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
#include <target/iscsi/iscsi_target_stat.h>
/* Start items for lio_target_portal_cit */
static inline struct iscsi_tpg_np *to_iscsi_tpg_np(struct config_item *item)
{
return container_of(to_tpg_np(item), struct iscsi_tpg_np, se_tpg_np);
}
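/*
 * Show/store helpers for the per-portal transport driver attributes
 * (iser, cxgbit): reading reports whether a child portal of the given
 * transport type exists, writing 1 loads the driver module and adds one,
 * writing 0 removes it.
 */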
static ssize_t lio_target_np_driver_show(struct config_item *item, char *page,
enum iscsit_transport_type type)
{
struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item);
struct iscsi_tpg_np *tpg_np_new;
ssize_t rb;
tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type);
if (tpg_np_new)
rb = sysfs_emit(page, "1\n");
else
rb = sysfs_emit(page, "0\n");
return rb;
}
static ssize_t lio_target_np_driver_store(struct config_item *item,
const char *page, size_t count, enum iscsit_transport_type type,
const char *mod_name)
{
struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item);
struct iscsi_np *np;
struct iscsi_portal_group *tpg;
struct iscsi_tpg_np *tpg_np_new = NULL;
u32 op;
int rc;
rc = kstrtou32(page, 0, &op);
if (rc)
return rc;
if ((op != 1) && (op != 0)) {
pr_err("Illegal value for tpg_enable: %u\n", op);
return -EINVAL;
}
np = tpg_np->tpg_np;
if (!np) {
pr_err("Unable to locate struct iscsi_np from"
" struct iscsi_tpg_np\n");
return -EINVAL;
}
tpg = tpg_np->tpg;
if (iscsit_get_tpg(tpg) < 0)
return -EINVAL;
if (op) {
if (strlen(mod_name)) {
rc = request_module(mod_name);
if (rc != 0) {
pr_warn("Unable to request_module for %s\n",
mod_name);
rc = 0;
}
}
tpg_np_new = iscsit_tpg_add_network_portal(tpg,
&np->np_sockaddr, tpg_np, type);
if (IS_ERR(tpg_np_new)) {
rc = PTR_ERR(tpg_np_new);
goto out;
}
} else {
tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type);
if (tpg_np_new) {
rc = iscsit_tpg_del_network_portal(tpg, tpg_np_new);
if (rc < 0)
goto out;
}
}
iscsit_put_tpg(tpg);
return count;
out:
iscsit_put_tpg(tpg);
return rc;
}
static ssize_t lio_target_np_iser_show(struct config_item *item, char *page)
{
return lio_target_np_driver_show(item, page, ISCSI_INFINIBAND);
}
static ssize_t lio_target_np_iser_store(struct config_item *item,
const char *page, size_t count)
{
return lio_target_np_driver_store(item, page, count,
ISCSI_INFINIBAND, "ib_isert");
}
CONFIGFS_ATTR(lio_target_np_, iser);
static ssize_t lio_target_np_cxgbit_show(struct config_item *item, char *page)
{
return lio_target_np_driver_show(item, page, ISCSI_CXGBIT);
}
static ssize_t lio_target_np_cxgbit_store(struct config_item *item,
const char *page, size_t count)
{
return lio_target_np_driver_store(item, page, count,
ISCSI_CXGBIT, "cxgbit");
}
CONFIGFS_ATTR(lio_target_np_, cxgbit);
static struct configfs_attribute *lio_target_portal_attrs[] = {
&lio_target_np_attr_iser,
&lio_target_np_attr_cxgbit,
NULL,
};
/* Stop items for lio_target_portal_cit */
/* Start items for lio_target_np_cit */
#define MAX_PORTAL_LEN 256
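/*
 * configfs mkdir handler for np/$ADDRESS:$PORT: parse the "[ipv6]:port" or
 * "ipv4:port" string, resolve it into a sockaddr, and register a new
 * network portal (ISCSI_TCP by default) on the TPG.
 */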
static struct se_tpg_np *lio_target_call_addnptotpg(
struct se_portal_group *se_tpg,
struct config_group *group,
const char *name)
{
struct iscsi_portal_group *tpg;
struct iscsi_tpg_np *tpg_np;
char *str, *str2, *ip_str, *port_str;
struct sockaddr_storage sockaddr = { };
int ret;
char buf[MAX_PORTAL_LEN + 1] = { };
if (strlen(name) > MAX_PORTAL_LEN) {
pr_err("strlen(name): %d exceeds MAX_PORTAL_LEN: %d\n",
(int)strlen(name), MAX_PORTAL_LEN);
return ERR_PTR(-EOVERFLOW);
}
snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name);
str = strstr(buf, "[");
if (str) {
str2 = strstr(str, "]");
if (!str2) {
pr_err("Unable to locate trailing \"]\""
" in IPv6 iSCSI network portal address\n");
return ERR_PTR(-EINVAL);
}
ip_str = str + 1; /* Skip over leading "[" */
*str2 = '\0'; /* Terminate the unbracketed IPv6 address */
str2++; /* Skip over the \0 */
port_str = strstr(str2, ":");
if (!port_str) {
pr_err("Unable to locate \":port\""
" in IPv6 iSCSI network portal address\n");
return ERR_PTR(-EINVAL);
}
*port_str = '\0'; /* Terminate string for IP */
port_str++; /* Skip over ":" */
} else {
ip_str = &buf[0];
port_str = strstr(ip_str, ":");
if (!port_str) {
pr_err("Unable to locate \":port\""
" in IPv4 iSCSI network portal address\n");
return ERR_PTR(-EINVAL);
}
*port_str = '\0'; /* Terminate string for IP */
port_str++; /* Skip over ":" */
}
ret = inet_pton_with_scope(&init_net, AF_UNSPEC, ip_str,
port_str, &sockaddr);
if (ret) {
pr_err("malformed ip/port passed: %s\n", name);
return ERR_PTR(ret);
}
tpg = to_iscsi_tpg(se_tpg);
ret = iscsit_get_tpg(tpg);
if (ret < 0)
return ERR_PTR(-EINVAL);
pr_debug("LIO_Target_ConfigFS: REGISTER -> %s TPGT: %hu"
" PORTAL: %s\n",
config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
tpg->tpgt, name);
/*
* Assume ISCSI_TCP by default. Other network portals for other
* iSCSI fabrics:
*
* Traditional iSCSI over SCTP (initial support)
* iSER/TCP (TODO, hardware available)
* iSER/SCTP (TODO, software emulation with osc-iwarp)
* iSER/IB (TODO, hardware available)
*
* can be enabled with attributes under
* /sys/kernel/config/iscsi/$IQN/$TPG/np/$IP:$PORT/
*
*/
tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, NULL,
ISCSI_TCP);
if (IS_ERR(tpg_np)) {
iscsit_put_tpg(tpg);
return ERR_CAST(tpg_np);
}
pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n");
iscsit_put_tpg(tpg);
return &tpg_np->se_tpg_np;
}
static void lio_target_call_delnpfromtpg(
struct se_tpg_np *se_tpg_np)
{
struct iscsi_portal_group *tpg;
struct iscsi_tpg_np *tpg_np;
struct se_portal_group *se_tpg;
int ret;
tpg_np = container_of(se_tpg_np, struct iscsi_tpg_np, se_tpg_np);
tpg = tpg_np->tpg;
ret = iscsit_get_tpg(tpg);
if (ret < 0)
return;
se_tpg = &tpg->tpg_se_tpg;
pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s TPGT: %hu"
" PORTAL: %pISpc\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
tpg->tpgt, &tpg_np->tpg_np->np_sockaddr);
ret = iscsit_tpg_del_network_portal(tpg, tpg_np);
if (ret < 0)
goto out;
pr_debug("LIO_Target_ConfigFS: delnpfromtpg done!\n");
out:
iscsit_put_tpg(tpg);
}
/* End items for lio_target_np_cit */
/* Start items for lio_target_nacl_attrib_cit */
#define ISCSI_NACL_ATTR(name) \
static ssize_t iscsi_nacl_attrib_##name##_show(struct config_item *item,\
char *page) \
{ \
struct se_node_acl *se_nacl = attrib_to_nacl(item); \
struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl); \
return sysfs_emit(page, "%u\n", nacl->node_attrib.name); \
} \
\
static ssize_t iscsi_nacl_attrib_##name##_store(struct config_item *item,\
const char *page, size_t count) \
{ \
struct se_node_acl *se_nacl = attrib_to_nacl(item); \
struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl); \
u32 val; \
int ret; \
\
ret = kstrtou32(page, 0, &val); \
if (ret) \
return ret; \
ret = iscsit_na_##name(nacl, val); \
if (ret < 0) \
return ret; \
\
return count; \
} \
\
CONFIGFS_ATTR(iscsi_nacl_attrib_, name)
ISCSI_NACL_ATTR(dataout_timeout);
ISCSI_NACL_ATTR(dataout_timeout_retries);
ISCSI_NACL_ATTR(default_erl);
ISCSI_NACL_ATTR(nopin_timeout);
ISCSI_NACL_ATTR(nopin_response_timeout);
ISCSI_NACL_ATTR(random_datain_pdu_offsets);
ISCSI_NACL_ATTR(random_datain_seq_offsets);
ISCSI_NACL_ATTR(random_r2t_offsets);
static ssize_t iscsi_nacl_attrib_authentication_show(struct config_item *item,
char *page)
{
struct se_node_acl *se_nacl = attrib_to_nacl(item);
struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl);
return sysfs_emit(page, "%d\n", nacl->node_attrib.authentication);
}
static ssize_t iscsi_nacl_attrib_authentication_store(struct config_item *item,
const char *page, size_t count)
{
struct se_node_acl *se_nacl = attrib_to_nacl(item);
struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl);
s32 val;
int ret;
ret = kstrtos32(page, 0, &val);
if (ret)
return ret;
if (val != 0 && val != 1 && val != NA_AUTHENTICATION_INHERITED)
return -EINVAL;
nacl->node_attrib.authentication = val;
return count;
}
CONFIGFS_ATTR(iscsi_nacl_attrib_, authentication);
static struct configfs_attribute *lio_target_nacl_attrib_attrs[] = {
&iscsi_nacl_attrib_attr_dataout_timeout,
&iscsi_nacl_attrib_attr_dataout_timeout_retries,
&iscsi_nacl_attrib_attr_default_erl,
&iscsi_nacl_attrib_attr_nopin_timeout,
&iscsi_nacl_attrib_attr_nopin_response_timeout,
&iscsi_nacl_attrib_attr_random_datain_pdu_offsets,
&iscsi_nacl_attrib_attr_random_datain_seq_offsets,
&iscsi_nacl_attrib_attr_random_r2t_offsets,
&iscsi_nacl_attrib_attr_authentication,
NULL,
};
/* End items for lio_target_nacl_attrib_cit */
/* Start items for lio_target_nacl_auth_cit */
#define __DEF_NACL_AUTH_STR(prefix, name, flags) \
static ssize_t __iscsi_##prefix##_##name##_show( \
struct iscsi_node_acl *nacl, \
char *page) \
{ \
struct iscsi_node_auth *auth = &nacl->node_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
return -EPERM; \
return snprintf(page, PAGE_SIZE, "%s\n", auth->name); \
} \
\
static ssize_t __iscsi_##prefix##_##name##_store( \
struct iscsi_node_acl *nacl, \
const char *page, \
size_t count) \
{ \
struct iscsi_node_auth *auth = &nacl->node_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
return -EPERM; \
if (count >= sizeof(auth->name)) \
return -EINVAL; \
snprintf(auth->name, sizeof(auth->name), "%s", page); \
if (!strncmp("NULL", auth->name, 4)) \
auth->naf_flags &= ~flags; \
else \
auth->naf_flags |= flags; \
\
if ((auth->naf_flags & NAF_USERID_IN_SET) && \
(auth->naf_flags & NAF_PASSWORD_IN_SET)) \
auth->authenticate_target = 1; \
else \
auth->authenticate_target = 0; \
\
return count; \
}
#define DEF_NACL_AUTH_STR(name, flags) \
__DEF_NACL_AUTH_STR(nacl_auth, name, flags) \
static ssize_t iscsi_nacl_auth_##name##_show(struct config_item *item, \
char *page) \
{ \
struct se_node_acl *nacl = auth_to_nacl(item); \
return __iscsi_nacl_auth_##name##_show(to_iscsi_nacl(nacl), page); \
} \
static ssize_t iscsi_nacl_auth_##name##_store(struct config_item *item, \
const char *page, size_t count) \
{ \
struct se_node_acl *nacl = auth_to_nacl(item); \
return __iscsi_nacl_auth_##name##_store(to_iscsi_nacl(nacl), \
page, count); \
} \
\
CONFIGFS_ATTR(iscsi_nacl_auth_, name)
/*
* One-way authentication userid
*/
DEF_NACL_AUTH_STR(userid, NAF_USERID_SET);
DEF_NACL_AUTH_STR(password, NAF_PASSWORD_SET);
DEF_NACL_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
DEF_NACL_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
#define __DEF_NACL_AUTH_INT(prefix, name) \
static ssize_t __iscsi_##prefix##_##name##_show( \
struct iscsi_node_acl *nacl, \
char *page) \
{ \
struct iscsi_node_auth *auth = &nacl->node_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
return -EPERM; \
\
return snprintf(page, PAGE_SIZE, "%d\n", auth->name); \
}
#define DEF_NACL_AUTH_INT(name) \
__DEF_NACL_AUTH_INT(nacl_auth, name) \
static ssize_t iscsi_nacl_auth_##name##_show(struct config_item *item, \
char *page) \
{ \
struct se_node_acl *nacl = auth_to_nacl(item); \
return __iscsi_nacl_auth_##name##_show(to_iscsi_nacl(nacl), page); \
} \
\
CONFIGFS_ATTR_RO(iscsi_nacl_auth_, name)
DEF_NACL_AUTH_INT(authenticate_target);
static struct configfs_attribute *lio_target_nacl_auth_attrs[] = {
&iscsi_nacl_auth_attr_userid,
&iscsi_nacl_auth_attr_password,
&iscsi_nacl_auth_attr_authenticate_target,
&iscsi_nacl_auth_attr_userid_mutual,
&iscsi_nacl_auth_attr_password_mutual,
NULL,
};
/* End items for lio_target_nacl_auth_cit */
/* Start items for lio_target_nacl_param_cit */
#define ISCSI_NACL_PARAM(name) \
static ssize_t iscsi_nacl_param_##name##_show(struct config_item *item, \
char *page) \
{ \
struct se_node_acl *se_nacl = param_to_nacl(item); \
struct iscsit_session *sess; \
struct se_session *se_sess; \
ssize_t rb; \
\
spin_lock_bh(&se_nacl->nacl_sess_lock); \
se_sess = se_nacl->nacl_sess; \
if (!se_sess) { \
rb = snprintf(page, PAGE_SIZE, \
"No Active iSCSI Session\n"); \
} else { \
sess = se_sess->fabric_sess_ptr; \
rb = snprintf(page, PAGE_SIZE, "%u\n", \
(u32)sess->sess_ops->name); \
} \
spin_unlock_bh(&se_nacl->nacl_sess_lock); \
\
return rb; \
} \
\
CONFIGFS_ATTR_RO(iscsi_nacl_param_, name)
ISCSI_NACL_PARAM(MaxConnections);
ISCSI_NACL_PARAM(InitialR2T);
ISCSI_NACL_PARAM(ImmediateData);
ISCSI_NACL_PARAM(MaxBurstLength);
ISCSI_NACL_PARAM(FirstBurstLength);
ISCSI_NACL_PARAM(DefaultTime2Wait);
ISCSI_NACL_PARAM(DefaultTime2Retain);
ISCSI_NACL_PARAM(MaxOutstandingR2T);
ISCSI_NACL_PARAM(DataPDUInOrder);
ISCSI_NACL_PARAM(DataSequenceInOrder);
ISCSI_NACL_PARAM(ErrorRecoveryLevel);
static struct configfs_attribute *lio_target_nacl_param_attrs[] = {
&iscsi_nacl_param_attr_MaxConnections,
&iscsi_nacl_param_attr_InitialR2T,
&iscsi_nacl_param_attr_ImmediateData,
&iscsi_nacl_param_attr_MaxBurstLength,
&iscsi_nacl_param_attr_FirstBurstLength,
&iscsi_nacl_param_attr_DefaultTime2Wait,
&iscsi_nacl_param_attr_DefaultTime2Retain,
&iscsi_nacl_param_attr_MaxOutstandingR2T,
&iscsi_nacl_param_attr_DataPDUInOrder,
&iscsi_nacl_param_attr_DataSequenceInOrder,
&iscsi_nacl_param_attr_ErrorRecoveryLevel,
NULL,
};
/* End items for lio_target_nacl_param_cit */
/* Start items for lio_target_acl_cit */
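/*
 * Emit a human-readable summary of the initiator ACL's active session
 * (if any): identifiers, session state, CmdSN window values, and the
 * state of each connection in the session.
 */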
static ssize_t lio_target_nacl_info_show(struct config_item *item, char *page)
{
struct se_node_acl *se_nacl = acl_to_nacl(item);
struct iscsit_session *sess;
struct iscsit_conn *conn;
struct se_session *se_sess;
ssize_t rb = 0;
u32 max_cmd_sn;
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (!se_sess) {
rb += sysfs_emit_at(page, rb, "No active iSCSI Session for Initiator"
" Endpoint: %s\n", se_nacl->initiatorname);
} else {
sess = se_sess->fabric_sess_ptr;
rb += sysfs_emit_at(page, rb, "InitiatorName: %s\n",
sess->sess_ops->InitiatorName);
rb += sysfs_emit_at(page, rb, "InitiatorAlias: %s\n",
sess->sess_ops->InitiatorAlias);
rb += sysfs_emit_at(page, rb,
"LIO Session ID: %u ISID: 0x%6ph TSIH: %hu ",
sess->sid, sess->isid, sess->tsih);
rb += sysfs_emit_at(page, rb, "SessionType: %s\n",
(sess->sess_ops->SessionType) ?
"Discovery" : "Normal");
rb += sysfs_emit_at(page, rb, "Session State: ");
switch (sess->session_state) {
case TARG_SESS_STATE_FREE:
rb += sysfs_emit_at(page, rb, "TARG_SESS_FREE\n");
break;
case TARG_SESS_STATE_ACTIVE:
rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_ACTIVE\n");
break;
case TARG_SESS_STATE_LOGGED_IN:
rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_LOGGED_IN\n");
break;
case TARG_SESS_STATE_FAILED:
rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_FAILED\n");
break;
case TARG_SESS_STATE_IN_CONTINUE:
rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_IN_CONTINUE\n");
break;
default:
rb += sysfs_emit_at(page, rb, "ERROR: Unknown Session"
" State!\n");
break;
}
rb += sysfs_emit_at(page, rb, "---------------------[iSCSI Session"
" Values]-----------------------\n");
rb += sysfs_emit_at(page, rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN"
" : MaxCmdSN : ITT : TTT\n");
max_cmd_sn = (u32) atomic_read(&sess->max_cmd_sn);
rb += sysfs_emit_at(page, rb, " 0x%08x 0x%08x 0x%08x 0x%08x"
" 0x%08x 0x%08x\n",
sess->cmdsn_window,
(max_cmd_sn - sess->exp_cmd_sn) + 1,
sess->exp_cmd_sn, max_cmd_sn,
sess->init_task_tag, sess->targ_xfer_tag);
rb += sysfs_emit_at(page, rb, "----------------------[iSCSI"
" Connections]-------------------------\n");
spin_lock(&sess->conn_lock);
list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
rb += sysfs_emit_at(page, rb, "CID: %hu Connection"
" State: ", conn->cid);
switch (conn->conn_state) {
case TARG_CONN_STATE_FREE:
rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_FREE\n");
break;
case TARG_CONN_STATE_XPT_UP:
rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_XPT_UP\n");
break;
case TARG_CONN_STATE_IN_LOGIN:
rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_IN_LOGIN\n");
break;
case TARG_CONN_STATE_LOGGED_IN:
rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_LOGGED_IN\n");
break;
case TARG_CONN_STATE_IN_LOGOUT:
rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_IN_LOGOUT\n");
break;
case TARG_CONN_STATE_LOGOUT_REQUESTED:
rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_LOGOUT_REQUESTED\n");
break;
case TARG_CONN_STATE_CLEANUP_WAIT:
rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_CLEANUP_WAIT\n");
break;
default:
rb += sysfs_emit_at(page, rb,
"ERROR: Unknown Connection State!\n");
break;
}
rb += sysfs_emit_at(page, rb, " Address %pISc %s", &conn->login_sockaddr,
(conn->network_transport == ISCSI_TCP) ?
"TCP" : "SCTP");
rb += sysfs_emit_at(page, rb, " StatSN: 0x%08x\n",
conn->stat_sn);
}
spin_unlock(&sess->conn_lock);
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return rb;
}
static ssize_t lio_target_nacl_cmdsn_depth_show(struct config_item *item,
char *page)
{
return sysfs_emit(page, "%u\n", acl_to_nacl(item)->queue_depth);
}
static ssize_t lio_target_nacl_cmdsn_depth_store(struct config_item *item,
const char *page, size_t count)
{
struct se_node_acl *se_nacl = acl_to_nacl(item);
struct se_portal_group *se_tpg = se_nacl->se_tpg;
struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg);
struct config_item *acl_ci, *tpg_ci, *wwn_ci;
u32 cmdsn_depth = 0;
int ret;
ret = kstrtou32(page, 0, &cmdsn_depth);
if (ret)
return ret;
if (cmdsn_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
pr_err("Passed cmdsn_depth: %u exceeds"
" TA_DEFAULT_CMDSN_DEPTH_MAX: %u\n", cmdsn_depth,
TA_DEFAULT_CMDSN_DEPTH_MAX);
return -EINVAL;
}
acl_ci = &se_nacl->acl_group.cg_item;
if (!acl_ci) {
pr_err("Unable to locatel acl_ci\n");
return -EINVAL;
}
tpg_ci = &acl_ci->ci_parent->ci_group->cg_item;
if (!tpg_ci) {
pr_err("Unable to locate tpg_ci\n");
return -EINVAL;
}
wwn_ci = &tpg_ci->ci_group->cg_item;
if (!wwn_ci) {
pr_err("Unable to locate config_item wwn_ci\n");
return -EINVAL;
}
if (iscsit_get_tpg(tpg) < 0)
return -EINVAL;
ret = core_tpg_set_initiator_node_queue_depth(se_nacl, cmdsn_depth);
pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for"
"InitiatorName: %s\n", config_item_name(wwn_ci),
config_item_name(tpg_ci), cmdsn_depth,
config_item_name(acl_ci));
iscsit_put_tpg(tpg);
return (!ret) ? count : (ssize_t)ret;
}
static ssize_t lio_target_nacl_tag_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "%s", acl_to_nacl(item)->acl_tag);
}
static ssize_t lio_target_nacl_tag_store(struct config_item *item,
const char *page, size_t count)
{
struct se_node_acl *se_nacl = acl_to_nacl(item);
int ret;
ret = core_tpg_set_initiator_node_tag(se_nacl->se_tpg, se_nacl, page);
if (ret < 0)
return ret;
return count;
}
CONFIGFS_ATTR_RO(lio_target_nacl_, info);
CONFIGFS_ATTR(lio_target_nacl_, cmdsn_depth);
CONFIGFS_ATTR(lio_target_nacl_, tag);
static struct configfs_attribute *lio_target_initiator_attrs[] = {
&lio_target_nacl_attr_info,
&lio_target_nacl_attr_cmdsn_depth,
&lio_target_nacl_attr_tag,
NULL,
};
static int lio_target_init_nodeacl(struct se_node_acl *se_nacl,
const char *name)
{
struct iscsi_node_acl *acl = to_iscsi_nacl(se_nacl);
config_group_init_type_name(&acl->node_stat_grps.iscsi_sess_stats_group,
"iscsi_sess_stats", &iscsi_stat_sess_cit);
configfs_add_default_group(&acl->node_stat_grps.iscsi_sess_stats_group,
&se_nacl->acl_fabric_stat_group);
return 0;
}
/* End items for lio_target_acl_cit */
/* Start items for lio_target_tpg_attrib_cit */
#define DEF_TPG_ATTRIB(name) \
\
static ssize_t iscsi_tpg_attrib_##name##_show(struct config_item *item, \
char *page) \
{ \
struct se_portal_group *se_tpg = attrib_to_tpg(item); \
struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
ssize_t rb; \
\
if (iscsit_get_tpg(tpg) < 0) \
return -EINVAL; \
\
rb = sysfs_emit(page, "%u\n", tpg->tpg_attrib.name); \
iscsit_put_tpg(tpg); \
return rb; \
} \
\
static ssize_t iscsi_tpg_attrib_##name##_store(struct config_item *item,\
const char *page, size_t count) \
{ \
struct se_portal_group *se_tpg = attrib_to_tpg(item); \
struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
u32 val; \
int ret; \
\
if (iscsit_get_tpg(tpg) < 0) \
return -EINVAL; \
\
ret = kstrtou32(page, 0, &val); \
if (ret) \
goto out; \
ret = iscsit_ta_##name(tpg, val); \
if (ret < 0) \
goto out; \
\
iscsit_put_tpg(tpg); \
return count; \
out: \
iscsit_put_tpg(tpg); \
return ret; \
} \
CONFIGFS_ATTR(iscsi_tpg_attrib_, name)
DEF_TPG_ATTRIB(authentication);
DEF_TPG_ATTRIB(login_timeout);
DEF_TPG_ATTRIB(generate_node_acls);
DEF_TPG_ATTRIB(default_cmdsn_depth);
DEF_TPG_ATTRIB(cache_dynamic_acls);
DEF_TPG_ATTRIB(demo_mode_write_protect);
DEF_TPG_ATTRIB(prod_mode_write_protect);
DEF_TPG_ATTRIB(demo_mode_discovery);
DEF_TPG_ATTRIB(default_erl);
DEF_TPG_ATTRIB(t10_pi);
DEF_TPG_ATTRIB(fabric_prot_type);
DEF_TPG_ATTRIB(tpg_enabled_sendtargets);
DEF_TPG_ATTRIB(login_keys_workaround);
static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_attr_authentication,
&iscsi_tpg_attrib_attr_login_timeout,
&iscsi_tpg_attrib_attr_generate_node_acls,
&iscsi_tpg_attrib_attr_default_cmdsn_depth,
&iscsi_tpg_attrib_attr_cache_dynamic_acls,
&iscsi_tpg_attrib_attr_demo_mode_write_protect,
&iscsi_tpg_attrib_attr_prod_mode_write_protect,
&iscsi_tpg_attrib_attr_demo_mode_discovery,
&iscsi_tpg_attrib_attr_default_erl,
&iscsi_tpg_attrib_attr_t10_pi,
&iscsi_tpg_attrib_attr_fabric_prot_type,
&iscsi_tpg_attrib_attr_tpg_enabled_sendtargets,
&iscsi_tpg_attrib_attr_login_keys_workaround,
NULL,
};
/* End items for lio_target_tpg_attrib_cit */
/* Start items for lio_target_tpg_auth_cit */
#define __DEF_TPG_AUTH_STR(prefix, name, flags) \
static ssize_t __iscsi_##prefix##_##name##_show(struct se_portal_group *se_tpg, \
char *page) \
{ \
struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
return -EPERM; \
\
return snprintf(page, PAGE_SIZE, "%s\n", auth->name); \
} \
\
static ssize_t __iscsi_##prefix##_##name##_store(struct se_portal_group *se_tpg,\
const char *page, size_t count) \
{ \
struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
return -EPERM; \
\
snprintf(auth->name, sizeof(auth->name), "%s", page); \
if (!(strncmp("NULL", auth->name, 4))) \
auth->naf_flags &= ~flags; \
else \
auth->naf_flags |= flags; \
\
if ((auth->naf_flags & NAF_USERID_IN_SET) && \
(auth->naf_flags & NAF_PASSWORD_IN_SET)) \
auth->authenticate_target = 1; \
else \
auth->authenticate_target = 0; \
\
return count; \
}
#define DEF_TPG_AUTH_STR(name, flags) \
__DEF_TPG_AUTH_STR(tpg_auth, name, flags) \
static ssize_t iscsi_tpg_auth_##name##_show(struct config_item *item, \
char *page) \
{ \
return __iscsi_tpg_auth_##name##_show(auth_to_tpg(item), page); \
} \
\
static ssize_t iscsi_tpg_auth_##name##_store(struct config_item *item, \
const char *page, size_t count) \
{ \
return __iscsi_tpg_auth_##name##_store(auth_to_tpg(item), page, count); \
} \
\
CONFIGFS_ATTR(iscsi_tpg_auth_, name);
DEF_TPG_AUTH_STR(userid, NAF_USERID_SET);
DEF_TPG_AUTH_STR(password, NAF_PASSWORD_SET);
DEF_TPG_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
DEF_TPG_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
#define __DEF_TPG_AUTH_INT(prefix, name) \
static ssize_t __iscsi_##prefix##_##name##_show(struct se_portal_group *se_tpg, \
char *page) \
{ \
struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
return -EPERM; \
\
return snprintf(page, PAGE_SIZE, "%d\n", auth->name); \
}
#define DEF_TPG_AUTH_INT(name) \
__DEF_TPG_AUTH_INT(tpg_auth, name) \
static ssize_t iscsi_tpg_auth_##name##_show(struct config_item *item, \
char *page) \
{ \
return __iscsi_tpg_auth_##name##_show(auth_to_tpg(item), page); \
} \
CONFIGFS_ATTR_RO(iscsi_tpg_auth_, name);
DEF_TPG_AUTH_INT(authenticate_target);
static struct configfs_attribute *lio_target_tpg_auth_attrs[] = {
&iscsi_tpg_auth_attr_userid,
&iscsi_tpg_auth_attr_password,
&iscsi_tpg_auth_attr_authenticate_target,
&iscsi_tpg_auth_attr_userid_mutual,
&iscsi_tpg_auth_attr_password_mutual,
NULL,
};
/* End items for lio_target_tpg_auth_cit */
/* Start items for lio_target_tpg_param_cit */
#define DEF_TPG_PARAM(name) \
static ssize_t iscsi_tpg_param_##name##_show(struct config_item *item, \
char *page) \
{ \
struct se_portal_group *se_tpg = param_to_tpg(item); \
struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
struct iscsi_param *param; \
ssize_t rb; \
\
if (iscsit_get_tpg(tpg) < 0) \
return -EINVAL; \
\
param = iscsi_find_param_from_key(__stringify(name), \
tpg->param_list); \
if (!param) { \
iscsit_put_tpg(tpg); \
return -EINVAL; \
} \
rb = snprintf(page, PAGE_SIZE, "%s\n", param->value); \
\
iscsit_put_tpg(tpg); \
return rb; \
} \
static ssize_t iscsi_tpg_param_##name##_store(struct config_item *item, \
const char *page, size_t count) \
{ \
struct se_portal_group *se_tpg = param_to_tpg(item); \
struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
char *buf; \
int ret, len; \
\
buf = kzalloc(PAGE_SIZE, GFP_KERNEL); \
if (!buf) \
return -ENOMEM; \
len = snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page); \
if (isspace(buf[len-1])) \
buf[len-1] = '\0'; /* Kill newline */ \
\
if (iscsit_get_tpg(tpg) < 0) { \
kfree(buf); \
return -EINVAL; \
} \
\
ret = iscsi_change_param_value(buf, tpg->param_list, 1); \
if (ret < 0) \
goto out; \
\
kfree(buf); \
iscsit_put_tpg(tpg); \
return count; \
out: \
kfree(buf); \
iscsit_put_tpg(tpg); \
return -EINVAL; \
} \
CONFIGFS_ATTR(iscsi_tpg_param_, name)
DEF_TPG_PARAM(AuthMethod);
DEF_TPG_PARAM(HeaderDigest);
DEF_TPG_PARAM(DataDigest);
DEF_TPG_PARAM(MaxConnections);
DEF_TPG_PARAM(TargetAlias);
DEF_TPG_PARAM(InitialR2T);
DEF_TPG_PARAM(ImmediateData);
DEF_TPG_PARAM(MaxRecvDataSegmentLength);
DEF_TPG_PARAM(MaxXmitDataSegmentLength);
DEF_TPG_PARAM(MaxBurstLength);
DEF_TPG_PARAM(FirstBurstLength);
DEF_TPG_PARAM(DefaultTime2Wait);
DEF_TPG_PARAM(DefaultTime2Retain);
DEF_TPG_PARAM(MaxOutstandingR2T);
DEF_TPG_PARAM(DataPDUInOrder);
DEF_TPG_PARAM(DataSequenceInOrder);
DEF_TPG_PARAM(ErrorRecoveryLevel);
DEF_TPG_PARAM(IFMarker);
DEF_TPG_PARAM(OFMarker);
DEF_TPG_PARAM(IFMarkInt);
DEF_TPG_PARAM(OFMarkInt);
static struct configfs_attribute *lio_target_tpg_param_attrs[] = {
&iscsi_tpg_param_attr_AuthMethod,
&iscsi_tpg_param_attr_HeaderDigest,
&iscsi_tpg_param_attr_DataDigest,
&iscsi_tpg_param_attr_MaxConnections,
&iscsi_tpg_param_attr_TargetAlias,
&iscsi_tpg_param_attr_InitialR2T,
&iscsi_tpg_param_attr_ImmediateData,
&iscsi_tpg_param_attr_MaxRecvDataSegmentLength,
&iscsi_tpg_param_attr_MaxXmitDataSegmentLength,
&iscsi_tpg_param_attr_MaxBurstLength,
&iscsi_tpg_param_attr_FirstBurstLength,
&iscsi_tpg_param_attr_DefaultTime2Wait,
&iscsi_tpg_param_attr_DefaultTime2Retain,
&iscsi_tpg_param_attr_MaxOutstandingR2T,
&iscsi_tpg_param_attr_DataPDUInOrder,
&iscsi_tpg_param_attr_DataSequenceInOrder,
&iscsi_tpg_param_attr_ErrorRecoveryLevel,
&iscsi_tpg_param_attr_IFMarker,
&iscsi_tpg_param_attr_OFMarker,
&iscsi_tpg_param_attr_IFMarkInt,
&iscsi_tpg_param_attr_OFMarkInt,
NULL,
};
/* End items for lio_target_tpg_param_cit */
/* Start items for lio_target_tpg_cit */
static ssize_t lio_target_tpg_dynamic_sessions_show(struct config_item *item,
char *page)
{
return target_show_dynamic_sessions(to_tpg(item), page);
}
CONFIGFS_ATTR_RO(lio_target_tpg_, dynamic_sessions);
static struct configfs_attribute *lio_target_tpg_attrs[] = {
&lio_target_tpg_attr_dynamic_sessions,
NULL,
};
/* End items for lio_target_tpg_cit */
/* Start items for lio_target_tiqn_cit */
static struct se_portal_group *lio_target_tiqn_addtpg(struct se_wwn *wwn,
const char *name)
{
struct iscsi_portal_group *tpg;
struct iscsi_tiqn *tiqn;
char *tpgt_str;
int ret;
u16 tpgt;
tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
/*
* Only tpgt_# directory groups can be created below
* target/iscsi/iqn.superturodiskarry/
*/
tpgt_str = strstr(name, "tpgt_");
if (!tpgt_str) {
pr_err("Unable to locate \"tpgt_#\" directory"
" group\n");
return NULL;
}
tpgt_str += 5; /* Skip ahead of "tpgt_" */
ret = kstrtou16(tpgt_str, 0, &tpgt);
if (ret)
return NULL;
tpg = iscsit_alloc_portal_group(tiqn, tpgt);
if (!tpg)
return NULL;
ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI);
if (ret < 0)
goto free_out;
ret = iscsit_tpg_add_portal_group(tiqn, tpg);
if (ret != 0)
goto out;
pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated TPG: %s\n",
name);
return &tpg->tpg_se_tpg;
out:
core_tpg_deregister(&tpg->tpg_se_tpg);
free_out:
kfree(tpg);
return NULL;
}
static int lio_target_tiqn_enabletpg(struct se_portal_group *se_tpg,
bool enable)
{
struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg);
int ret;
ret = iscsit_get_tpg(tpg);
if (ret < 0)
return -EINVAL;
if (enable) {
ret = iscsit_tpg_enable_portal_group(tpg);
if (ret < 0)
goto out;
} else {
/*
* iscsit_tpg_disable_portal_group() assumes force=1
*/
ret = iscsit_tpg_disable_portal_group(tpg, 1);
if (ret < 0)
goto out;
}
iscsit_put_tpg(tpg);
return 0;
out:
iscsit_put_tpg(tpg);
return -EINVAL;
}
static void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg;
struct iscsi_tiqn *tiqn;
tpg = to_iscsi_tpg(se_tpg);
tiqn = tpg->tpg_tiqn;
/*
* iscsit_tpg_del_portal_group() assumes force=1
*/
pr_debug("LIO_Target_ConfigFS: DEREGISTER -> Releasing TPG\n");
iscsit_tpg_del_portal_group(tiqn, tpg, 1);
}
/* End items for lio_target_tiqn_cit */
/* Start LIO-Target TIQN struct config_item lio_target_cit */
static ssize_t lio_target_wwn_lio_version_show(struct config_item *item,
char *page)
{
return sysfs_emit(page, "Datera Inc. iSCSI Target %s\n", ISCSIT_VERSION);
}
CONFIGFS_ATTR_RO(lio_target_wwn_, lio_version);
static ssize_t lio_target_wwn_cpus_allowed_list_show(
struct config_item *item, char *page)
{
return sysfs_emit(page, "%*pbl\n",
cpumask_pr_args(iscsit_global->allowed_cpumask));
}
static ssize_t lio_target_wwn_cpus_allowed_list_store(
struct config_item *item, const char *page, size_t count)
{
int ret = -ENOMEM;
char *orig;
cpumask_var_t new_allowed_cpumask;
if (!zalloc_cpumask_var(&new_allowed_cpumask, GFP_KERNEL))
goto out;
orig = kstrdup(page, GFP_KERNEL);
if (!orig)
goto out_free_cpumask;
ret = cpulist_parse(orig, new_allowed_cpumask);
if (!ret)
cpumask_copy(iscsit_global->allowed_cpumask,
new_allowed_cpumask);
kfree(orig);
out_free_cpumask:
free_cpumask_var(new_allowed_cpumask);
out:
return ret ? ret : count;
}
CONFIGFS_ATTR(lio_target_wwn_, cpus_allowed_list);
static struct configfs_attribute *lio_target_wwn_attrs[] = {
&lio_target_wwn_attr_lio_version,
&lio_target_wwn_attr_cpus_allowed_list,
NULL,
};
static struct se_wwn *lio_target_call_coreaddtiqn(
struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
struct iscsi_tiqn *tiqn;
tiqn = iscsit_add_tiqn((unsigned char *)name);
if (IS_ERR(tiqn))
return ERR_CAST(tiqn);
pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
" %s\n", name);
return &tiqn->tiqn_wwn;
}
static void lio_target_add_wwn_groups(struct se_wwn *wwn)
{
struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group,
"iscsi_instance", &iscsi_stat_instance_cit);
configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_instance_group,
&tiqn->tiqn_wwn.fabric_stat_group);
config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_sess_err_group,
"iscsi_sess_err", &iscsi_stat_sess_err_cit);
configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_sess_err_group,
&tiqn->tiqn_wwn.fabric_stat_group);
config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_tgt_attr_group,
"iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit);
configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_tgt_attr_group,
&tiqn->tiqn_wwn.fabric_stat_group);
config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_login_stats_group,
"iscsi_login_stats", &iscsi_stat_login_cit);
configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_login_stats_group,
&tiqn->tiqn_wwn.fabric_stat_group);
config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group,
"iscsi_logout_stats", &iscsi_stat_logout_cit);
configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group,
&tiqn->tiqn_wwn.fabric_stat_group);
}
static void lio_target_call_coredeltiqn(
struct se_wwn *wwn)
{
struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n",
tiqn->tiqn);
iscsit_del_tiqn(tiqn);
}
/* End LIO-Target TIQN struct config_item lio_target_cit */
/* Start lio_target_discovery_auth_cit */
#define DEF_DISC_AUTH_STR(name, flags) \
__DEF_NACL_AUTH_STR(disc, name, flags) \
static ssize_t iscsi_disc_##name##_show(struct config_item *item, char *page) \
{ \
return __iscsi_disc_##name##_show(&iscsit_global->discovery_acl,\
page); \
} \
static ssize_t iscsi_disc_##name##_store(struct config_item *item, \
const char *page, size_t count) \
{ \
return __iscsi_disc_##name##_store(&iscsit_global->discovery_acl, \
page, count); \
\
} \
CONFIGFS_ATTR(iscsi_disc_, name)
DEF_DISC_AUTH_STR(userid, NAF_USERID_SET);
DEF_DISC_AUTH_STR(password, NAF_PASSWORD_SET);
DEF_DISC_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
DEF_DISC_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
#define DEF_DISC_AUTH_INT(name) \
__DEF_NACL_AUTH_INT(disc, name) \
static ssize_t iscsi_disc_##name##_show(struct config_item *item, char *page) \
{ \
return __iscsi_disc_##name##_show(&iscsit_global->discovery_acl, \
page); \
} \
CONFIGFS_ATTR_RO(iscsi_disc_, name)
DEF_DISC_AUTH_INT(authenticate_target);
static ssize_t iscsi_disc_enforce_discovery_auth_show(struct config_item *item,
char *page)
{
struct iscsi_node_auth *discovery_auth = &iscsit_global->discovery_acl.node_auth;
return sysfs_emit(page, "%d\n", discovery_auth->enforce_discovery_auth);
}
static ssize_t iscsi_disc_enforce_discovery_auth_store(struct config_item *item,
const char *page, size_t count)
{
struct iscsi_param *param;
struct iscsi_portal_group *discovery_tpg = iscsit_global->discovery_tpg;
u32 op;
int err;
err = kstrtou32(page, 0, &op);
if (err)
return -EINVAL;
if ((op != 1) && (op != 0)) {
pr_err("Illegal value for enforce_discovery_auth:"
" %u\n", op);
return -EINVAL;
}
if (!discovery_tpg) {
pr_err("iscsit_global->discovery_tpg is NULL\n");
return -EINVAL;
}
param = iscsi_find_param_from_key(AUTHMETHOD,
discovery_tpg->param_list);
if (!param)
return -EINVAL;
if (op) {
/*
* Reset the AuthMethod key to CHAP.
*/
if (iscsi_update_param_value(param, CHAP) < 0)
return -EINVAL;
discovery_tpg->tpg_attrib.authentication = 1;
iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 1;
pr_debug("LIO-CORE[0] Successfully enabled"
" authentication enforcement for iSCSI"
" Discovery TPG\n");
} else {
/*
* Reset the AuthMethod key to CHAP,None
*/
if (iscsi_update_param_value(param, "CHAP,None") < 0)
return -EINVAL;
discovery_tpg->tpg_attrib.authentication = 0;
iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 0;
pr_debug("LIO-CORE[0] Successfully disabled"
" authentication enforcement for iSCSI"
" Discovery TPG\n");
}
return count;
}
CONFIGFS_ATTR(iscsi_disc_, enforce_discovery_auth);
static struct configfs_attribute *lio_target_discovery_auth_attrs[] = {
&iscsi_disc_attr_userid,
&iscsi_disc_attr_password,
&iscsi_disc_attr_authenticate_target,
&iscsi_disc_attr_userid_mutual,
&iscsi_disc_attr_password_mutual,
&iscsi_disc_attr_enforce_discovery_auth,
NULL,
};
/* End lio_target_discovery_auth_cit */
/* Start functions for target_core_fabric_ops */
static int iscsi_get_cmd_state(struct se_cmd *se_cmd)
{
struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd);
return cmd->i_state;
}
static u32 lio_sess_get_index(struct se_session *se_sess)
{
struct iscsit_session *sess = se_sess->fabric_sess_ptr;
return sess->session_index;
}
static u32 lio_sess_get_initiator_sid(
struct se_session *se_sess,
unsigned char *buf,
u32 size)
{
struct iscsit_session *sess = se_sess->fabric_sess_ptr;
/*
* iSCSI Initiator Session Identifier from RFC-3720.
*/
return snprintf(buf, size, "%6phN", sess->isid);
}
static int lio_queue_data_in(struct se_cmd *se_cmd)
{
struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd);
struct iscsit_conn *conn = cmd->conn;
cmd->i_state = ISTATE_SEND_DATAIN;
return conn->conn_transport->iscsit_queue_data_in(conn, cmd);
}
static int lio_write_pending(struct se_cmd *se_cmd)
{
struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd);
struct iscsit_conn *conn = cmd->conn;
if (!cmd->immediate_data && !cmd->unsolicited_data)
return conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
return 0;
}
static int lio_queue_status(struct se_cmd *se_cmd)
{
struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd);
struct iscsit_conn *conn = cmd->conn;
cmd->i_state = ISTATE_SEND_STATUS;
if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
return iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
}
return conn->conn_transport->iscsit_queue_status(conn, cmd);
}
static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
{
struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd);
cmd->i_state = ISTATE_SEND_TASKMGTRSP;
iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
}
static void lio_aborted_task(struct se_cmd *se_cmd)
{
struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd);
cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd);
}
static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
return to_iscsi_tpg(se_tpg)->tpg_tiqn->tiqn;
}
static u16 lio_tpg_get_tag(struct se_portal_group *se_tpg)
{
return to_iscsi_tpg(se_tpg)->tpgt;
}
static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
{
return to_iscsi_tpg(se_tpg)->tpg_attrib.default_cmdsn_depth;
}
static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
{
return to_iscsi_tpg(se_tpg)->tpg_attrib.generate_node_acls;
}
static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
return to_iscsi_tpg(se_tpg)->tpg_attrib.cache_dynamic_acls;
}
static int lio_tpg_check_demo_mode_write_protect(
struct se_portal_group *se_tpg)
{
return to_iscsi_tpg(se_tpg)->tpg_attrib.demo_mode_write_protect;
}
static int lio_tpg_check_prod_mode_write_protect(
struct se_portal_group *se_tpg)
{
return to_iscsi_tpg(se_tpg)->tpg_attrib.prod_mode_write_protect;
}
static int lio_tpg_check_prot_fabric_only(
struct se_portal_group *se_tpg)
{
/*
* Only report fabric_prot_type if t10_pi has also been enabled
* for incoming ib_isert sessions.
*/
if (!to_iscsi_tpg(se_tpg)->tpg_attrib.t10_pi)
return 0;
return to_iscsi_tpg(se_tpg)->tpg_attrib.fabric_prot_type;
}
/*
* This function calls iscsit_inc_session_usage_count() on the
* struct iscsit_session in question.
*/
static void lio_tpg_close_session(struct se_session *se_sess)
{
struct iscsit_session *sess = se_sess->fabric_sess_ptr;
struct se_portal_group *se_tpg = &sess->tpg->tpg_se_tpg;
spin_lock_bh(&se_tpg->session_lock);
spin_lock(&sess->conn_lock);
if (atomic_read(&sess->session_fall_back_to_erl0) ||
atomic_read(&sess->session_logout) ||
atomic_read(&sess->session_close) ||
(sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess->conn_lock);
spin_unlock_bh(&se_tpg->session_lock);
return;
}
iscsit_inc_session_usage_count(sess);
atomic_set(&sess->session_reinstatement, 1);
atomic_set(&sess->session_fall_back_to_erl0, 1);
atomic_set(&sess->session_close, 1);
spin_unlock(&sess->conn_lock);
iscsit_stop_time2retain_timer(sess);
spin_unlock_bh(&se_tpg->session_lock);
iscsit_stop_session(sess, 1, 1);
iscsit_dec_session_usage_count(sess);
}
static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
return to_iscsi_tpg(se_tpg)->tpg_tiqn->tiqn_index;
}
static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
{
struct iscsi_node_acl *acl = to_iscsi_nacl(se_acl);
struct se_portal_group *se_tpg = se_acl->se_tpg;
struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg);
acl->node_attrib.nacl = acl;
iscsit_set_default_node_attribues(acl, tpg);
}
static int lio_check_stop_free(struct se_cmd *se_cmd)
{
return target_put_sess_cmd(se_cmd);
}
static void lio_release_cmd(struct se_cmd *se_cmd)
{
struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd);
pr_debug("Entering lio_release_cmd for se_cmd: %p\n", se_cmd);
iscsit_release_cmd(cmd);
}
const struct target_core_fabric_ops iscsi_ops = {
.module = THIS_MODULE,
.fabric_alias = "iscsi",
.fabric_name = "iSCSI",
.node_acl_size = sizeof(struct iscsi_node_acl),
.tpg_get_wwn = lio_tpg_get_endpoint_wwn,
.tpg_get_tag = lio_tpg_get_tag,
.tpg_get_default_depth = lio_tpg_get_default_depth,
.tpg_check_demo_mode = lio_tpg_check_demo_mode,
.tpg_check_demo_mode_cache = lio_tpg_check_demo_mode_cache,
.tpg_check_demo_mode_write_protect =
lio_tpg_check_demo_mode_write_protect,
.tpg_check_prod_mode_write_protect =
lio_tpg_check_prod_mode_write_protect,
.tpg_check_prot_fabric_only = &lio_tpg_check_prot_fabric_only,
.tpg_get_inst_index = lio_tpg_get_inst_index,
.check_stop_free = lio_check_stop_free,
.release_cmd = lio_release_cmd,
.close_session = lio_tpg_close_session,
.sess_get_index = lio_sess_get_index,
.sess_get_initiator_sid = lio_sess_get_initiator_sid,
.write_pending = lio_write_pending,
.set_default_node_attributes = lio_set_default_node_attributes,
.get_cmd_state = iscsi_get_cmd_state,
.queue_data_in = lio_queue_data_in,
.queue_status = lio_queue_status,
.queue_tm_rsp = lio_queue_tm_rsp,
.aborted_task = lio_aborted_task,
.fabric_make_wwn = lio_target_call_coreaddtiqn,
.fabric_drop_wwn = lio_target_call_coredeltiqn,
.add_wwn_groups = lio_target_add_wwn_groups,
.fabric_make_tpg = lio_target_tiqn_addtpg,
.fabric_enable_tpg = lio_target_tiqn_enabletpg,
.fabric_drop_tpg = lio_target_tiqn_deltpg,
.fabric_make_np = lio_target_call_addnptotpg,
.fabric_drop_np = lio_target_call_delnpfromtpg,
.fabric_init_nodeacl = lio_target_init_nodeacl,
.tfc_discovery_attrs = lio_target_discovery_auth_attrs,
.tfc_wwn_attrs = lio_target_wwn_attrs,
.tfc_tpg_base_attrs = lio_target_tpg_attrs,
.tfc_tpg_attrib_attrs = lio_target_tpg_attrib_attrs,
.tfc_tpg_auth_attrs = lio_target_tpg_auth_attrs,
.tfc_tpg_param_attrs = lio_target_tpg_param_attrs,
.tfc_tpg_np_base_attrs = lio_target_portal_attrs,
.tfc_tpg_nacl_base_attrs = lio_target_initiator_attrs,
.tfc_tpg_nacl_attrib_attrs = lio_target_nacl_attrib_attrs,
.tfc_tpg_nacl_auth_attrs = lio_target_nacl_auth_attrs,
.tfc_tpg_nacl_param_attrs = lio_target_nacl_param_attrs,
.write_pending_must_be_called = true,
};
|
linux-master
|
drivers/target/iscsi/iscsi_target_configfs.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains main functions related to iSCSI DataSequenceInOrder=No
* and DataPDUInOrder=No.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <linux/slab.h>
#include <linux/random.h>
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_util.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_seq_pdu_list.h"
#ifdef DEBUG
static void iscsit_dump_seq_list(struct iscsit_cmd *cmd)
{
int i;
struct iscsi_seq *seq;
pr_debug("Dumping Sequence List for ITT: 0x%08x:\n",
cmd->init_task_tag);
for (i = 0; i < cmd->seq_count; i++) {
seq = &cmd->seq_list[i];
pr_debug("i: %d, pdu_start: %d, pdu_count: %d,"
" offset: %d, xfer_len: %d, seq_send_order: %d,"
" seq_no: %d\n", i, seq->pdu_start, seq->pdu_count,
seq->offset, seq->xfer_len, seq->seq_send_order,
seq->seq_no);
}
}
static void iscsit_dump_pdu_list(struct iscsit_cmd *cmd)
{
int i;
struct iscsi_pdu *pdu;
pr_debug("Dumping PDU List for ITT: 0x%08x:\n",
cmd->init_task_tag);
for (i = 0; i < cmd->pdu_count; i++) {
pdu = &cmd->pdu_list[i];
pr_debug("i: %d, offset: %d, length: %d,"
" pdu_send_order: %d, seq_no: %d\n", i, pdu->offset,
pdu->length, pdu->pdu_send_order, pdu->seq_no);
}
}
#else
static void iscsit_dump_seq_list(struct iscsit_cmd *cmd) {}
static void iscsit_dump_pdu_list(struct iscsit_cmd *cmd) {}
#endif
static void iscsit_ordered_seq_lists(
struct iscsit_cmd *cmd,
u8 type)
{
u32 i, seq_count = 0;
for (i = 0; i < cmd->seq_count; i++) {
if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
continue;
cmd->seq_list[i].seq_send_order = seq_count++;
}
}
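/*
 * Assign ascending pdu_send_order values within each sequence,
 * restarting the counter each time a new seq_no is encountered.
 */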
static void iscsit_ordered_pdu_lists(
struct iscsit_cmd *cmd,
u8 type)
{
u32 i, pdu_send_order = 0, seq_no = 0;
for (i = 0; i < cmd->pdu_count; i++) {
redo:
if (cmd->pdu_list[i].seq_no == seq_no) {
cmd->pdu_list[i].pdu_send_order = pdu_send_order++;
continue;
}
seq_no++;
pdu_send_order = 0;
goto redo;
}
}
/*
* Generate count random values into array.
* Use 0x80000000 to mark values already generated in array[].
*/
static void iscsit_create_random_array(u32 *array, u32 count)
{
int i, j, k;
if (count == 1) {
array[0] = 0;
return;
}
for (i = 0; i < count; i++) {
redo:
get_random_bytes(&j, sizeof(u32));
j = (1 + (int) (9999 + 1) - j) % count;
for (k = 0; k < i + 1; k++) {
j |= 0x80000000;
if ((array[k] & 0x80000000) && (array[k] == j))
goto redo;
}
array[i] = j;
}
for (i = 0; i < count; i++)
array[i] &= ~0x80000000;
}
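/*
 * Randomize pdu_send_order independently for each group of PDUs that
 * shares a seq_no, using a freshly generated random permutation per
 * sequence.
 */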
static int iscsit_randomize_pdu_lists(
struct iscsit_cmd *cmd,
u8 type)
{
int i = 0;
u32 *array, pdu_count, seq_count = 0, seq_no = 0, seq_offset = 0;
for (pdu_count = 0; pdu_count < cmd->pdu_count; pdu_count++) {
redo:
if (cmd->pdu_list[pdu_count].seq_no == seq_no) {
seq_count++;
continue;
}
array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL);
if (!array) {
pr_err("Unable to allocate memory"
" for random array.\n");
return -ENOMEM;
}
iscsit_create_random_array(array, seq_count);
for (i = 0; i < seq_count; i++)
cmd->pdu_list[seq_offset+i].pdu_send_order = array[i];
kfree(array);
seq_offset += seq_count;
seq_count = 0;
seq_no++;
goto redo;
}
if (seq_count) {
array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL);
if (!array) {
pr_err("Unable to allocate memory for"
" random array.\n");
return -ENOMEM;
}
iscsit_create_random_array(array, seq_count);
for (i = 0; i < seq_count; i++)
cmd->pdu_list[seq_offset+i].pdu_send_order = array[i];
kfree(array);
}
return 0;
}
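/*
 * Randomize seq_send_order for the SEQTYPE_NORMAL sequences only;
 * immediate and unsolicited sequences are excluded from the shuffle.
 */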
static int iscsit_randomize_seq_lists(
struct iscsit_cmd *cmd,
u8 type)
{
int i, j = 0;
u32 *array, seq_count = cmd->seq_count;
if ((type == PDULIST_IMMEDIATE) || (type == PDULIST_UNSOLICITED))
seq_count--;
else if (type == PDULIST_IMMEDIATE_AND_UNSOLICITED)
seq_count -= 2;
if (!seq_count)
return 0;
array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL);
if (!array) {
pr_err("Unable to allocate memory for random array.\n");
return -ENOMEM;
}
iscsit_create_random_array(array, seq_count);
for (i = 0; i < cmd->seq_count; i++) {
if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
continue;
cmd->seq_list[i].seq_send_order = array[j++];
}
kfree(array);
return 0;
}
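/*
 * Dry run of the list building loop below: walk the expected data
 * transfer and count how many sequence and PDU descriptors will be
 * needed, so the caller can size the allocations up front.
 */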
static void iscsit_determine_counts_for_list(
struct iscsit_cmd *cmd,
struct iscsi_build_list *bl,
u32 *seq_count,
u32 *pdu_count)
{
int check_immediate = 0;
u32 burstlength = 0, offset = 0;
u32 unsolicited_data_length = 0;
u32 mdsl;
struct iscsit_conn *conn = cmd->conn;
if (cmd->se_cmd.data_direction == DMA_TO_DEVICE)
mdsl = cmd->conn->conn_ops->MaxXmitDataSegmentLength;
else
mdsl = cmd->conn->conn_ops->MaxRecvDataSegmentLength;
if ((bl->type == PDULIST_IMMEDIATE) ||
(bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
check_immediate = 1;
if ((bl->type == PDULIST_UNSOLICITED) ||
(bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
unsolicited_data_length = min(cmd->se_cmd.data_length,
conn->sess->sess_ops->FirstBurstLength);
while (offset < cmd->se_cmd.data_length) {
*pdu_count += 1;
if (check_immediate) {
check_immediate = 0;
offset += bl->immediate_data_length;
*seq_count += 1;
if (unsolicited_data_length)
unsolicited_data_length -=
bl->immediate_data_length;
continue;
}
if (unsolicited_data_length > 0) {
if ((offset + mdsl) >= cmd->se_cmd.data_length) {
unsolicited_data_length -=
(cmd->se_cmd.data_length - offset);
offset += (cmd->se_cmd.data_length - offset);
continue;
}
if ((offset + mdsl)
>= conn->sess->sess_ops->FirstBurstLength) {
unsolicited_data_length -=
(conn->sess->sess_ops->FirstBurstLength -
offset);
offset += (conn->sess->sess_ops->FirstBurstLength -
offset);
burstlength = 0;
*seq_count += 1;
continue;
}
offset += mdsl;
unsolicited_data_length -= mdsl;
continue;
}
if ((offset + mdsl) >= cmd->se_cmd.data_length) {
offset += (cmd->se_cmd.data_length - offset);
continue;
}
if ((burstlength + mdsl) >=
conn->sess->sess_ops->MaxBurstLength) {
offset += (conn->sess->sess_ops->MaxBurstLength -
burstlength);
burstlength = 0;
*seq_count += 1;
continue;
}
burstlength += mdsl;
offset += mdsl;
}
}
/*
* Builds PDU and/or Sequence list, called while DataSequenceInOrder=No
* or DataPDUInOrder=No.
*/
static int iscsit_do_build_pdu_and_seq_lists(
struct iscsit_cmd *cmd,
struct iscsi_build_list *bl)
{
int check_immediate = 0, datapduinorder, datasequenceinorder;
u32 burstlength = 0, offset = 0, i = 0, mdsl;
u32 pdu_count = 0, seq_no = 0, unsolicited_data_length = 0;
struct iscsit_conn *conn = cmd->conn;
struct iscsi_pdu *pdu = cmd->pdu_list;
struct iscsi_seq *seq = cmd->seq_list;
if (cmd->se_cmd.data_direction == DMA_TO_DEVICE)
mdsl = cmd->conn->conn_ops->MaxXmitDataSegmentLength;
else
mdsl = cmd->conn->conn_ops->MaxRecvDataSegmentLength;
datapduinorder = conn->sess->sess_ops->DataPDUInOrder;
datasequenceinorder = conn->sess->sess_ops->DataSequenceInOrder;
if ((bl->type == PDULIST_IMMEDIATE) ||
(bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
check_immediate = 1;
if ((bl->type == PDULIST_UNSOLICITED) ||
(bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
unsolicited_data_length = min(cmd->se_cmd.data_length,
conn->sess->sess_ops->FirstBurstLength);
while (offset < cmd->se_cmd.data_length) {
pdu_count++;
if (!datapduinorder) {
pdu[i].offset = offset;
pdu[i].seq_no = seq_no;
}
if (!datasequenceinorder && (pdu_count == 1)) {
seq[seq_no].pdu_start = i;
seq[seq_no].seq_no = seq_no;
seq[seq_no].offset = offset;
seq[seq_no].orig_offset = offset;
}
if (check_immediate) {
check_immediate = 0;
if (!datapduinorder) {
pdu[i].type = PDUTYPE_IMMEDIATE;
pdu[i++].length = bl->immediate_data_length;
}
if (!datasequenceinorder) {
seq[seq_no].type = SEQTYPE_IMMEDIATE;
seq[seq_no].pdu_count = 1;
seq[seq_no].xfer_len =
bl->immediate_data_length;
}
offset += bl->immediate_data_length;
pdu_count = 0;
seq_no++;
if (unsolicited_data_length)
unsolicited_data_length -=
bl->immediate_data_length;
continue;
}
if (unsolicited_data_length > 0) {
if ((offset + mdsl) >= cmd->se_cmd.data_length) {
if (!datapduinorder) {
pdu[i].type = PDUTYPE_UNSOLICITED;
pdu[i].length =
(cmd->se_cmd.data_length - offset);
}
if (!datasequenceinorder) {
seq[seq_no].type = SEQTYPE_UNSOLICITED;
seq[seq_no].pdu_count = pdu_count;
seq[seq_no].xfer_len = (burstlength +
(cmd->se_cmd.data_length - offset));
}
unsolicited_data_length -=
(cmd->se_cmd.data_length - offset);
offset += (cmd->se_cmd.data_length - offset);
continue;
}
if ((offset + mdsl) >=
conn->sess->sess_ops->FirstBurstLength) {
if (!datapduinorder) {
pdu[i].type = PDUTYPE_UNSOLICITED;
pdu[i++].length =
(conn->sess->sess_ops->FirstBurstLength -
offset);
}
if (!datasequenceinorder) {
seq[seq_no].type = SEQTYPE_UNSOLICITED;
seq[seq_no].pdu_count = pdu_count;
seq[seq_no].xfer_len = (burstlength +
(conn->sess->sess_ops->FirstBurstLength -
offset));
}
unsolicited_data_length -=
(conn->sess->sess_ops->FirstBurstLength -
offset);
offset += (conn->sess->sess_ops->FirstBurstLength -
offset);
burstlength = 0;
pdu_count = 0;
seq_no++;
continue;
}
if (!datapduinorder) {
pdu[i].type = PDUTYPE_UNSOLICITED;
pdu[i++].length = mdsl;
}
burstlength += mdsl;
offset += mdsl;
unsolicited_data_length -= mdsl;
continue;
}
if ((offset + mdsl) >= cmd->se_cmd.data_length) {
if (!datapduinorder) {
pdu[i].type = PDUTYPE_NORMAL;
pdu[i].length = (cmd->se_cmd.data_length - offset);
}
if (!datasequenceinorder) {
seq[seq_no].type = SEQTYPE_NORMAL;
seq[seq_no].pdu_count = pdu_count;
seq[seq_no].xfer_len = (burstlength +
(cmd->se_cmd.data_length - offset));
}
offset += (cmd->se_cmd.data_length - offset);
continue;
}
if ((burstlength + mdsl) >=
conn->sess->sess_ops->MaxBurstLength) {
if (!datapduinorder) {
pdu[i].type = PDUTYPE_NORMAL;
pdu[i++].length =
(conn->sess->sess_ops->MaxBurstLength -
burstlength);
}
if (!datasequenceinorder) {
seq[seq_no].type = SEQTYPE_NORMAL;
seq[seq_no].pdu_count = pdu_count;
seq[seq_no].xfer_len = (burstlength +
(conn->sess->sess_ops->MaxBurstLength -
burstlength));
}
offset += (conn->sess->sess_ops->MaxBurstLength -
burstlength);
burstlength = 0;
pdu_count = 0;
seq_no++;
continue;
}
if (!datapduinorder) {
pdu[i].type = PDUTYPE_NORMAL;
pdu[i++].length = mdsl;
}
burstlength += mdsl;
offset += mdsl;
}
if (!datasequenceinorder) {
if (bl->data_direction & ISCSI_PDU_WRITE) {
if (bl->randomize & RANDOM_R2T_OFFSETS) {
if (iscsit_randomize_seq_lists(cmd, bl->type)
< 0)
return -1;
} else
iscsit_ordered_seq_lists(cmd, bl->type);
} else if (bl->data_direction & ISCSI_PDU_READ) {
if (bl->randomize & RANDOM_DATAIN_SEQ_OFFSETS) {
if (iscsit_randomize_seq_lists(cmd, bl->type)
< 0)
return -1;
} else
iscsit_ordered_seq_lists(cmd, bl->type);
}
iscsit_dump_seq_list(cmd);
}
if (!datapduinorder) {
if (bl->data_direction & ISCSI_PDU_WRITE) {
if (bl->randomize & RANDOM_DATAOUT_PDU_OFFSETS) {
if (iscsit_randomize_pdu_lists(cmd, bl->type)
< 0)
return -1;
} else
iscsit_ordered_pdu_lists(cmd, bl->type);
} else if (bl->data_direction & ISCSI_PDU_READ) {
if (bl->randomize & RANDOM_DATAIN_PDU_OFFSETS) {
if (iscsit_randomize_pdu_lists(cmd, bl->type)
< 0)
return -1;
} else
iscsit_ordered_pdu_lists(cmd, bl->type);
}
iscsit_dump_pdu_list(cmd);
}
return 0;
}
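/*
 * Allocate cmd->seq_list and/or cmd->pdu_list according to the
 * negotiated DataSequenceInOrder and DataPDUInOrder settings, then
 * build the lists via iscsit_do_build_pdu_and_seq_lists().
 */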
int iscsit_build_pdu_and_seq_lists(
struct iscsit_cmd *cmd,
u32 immediate_data_length)
{
struct iscsi_build_list bl;
u32 pdu_count = 0, seq_count = 1;
struct iscsit_conn *conn = cmd->conn;
struct iscsi_pdu *pdu = NULL;
struct iscsi_seq *seq = NULL;
struct iscsit_session *sess = conn->sess;
struct iscsi_node_attrib *na;
/*
* Do nothing if no OOO shenanigans
*/
if (sess->sess_ops->DataSequenceInOrder &&
sess->sess_ops->DataPDUInOrder)
return 0;
if (cmd->data_direction == DMA_NONE)
return 0;
na = iscsit_tpg_get_node_attrib(sess);
memset(&bl, 0, sizeof(struct iscsi_build_list));
if (cmd->data_direction == DMA_FROM_DEVICE) {
bl.data_direction = ISCSI_PDU_READ;
bl.type = PDULIST_NORMAL;
if (na->random_datain_pdu_offsets)
bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
if (na->random_datain_seq_offsets)
bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
} else {
bl.data_direction = ISCSI_PDU_WRITE;
bl.immediate_data_length = immediate_data_length;
if (na->random_r2t_offsets)
bl.randomize |= RANDOM_R2T_OFFSETS;
if (!cmd->immediate_data && !cmd->unsolicited_data)
bl.type = PDULIST_NORMAL;
else if (cmd->immediate_data && !cmd->unsolicited_data)
bl.type = PDULIST_IMMEDIATE;
else if (!cmd->immediate_data && cmd->unsolicited_data)
bl.type = PDULIST_UNSOLICITED;
else if (cmd->immediate_data && cmd->unsolicited_data)
bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
}
iscsit_determine_counts_for_list(cmd, &bl, &seq_count, &pdu_count);
if (!conn->sess->sess_ops->DataSequenceInOrder) {
seq = kcalloc(seq_count, sizeof(struct iscsi_seq), GFP_ATOMIC);
if (!seq) {
pr_err("Unable to allocate struct iscsi_seq list\n");
return -ENOMEM;
}
cmd->seq_list = seq;
cmd->seq_count = seq_count;
}
if (!conn->sess->sess_ops->DataPDUInOrder) {
pdu = kcalloc(pdu_count, sizeof(struct iscsi_pdu), GFP_ATOMIC);
if (!pdu) {
pr_err("Unable to allocate struct iscsi_pdu list.\n");
kfree(seq);
return -ENOMEM;
}
cmd->pdu_list = pdu;
cmd->pdu_count = pdu_count;
}
return iscsit_do_build_pdu_and_seq_lists(cmd, &bl);
}
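/*
 * Locate the struct iscsi_pdu in cmd->pdu_list matching an exact
 * offset/length pair.
 */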
struct iscsi_pdu *iscsit_get_pdu_holder(
struct iscsit_cmd *cmd,
u32 offset,
u32 length)
{
u32 i;
struct iscsi_pdu *pdu = NULL;
if (!cmd->pdu_list) {
pr_err("struct iscsit_cmd->pdu_list is NULL!\n");
return NULL;
}
pdu = &cmd->pdu_list[0];
for (i = 0; i < cmd->pdu_count; i++)
if ((pdu[i].offset == offset) && (pdu[i].length == length))
return &pdu[i];
pr_err("Unable to locate PDU holder for ITT: 0x%08x, Offset:"
" %u, Length: %u\n", cmd->init_task_tag, offset, length);
return NULL;
}
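/*
 * Return the next PDU descriptor to send: located by
 * cmd->pdu_send_order within the current sequence window when
 * DataSequenceInOrder=Yes, otherwise by seq->pdu_send_order within
 * the passed struct iscsi_seq.
 */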
struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(
struct iscsit_cmd *cmd,
struct iscsi_seq *seq)
{
u32 i;
struct iscsit_conn *conn = cmd->conn;
struct iscsi_pdu *pdu = NULL;
if (!cmd->pdu_list) {
pr_err("struct iscsit_cmd->pdu_list is NULL!\n");
return NULL;
}
if (conn->sess->sess_ops->DataSequenceInOrder) {
redo:
pdu = &cmd->pdu_list[cmd->pdu_start];
for (i = 0; pdu[i].seq_no != cmd->seq_no; i++) {
pr_debug("pdu[i].seq_no: %d, pdu[i].pdu"
"_send_order: %d, pdu[i].offset: %d,"
" pdu[i].length: %d\n", pdu[i].seq_no,
pdu[i].pdu_send_order, pdu[i].offset,
pdu[i].length);
if (pdu[i].pdu_send_order == cmd->pdu_send_order) {
cmd->pdu_send_order++;
return &pdu[i];
}
}
cmd->pdu_start += cmd->pdu_send_order;
cmd->pdu_send_order = 0;
cmd->seq_no++;
if (cmd->pdu_start < cmd->pdu_count)
goto redo;
pr_err("Command ITT: 0x%08x unable to locate"
" struct iscsi_pdu for cmd->pdu_send_order: %u.\n",
cmd->init_task_tag, cmd->pdu_send_order);
return NULL;
} else {
if (!seq) {
pr_err("struct iscsi_seq is NULL!\n");
return NULL;
}
pr_debug("seq->pdu_start: %d, seq->pdu_count: %d,"
" seq->seq_no: %d\n", seq->pdu_start, seq->pdu_count,
seq->seq_no);
pdu = &cmd->pdu_list[seq->pdu_start];
if (seq->pdu_send_order == seq->pdu_count) {
pr_err("Command ITT: 0x%08x seq->pdu_send"
"_order: %u equals seq->pdu_count: %u\n",
cmd->init_task_tag, seq->pdu_send_order,
seq->pdu_count);
return NULL;
}
for (i = 0; i < seq->pdu_count; i++) {
if (pdu[i].pdu_send_order == seq->pdu_send_order) {
seq->pdu_send_order++;
return &pdu[i];
}
}
pr_err("Command ITT: 0x%08x unable to locate iscsi"
"_pdu_t for seq->pdu_send_order: %u.\n",
cmd->init_task_tag, seq->pdu_send_order);
return NULL;
}
return NULL;
}
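/*
 * Return the first struct iscsi_seq whose data range
 * (orig_offset + xfer_len) reaches at least offset + length.
 */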
struct iscsi_seq *iscsit_get_seq_holder(
struct iscsit_cmd *cmd,
u32 offset,
u32 length)
{
u32 i;
if (!cmd->seq_list) {
pr_err("struct iscsit_cmd->seq_list is NULL!\n");
return NULL;
}
for (i = 0; i < cmd->seq_count; i++) {
pr_debug("seq_list[i].orig_offset: %d, seq_list[i]."
"xfer_len: %d, seq_list[i].seq_no %u\n",
cmd->seq_list[i].orig_offset, cmd->seq_list[i].xfer_len,
cmd->seq_list[i].seq_no);
if ((cmd->seq_list[i].orig_offset +
cmd->seq_list[i].xfer_len) >=
(offset + length))
return &cmd->seq_list[i];
}
pr_err("Unable to locate Sequence holder for ITT: 0x%08x,"
" Offset: %u, Length: %u\n", cmd->init_task_tag, offset,
length);
return NULL;
}
|
linux-master
|
drivers/target/iscsi/iscsi_target_seq_pdu_list.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains main functions related to iSCSI Parameter negotiation.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <linux/ctype.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <net/sock.h>
#include <trace/events/sock.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_login.h"
#include "iscsi_target_nego.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
#include "iscsi_target_auth.h"
#define MAX_LOGIN_PDUS 7
void convert_null_to_semi(char *buf, int len)
{
int i;
for (i = 0; i < len; i++)
if (buf[i] == '\0')
buf[i] = ';';
}
static int strlen_semi(char *buf)
{
int i = 0;
while (buf[i] != '\0') {
if (buf[i] == ';')
return i;
i++;
}
return -1;
}
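/*
 * Copy the value of "pattern" from a semicolon-delimited key=value
 * buffer into out_buf, recording whether it was encoded as hex (0x),
 * base64 (0b) or decimal.
 */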
int extract_param(
const char *in_buf,
const char *pattern,
unsigned int max_length,
char *out_buf,
unsigned char *type)
{
char *ptr;
int len;
if (!in_buf || !pattern || !out_buf || !type)
return -EINVAL;
ptr = strstr(in_buf, pattern);
if (!ptr)
return -ENOENT;
ptr = strstr(ptr, "=");
if (!ptr)
return -EINVAL;
ptr += 1;
if (*ptr == '0' && (*(ptr+1) == 'x' || *(ptr+1) == 'X')) {
ptr += 2; /* skip 0x */
*type = HEX;
} else if (*ptr == '0' && (*(ptr+1) == 'b' || *(ptr+1) == 'B')) {
ptr += 2; /* skip 0b */
*type = BASE64;
} else
*type = DECIMAL;
len = strlen_semi(ptr);
if (len < 0)
return -EINVAL;
if (len >= max_length) {
pr_err("Length of input: %d exceeds max_length:"
" %d\n", len, max_length);
return -EINVAL;
}
memcpy(out_buf, ptr, len);
out_buf[len] = '\0';
return 0;
}
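/*
 * Select the node_auth entry used for CHAP: the global discovery ACL
 * for discovery sessions, the TPG demo auth for dynamically generated
 * ACLs, or the explicit per-NodeACL entry otherwise.
 */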
static struct iscsi_node_auth *iscsi_get_node_auth(struct iscsit_conn *conn)
{
struct iscsi_portal_group *tpg;
struct iscsi_node_acl *nacl;
struct se_node_acl *se_nacl;
if (conn->sess->sess_ops->SessionType)
return &iscsit_global->discovery_acl.node_auth;
se_nacl = conn->sess->se_sess->se_node_acl;
if (!se_nacl) {
pr_err("Unable to locate struct se_node_acl for CHAP auth\n");
return NULL;
}
if (se_nacl->dynamic_node_acl) {
tpg = to_iscsi_tpg(se_nacl->se_tpg);
return &tpg->tpg_demo_auth;
}
nacl = to_iscsi_nacl(se_nacl);
return &nacl->node_auth;
}
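/*
 * Dispatch the security-phase payload to the configured auth method.
 * The caller treats a return of 0 as "continue the exchange", 1 as
 * "authentication complete" (always the case for AuthMethod=None),
 * and any other value as a failure.
 */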
static u32 iscsi_handle_authentication(
struct iscsit_conn *conn,
char *in_buf,
char *out_buf,
int in_length,
int *out_length,
unsigned char *authtype)
{
struct iscsi_node_auth *auth;
auth = iscsi_get_node_auth(conn);
if (!auth)
return -1;
if (strstr("CHAP", authtype))
strcpy(conn->sess->auth_type, "CHAP");
else
strcpy(conn->sess->auth_type, NONE);
if (strstr("None", authtype))
return 1;
else if (strstr("CHAP", authtype))
return chap_main_loop(conn, auth, in_buf, out_buf,
&in_length, out_length);
/* SRP, SPKM1, SPKM2 and KRB5 are unsupported */
return 2;
}
static void iscsi_remove_failed_auth_entry(struct iscsit_conn *conn)
{
kfree(conn->auth_protocol);
}
int iscsi_target_check_login_request(
struct iscsit_conn *conn,
struct iscsi_login *login)
{
int req_csg, req_nsg;
u32 payload_length;
struct iscsi_login_req *login_req;
login_req = (struct iscsi_login_req *) login->req;
payload_length = ntoh24(login_req->dlength);
switch (login_req->opcode & ISCSI_OPCODE_MASK) {
case ISCSI_OP_LOGIN:
break;
default:
pr_err("Received unknown opcode 0x%02x.\n",
login_req->opcode & ISCSI_OPCODE_MASK);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_INIT_ERR);
return -1;
}
if ((login_req->flags & ISCSI_FLAG_LOGIN_CONTINUE) &&
(login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
pr_err("Login request has both ISCSI_FLAG_LOGIN_CONTINUE"
" and ISCSI_FLAG_LOGIN_TRANSIT set, protocol error.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_INIT_ERR);
return -1;
}
req_csg = ISCSI_LOGIN_CURRENT_STAGE(login_req->flags);
req_nsg = ISCSI_LOGIN_NEXT_STAGE(login_req->flags);
if (req_csg != login->current_stage) {
pr_err("Initiator unexpectedly changed login stage"
" from %d to %d, login failed.\n", login->current_stage,
req_csg);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_INIT_ERR);
return -1;
}
if ((req_nsg == 2) || (req_csg >= 2) ||
((login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT) &&
(req_nsg <= req_csg))) {
pr_err("Illegal login_req->flags Combination, CSG: %d,"
" NSG: %d, ISCSI_FLAG_LOGIN_TRANSIT: %d.\n", req_csg,
req_nsg, (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT));
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_INIT_ERR);
return -1;
}
if ((login_req->max_version != login->version_max) ||
(login_req->min_version != login->version_min)) {
pr_err("Login request changed Version Max/Nin"
" unexpectedly to 0x%02x/0x%02x, protocol error\n",
login_req->max_version, login_req->min_version);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_INIT_ERR);
return -1;
}
if (memcmp(login_req->isid, login->isid, 6) != 0) {
pr_err("Login request changed ISID unexpectedly,"
" protocol error.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_INIT_ERR);
return -1;
}
if (login_req->itt != login->init_task_tag) {
pr_err("Login request changed ITT unexpectedly to"
" 0x%08x, protocol error.\n", login_req->itt);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_INIT_ERR);
return -1;
}
if (payload_length > MAX_KEY_VALUE_PAIRS) {
pr_err("Login request payload exceeds default"
" MaxRecvDataSegmentLength: %u, protocol error.\n",
MAX_KEY_VALUE_PAIRS);
return -1;
}
return 0;
}
EXPORT_SYMBOL(iscsi_target_check_login_request);
static int iscsi_target_check_first_request(
struct iscsit_conn *conn,
struct iscsi_login *login)
{
struct iscsi_param *param = NULL;
struct se_node_acl *se_nacl;
login->first_request = 0;
list_for_each_entry(param, &conn->param_list->param_list, p_list) {
if (!strncmp(param->name, SESSIONTYPE, 11)) {
if (!IS_PSTATE_ACCEPTOR(param)) {
pr_err("SessionType key not received"
" in first login request.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_MISSING_FIELDS);
return -1;
}
if (!strncmp(param->value, DISCOVERY, 9))
return 0;
}
if (!strncmp(param->name, INITIATORNAME, 13)) {
if (!IS_PSTATE_ACCEPTOR(param)) {
if (!login->leading_connection)
continue;
pr_err("InitiatorName key not received"
" in first login request.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_MISSING_FIELDS);
return -1;
}
/*
* For non-leading connections, double check that the
* received InitiatorName matches the existing session's
* struct iscsi_node_acl.
*/
if (!login->leading_connection) {
se_nacl = conn->sess->se_sess->se_node_acl;
if (!se_nacl) {
pr_err("Unable to locate"
" struct se_node_acl\n");
iscsit_tx_login_rsp(conn,
ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_TGT_NOT_FOUND);
return -1;
}
if (strcmp(param->value,
se_nacl->initiatorname)) {
pr_err("Incorrect"
" InitiatorName: %s for this"
" iSCSI Initiator Node.\n",
param->value);
iscsit_tx_login_rsp(conn,
ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_TGT_NOT_FOUND);
return -1;
}
}
}
}
return 0;
}
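/*
 * Build and transmit a single Login Response PDU. For the final
 * response carrying the transition to full feature phase, the TX/RX
 * threads are started first so that any resource failure can still be
 * reported to the initiator.
 */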
static int iscsi_target_do_tx_login_io(struct iscsit_conn *conn, struct iscsi_login *login)
{
u32 padding = 0;
struct iscsi_login_rsp *login_rsp;
login_rsp = (struct iscsi_login_rsp *) login->rsp;
login_rsp->opcode = ISCSI_OP_LOGIN_RSP;
hton24(login_rsp->dlength, login->rsp_length);
memcpy(login_rsp->isid, login->isid, 6);
login_rsp->tsih = cpu_to_be16(login->tsih);
login_rsp->itt = login->init_task_tag;
login_rsp->statsn = cpu_to_be32(conn->stat_sn++);
login_rsp->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
login_rsp->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
pr_debug("Sending Login Response, Flags: 0x%02x, ITT: 0x%08x,"
" ExpCmdSN; 0x%08x, MaxCmdSN: 0x%08x, StatSN: 0x%08x, Length:"
" %u\n", login_rsp->flags, (__force u32)login_rsp->itt,
ntohl(login_rsp->exp_cmdsn), ntohl(login_rsp->max_cmdsn),
ntohl(login_rsp->statsn), login->rsp_length);
padding = ((-login->rsp_length) & 3);
/*
* Before sending the last login response containing the transition
* bit for full-feature-phase, go ahead and start up TX/RX threads
* now to avoid potential resource allocation failures after the
* final login response has been sent.
*/
if (login->login_complete) {
int rc = iscsit_start_kthreads(conn);
if (rc) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
return -1;
}
}
if (conn->conn_transport->iscsit_put_login_tx(conn, login,
login->rsp_length + padding) < 0)
goto err;
login->rsp_length = 0;
return 0;
err:
if (login->login_complete) {
if (conn->rx_thread && conn->rx_thread_active) {
send_sig(SIGINT, conn->rx_thread, 1);
complete(&conn->rx_login_comp);
kthread_stop(conn->rx_thread);
}
if (conn->tx_thread && conn->tx_thread_active) {
send_sig(SIGINT, conn->tx_thread, 1);
kthread_stop(conn->tx_thread);
}
spin_lock(&iscsit_global->ts_bitmap_lock);
bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
get_order(1));
spin_unlock(&iscsit_global->ts_bitmap_lock);
}
return -1;
}
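/*
 * ->sk_data_ready() hook installed for the duration of login. The
 * LOGIN_FLAGS_* bits gate the callback so conn->login_work is queued
 * at most once per read window and not at all once the connection is
 * marked closed.
 */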
static void iscsi_target_sk_data_ready(struct sock *sk)
{
struct iscsit_conn *conn = sk->sk_user_data;
bool rc;
trace_sk_data_ready(sk);
pr_debug("Entering iscsi_target_sk_data_ready: conn: %p\n", conn);
write_lock_bh(&sk->sk_callback_lock);
if (!sk->sk_user_data) {
write_unlock_bh(&sk->sk_callback_lock);
return;
}
if (!test_bit(LOGIN_FLAGS_READY, &conn->login_flags)) {
write_unlock_bh(&sk->sk_callback_lock);
pr_debug("Got LOGIN_FLAGS_READY=0, conn: %p >>>>\n", conn);
return;
}
if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
write_unlock_bh(&sk->sk_callback_lock);
pr_debug("Got LOGIN_FLAGS_CLOSED=1, conn: %p >>>>\n", conn);
return;
}
if (test_and_set_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
write_unlock_bh(&sk->sk_callback_lock);
pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1, conn: %p >>>>\n", conn);
if (iscsi_target_sk_data_ready == conn->orig_data_ready)
return;
conn->orig_data_ready(sk);
return;
}
rc = schedule_delayed_work(&conn->login_work, 0);
if (!rc) {
pr_debug("iscsi_target_sk_data_ready, schedule_delayed_work"
" got false\n");
}
write_unlock_bh(&sk->sk_callback_lock);
}
static void iscsi_target_sk_state_change(struct sock *);
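/*
 * Swap in the login-time socket callbacks, saving the originals so
 * they can be restored once negotiation completes or fails.
 */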
static void iscsi_target_set_sock_callbacks(struct iscsit_conn *conn)
{
struct sock *sk;
if (!conn->sock)
return;
sk = conn->sock->sk;
pr_debug("Entering iscsi_target_set_sock_callbacks: conn: %p\n", conn);
write_lock_bh(&sk->sk_callback_lock);
sk->sk_user_data = conn;
conn->orig_data_ready = sk->sk_data_ready;
conn->orig_state_change = sk->sk_state_change;
sk->sk_data_ready = iscsi_target_sk_data_ready;
sk->sk_state_change = iscsi_target_sk_state_change;
write_unlock_bh(&sk->sk_callback_lock);
sk->sk_sndtimeo = TA_LOGIN_TIMEOUT * HZ;
sk->sk_rcvtimeo = TA_LOGIN_TIMEOUT * HZ;
}
static void iscsi_target_restore_sock_callbacks(struct iscsit_conn *conn)
{
struct sock *sk;
if (!conn->sock)
return;
sk = conn->sock->sk;
pr_debug("Entering iscsi_target_restore_sock_callbacks: conn: %p\n", conn);
write_lock_bh(&sk->sk_callback_lock);
if (!sk->sk_user_data) {
write_unlock_bh(&sk->sk_callback_lock);
return;
}
sk->sk_user_data = NULL;
sk->sk_data_ready = conn->orig_data_ready;
sk->sk_state_change = conn->orig_state_change;
write_unlock_bh(&sk->sk_callback_lock);
sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
}
static int iscsi_target_do_login(struct iscsit_conn *, struct iscsi_login *);
static bool __iscsi_target_sk_check_close(struct sock *sk)
{
if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE,"
"returning TRUE\n");
return true;
}
return false;
}
static bool iscsi_target_sk_check_close(struct iscsit_conn *conn)
{
bool state = false;
if (conn->sock) {
struct sock *sk = conn->sock->sk;
read_lock_bh(&sk->sk_callback_lock);
state = (__iscsi_target_sk_check_close(sk) ||
test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
read_unlock_bh(&sk->sk_callback_lock);
}
return state;
}
static bool iscsi_target_sk_check_flag(struct iscsit_conn *conn, unsigned int flag)
{
bool state = false;
if (conn->sock) {
struct sock *sk = conn->sock->sk;
read_lock_bh(&sk->sk_callback_lock);
state = test_bit(flag, &conn->login_flags);
read_unlock_bh(&sk->sk_callback_lock);
}
return state;
}
static bool iscsi_target_sk_check_and_clear(struct iscsit_conn *conn, unsigned int flag)
{
bool state = false;
if (conn->sock) {
struct sock *sk = conn->sock->sk;
write_lock_bh(&sk->sk_callback_lock);
state = (__iscsi_target_sk_check_close(sk) ||
test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
if (!state)
clear_bit(flag, &conn->login_flags);
write_unlock_bh(&sk->sk_callback_lock);
}
return state;
}
static void iscsi_target_login_drop(struct iscsit_conn *conn, struct iscsi_login *login)
{
bool zero_tsih = login->zero_tsih;
iscsi_remove_failed_auth_entry(conn);
iscsi_target_nego_release(conn);
iscsi_target_login_sess_out(conn, zero_tsih, true);
}
static void iscsi_target_do_login_rx(struct work_struct *work)
{
struct iscsit_conn *conn = container_of(work,
struct iscsit_conn, login_work.work);
struct iscsi_login *login = conn->login;
struct iscsi_np *np = login->np;
struct iscsi_portal_group *tpg = conn->tpg;
struct iscsi_tpg_np *tpg_np = conn->tpg_np;
int rc, zero_tsih = login->zero_tsih;
bool state;
pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
conn, current->comm, current->pid);
spin_lock(&conn->login_worker_lock);
set_bit(LOGIN_FLAGS_WORKER_RUNNING, &conn->login_flags);
spin_unlock(&conn->login_worker_lock);
/*
* If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready()
* before initial PDU processing in iscsi_target_start_negotiation()
* has completed, go ahead and retry until it's cleared.
*
* Otherwise if the TCP connection drops while this is occurring,
* iscsi_target_start_negotiation() will detect the failure, call
* cancel_delayed_work_sync(&conn->login_work), and cleanup the
* remaining iscsi connection resources from iscsi_np process context.
*/
if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) {
schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10));
return;
}
spin_lock(&tpg->tpg_state_lock);
state = (tpg->tpg_state == TPG_STATE_ACTIVE);
spin_unlock(&tpg->tpg_state_lock);
if (!state) {
pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
goto err;
}
if (iscsi_target_sk_check_close(conn)) {
pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
goto err;
}
allow_signal(SIGINT);
rc = iscsit_set_login_timer_kworker(conn, current);
if (rc < 0) {
/* The login timer has already expired */
pr_debug("iscsi_target_do_login_rx, login failed\n");
goto err;
}
rc = conn->conn_transport->iscsit_get_login_rx(conn, login);
flush_signals(current);
if (rc < 0)
goto err;
pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
conn, current->comm, current->pid);
/*
* LOGIN_FLAGS_READ_ACTIVE is cleared so that sk_data_ready
* could be triggered again after this.
*
* LOGIN_FLAGS_WRITE_ACTIVE is cleared after we successfully
* process a login PDU, so that sk_state_change can do login
* cleanup as needed if the socket is closed. If a delayed work is
* ongoing (LOGIN_FLAGS_WRITE_ACTIVE or LOGIN_FLAGS_READ_ACTIVE),
* sk_state_change will leave the cleanup to the delayed work or
* it will schedule a delayed work to do cleanup.
*/
if (conn->sock) {
struct sock *sk = conn->sock->sk;
write_lock_bh(&sk->sk_callback_lock);
if (!test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags)) {
clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
set_bit(LOGIN_FLAGS_WRITE_ACTIVE, &conn->login_flags);
}
write_unlock_bh(&sk->sk_callback_lock);
}
rc = iscsi_target_do_login(conn, login);
if (rc < 0) {
goto err;
} else if (!rc) {
if (iscsi_target_sk_check_and_clear(conn,
LOGIN_FLAGS_WRITE_ACTIVE))
goto err;
/*
* Set the login timer thread pointer to NULL to prevent the
* login process from getting stuck if the initiator
* stops sending data.
*/
rc = iscsit_set_login_timer_kworker(conn, NULL);
if (rc < 0)
goto err;
} else if (rc == 1) {
iscsit_stop_login_timer(conn);
cancel_delayed_work(&conn->login_work);
iscsi_target_nego_release(conn);
iscsi_post_login_handler(np, conn, zero_tsih);
iscsit_deaccess_np(np, tpg, tpg_np);
}
return;
err:
iscsi_target_restore_sock_callbacks(conn);
iscsit_stop_login_timer(conn);
cancel_delayed_work(&conn->login_work);
iscsi_target_login_drop(conn, login);
iscsit_deaccess_np(np, tpg, tpg_np);
}
static void iscsi_target_sk_state_change(struct sock *sk)
{
struct iscsit_conn *conn;
void (*orig_state_change)(struct sock *);
bool state;
pr_debug("Entering iscsi_target_sk_state_change\n");
write_lock_bh(&sk->sk_callback_lock);
conn = sk->sk_user_data;
if (!conn) {
write_unlock_bh(&sk->sk_callback_lock);
return;
}
orig_state_change = conn->orig_state_change;
if (!test_bit(LOGIN_FLAGS_READY, &conn->login_flags)) {
pr_debug("Got LOGIN_FLAGS_READY=0 sk_state_change conn: %p\n",
conn);
write_unlock_bh(&sk->sk_callback_lock);
orig_state_change(sk);
return;
}
state = __iscsi_target_sk_check_close(sk);
pr_debug("__iscsi_target_sk_close_change: state: %d\n", state);
if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags) ||
test_bit(LOGIN_FLAGS_WRITE_ACTIVE, &conn->login_flags)) {
pr_debug("Got LOGIN_FLAGS_{READ|WRITE}_ACTIVE=1"
" sk_state_change conn: %p\n", conn);
if (state)
set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
write_unlock_bh(&sk->sk_callback_lock);
orig_state_change(sk);
return;
}
if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
conn);
write_unlock_bh(&sk->sk_callback_lock);
orig_state_change(sk);
return;
}
/*
* If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED,
* but only queue conn->login_work -> iscsi_target_do_login_rx()
* processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared.
*
* When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close()
* will detect the dropped TCP connection from delayed workqueue context.
*
* If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial
* iscsi_target_start_negotiation() is running, iscsi_target_do_login()
* via iscsi_target_sk_check_close() or iscsi_target_start_negotiation()
* via iscsi_target_sk_check_and_clear() is responsible for detecting the
* dropped TCP connection in iscsi_np process context, and cleaning up
* the remaining iscsi connection resources.
*/
if (state) {
pr_debug("iscsi_target_sk_state_change got failed state\n");
set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
write_unlock_bh(&sk->sk_callback_lock);
orig_state_change(sk);
if (!state)
schedule_delayed_work(&conn->login_work, 0);
return;
}
write_unlock_bh(&sk->sk_callback_lock);
orig_state_change(sk);
}
/*
* NOTE: We check for existing sessions or connections AFTER the initiator
* has been successfully authenticated in order to protect against faked
* ISID/TSIH combinations.
*/
static int iscsi_target_check_for_existing_instances(
struct iscsit_conn *conn,
struct iscsi_login *login)
{
if (login->checked_for_existing)
return 0;
login->checked_for_existing = 1;
if (!login->tsih)
return iscsi_check_for_session_reinstatement(conn);
else
return iscsi_login_post_auth_non_zero_tsih(conn, login->cid,
login->initial_exp_statsn);
}
static int iscsi_target_do_authentication(
struct iscsit_conn *conn,
struct iscsi_login *login)
{
int authret;
u32 payload_length;
struct iscsi_param *param;
struct iscsi_login_req *login_req;
struct iscsi_login_rsp *login_rsp;
login_req = (struct iscsi_login_req *) login->req;
login_rsp = (struct iscsi_login_rsp *) login->rsp;
payload_length = ntoh24(login_req->dlength);
param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
if (!param)
return -1;
authret = iscsi_handle_authentication(
conn,
login->req_buf,
login->rsp_buf,
payload_length,
&login->rsp_length,
param->value);
switch (authret) {
case 0:
pr_debug("Received OK response"
" from LIO Authentication, continuing.\n");
break;
case 1:
pr_debug("iSCSI security negotiation"
" completed successfully.\n");
login->auth_complete = 1;
if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
(login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
login_rsp->flags |= (ISCSI_FLAG_LOGIN_NEXT_STAGE1 |
ISCSI_FLAG_LOGIN_TRANSIT);
login->current_stage = 1;
}
return iscsi_target_check_for_existing_instances(
conn, login);
case 2:
pr_err("Security negotiation"
" failed.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_AUTH_FAILED);
return -1;
default:
pr_err("Received unknown error %d from LIO"
" Authentication\n", authret);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_TARGET_ERROR);
return -1;
}
return 0;
}
bool iscsi_conn_auth_required(struct iscsit_conn *conn)
{
struct iscsi_node_acl *nacl;
struct se_node_acl *se_nacl;
if (conn->sess->sess_ops->SessionType) {
/*
* For SessionType=Discovery
*/
return conn->tpg->tpg_attrib.authentication;
}
/*
* For SessionType=Normal
*/
se_nacl = conn->sess->se_sess->se_node_acl;
if (!se_nacl) {
pr_debug("Unknown ACL is trying to connect\n");
return true;
}
if (se_nacl->dynamic_node_acl) {
pr_debug("Dynamic ACL %s is trying to connect\n",
se_nacl->initiatorname);
return conn->tpg->tpg_attrib.authentication;
}
pr_debug("Known ACL %s is trying to connect\n",
se_nacl->initiatorname);
nacl = to_iscsi_nacl(se_nacl);
if (nacl->node_attrib.authentication == NA_AUTHENTICATION_INHERITED)
return conn->tpg->tpg_attrib.authentication;
return nacl->node_attrib.authentication;
}
static int iscsi_target_handle_csg_zero(
struct iscsit_conn *conn,
struct iscsi_login *login)
{
int ret;
u32 payload_length;
struct iscsi_param *param;
struct iscsi_login_req *login_req;
struct iscsi_login_rsp *login_rsp;
login_req = (struct iscsi_login_req *) login->req;
login_rsp = (struct iscsi_login_rsp *) login->rsp;
payload_length = ntoh24(login_req->dlength);
param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
if (!param)
return -1;
ret = iscsi_decode_text_input(
PHASE_SECURITY|PHASE_DECLARATIVE,
SENDER_INITIATOR|SENDER_RECEIVER,
login->req_buf,
payload_length,
conn);
if (ret < 0)
return -1;
if (ret > 0) {
if (login->auth_complete) {
pr_err("Initiator has already been"
" successfully authenticated, but is still"
" sending %s keys.\n", param->value);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_INIT_ERR);
return -1;
}
goto do_auth;
} else if (!payload_length) {
pr_err("Initiator sent zero length security payload,"
" login failed\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_AUTH_FAILED);
return -1;
}
if (login->first_request)
if (iscsi_target_check_first_request(conn, login) < 0)
return -1;
ret = iscsi_encode_text_output(
PHASE_SECURITY|PHASE_DECLARATIVE,
SENDER_TARGET,
login->rsp_buf,
&login->rsp_length,
conn->param_list,
conn->tpg->tpg_attrib.login_keys_workaround);
if (ret < 0)
return -1;
if (!iscsi_check_negotiated_keys(conn->param_list)) {
bool auth_required = iscsi_conn_auth_required(conn);
if (auth_required) {
if (!strncmp(param->value, NONE, 4)) {
pr_err("Initiator sent AuthMethod=None but"
" Target is enforcing iSCSI Authentication,"
" login failed.\n");
iscsit_tx_login_rsp(conn,
ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_AUTH_FAILED);
return -1;
}
if (!login->auth_complete)
return 0;
if (strncmp(param->value, NONE, 4) &&
!login->auth_complete)
return 0;
}
if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
(login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
login_rsp->flags |= ISCSI_FLAG_LOGIN_NEXT_STAGE1 |
ISCSI_FLAG_LOGIN_TRANSIT;
login->current_stage = 1;
}
}
return 0;
do_auth:
return iscsi_target_do_authentication(conn, login);
}
static bool iscsi_conn_authenticated(struct iscsit_conn *conn,
struct iscsi_login *login)
{
if (!iscsi_conn_auth_required(conn))
return true;
if (login->auth_complete)
return true;
return false;
}
static int iscsi_target_handle_csg_one(struct iscsit_conn *conn, struct iscsi_login *login)
{
int ret;
u32 payload_length;
struct iscsi_login_req *login_req;
struct iscsi_login_rsp *login_rsp;
login_req = (struct iscsi_login_req *) login->req;
login_rsp = (struct iscsi_login_rsp *) login->rsp;
payload_length = ntoh24(login_req->dlength);
ret = iscsi_decode_text_input(
PHASE_OPERATIONAL|PHASE_DECLARATIVE,
SENDER_INITIATOR|SENDER_RECEIVER,
login->req_buf,
payload_length,
conn);
if (ret < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_INIT_ERR);
return -1;
}
if (login->first_request)
if (iscsi_target_check_first_request(conn, login) < 0)
return -1;
if (iscsi_target_check_for_existing_instances(conn, login) < 0)
return -1;
ret = iscsi_encode_text_output(
PHASE_OPERATIONAL|PHASE_DECLARATIVE,
SENDER_TARGET,
login->rsp_buf,
&login->rsp_length,
conn->param_list,
conn->tpg->tpg_attrib.login_keys_workaround);
if (ret < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_INIT_ERR);
return -1;
}
if (!iscsi_conn_authenticated(conn, login)) {
pr_err("Initiator is requesting CSG: 1, has not been"
" successfully authenticated, and the Target is"
" enforcing iSCSI Authentication, login failed.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_AUTH_FAILED);
return -1;
}
if (!iscsi_check_negotiated_keys(conn->param_list))
if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE3) &&
(login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT))
login_rsp->flags |= ISCSI_FLAG_LOGIN_NEXT_STAGE3 |
ISCSI_FLAG_LOGIN_TRANSIT;
return 0;
}
/*
* RETURN VALUE:
*
* 1 = Login successful
* -1 = Login failed
* 0 = More PDU exchanges required
*/
static int iscsi_target_do_login(struct iscsit_conn *conn, struct iscsi_login *login)
{
int pdu_count = 0;
struct iscsi_login_req *login_req;
struct iscsi_login_rsp *login_rsp;
login_req = (struct iscsi_login_req *) login->req;
login_rsp = (struct iscsi_login_rsp *) login->rsp;
while (1) {
if (++pdu_count > MAX_LOGIN_PDUS) {
pr_err("MAX_LOGIN_PDUS count reached.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_TARGET_ERROR);
return -1;
}
switch (ISCSI_LOGIN_CURRENT_STAGE(login_req->flags)) {
case 0:
login_rsp->flags &= ~ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK;
if (iscsi_target_handle_csg_zero(conn, login) < 0)
return -1;
break;
case 1:
login_rsp->flags |= ISCSI_FLAG_LOGIN_CURRENT_STAGE1;
if (iscsi_target_handle_csg_one(conn, login) < 0)
return -1;
if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
/*
* Check to make sure the TCP connection has not
* dropped asynchronously while session reinstatement
* was occurring in this kthread context, before
* transitioning to full feature phase operation.
*/
if (iscsi_target_sk_check_close(conn))
return -1;
login->tsih = conn->sess->tsih;
login->login_complete = 1;
iscsi_target_restore_sock_callbacks(conn);
if (iscsi_target_do_tx_login_io(conn,
login) < 0)
return -1;
return 1;
}
break;
default:
pr_err("Illegal CSG: %d received from"
" Initiator, protocol error.\n",
ISCSI_LOGIN_CURRENT_STAGE(login_req->flags));
break;
}
if (iscsi_target_do_tx_login_io(conn, login) < 0)
return -1;
if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
login_rsp->flags &= ~ISCSI_FLAG_LOGIN_TRANSIT;
login_rsp->flags &= ~ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK;
}
break;
}
return 0;
}
static void iscsi_initiatorname_tolower(
char *param_buf)
{
char *c;
u32 iqn_size = strlen(param_buf), i;
for (i = 0; i < iqn_size; i++) {
c = &param_buf[i];
if (!isupper(*c))
continue;
*c = tolower(*c);
}
}
/*
* Processes the first Login Request.
*/
int iscsi_target_locate_portal(
struct iscsi_np *np,
struct iscsit_conn *conn,
struct iscsi_login *login)
{
char *i_buf = NULL, *s_buf = NULL, *t_buf = NULL;
char *tmpbuf, *start = NULL, *end = NULL, *key, *value;
struct iscsit_session *sess = conn->sess;
struct iscsi_tiqn *tiqn;
struct iscsi_tpg_np *tpg_np = NULL;
struct iscsi_login_req *login_req;
struct se_node_acl *se_nacl;
u32 payload_length, queue_depth = 0;
int sessiontype = 0, ret = 0, tag_num, tag_size;
INIT_DELAYED_WORK(&conn->login_work, iscsi_target_do_login_rx);
iscsi_target_set_sock_callbacks(conn);
login->np = np;
conn->tpg = NULL;
login_req = (struct iscsi_login_req *) login->req;
payload_length = ntoh24(login_req->dlength);
tmpbuf = kmemdup_nul(login->req_buf, payload_length, GFP_KERNEL);
if (!tmpbuf) {
pr_err("Unable to allocate memory for tmpbuf.\n");
return -1;
}
start = tmpbuf;
end = (start + payload_length);
/*
* Locate the initial keys expected from the Initiator node in
* the first login request in order to progress with the login phase.
*/
while (start < end) {
if (iscsi_extract_key_value(start, &key, &value) < 0) {
ret = -1;
goto out;
}
if (!strncmp(key, "InitiatorName", 13))
i_buf = value;
else if (!strncmp(key, "SessionType", 11))
s_buf = value;
else if (!strncmp(key, "TargetName", 10))
t_buf = value;
start += strlen(key) + strlen(value) + 2;
}
/*
* See 5.3. Login Phase.
*/
if (!i_buf) {
pr_err("InitiatorName key not received"
" in first login request.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_MISSING_FIELDS);
ret = -1;
goto out;
}
/*
* Convert the incoming InitiatorName to lowercase following
* RFC-3720 3.2.6.1. section c) that says that iSCSI IQNs
* are NOT case sensitive.
*/
iscsi_initiatorname_tolower(i_buf);
if (!s_buf) {
if (!login->leading_connection)
goto get_target;
pr_err("SessionType key not received"
" in first login request.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_MISSING_FIELDS);
ret = -1;
goto out;
}
/*
* Use default portal group for discovery sessions.
*/
sessiontype = strncmp(s_buf, DISCOVERY, 9);
if (!sessiontype) {
if (!login->leading_connection)
goto get_target;
sess->sess_ops->SessionType = 1;
/*
* Setup crc32c modules from libcrypto
*/
if (iscsi_login_setup_crypto(conn) < 0) {
pr_err("iscsi_login_setup_crypto() failed\n");
ret = -1;
goto out;
}
/*
* Serialize access across the discovery struct iscsi_portal_group to
* process login attempt.
*/
conn->tpg = iscsit_global->discovery_tpg;
if (iscsit_access_np(np, conn->tpg) < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
conn->tpg = NULL;
ret = -1;
goto out;
}
ret = 0;
goto alloc_tags;
}
get_target:
if (!t_buf) {
pr_err("TargetName key not received"
" in first login request while"
" SessionType=Normal.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_MISSING_FIELDS);
ret = -1;
goto out;
}
/*
* Locate Target IQN from Storage Node.
*/
tiqn = iscsit_get_tiqn_for_login(t_buf);
if (!tiqn) {
pr_err("Unable to locate Target IQN: %s in"
" Storage Node\n", t_buf);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
ret = -1;
goto out;
}
pr_debug("Located Storage Object: %s\n", tiqn->tiqn);
/*
* Locate Target Portal Group from Storage Node.
*/
conn->tpg = iscsit_get_tpg_from_np(tiqn, np, &tpg_np);
if (!conn->tpg) {
pr_err("Unable to locate Target Portal Group"
" on %s\n", tiqn->tiqn);
iscsit_put_tiqn_for_login(tiqn);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
ret = -1;
goto out;
}
conn->tpg_np = tpg_np;
pr_debug("Located Portal Group Object: %hu\n", conn->tpg->tpgt);
/*
* Setup crc32c modules from libcrypto
*/
if (iscsi_login_setup_crypto(conn) < 0) {
pr_err("iscsi_login_setup_crypto() failed\n");
kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
iscsit_put_tiqn_for_login(tiqn);
conn->tpg = NULL;
ret = -1;
goto out;
}
/*
* Serialize access across the struct iscsi_portal_group to
* process login attempt.
*/
if (iscsit_access_np(np, conn->tpg) < 0) {
kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
iscsit_put_tiqn_for_login(tiqn);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
conn->tpg = NULL;
ret = -1;
goto out;
}
/*
* conn->sess->node_acl will be set when the referenced
* struct iscsit_session is located from received ISID+TSIH in
* iscsi_login_non_zero_tsih_s2().
*/
if (!login->leading_connection) {
ret = 0;
goto out;
}
/*
* This value is required in iscsi_login_zero_tsih_s2()
*/
sess->sess_ops->SessionType = 0;
/*
* Locate incoming Initiator IQN reference from Storage Node.
*/
sess->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
&conn->tpg->tpg_se_tpg, i_buf);
if (!sess->se_sess->se_node_acl) {
pr_err("iSCSI Initiator Node: %s is not authorized to"
" access iSCSI target portal group: %hu.\n",
i_buf, conn->tpg->tpgt);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_TGT_FORBIDDEN);
ret = -1;
goto out;
}
se_nacl = sess->se_sess->se_node_acl;
queue_depth = se_nacl->queue_depth;
/*
* Setup pre-allocated tags based upon allowed per NodeACL CmdSN
* depth for non immediate commands, plus extra tags for immediate
* commands.
*
* Also enforce an ISCSIT_MIN_TAGS floor to prevent unnecessary contention
* in per-cpu-ida tag allocation logic + small queue_depth.
*/
alloc_tags:
tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
tag_size = sizeof(struct iscsit_cmd) + conn->conn_transport->priv_size;
ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);
if (ret < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
ret = -1;
}
out:
kfree(tmpbuf);
return ret;
}
int iscsi_target_start_negotiation(
struct iscsi_login *login,
struct iscsit_conn *conn)
{
int ret;
if (conn->sock) {
struct sock *sk = conn->sock->sk;
write_lock_bh(&sk->sk_callback_lock);
set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
write_unlock_bh(&sk->sk_callback_lock);
}
/*
* If iscsi_target_do_login returns zero to signal more PDU
* exchanges are required to complete the login, go ahead and
* clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection
* is still active.
*
* Otherwise if TCP connection dropped asynchronously, go ahead
* and perform connection cleanup now.
*/
ret = iscsi_target_do_login(conn, login);
if (!ret) {
spin_lock(&conn->login_worker_lock);
if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
ret = -1;
else if (!test_bit(LOGIN_FLAGS_WORKER_RUNNING, &conn->login_flags)) {
if (iscsit_set_login_timer_kworker(conn, NULL) < 0) {
/*
* The timeout has expired already.
* Schedule login_work to perform the cleanup.
*/
schedule_delayed_work(&conn->login_work, 0);
}
}
spin_unlock(&conn->login_worker_lock);
}
if (ret < 0) {
iscsi_target_restore_sock_callbacks(conn);
iscsi_remove_failed_auth_entry(conn);
}
if (ret != 0) {
iscsit_stop_login_timer(conn);
cancel_delayed_work_sync(&conn->login_work);
iscsi_target_nego_release(conn);
}
return ret;
}
void iscsi_target_nego_release(struct iscsit_conn *conn)
{
struct iscsi_login *login = conn->conn_login;
if (!login)
return;
kfree(login->req_buf);
kfree(login->rsp_buf);
kfree(login);
conn->conn_login = NULL;
}
|
linux-master
|
drivers/target/iscsi/iscsi_target_nego.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Modern ConfigFS group context specific iSCSI statistics based on original
* iscsi_target_mib.c code
*
* Copyright (c) 2011-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <linux/configfs.h>
#include <linux/export.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_device.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include <target/iscsi/iscsi_target_stat.h>
#ifndef INITIAL_JIFFIES
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
#endif
/* Instance Attributes Table */
#define ISCSI_INST_NUM_NODES 1
#define ISCSI_INST_DESCR "Storage Engine Target"
#define ISCSI_DISCONTINUITY_TIME 0
#define ISCSI_NODE_INDEX 1
#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
/****************************************************************************
* iSCSI MIB Tables
****************************************************************************/
/*
* Instance Attributes Table
*/
static struct iscsi_tiqn *iscsi_instance_tiqn(struct config_item *item)
{
struct iscsi_wwn_stat_grps *igrps = container_of(to_config_group(item),
struct iscsi_wwn_stat_grps, iscsi_instance_group);
return container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps);
}
static ssize_t iscsi_stat_instance_inst_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n",
iscsi_instance_tiqn(item)->tiqn_index);
}
static ssize_t iscsi_stat_instance_min_ver_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
}
static ssize_t iscsi_stat_instance_max_ver_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
}
static ssize_t iscsi_stat_instance_portals_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n",
iscsi_instance_tiqn(item)->tiqn_num_tpg_nps);
}
static ssize_t iscsi_stat_instance_nodes_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_INST_NUM_NODES);
}
static ssize_t iscsi_stat_instance_sessions_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n",
iscsi_instance_tiqn(item)->tiqn_nsessions);
}
static ssize_t iscsi_stat_instance_fail_sess_show(struct config_item *item,
char *page)
{
struct iscsi_tiqn *tiqn = iscsi_instance_tiqn(item);
struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
u32 sess_err_count;
spin_lock_bh(&sess_err->lock);
sess_err_count = (sess_err->digest_errors +
sess_err->cxn_timeout_errors +
sess_err->pdu_format_errors);
spin_unlock_bh(&sess_err->lock);
return snprintf(page, PAGE_SIZE, "%u\n", sess_err_count);
}
static ssize_t iscsi_stat_instance_fail_type_show(struct config_item *item,
char *page)
{
struct iscsi_tiqn *tiqn = iscsi_instance_tiqn(item);
struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
return snprintf(page, PAGE_SIZE, "%u\n",
sess_err->last_sess_failure_type);
}
static ssize_t iscsi_stat_instance_fail_rem_name_show(struct config_item *item,
char *page)
{
struct iscsi_tiqn *tiqn = iscsi_instance_tiqn(item);
struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
return snprintf(page, PAGE_SIZE, "%s\n",
sess_err->last_sess_fail_rem_name[0] ?
sess_err->last_sess_fail_rem_name : NONE);
}
static ssize_t iscsi_stat_instance_disc_time_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DISCONTINUITY_TIME);
}
static ssize_t iscsi_stat_instance_description_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%s\n", ISCSI_INST_DESCR);
}
static ssize_t iscsi_stat_instance_vendor_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "Datera, Inc. iSCSI-Target\n");
}
static ssize_t iscsi_stat_instance_version_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%s\n", ISCSIT_VERSION);
}
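/*
 * CONFIGFS_ATTR_RO(iscsi_stat_instance_, inst) expands to a read-only
 * struct configfs_attribute named iscsi_stat_instance_attr_inst whose
 * ->show() points at iscsi_stat_instance_inst_show() above; the same
 * pattern is used for every attribute table in this file.
 */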
CONFIGFS_ATTR_RO(iscsi_stat_instance_, inst);
CONFIGFS_ATTR_RO(iscsi_stat_instance_, min_ver);
CONFIGFS_ATTR_RO(iscsi_stat_instance_, max_ver);
CONFIGFS_ATTR_RO(iscsi_stat_instance_, portals);
CONFIGFS_ATTR_RO(iscsi_stat_instance_, nodes);
CONFIGFS_ATTR_RO(iscsi_stat_instance_, sessions);
CONFIGFS_ATTR_RO(iscsi_stat_instance_, fail_sess);
CONFIGFS_ATTR_RO(iscsi_stat_instance_, fail_type);
CONFIGFS_ATTR_RO(iscsi_stat_instance_, fail_rem_name);
CONFIGFS_ATTR_RO(iscsi_stat_instance_, disc_time);
CONFIGFS_ATTR_RO(iscsi_stat_instance_, description);
CONFIGFS_ATTR_RO(iscsi_stat_instance_, vendor);
CONFIGFS_ATTR_RO(iscsi_stat_instance_, version);
static struct configfs_attribute *iscsi_stat_instance_attrs[] = {
&iscsi_stat_instance_attr_inst,
&iscsi_stat_instance_attr_min_ver,
&iscsi_stat_instance_attr_max_ver,
&iscsi_stat_instance_attr_portals,
&iscsi_stat_instance_attr_nodes,
&iscsi_stat_instance_attr_sessions,
&iscsi_stat_instance_attr_fail_sess,
&iscsi_stat_instance_attr_fail_type,
&iscsi_stat_instance_attr_fail_rem_name,
&iscsi_stat_instance_attr_disc_time,
&iscsi_stat_instance_attr_description,
&iscsi_stat_instance_attr_vendor,
&iscsi_stat_instance_attr_version,
NULL,
};
const struct config_item_type iscsi_stat_instance_cit = {
.ct_attrs = iscsi_stat_instance_attrs,
.ct_owner = THIS_MODULE,
};
/*
* Instance Session Failure Stats Table
*/
static struct iscsi_tiqn *iscsi_sess_err_tiqn(struct config_item *item)
{
struct iscsi_wwn_stat_grps *igrps = container_of(to_config_group(item),
struct iscsi_wwn_stat_grps, iscsi_sess_err_group);
return container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps);
}
static ssize_t iscsi_stat_sess_err_inst_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n",
iscsi_sess_err_tiqn(item)->tiqn_index);
}
static ssize_t iscsi_stat_sess_err_digest_errors_show(struct config_item *item,
char *page)
{
struct iscsi_tiqn *tiqn = iscsi_sess_err_tiqn(item);
struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
return snprintf(page, PAGE_SIZE, "%u\n", sess_err->digest_errors);
}
static ssize_t iscsi_stat_sess_err_cxn_errors_show(struct config_item *item,
char *page)
{
struct iscsi_tiqn *tiqn = iscsi_sess_err_tiqn(item);
struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
return snprintf(page, PAGE_SIZE, "%u\n", sess_err->cxn_timeout_errors);
}
static ssize_t iscsi_stat_sess_err_format_errors_show(struct config_item *item,
char *page)
{
struct iscsi_tiqn *tiqn = iscsi_sess_err_tiqn(item);
struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
return snprintf(page, PAGE_SIZE, "%u\n", sess_err->pdu_format_errors);
}
CONFIGFS_ATTR_RO(iscsi_stat_sess_err_, inst);
CONFIGFS_ATTR_RO(iscsi_stat_sess_err_, digest_errors);
CONFIGFS_ATTR_RO(iscsi_stat_sess_err_, cxn_errors);
CONFIGFS_ATTR_RO(iscsi_stat_sess_err_, format_errors);
static struct configfs_attribute *iscsi_stat_sess_err_attrs[] = {
&iscsi_stat_sess_err_attr_inst,
&iscsi_stat_sess_err_attr_digest_errors,
&iscsi_stat_sess_err_attr_cxn_errors,
&iscsi_stat_sess_err_attr_format_errors,
NULL,
};
const struct config_item_type iscsi_stat_sess_err_cit = {
.ct_attrs = iscsi_stat_sess_err_attrs,
.ct_owner = THIS_MODULE,
};
/*
* Target Attributes Table
*/
static struct iscsi_tiqn *iscsi_tgt_attr_tiqn(struct config_item *item)
{
struct iscsi_wwn_stat_grps *igrps = container_of(to_config_group(item),
struct iscsi_wwn_stat_grps, iscsi_tgt_attr_group);
return container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps);
}
static ssize_t iscsi_stat_tgt_attr_inst_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n",
iscsi_tgt_attr_tiqn(item)->tiqn_index);
}
static ssize_t iscsi_stat_tgt_attr_indx_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
}
static ssize_t iscsi_stat_tgt_attr_login_fails_show(struct config_item *item,
char *page)
{
struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
u32 fail_count;
spin_lock(&lstat->lock);
fail_count = (lstat->redirects + lstat->authorize_fails +
lstat->authenticate_fails + lstat->negotiate_fails +
lstat->other_fails);
spin_unlock(&lstat->lock);
return snprintf(page, PAGE_SIZE, "%u\n", fail_count);
}
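/*
 * last_fail_time is kept in jiffies; report it in hundredths of a second
 * since boot (SNMP TimeTicks), compensating for the jiffies counter
 * starting at INITIAL_JIFFIES rather than zero.
 */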
static ssize_t iscsi_stat_tgt_attr_last_fail_time_show(struct config_item *item,
char *page)
{
struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
u32 last_fail_time;
spin_lock(&lstat->lock);
last_fail_time = lstat->last_fail_time ?
(u32)(((u32)lstat->last_fail_time -
INITIAL_JIFFIES) * 100 / HZ) : 0;
spin_unlock(&lstat->lock);
return snprintf(page, PAGE_SIZE, "%u\n", last_fail_time);
}
static ssize_t iscsi_stat_tgt_attr_last_fail_type_show(struct config_item *item,
char *page)
{
struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
u32 last_fail_type;
spin_lock(&lstat->lock);
last_fail_type = lstat->last_fail_type;
spin_unlock(&lstat->lock);
return snprintf(page, PAGE_SIZE, "%u\n", last_fail_type);
}
static ssize_t iscsi_stat_tgt_attr_fail_intr_name_show(struct config_item *item,
char *page)
{
struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
unsigned char buf[ISCSI_IQN_LEN];
spin_lock(&lstat->lock);
snprintf(buf, ISCSI_IQN_LEN, "%s", lstat->last_intr_fail_name[0] ?
lstat->last_intr_fail_name : NONE);
spin_unlock(&lstat->lock);
return snprintf(page, PAGE_SIZE, "%s\n", buf);
}
static ssize_t iscsi_stat_tgt_attr_fail_intr_addr_type_show(struct config_item *item,
char *page)
{
struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
int ret;
spin_lock(&lstat->lock);
if (lstat->last_intr_fail_ip_family == AF_INET6)
ret = snprintf(page, PAGE_SIZE, "ipv6\n");
else
ret = snprintf(page, PAGE_SIZE, "ipv4\n");
spin_unlock(&lstat->lock);
return ret;
}
static ssize_t iscsi_stat_tgt_attr_fail_intr_addr_show(struct config_item *item,
char *page)
{
struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
int ret;
spin_lock(&lstat->lock);
ret = snprintf(page, PAGE_SIZE, "%pISc\n", &lstat->last_intr_fail_sockaddr);
spin_unlock(&lstat->lock);
return ret;
}
CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, inst);
CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, indx);
CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, login_fails);
CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, last_fail_time);
CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, last_fail_type);
CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, fail_intr_name);
CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, fail_intr_addr_type);
CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, fail_intr_addr);
static struct configfs_attribute *iscsi_stat_tgt_attr_attrs[] = {
&iscsi_stat_tgt_attr_attr_inst,
&iscsi_stat_tgt_attr_attr_indx,
&iscsi_stat_tgt_attr_attr_login_fails,
&iscsi_stat_tgt_attr_attr_last_fail_time,
&iscsi_stat_tgt_attr_attr_last_fail_type,
&iscsi_stat_tgt_attr_attr_fail_intr_name,
&iscsi_stat_tgt_attr_attr_fail_intr_addr_type,
&iscsi_stat_tgt_attr_attr_fail_intr_addr,
NULL,
};
const struct config_item_type iscsi_stat_tgt_attr_cit = {
.ct_attrs = iscsi_stat_tgt_attr_attrs,
.ct_owner = THIS_MODULE,
};
/*
* Target Login Stats Table
*/
static struct iscsi_tiqn *iscsi_login_stat_tiqn(struct config_item *item)
{
struct iscsi_wwn_stat_grps *igrps = container_of(to_config_group(item),
struct iscsi_wwn_stat_grps, iscsi_login_stats_group);
return container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps);
}
static ssize_t iscsi_stat_login_inst_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n",
iscsi_login_stat_tiqn(item)->tiqn_index);
}
static ssize_t iscsi_stat_login_indx_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
}
static ssize_t iscsi_stat_login_accepts_show(struct config_item *item,
char *page)
{
struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
ssize_t ret;
spin_lock(&lstat->lock);
ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->accepts);
spin_unlock(&lstat->lock);
return ret;
}
static ssize_t iscsi_stat_login_other_fails_show(struct config_item *item,
char *page)
{
struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
ssize_t ret;
spin_lock(&lstat->lock);
ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->other_fails);
spin_unlock(&lstat->lock);
return ret;
}
static ssize_t iscsi_stat_login_redirects_show(struct config_item *item,
char *page)
{
struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
ssize_t ret;
spin_lock(&lstat->lock);
ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->redirects);
spin_unlock(&lstat->lock);
return ret;
}
static ssize_t iscsi_stat_login_authorize_fails_show(struct config_item *item,
char *page)
{
struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
ssize_t ret;
spin_lock(&lstat->lock);
ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authorize_fails);
spin_unlock(&lstat->lock);
return ret;
}
static ssize_t iscsi_stat_login_authenticate_fails_show(
struct config_item *item, char *page)
{
struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
ssize_t ret;
spin_lock(&lstat->lock);
ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authenticate_fails);
spin_unlock(&lstat->lock);
return ret;
}
static ssize_t iscsi_stat_login_negotiate_fails_show(struct config_item *item,
char *page)
{
struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
ssize_t ret;
spin_lock(&lstat->lock);
ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->negotiate_fails);
spin_unlock(&lstat->lock);
return ret;
}
CONFIGFS_ATTR_RO(iscsi_stat_login_, inst);
CONFIGFS_ATTR_RO(iscsi_stat_login_, indx);
CONFIGFS_ATTR_RO(iscsi_stat_login_, accepts);
CONFIGFS_ATTR_RO(iscsi_stat_login_, other_fails);
CONFIGFS_ATTR_RO(iscsi_stat_login_, redirects);
CONFIGFS_ATTR_RO(iscsi_stat_login_, authorize_fails);
CONFIGFS_ATTR_RO(iscsi_stat_login_, authenticate_fails);
CONFIGFS_ATTR_RO(iscsi_stat_login_, negotiate_fails);
static struct configfs_attribute *iscsi_stat_login_stats_attrs[] = {
&iscsi_stat_login_attr_inst,
&iscsi_stat_login_attr_indx,
&iscsi_stat_login_attr_accepts,
&iscsi_stat_login_attr_other_fails,
&iscsi_stat_login_attr_redirects,
&iscsi_stat_login_attr_authorize_fails,
&iscsi_stat_login_attr_authenticate_fails,
&iscsi_stat_login_attr_negotiate_fails,
NULL,
};
const struct config_item_type iscsi_stat_login_cit = {
.ct_attrs = iscsi_stat_login_stats_attrs,
.ct_owner = THIS_MODULE,
};
/*
* Target Logout Stats Table
*/
static struct iscsi_tiqn *iscsi_logout_stat_tiqn(struct config_item *item)
{
struct iscsi_wwn_stat_grps *igrps = container_of(to_config_group(item),
struct iscsi_wwn_stat_grps, iscsi_logout_stats_group);
return container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps);
}
static ssize_t iscsi_stat_logout_inst_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n",
iscsi_logout_stat_tiqn(item)->tiqn_index);
}
static ssize_t iscsi_stat_logout_indx_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
}
static ssize_t iscsi_stat_logout_normal_logouts_show(struct config_item *item,
char *page)
{
struct iscsi_tiqn *tiqn = iscsi_logout_stat_tiqn(item);
struct iscsi_logout_stats *lstats = &tiqn->logout_stats;
return snprintf(page, PAGE_SIZE, "%u\n", lstats->normal_logouts);
}
static ssize_t iscsi_stat_logout_abnormal_logouts_show(struct config_item *item,
char *page)
{
struct iscsi_tiqn *tiqn = iscsi_logout_stat_tiqn(item);
struct iscsi_logout_stats *lstats = &tiqn->logout_stats;
return snprintf(page, PAGE_SIZE, "%u\n", lstats->abnormal_logouts);
}
CONFIGFS_ATTR_RO(iscsi_stat_logout_, inst);
CONFIGFS_ATTR_RO(iscsi_stat_logout_, indx);
CONFIGFS_ATTR_RO(iscsi_stat_logout_, normal_logouts);
CONFIGFS_ATTR_RO(iscsi_stat_logout_, abnormal_logouts);
static struct configfs_attribute *iscsi_stat_logout_stats_attrs[] = {
&iscsi_stat_logout_attr_inst,
&iscsi_stat_logout_attr_indx,
&iscsi_stat_logout_attr_normal_logouts,
&iscsi_stat_logout_attr_abnormal_logouts,
NULL,
};
const struct config_item_type iscsi_stat_logout_cit = {
.ct_attrs = iscsi_stat_logout_stats_attrs,
.ct_owner = THIS_MODULE,
};
/*
* Session Stats Table
*/
static struct iscsi_node_acl *iscsi_stat_nacl(struct config_item *item)
{
struct iscsi_node_stat_grps *igrps = container_of(to_config_group(item),
struct iscsi_node_stat_grps, iscsi_sess_stats_group);
return container_of(igrps, struct iscsi_node_acl, node_stat_grps);
}
static ssize_t iscsi_stat_sess_inst_show(struct config_item *item, char *page)
{
struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
struct se_wwn *wwn = acl->se_node_acl.se_tpg->se_tpg_wwn;
struct iscsi_tiqn *tiqn = container_of(wwn,
struct iscsi_tiqn, tiqn_wwn);
return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
}
static ssize_t iscsi_stat_sess_node_show(struct config_item *item, char *page)
{
struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
struct se_node_acl *se_nacl = &acl->se_node_acl;
struct iscsit_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%u\n",
sess->sess_ops->SessionType ? 0 : ISCSI_NODE_INDEX);
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return ret;
}
static ssize_t iscsi_stat_sess_indx_show(struct config_item *item, char *page)
{
struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
struct se_node_acl *se_nacl = &acl->se_node_acl;
struct iscsit_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%u\n",
sess->session_index);
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return ret;
}
static ssize_t iscsi_stat_sess_cmd_pdus_show(struct config_item *item,
char *page)
{
struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
struct se_node_acl *se_nacl = &acl->se_node_acl;
struct iscsit_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&sess->cmd_pdus));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return ret;
}
static ssize_t iscsi_stat_sess_rsp_pdus_show(struct config_item *item,
char *page)
{
struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
struct se_node_acl *se_nacl = &acl->se_node_acl;
struct iscsit_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&sess->rsp_pdus));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return ret;
}
static ssize_t iscsi_stat_sess_txdata_octs_show(struct config_item *item,
char *page)
{
struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
struct se_node_acl *se_nacl = &acl->se_node_acl;
struct iscsit_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&sess->tx_data_octets));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return ret;
}
static ssize_t iscsi_stat_sess_rxdata_octs_show(struct config_item *item,
char *page)
{
struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
struct se_node_acl *se_nacl = &acl->se_node_acl;
struct iscsit_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&sess->rx_data_octets));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return ret;
}
static ssize_t iscsi_stat_sess_conn_digest_errors_show(struct config_item *item,
char *page)
{
struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
struct se_node_acl *se_nacl = &acl->se_node_acl;
struct iscsit_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&sess->conn_digest_errors));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return ret;
}
static ssize_t iscsi_stat_sess_conn_timeout_errors_show(
struct config_item *item, char *page)
{
struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
struct se_node_acl *se_nacl = &acl->se_node_acl;
struct iscsit_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&sess->conn_timeout_errors));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return ret;
}
CONFIGFS_ATTR_RO(iscsi_stat_sess_, inst);
CONFIGFS_ATTR_RO(iscsi_stat_sess_, node);
CONFIGFS_ATTR_RO(iscsi_stat_sess_, indx);
CONFIGFS_ATTR_RO(iscsi_stat_sess_, cmd_pdus);
CONFIGFS_ATTR_RO(iscsi_stat_sess_, rsp_pdus);
CONFIGFS_ATTR_RO(iscsi_stat_sess_, txdata_octs);
CONFIGFS_ATTR_RO(iscsi_stat_sess_, rxdata_octs);
CONFIGFS_ATTR_RO(iscsi_stat_sess_, conn_digest_errors);
CONFIGFS_ATTR_RO(iscsi_stat_sess_, conn_timeout_errors);
static struct configfs_attribute *iscsi_stat_sess_stats_attrs[] = {
&iscsi_stat_sess_attr_inst,
&iscsi_stat_sess_attr_node,
&iscsi_stat_sess_attr_indx,
&iscsi_stat_sess_attr_cmd_pdus,
&iscsi_stat_sess_attr_rsp_pdus,
&iscsi_stat_sess_attr_txdata_octs,
&iscsi_stat_sess_attr_rxdata_octs,
&iscsi_stat_sess_attr_conn_digest_errors,
&iscsi_stat_sess_attr_conn_timeout_errors,
NULL,
};
const struct config_item_type iscsi_stat_sess_cit = {
.ct_attrs = iscsi_stat_sess_stats_attrs,
.ct_owner = THIS_MODULE,
};
|
linux-master
|
drivers/target/iscsi/iscsi_target_stat.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains the iSCSI Target DataIN value generation functions.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <[email protected]>
*
******************************************************************************/
#include <linux/slab.h>
#include <scsi/iscsi_proto.h>
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
#include "iscsi_target_datain_values.h"
struct iscsi_datain_req *iscsit_allocate_datain_req(void)
{
struct iscsi_datain_req *dr;
dr = kmem_cache_zalloc(lio_dr_cache, GFP_ATOMIC);
if (!dr) {
pr_err("Unable to allocate memory for"
" struct iscsi_datain_req\n");
return NULL;
}
INIT_LIST_HEAD(&dr->cmd_datain_node);
return dr;
}
void iscsit_attach_datain_req(struct iscsit_cmd *cmd, struct iscsi_datain_req *dr)
{
spin_lock(&cmd->datain_lock);
list_add_tail(&dr->cmd_datain_node, &cmd->datain_list);
spin_unlock(&cmd->datain_lock);
}
void iscsit_free_datain_req(struct iscsit_cmd *cmd, struct iscsi_datain_req *dr)
{
spin_lock(&cmd->datain_lock);
list_del(&dr->cmd_datain_node);
spin_unlock(&cmd->datain_lock);
kmem_cache_free(lio_dr_cache, dr);
}
void iscsit_free_all_datain_reqs(struct iscsit_cmd *cmd)
{
struct iscsi_datain_req *dr, *dr_tmp;
spin_lock(&cmd->datain_lock);
list_for_each_entry_safe(dr, dr_tmp, &cmd->datain_list, cmd_datain_node) {
list_del(&dr->cmd_datain_node);
kmem_cache_free(lio_dr_cache, dr);
}
spin_unlock(&cmd->datain_lock);
}
struct iscsi_datain_req *iscsit_get_datain_req(struct iscsit_cmd *cmd)
{
if (list_empty(&cmd->datain_list)) {
pr_err("cmd->datain_list is empty for ITT:"
" 0x%08x\n", cmd->init_task_tag);
return NULL;
}
return list_first_entry(&cmd->datain_list, struct iscsi_datain_req,
cmd_datain_node);
}
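/*
 * The four iscsit_set_datain_values_*() helpers below cover the 2x2 matrix
 * of negotiated DataSequenceInOrder x DataPDUInOrder.  Each call fills in
 * one DataIN descriptor (offset, length, DataSN, flags), capping the
 * payload at MaxRecvDataSegmentLength and raising the F bit (plus the S
 * bit on the final PDU) as each MaxBurstLength burst or the whole read
 * completes.  During recovery the per-request counters in
 * struct iscsi_datain_req are used instead of the live counters in
 * struct iscsit_cmd.
 */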
/*
* For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=Yes.
*/
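/*
 * Illustrative example of the burst accounting below: with
 * MaxRecvDataSegmentLength = 8k and MaxBurstLength = 64k, a 100k read is
 * emitted as twelve 8k DataIN PDUs followed by one 4k PDU; the F bit is
 * set on the 8th PDU (closing the first 64k burst) and F | S on the last.
 */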
static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_yes(
struct iscsit_cmd *cmd,
struct iscsi_datain *datain)
{
u32 next_burst_len, read_data_done, read_data_left;
struct iscsit_conn *conn = cmd->conn;
struct iscsi_datain_req *dr;
dr = iscsit_get_datain_req(cmd);
if (!dr)
return NULL;
if (dr->recovery && dr->generate_recovery_values) {
if (iscsit_create_recovery_datain_values_datasequenceinorder_yes(
cmd, dr) < 0)
return NULL;
dr->generate_recovery_values = 0;
}
next_burst_len = (!dr->recovery) ?
cmd->next_burst_len : dr->next_burst_len;
read_data_done = (!dr->recovery) ?
cmd->read_data_done : dr->read_data_done;
read_data_left = (cmd->se_cmd.data_length - read_data_done);
if (!read_data_left) {
pr_err("ITT: 0x%08x read_data_left is zero!\n",
cmd->init_task_tag);
return NULL;
}
if ((read_data_left <= conn->conn_ops->MaxRecvDataSegmentLength) &&
(read_data_left <= (conn->sess->sess_ops->MaxBurstLength -
next_burst_len))) {
datain->length = read_data_left;
datain->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS);
if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
datain->flags |= ISCSI_FLAG_DATA_ACK;
} else {
if ((next_burst_len +
conn->conn_ops->MaxRecvDataSegmentLength) <
conn->sess->sess_ops->MaxBurstLength) {
datain->length =
conn->conn_ops->MaxRecvDataSegmentLength;
next_burst_len += datain->length;
} else {
datain->length = (conn->sess->sess_ops->MaxBurstLength -
next_burst_len);
next_burst_len = 0;
datain->flags |= ISCSI_FLAG_CMD_FINAL;
if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
datain->flags |= ISCSI_FLAG_DATA_ACK;
}
}
datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
datain->offset = read_data_done;
if (!dr->recovery) {
cmd->next_burst_len = next_burst_len;
cmd->read_data_done += datain->length;
} else {
dr->next_burst_len = next_burst_len;
dr->read_data_done += datain->length;
}
if (!dr->recovery) {
if (datain->flags & ISCSI_FLAG_DATA_STATUS)
dr->dr_complete = DATAIN_COMPLETE_NORMAL;
return dr;
}
if (!dr->runlength) {
if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
dr->dr_complete =
(dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
DATAIN_COMPLETE_CONNECTION_RECOVERY;
}
} else {
if ((dr->begrun + dr->runlength) == dr->data_sn) {
dr->dr_complete =
(dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
DATAIN_COMPLETE_CONNECTION_RECOVERY;
}
}
return dr;
}
/*
* For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=Yes.
*/
static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes(
struct iscsit_cmd *cmd,
struct iscsi_datain *datain)
{
u32 offset, read_data_done, read_data_left, seq_send_order;
struct iscsit_conn *conn = cmd->conn;
struct iscsi_datain_req *dr;
struct iscsi_seq *seq;
dr = iscsit_get_datain_req(cmd);
if (!dr)
return NULL;
if (dr->recovery && dr->generate_recovery_values) {
if (iscsit_create_recovery_datain_values_datasequenceinorder_no(
cmd, dr) < 0)
return NULL;
dr->generate_recovery_values = 0;
}
read_data_done = (!dr->recovery) ?
cmd->read_data_done : dr->read_data_done;
seq_send_order = (!dr->recovery) ?
cmd->seq_send_order : dr->seq_send_order;
read_data_left = (cmd->se_cmd.data_length - read_data_done);
if (!read_data_left) {
pr_err("ITT: 0x%08x read_data_left is zero!\n",
cmd->init_task_tag);
return NULL;
}
seq = iscsit_get_seq_holder_for_datain(cmd, seq_send_order);
if (!seq)
return NULL;
seq->sent = 1;
if (!dr->recovery && !seq->next_burst_len)
seq->first_datasn = cmd->data_sn;
offset = (seq->offset + seq->next_burst_len);
if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
cmd->se_cmd.data_length) {
datain->length = (cmd->se_cmd.data_length - offset);
datain->offset = offset;
datain->flags |= ISCSI_FLAG_CMD_FINAL;
if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
datain->flags |= ISCSI_FLAG_DATA_ACK;
seq->next_burst_len = 0;
seq_send_order++;
} else {
if ((seq->next_burst_len +
conn->conn_ops->MaxRecvDataSegmentLength) <
conn->sess->sess_ops->MaxBurstLength) {
datain->length =
conn->conn_ops->MaxRecvDataSegmentLength;
datain->offset = (seq->offset + seq->next_burst_len);
seq->next_burst_len += datain->length;
} else {
datain->length = (conn->sess->sess_ops->MaxBurstLength -
seq->next_burst_len);
datain->offset = (seq->offset + seq->next_burst_len);
datain->flags |= ISCSI_FLAG_CMD_FINAL;
if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
datain->flags |= ISCSI_FLAG_DATA_ACK;
seq->next_burst_len = 0;
seq_send_order++;
}
}
if ((read_data_done + datain->length) == cmd->se_cmd.data_length)
datain->flags |= ISCSI_FLAG_DATA_STATUS;
datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
if (!dr->recovery) {
cmd->seq_send_order = seq_send_order;
cmd->read_data_done += datain->length;
} else {
dr->seq_send_order = seq_send_order;
dr->read_data_done += datain->length;
}
if (!dr->recovery) {
if (datain->flags & ISCSI_FLAG_CMD_FINAL)
seq->last_datasn = datain->data_sn;
if (datain->flags & ISCSI_FLAG_DATA_STATUS)
dr->dr_complete = DATAIN_COMPLETE_NORMAL;
return dr;
}
if (!dr->runlength) {
if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
dr->dr_complete =
(dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
DATAIN_COMPLETE_CONNECTION_RECOVERY;
}
} else {
if ((dr->begrun + dr->runlength) == dr->data_sn) {
dr->dr_complete =
(dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
DATAIN_COMPLETE_CONNECTION_RECOVERY;
}
}
return dr;
}
/*
* For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=No.
*/
static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no(
struct iscsit_cmd *cmd,
struct iscsi_datain *datain)
{
u32 next_burst_len, read_data_done, read_data_left;
struct iscsit_conn *conn = cmd->conn;
struct iscsi_datain_req *dr;
struct iscsi_pdu *pdu;
dr = iscsit_get_datain_req(cmd);
if (!dr)
return NULL;
if (dr->recovery && dr->generate_recovery_values) {
if (iscsit_create_recovery_datain_values_datasequenceinorder_yes(
cmd, dr) < 0)
return NULL;
dr->generate_recovery_values = 0;
}
next_burst_len = (!dr->recovery) ?
cmd->next_burst_len : dr->next_burst_len;
read_data_done = (!dr->recovery) ?
cmd->read_data_done : dr->read_data_done;
read_data_left = (cmd->se_cmd.data_length - read_data_done);
if (!read_data_left) {
pr_err("ITT: 0x%08x read_data_left is zero!\n",
cmd->init_task_tag);
return dr;
}
pdu = iscsit_get_pdu_holder_for_seq(cmd, NULL);
if (!pdu)
return dr;
if ((read_data_done + pdu->length) == cmd->se_cmd.data_length) {
pdu->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS);
if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
pdu->flags |= ISCSI_FLAG_DATA_ACK;
next_burst_len = 0;
} else {
if ((next_burst_len + conn->conn_ops->MaxRecvDataSegmentLength) <
conn->sess->sess_ops->MaxBurstLength)
next_burst_len += pdu->length;
else {
pdu->flags |= ISCSI_FLAG_CMD_FINAL;
if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
pdu->flags |= ISCSI_FLAG_DATA_ACK;
next_burst_len = 0;
}
}
pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
if (!dr->recovery) {
cmd->next_burst_len = next_burst_len;
cmd->read_data_done += pdu->length;
} else {
dr->next_burst_len = next_burst_len;
dr->read_data_done += pdu->length;
}
datain->flags = pdu->flags;
datain->length = pdu->length;
datain->offset = pdu->offset;
datain->data_sn = pdu->data_sn;
if (!dr->recovery) {
if (datain->flags & ISCSI_FLAG_DATA_STATUS)
dr->dr_complete = DATAIN_COMPLETE_NORMAL;
return dr;
}
if (!dr->runlength) {
if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
dr->dr_complete =
(dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
DATAIN_COMPLETE_CONNECTION_RECOVERY;
}
} else {
if ((dr->begrun + dr->runlength) == dr->data_sn) {
dr->dr_complete =
(dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
DATAIN_COMPLETE_CONNECTION_RECOVERY;
}
}
return dr;
}
/*
* For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=No.
*/
static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no(
struct iscsit_cmd *cmd,
struct iscsi_datain *datain)
{
u32 read_data_done, read_data_left, seq_send_order;
struct iscsit_conn *conn = cmd->conn;
struct iscsi_datain_req *dr;
struct iscsi_pdu *pdu;
struct iscsi_seq *seq = NULL;
dr = iscsit_get_datain_req(cmd);
if (!dr)
return NULL;
if (dr->recovery && dr->generate_recovery_values) {
if (iscsit_create_recovery_datain_values_datasequenceinorder_no(
cmd, dr) < 0)
return NULL;
dr->generate_recovery_values = 0;
}
read_data_done = (!dr->recovery) ?
cmd->read_data_done : dr->read_data_done;
seq_send_order = (!dr->recovery) ?
cmd->seq_send_order : dr->seq_send_order;
read_data_left = (cmd->se_cmd.data_length - read_data_done);
if (!read_data_left) {
pr_err("ITT: 0x%08x read_data_left is zero!\n",
cmd->init_task_tag);
return NULL;
}
seq = iscsit_get_seq_holder_for_datain(cmd, seq_send_order);
if (!seq)
return NULL;
seq->sent = 1;
if (!dr->recovery && !seq->next_burst_len)
seq->first_datasn = cmd->data_sn;
pdu = iscsit_get_pdu_holder_for_seq(cmd, seq);
if (!pdu)
return NULL;
if (seq->pdu_send_order == seq->pdu_count) {
pdu->flags |= ISCSI_FLAG_CMD_FINAL;
if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
pdu->flags |= ISCSI_FLAG_DATA_ACK;
seq->next_burst_len = 0;
seq_send_order++;
} else
seq->next_burst_len += pdu->length;
if ((read_data_done + pdu->length) == cmd->se_cmd.data_length)
pdu->flags |= ISCSI_FLAG_DATA_STATUS;
pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
if (!dr->recovery) {
cmd->seq_send_order = seq_send_order;
cmd->read_data_done += pdu->length;
} else {
dr->seq_send_order = seq_send_order;
dr->read_data_done += pdu->length;
}
datain->flags = pdu->flags;
datain->length = pdu->length;
datain->offset = pdu->offset;
datain->data_sn = pdu->data_sn;
if (!dr->recovery) {
if (datain->flags & ISCSI_FLAG_CMD_FINAL)
seq->last_datasn = datain->data_sn;
if (datain->flags & ISCSI_FLAG_DATA_STATUS)
dr->dr_complete = DATAIN_COMPLETE_NORMAL;
return dr;
}
if (!dr->runlength) {
if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
dr->dr_complete =
(dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
DATAIN_COMPLETE_CONNECTION_RECOVERY;
}
} else {
if ((dr->begrun + dr->runlength) == dr->data_sn) {
dr->dr_complete =
(dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
DATAIN_COMPLETE_CONNECTION_RECOVERY;
}
}
return dr;
}
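/*
 * Dispatcher: pick the value-generation strategy that matches the
 * session's negotiated DataSequenceInOrder / DataPDUInOrder settings.
 */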
struct iscsi_datain_req *iscsit_get_datain_values(
struct iscsit_cmd *cmd,
struct iscsi_datain *datain)
{
struct iscsit_conn *conn = cmd->conn;
if (conn->sess->sess_ops->DataSequenceInOrder &&
conn->sess->sess_ops->DataPDUInOrder)
return iscsit_set_datain_values_yes_and_yes(cmd, datain);
else if (!conn->sess->sess_ops->DataSequenceInOrder &&
conn->sess->sess_ops->DataPDUInOrder)
return iscsit_set_datain_values_no_and_yes(cmd, datain);
else if (conn->sess->sess_ops->DataSequenceInOrder &&
!conn->sess->sess_ops->DataPDUInOrder)
return iscsit_set_datain_values_yes_and_no(cmd, datain);
else if (!conn->sess->sess_ops->DataSequenceInOrder &&
!conn->sess->sess_ops->DataPDUInOrder)
return iscsit_set_datain_values_no_and_no(cmd, datain);
return NULL;
}
EXPORT_SYMBOL(iscsit_get_datain_values);
|
linux-master
|
drivers/target/iscsi/iscsi_target_datain_values.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016 Chelsio Communications, Inc.
*/
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <asm/unaligned.h>
#include <net/tcp.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "cxgbit.h"
struct sge_opaque_hdr {
void *dev;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
};
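/*
 * cxgbit_digest_len[] is indexed by the ULP submode bitmask (HCRC = bit 0,
 * DCRC = bit 1) and gives the number of digest bytes added per PDU:
 * 4 for a header or data digest alone, 8 when both are enabled.
 */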
static const u8 cxgbit_digest_len[] = {0, 4, 4, 8};
#define TX_HDR_LEN (sizeof(struct sge_opaque_hdr) + \
sizeof(struct fw_ofld_tx_data_wr))
static struct sk_buff *
__cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len, bool iso)
{
struct sk_buff *skb = NULL;
u8 submode = 0;
int errcode;
static const u32 hdr_len = TX_HDR_LEN + ISCSI_HDR_LEN;
if (len) {
skb = alloc_skb_with_frags(hdr_len, len,
0, &errcode,
GFP_KERNEL);
if (!skb)
return NULL;
skb_reserve(skb, TX_HDR_LEN);
skb_reset_transport_header(skb);
__skb_put(skb, ISCSI_HDR_LEN);
skb->data_len = len;
skb->len += len;
submode |= (csk->submode & CXGBIT_SUBMODE_DCRC);
} else {
u32 iso_len = iso ? sizeof(struct cpl_tx_data_iso) : 0;
skb = alloc_skb(hdr_len + iso_len, GFP_KERNEL);
if (!skb)
return NULL;
skb_reserve(skb, TX_HDR_LEN + iso_len);
skb_reset_transport_header(skb);
__skb_put(skb, ISCSI_HDR_LEN);
}
submode |= (csk->submode & CXGBIT_SUBMODE_HCRC);
cxgbit_skcb_submode(skb) = submode;
cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[submode];
cxgbit_skcb_flags(skb) |= SKCBF_TX_NEED_HDR;
return skb;
}
static struct sk_buff *cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len)
{
return __cxgbit_alloc_skb(csk, len, false);
}
/*
* cxgbit_is_ofld_imm - check whether a packet can be sent as immediate data
* @skb: the packet
*
* Returns true if a packet can be sent as an offload WR with immediate
* data. We currently use the same limit as for Ethernet packets.
*/
static int cxgbit_is_ofld_imm(const struct sk_buff *skb)
{
int length = skb->len;
if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
length += sizeof(struct fw_ofld_tx_data_wr);
if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO))
length += sizeof(struct cpl_tx_data_iso);
return length <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
}
/*
 * cxgbit_sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Calculates the number of flits (8-byte units) needed for a scatter/gather
 * list that can hold the given number of entries: two flits of fixed
 * overhead covering the SGL header and first entry, plus three flits for
 * every additional pair of entries, rounded up.  For example, n = 4 gives
 * (3 * 3) / 2 + (3 & 1) + 2 = 7 flits.
 */
static inline unsigned int cxgbit_sgl_len(unsigned int n)
{
n--;
return (3 * n) / 2 + (n & 1) + 2;
}
/*
* cxgbit_calc_tx_flits_ofld - calculate # of flits for an offload packet
* @skb: the packet
*
* Returns the number of flits needed for the given offload packet.
* These packets are already fully constructed and no additional headers
* will be added.
*/
static unsigned int cxgbit_calc_tx_flits_ofld(const struct sk_buff *skb)
{
unsigned int flits, cnt;
if (cxgbit_is_ofld_imm(skb))
return DIV_ROUND_UP(skb->len, 8);
flits = skb_transport_offset(skb) / 8;
cnt = skb_shinfo(skb)->nr_frags;
if (skb_tail_pointer(skb) != skb_transport_header(skb))
cnt++;
return flits + cxgbit_sgl_len(cnt);
}
#define CXGBIT_ISO_FSLICE 0x1
#define CXGBIT_ISO_LSLICE 0x2
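/*
 * Build the CPL_TX_DATA_ISO header describing how the adapter should slice
 * this skb into individual DataIN PDUs.  The CPL is written into the skb's
 * headroom and immediately pulled back out; cxgbit_tx_data_wr() pushes it
 * again, together with the FW_ISCSI_TX_DATA_WR header, once credits have
 * been accounted for in cxgbit_push_tx_frames().
 */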
static void
cxgbit_cpl_tx_data_iso(struct sk_buff *skb, struct cxgbit_iso_info *iso_info)
{
struct cpl_tx_data_iso *cpl;
unsigned int submode = cxgbit_skcb_submode(skb);
unsigned int fslice = !!(iso_info->flags & CXGBIT_ISO_FSLICE);
unsigned int lslice = !!(iso_info->flags & CXGBIT_ISO_LSLICE);
cpl = __skb_push(skb, sizeof(*cpl));
cpl->op_to_scsi = htonl(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) |
CPL_TX_DATA_ISO_FIRST_V(fslice) |
CPL_TX_DATA_ISO_LAST_V(lslice) |
CPL_TX_DATA_ISO_CPLHDRLEN_V(0) |
CPL_TX_DATA_ISO_HDRCRC_V(submode & 1) |
CPL_TX_DATA_ISO_PLDCRC_V(((submode >> 1) & 1)) |
CPL_TX_DATA_ISO_IMMEDIATE_V(0) |
CPL_TX_DATA_ISO_SCSI_V(2));
cpl->ahs_len = 0;
cpl->mpdu = htons(DIV_ROUND_UP(iso_info->mpdu, 4));
cpl->burst_size = htonl(DIV_ROUND_UP(iso_info->burst_len, 4));
cpl->len = htonl(iso_info->len);
cpl->reserved2_seglen_offset = htonl(0);
cpl->datasn_offset = htonl(0);
cpl->buffer_offset = htonl(0);
cpl->reserved3 = 0;
__skb_pull(skb, sizeof(*cpl));
}
static void
cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen,
u32 len, u32 credits, u32 compl)
{
struct fw_ofld_tx_data_wr *req;
const struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
u32 submode = cxgbit_skcb_submode(skb);
u32 wr_ulp_mode = 0;
u32 hdr_size = sizeof(*req);
u32 opcode = FW_OFLD_TX_DATA_WR;
u32 immlen = 0;
u32 force = is_t5(lldi->adapter_type) ? TX_FORCE_V(!submode) :
T6_TX_FORCE_F;
if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) {
opcode = FW_ISCSI_TX_DATA_WR;
immlen += sizeof(struct cpl_tx_data_iso);
hdr_size += sizeof(struct cpl_tx_data_iso);
submode |= 8;
}
if (cxgbit_is_ofld_imm(skb))
immlen += dlen;
req = __skb_push(skb, hdr_size);
req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) |
FW_WR_COMPL_V(compl) |
FW_WR_IMMDLEN_V(immlen));
req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
FW_WR_LEN16_V(credits));
req->plen = htonl(len);
wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP_MODE_ISCSI) |
FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
req->tunnel_to_proxy = htonl(wr_ulp_mode | force |
FW_OFLD_TX_DATA_WR_SHOVE_F);
}
static void cxgbit_arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
kfree_skb(skb);
}
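/*
 * Credit-based transmit loop: for each skb queued on csk->txq, work out
 * how many 16-byte work-request credits it needs (immediate data vs. an
 * SGL measured in flits), send the one-off FlowC WR on the first transmit,
 * stop once wr_cred runs out, and otherwise prepend the
 * FW_OFLD_TX_DATA_WR / ISO headers and hand the skb to the L2T layer.
 */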
void cxgbit_push_tx_frames(struct cxgbit_sock *csk)
{
struct sk_buff *skb;
while (csk->wr_cred && ((skb = skb_peek(&csk->txq)) != NULL)) {
u32 dlen = skb->len;
u32 len = skb->len;
u32 credits_needed;
u32 compl = 0;
u32 flowclen16 = 0;
u32 iso_cpl_len = 0;
if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO)
iso_cpl_len = sizeof(struct cpl_tx_data_iso);
if (cxgbit_is_ofld_imm(skb))
credits_needed = DIV_ROUND_UP(dlen + iso_cpl_len, 16);
else
credits_needed = DIV_ROUND_UP((8 *
cxgbit_calc_tx_flits_ofld(skb)) +
iso_cpl_len, 16);
if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
credits_needed += DIV_ROUND_UP(
sizeof(struct fw_ofld_tx_data_wr), 16);
/*
 * Assumes the initial credit allocation is large enough to cover the
 * fw_flowc_wr plus the largest possible first payload.
 */
if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) {
flowclen16 = cxgbit_send_tx_flowc_wr(csk);
csk->wr_cred -= flowclen16;
csk->wr_una_cred += flowclen16;
}
if (csk->wr_cred < credits_needed) {
pr_debug("csk 0x%p, skb %u/%u, wr %d < %u.\n",
csk, skb->len, skb->data_len,
credits_needed, csk->wr_cred);
break;
}
__skb_unlink(skb, &csk->txq);
set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
skb->csum = (__force __wsum)(credits_needed + flowclen16);
csk->wr_cred -= credits_needed;
csk->wr_una_cred += credits_needed;
pr_debug("csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
csk, skb->len, skb->data_len, credits_needed,
csk->wr_cred, csk->wr_una_cred);
if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR)) {
len += cxgbit_skcb_tx_extralen(skb);
if ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) ||
(!before(csk->write_seq,
csk->snd_una + csk->snd_win))) {
compl = 1;
csk->wr_una_cred = 0;
}
cxgbit_tx_data_wr(csk, skb, dlen, len, credits_needed,
compl);
csk->snd_nxt += len;
} else if ((cxgbit_skcb_flags(skb) & SKCBF_TX_FLAG_COMPL) ||
(csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
struct cpl_close_con_req *req =
(struct cpl_close_con_req *)skb->data;
req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
csk->wr_una_cred = 0;
}
cxgbit_sock_enqueue_wr(csk, skb);
t4_set_arp_err_handler(skb, csk,
cxgbit_arp_failure_skb_discard);
pr_debug("csk 0x%p,%u, skb 0x%p, %u.\n",
csk, csk->tid, skb, len);
cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}
}
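/*
 * While lock_owner is set the submitting thread owns the socket; receive
 * handlers elsewhere in the driver are expected to defer their work onto
 * csk->backlogq rather than process it directly.  cxgbit_unlock_sock()
 * drains that backlog (re-checking after each batch) before clearing
 * lock_owner.
 */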
static void cxgbit_unlock_sock(struct cxgbit_sock *csk)
{
struct sk_buff_head backlogq;
struct sk_buff *skb;
void (*fn)(struct cxgbit_sock *, struct sk_buff *);
skb_queue_head_init(&backlogq);
spin_lock_bh(&csk->lock);
while (skb_queue_len(&csk->backlogq)) {
skb_queue_splice_init(&csk->backlogq, &backlogq);
spin_unlock_bh(&csk->lock);
while ((skb = __skb_dequeue(&backlogq))) {
fn = cxgbit_skcb_rx_backlog_fn(skb);
fn(csk, skb);
}
spin_lock_bh(&csk->lock);
}
csk->lock_owner = false;
spin_unlock_bh(&csk->lock);
}
static int cxgbit_queue_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
int ret = 0;
spin_lock_bh(&csk->lock);
csk->lock_owner = true;
spin_unlock_bh(&csk->lock);
if (unlikely((csk->com.state != CSK_STATE_ESTABLISHED) ||
signal_pending(current))) {
__kfree_skb(skb);
__skb_queue_purge(&csk->ppodq);
ret = -1;
goto unlock;
}
csk->write_seq += skb->len +
cxgbit_skcb_tx_extralen(skb);
skb_queue_splice_tail_init(&csk->ppodq, &csk->txq);
__skb_queue_tail(&csk->txq, skb);
cxgbit_push_tx_frames(csk);
unlock:
cxgbit_unlock_sock(csk);
return ret;
}
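/*
 * Attach the command's data pages to the skb as page frags (no copy):
 * walk t_data_sg starting at data_offset, take a reference on each page,
 * and if the payload is not a multiple of 4 append a freshly zeroed page
 * to carry the pad bytes.
 */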
static int
cxgbit_map_skb(struct iscsit_cmd *cmd, struct sk_buff *skb, u32 data_offset,
u32 data_length)
{
u32 i = 0, nr_frags = MAX_SKB_FRAGS;
u32 padding = ((-data_length) & 3);
struct scatterlist *sg;
struct page *page;
unsigned int page_off;
if (padding)
nr_frags--;
/*
* We know each entry in t_data_sg contains a page.
*/
sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE];
page_off = (data_offset % PAGE_SIZE);
while (data_length && (i < nr_frags)) {
u32 cur_len = min_t(u32, data_length, sg->length - page_off);
page = sg_page(sg);
get_page(page);
skb_fill_page_desc(skb, i, page, sg->offset + page_off,
cur_len);
skb->data_len += cur_len;
skb->len += cur_len;
skb->truesize += cur_len;
data_length -= cur_len;
page_off = 0;
sg = sg_next(sg);
i++;
}
if (data_length)
return -1;
if (padding) {
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
return -1;
skb_fill_page_desc(skb, i, page, 0, padding);
skb->data_len += padding;
skb->len += padding;
skb->truesize += padding;
}
return 0;
}
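/*
 * ISO transmit path: each skb carries a single DataIN header plus up to
 * max_iso_npdu PDUs worth of payload; the adapter segments it into
 * individual PDUs on the wire, so tx_extralen accounts for the digests of
 * every PDU and for the (num_pdu - 1) headers the hardware will insert.
 */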
static int
cxgbit_tx_datain_iso(struct cxgbit_sock *csk, struct iscsit_cmd *cmd,
struct iscsi_datain_req *dr)
{
struct iscsit_conn *conn = csk->conn;
struct sk_buff *skb;
struct iscsi_datain datain;
struct cxgbit_iso_info iso_info;
u32 data_length = cmd->se_cmd.data_length;
u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;
u32 num_pdu, plen, tx_data = 0;
bool task_sense = !!(cmd->se_cmd.se_cmd_flags &
SCF_TRANSPORT_TASK_SENSE);
bool set_statsn = false;
int ret = -1;
while (data_length) {
num_pdu = (data_length + mrdsl - 1) / mrdsl;
if (num_pdu > csk->max_iso_npdu)
num_pdu = csk->max_iso_npdu;
plen = num_pdu * mrdsl;
if (plen > data_length)
plen = data_length;
skb = __cxgbit_alloc_skb(csk, 0, true);
if (unlikely(!skb))
return -ENOMEM;
memset(skb->data, 0, ISCSI_HDR_LEN);
cxgbit_skcb_flags(skb) |= SKCBF_TX_ISO;
cxgbit_skcb_submode(skb) |= (csk->submode &
CXGBIT_SUBMODE_DCRC);
cxgbit_skcb_tx_extralen(skb) = (num_pdu *
cxgbit_digest_len[cxgbit_skcb_submode(skb)]) +
((num_pdu - 1) * ISCSI_HDR_LEN);
memset(&datain, 0, sizeof(struct iscsi_datain));
memset(&iso_info, 0, sizeof(iso_info));
if (!tx_data)
iso_info.flags |= CXGBIT_ISO_FSLICE;
if (!(data_length - plen)) {
iso_info.flags |= CXGBIT_ISO_LSLICE;
if (!task_sense) {
datain.flags = ISCSI_FLAG_DATA_STATUS;
iscsit_increment_maxcmdsn(cmd, conn->sess);
cmd->stat_sn = conn->stat_sn++;
set_statsn = true;
}
}
iso_info.burst_len = num_pdu * mrdsl;
iso_info.mpdu = mrdsl;
iso_info.len = ISCSI_HDR_LEN + plen;
cxgbit_cpl_tx_data_iso(skb, &iso_info);
datain.offset = tx_data;
datain.data_sn = cmd->data_sn - 1;
iscsit_build_datain_pdu(cmd, conn, &datain,
(struct iscsi_data_rsp *)skb->data,
set_statsn);
ret = cxgbit_map_skb(cmd, skb, tx_data, plen);
if (unlikely(ret)) {
__kfree_skb(skb);
goto out;
}
ret = cxgbit_queue_skb(csk, skb);
if (unlikely(ret))
goto out;
tx_data += plen;
data_length -= plen;
cmd->read_data_done += plen;
cmd->data_sn += num_pdu;
}
dr->dr_complete = DATAIN_COMPLETE_NORMAL;
return 0;
out:
return ret;
}
static int
cxgbit_tx_datain(struct cxgbit_sock *csk, struct iscsit_cmd *cmd,
const struct iscsi_datain *datain)
{
struct sk_buff *skb;
int ret = 0;
skb = cxgbit_alloc_skb(csk, 0);
if (unlikely(!skb))
return -ENOMEM;
memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);
if (datain->length) {
cxgbit_skcb_submode(skb) |= (csk->submode &
CXGBIT_SUBMODE_DCRC);
cxgbit_skcb_tx_extralen(skb) =
cxgbit_digest_len[cxgbit_skcb_submode(skb)];
}
ret = cxgbit_map_skb(cmd, skb, datain->offset, datain->length);
if (ret < 0) {
__kfree_skb(skb);
return ret;
}
return cxgbit_queue_skb(csk, skb);
}
static int
cxgbit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
struct iscsi_datain_req *dr,
const struct iscsi_datain *datain)
{
struct cxgbit_sock *csk = conn->context;
u32 data_length = cmd->se_cmd.data_length;
u32 padding = ((-data_length) & 3);
u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;
if ((data_length > mrdsl) && (!dr->recovery) &&
(!padding) && (!datain->offset) && csk->max_iso_npdu) {
atomic_long_add(data_length - datain->length,
&conn->sess->tx_data_octets);
return cxgbit_tx_datain_iso(csk, cmd, dr);
}
return cxgbit_tx_datain(csk, cmd, datain);
}
static int
cxgbit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
const void *data_buf, u32 data_buf_len)
{
struct cxgbit_sock *csk = conn->context;
struct sk_buff *skb;
u32 padding = ((-data_buf_len) & 3);
skb = cxgbit_alloc_skb(csk, data_buf_len + padding);
if (unlikely(!skb))
return -ENOMEM;
memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);
if (data_buf_len) {
u32 pad_bytes = 0;
skb_store_bits(skb, ISCSI_HDR_LEN, data_buf, data_buf_len);
if (padding)
skb_store_bits(skb, ISCSI_HDR_LEN + data_buf_len,
&pad_bytes, padding);
}
cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[
cxgbit_skcb_submode(skb)];
return cxgbit_queue_skb(csk, skb);
}
int
cxgbit_xmit_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
struct iscsi_datain_req *dr, const void *buf, u32 buf_len)
{
if (dr)
return cxgbit_xmit_datain_pdu(conn, cmd, dr, buf);
else
return cxgbit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
}
int cxgbit_validate_params(struct iscsit_conn *conn)
{
struct cxgbit_sock *csk = conn->context;
struct cxgbit_device *cdev = csk->com.cdev;
struct iscsi_param *param;
u32 max_xmitdsl;
param = iscsi_find_param_from_key(MAXXMITDATASEGMENTLENGTH,
conn->param_list);
if (!param)
return -1;
if (kstrtou32(param->value, 0, &max_xmitdsl) < 0)
return -1;
if (max_xmitdsl > cdev->mdsl) {
if (iscsi_change_param_sprintf(
conn, "MaxXmitDataSegmentLength=%u", cdev->mdsl))
return -1;
}
return 0;
}
static int cxgbit_set_digest(struct cxgbit_sock *csk)
{
struct iscsit_conn *conn = csk->conn;
struct iscsi_param *param;
param = iscsi_find_param_from_key(HEADERDIGEST, conn->param_list);
if (!param) {
pr_err("param not found key %s\n", HEADERDIGEST);
return -1;
}
if (!strcmp(param->value, CRC32C))
csk->submode |= CXGBIT_SUBMODE_HCRC;
param = iscsi_find_param_from_key(DATADIGEST, conn->param_list);
if (!param) {
csk->submode = 0;
pr_err("param not found key %s\n", DATADIGEST);
return -1;
}
if (!strcmp(param->value, CRC32C))
csk->submode |= CXGBIT_SUBMODE_DCRC;
if (cxgbit_setup_conn_digest(csk)) {
csk->submode = 0;
return -1;
}
return 0;
}
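/*
 * Work out how many PDUs one ISO skb may carry: the smaller of the burst
 * limit (MaxBurstLength / MaxRecvDataSegmentLength) and the largest ISO
 * payload the adapter accepts (CXGBIT_MAX_ISO_PAYLOAD rounded down to the
 * emss) divided by the per-PDU cost (header + MRDSL + digests).  A result
 * of 0 or 1 leaves ISO disabled for this connection.
 */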
static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
{
struct iscsit_conn *conn = csk->conn;
struct iscsi_conn_ops *conn_ops = conn->conn_ops;
struct iscsi_param *param;
u32 mrdsl, mbl;
u32 max_npdu, max_iso_npdu;
u32 max_iso_payload;
if (conn->login->leading_connection) {
param = iscsi_find_param_from_key(MAXBURSTLENGTH,
conn->param_list);
if (!param) {
pr_err("param not found key %s\n", MAXBURSTLENGTH);
return -1;
}
if (kstrtou32(param->value, 0, &mbl) < 0)
return -1;
} else {
mbl = conn->sess->sess_ops->MaxBurstLength;
}
mrdsl = conn_ops->MaxRecvDataSegmentLength;
max_npdu = mbl / mrdsl;
max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss);
max_iso_npdu = max_iso_payload /
(ISCSI_HDR_LEN + mrdsl +
cxgbit_digest_len[csk->submode]);
csk->max_iso_npdu = min(max_npdu, max_iso_npdu);
if (csk->max_iso_npdu <= 1)
csk->max_iso_npdu = 0;
return 0;
}
/*
 * cxgbit_seq_pdu_inorder()
 * @csk: pointer to cxgbit socket structure
 *
 * Checks whether both DataSequenceInOrder and DataPDUInOrder were
 * negotiated to Yes for this connection.
 *
 * Return: -1 on error, 0 if both data sequences and data PDUs are
 * in order, 1 if either may arrive out of order.
 */
static int cxgbit_seq_pdu_inorder(struct cxgbit_sock *csk)
{
struct iscsit_conn *conn = csk->conn;
struct iscsi_param *param;
if (conn->login->leading_connection) {
param = iscsi_find_param_from_key(DATASEQUENCEINORDER,
conn->param_list);
if (!param) {
pr_err("param not found key %s\n", DATASEQUENCEINORDER);
return -1;
}
if (strcmp(param->value, YES))
return 1;
param = iscsi_find_param_from_key(DATAPDUINORDER,
conn->param_list);
if (!param) {
pr_err("param not found key %s\n", DATAPDUINORDER);
return -1;
}
if (strcmp(param->value, YES))
return 1;
} else {
if (!conn->sess->sess_ops->DataSequenceInOrder)
return 1;
if (!conn->sess->sess_ops->DataPDUInOrder)
return 1;
}
return 0;
}
static int cxgbit_set_params(struct iscsit_conn *conn)
{
struct cxgbit_sock *csk = conn->context;
struct cxgbit_device *cdev = csk->com.cdev;
struct cxgbi_ppm *ppm = *csk->com.cdev->lldi.iscsi_ppm;
struct iscsi_conn_ops *conn_ops = conn->conn_ops;
struct iscsi_param *param;
u8 erl;
if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl)
conn_ops->MaxRecvDataSegmentLength = cdev->mdsl;
if (cxgbit_set_digest(csk))
return -1;
if (conn->login->leading_connection) {
param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL,
conn->param_list);
if (!param) {
pr_err("param not found key %s\n", ERRORRECOVERYLEVEL);
return -1;
}
if (kstrtou8(param->value, 0, &erl) < 0)
return -1;
} else {
erl = conn->sess->sess_ops->ErrorRecoveryLevel;
}
if (!erl) {
int ret;
ret = cxgbit_seq_pdu_inorder(csk);
if (ret < 0) {
return -1;
} else if (ret > 0) {
if (is_t5(cdev->lldi.adapter_type))
goto enable_ddp;
else
return 0;
}
if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) {
if (cxgbit_set_iso_npdu(csk))
return -1;
}
enable_ddp:
if (test_bit(CDEV_DDP_ENABLE, &cdev->flags)) {
if (cxgbit_setup_conn_pgidx(csk,
ppm->tformat.pgsz_idx_dflt))
return -1;
set_bit(CSK_DDP_ENABLE, &csk->com.flags);
}
}
return 0;
}
int
cxgbit_put_login_tx(struct iscsit_conn *conn, struct iscsi_login *login,
u32 length)
{
struct cxgbit_sock *csk = conn->context;
struct sk_buff *skb;
u32 padding_buf = 0;
u8 padding = ((-length) & 3);
skb = cxgbit_alloc_skb(csk, length + padding);
if (!skb)
return -ENOMEM;
skb_store_bits(skb, 0, login->rsp, ISCSI_HDR_LEN);
skb_store_bits(skb, ISCSI_HDR_LEN, login->rsp_buf, length);
if (padding)
skb_store_bits(skb, ISCSI_HDR_LEN + length,
&padding_buf, padding);
if (login->login_complete) {
if (cxgbit_set_params(conn)) {
kfree_skb(skb);
return -1;
}
set_bit(CSK_LOGIN_DONE, &csk->com.flags);
}
if (cxgbit_queue_skb(csk, skb))
return -1;
if ((!login->login_complete) && (!login->login_failed))
schedule_delayed_work(&conn->login_work, 0);
return 0;
}
static void
cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
unsigned int nents, u32 skip)
{
struct skb_seq_state st;
const u8 *buf;
unsigned int consumed = 0, buf_len;
struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(skb);
skb_prepare_seq_read(skb, pdu_cb->doffset,
pdu_cb->doffset + pdu_cb->dlen,
&st);
while (true) {
buf_len = skb_seq_read(consumed, &buf, &st);
if (!buf_len) {
skb_abort_seq_read(&st);
break;
}
consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf,
buf_len, skip + consumed);
}
}
static struct iscsit_cmd *cxgbit_allocate_cmd(struct cxgbit_sock *csk)
{
struct iscsit_conn *conn = csk->conn;
struct cxgbi_ppm *ppm = cdev2ppm(csk->com.cdev);
struct cxgbit_cmd *ccmd;
struct iscsit_cmd *cmd;
cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd) {
pr_err("Unable to allocate iscsit_cmd + cxgbit_cmd\n");
return NULL;
}
ccmd = iscsit_priv_cmd(cmd);
ccmd->ttinfo.tag = ppm->tformat.no_ddp_mask;
ccmd->setup_ddp = true;
return cmd;
}
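/*
 * Immediate data handling: when the whole payload arrived in a single LRO
 * fragment (cxgbit_handle_scsi_cmd() set SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC),
 * point the command's t_data_sg straight at that page instead of copying;
 * otherwise copy the bytes out of the skb into the already-allocated
 * scatterlist.
 */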
static int
cxgbit_handle_immediate_data(struct iscsit_cmd *cmd, struct iscsi_scsi_req *hdr,
u32 length)
{
struct iscsit_conn *conn = cmd->conn;
struct cxgbit_sock *csk = conn->context;
struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
pr_err("ImmediateData CRC32C DataDigest error\n");
if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
pr_err("Unable to recover from"
" Immediate Data digest failure while"
" in ERL=0.\n");
iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
(unsigned char *)hdr);
return IMMEDIATE_DATA_CANNOT_RECOVER;
}
iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
(unsigned char *)hdr);
return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
}
if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
struct skb_shared_info *ssi = skb_shinfo(csk->skb);
skb_frag_t *dfrag = &ssi->frags[pdu_cb->dfrag_idx];
sg_init_table(&ccmd->sg, 1);
sg_set_page(&ccmd->sg, skb_frag_page(dfrag),
skb_frag_size(dfrag), skb_frag_off(dfrag));
get_page(skb_frag_page(dfrag));
cmd->se_cmd.t_data_sg = &ccmd->sg;
cmd->se_cmd.t_data_nents = 1;
ccmd->release = true;
} else {
struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0];
u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE));
cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0);
}
cmd->write_data_done += pdu_cb->dlen;
if (cmd->write_data_done == cmd->se_cmd.data_length) {
spin_lock_bh(&cmd->istate_lock);
cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
spin_unlock_bh(&cmd->istate_lock);
}
return IMMEDIATE_DATA_NORMAL_OPERATION;
}
static int
cxgbit_get_immediate_data(struct iscsit_cmd *cmd, struct iscsi_scsi_req *hdr,
bool dump_payload)
{
struct iscsit_conn *conn = cmd->conn;
int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
/*
* Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
*/
if (dump_payload)
goto after_immediate_data;
immed_ret = cxgbit_handle_immediate_data(cmd, hdr,
cmd->first_burst_len);
after_immediate_data:
if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
/*
* A PDU/CmdSN carrying Immediate Data passed
* DataCRC, check against ExpCmdSN/MaxCmdSN if
* Immediate Bit is not set.
*/
cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
(unsigned char *)hdr,
hdr->cmdsn);
if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
return -1;
if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
target_put_sess_cmd(&cmd->se_cmd);
return 0;
} else if (cmd->unsolicited_data) {
iscsit_set_unsolicited_dataout(cmd);
}
} else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
/*
* Immediate Data failed DataCRC and ERL>=1,
* silently drop this PDU and let the initiator
* plug the CmdSN gap.
*
* FIXME: Send Unsolicited NOPIN with reserved
* TTT here to help the initiator figure out
* the missing CmdSN, although they should be
* intelligent enough to determine the missing
* CmdSN and issue a retry to plug the sequence.
*/
cmd->i_state = ISTATE_REMOVE;
iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
} else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
return -1;
return 0;
}
static int
cxgbit_handle_scsi_cmd(struct cxgbit_sock *csk, struct iscsit_cmd *cmd)
{
struct iscsit_conn *conn = csk->conn;
struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)pdu_cb->hdr;
int rc;
bool dump_payload = false;
rc = iscsit_setup_scsi_cmd(conn, cmd, (unsigned char *)hdr);
if (rc < 0)
return rc;
if (pdu_cb->dlen && (pdu_cb->dlen == cmd->se_cmd.data_length) &&
(pdu_cb->nr_dfrags == 1))
cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
if (rc < 0)
return 0;
else if (rc > 0)
dump_payload = true;
if (!pdu_cb->dlen)
return 0;
return cxgbit_get_immediate_data(cmd, hdr, dump_payload);
}
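/*
 * Data-Out handling: if the adapter already placed the payload via DDP
 * (PDUCBF_RX_DDP_CMP), only the bookkeeping in __iscsit_check_dataout_hdr()
 * is needed; otherwise the payload is copied from the skb into the
 * command's scatterlist.  Once the final Data-Out of a DDP'd command
 * arrives, its premapped SGL is unmapped.
 */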
static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
{
struct scatterlist *sg_start;
struct iscsit_conn *conn = csk->conn;
struct iscsit_cmd *cmd = NULL;
struct cxgbit_cmd *ccmd;
struct cxgbi_task_tag_info *ttinfo;
struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
struct iscsi_data *hdr = (struct iscsi_data *)pdu_cb->hdr;
u32 data_offset = be32_to_cpu(hdr->offset);
u32 data_len = ntoh24(hdr->dlength);
int rc, sg_nents, sg_off;
bool dcrc_err = false;
if (pdu_cb->flags & PDUCBF_RX_DDP_CMP) {
u32 offset = be32_to_cpu(hdr->offset);
u32 ddp_data_len;
bool success = false;
cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 0);
if (!cmd)
return 0;
ddp_data_len = offset - cmd->write_data_done;
atomic_long_add(ddp_data_len, &conn->sess->rx_data_octets);
cmd->write_data_done = offset;
cmd->next_burst_len = ddp_data_len;
cmd->data_sn = be32_to_cpu(hdr->datasn);
rc = __iscsit_check_dataout_hdr(conn, (unsigned char *)hdr,
cmd, data_len, &success);
if (rc < 0)
return rc;
else if (!success)
return 0;
} else {
rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd);
if (rc < 0)
return rc;
else if (!cmd)
return 0;
}
if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
" DataSN: 0x%08x\n",
hdr->itt, hdr->offset, data_len,
hdr->datasn);
dcrc_err = true;
goto check_payload;
}
pr_debug("DataOut data_len: %u, "
"write_data_done: %u, data_length: %u\n",
data_len, cmd->write_data_done,
cmd->se_cmd.data_length);
if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) {
u32 skip = data_offset % PAGE_SIZE;
sg_off = data_offset / PAGE_SIZE;
sg_start = &cmd->se_cmd.t_data_sg[sg_off];
sg_nents = max(1UL, DIV_ROUND_UP(skip + data_len, PAGE_SIZE));
cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
}
ccmd = iscsit_priv_cmd(cmd);
ttinfo = &ccmd->ttinfo;
if (ccmd->release && ttinfo->sgl &&
(cmd->se_cmd.data_length == (cmd->write_data_done + data_len))) {
struct cxgbit_device *cdev = csk->com.cdev;
struct cxgbi_ppm *ppm = cdev2ppm(cdev);
dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents,
DMA_FROM_DEVICE);
ttinfo->nents = 0;
ttinfo->sgl = NULL;
}
check_payload:
rc = iscsit_check_dataout_payload(cmd, hdr, dcrc_err);
if (rc < 0)
return rc;
return 0;
}
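/*
 * Note (added for clarity): NOP-OUT handling mirrors the software iSCSI
 * target: a DataCRC error is fatal at ERL=0 and silently dropped otherwise;
 * ping data, if any, is copied out of the LRO skb into a NUL-terminated
 * buffer hung off cmd->buf_ptr before iscsit_process_nop_out() runs.
 */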
static int cxgbit_handle_nop_out(struct cxgbit_sock *csk, struct iscsit_cmd *cmd)
{
struct iscsit_conn *conn = csk->conn;
struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
struct iscsi_nopout *hdr = (struct iscsi_nopout *)pdu_cb->hdr;
unsigned char *ping_data = NULL;
u32 payload_length = pdu_cb->dlen;
int ret;
ret = iscsit_setup_nop_out(conn, cmd, hdr);
if (ret < 0)
return 0;
if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
pr_err("Unable to recover from"
" NOPOUT Ping DataCRC failure while in"
" ERL=0.\n");
ret = -1;
goto out;
} else {
/*
* drop this PDU and let the
* initiator plug the CmdSN gap.
*/
pr_info("Dropping NOPOUT"
" Command CmdSN: 0x%08x due to"
" DataCRC error.\n", hdr->cmdsn);
ret = 0;
goto out;
}
}
/*
* Handle NOP-OUT payload for traditional iSCSI sockets
*/
if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
if (!ping_data) {
pr_err("Unable to allocate memory for"
" NOPOUT ping data.\n");
ret = -1;
goto out;
}
skb_copy_bits(csk->skb, pdu_cb->doffset,
ping_data, payload_length);
ping_data[payload_length] = '\0';
/*
* Attach ping data to struct iscsit_cmd->buf_ptr.
*/
cmd->buf_ptr = ping_data;
cmd->buf_ptr_size = payload_length;
pr_debug("Got %u bytes of NOPOUT ping"
" data.\n", payload_length);
pr_debug("Ping Data: \"%s\"\n", ping_data);
}
return iscsit_process_nop_out(conn, cmd, hdr);
out:
if (cmd)
iscsit_free_cmd(cmd, false);
return ret;
}
static int
cxgbit_handle_text_cmd(struct cxgbit_sock *csk, struct iscsit_cmd *cmd)
{
struct iscsit_conn *conn = csk->conn;
struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
struct iscsi_text *hdr = (struct iscsi_text *)pdu_cb->hdr;
u32 payload_length = pdu_cb->dlen;
int rc;
unsigned char *text_in = NULL;
rc = iscsit_setup_text_cmd(conn, cmd, hdr);
if (rc < 0)
return rc;
if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
pr_err("Unable to recover from"
" Text Data digest failure while in"
" ERL=0.\n");
goto reject;
} else {
/*
* drop this PDU and let the
* initiator plug the CmdSN gap.
*/
pr_info("Dropping Text"
" Command CmdSN: 0x%08x due to"
" DataCRC error.\n", hdr->cmdsn);
return 0;
}
}
if (payload_length) {
text_in = kzalloc(payload_length, GFP_KERNEL);
if (!text_in) {
pr_err("Unable to allocate text_in of payload_length: %u\n",
payload_length);
return -ENOMEM;
}
skb_copy_bits(csk->skb, pdu_cb->doffset,
text_in, payload_length);
text_in[payload_length - 1] = '\0';
cmd->text_in_ptr = text_in;
}
return iscsit_process_text_cmd(conn, cmd, hdr);
reject:
return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
pdu_cb->hdr);
}
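/*
 * Note (added for clarity): per-opcode dispatch for offloaded RX. Opcodes
 * that always start a new exchange (SCSI command, TMF, logout) allocate a
 * fresh iscsit_cmd; NOP-OUT and Text allocate one only when the target
 * transfer tag is the reserved 0xFFFFFFFF, otherwise the existing command is
 * looked up by ITT (or left NULL for NOP-OUT). Allocation failures are
 * answered with a "bookmark no resources" reject.
 */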
static int cxgbit_target_rx_opcode(struct cxgbit_sock *csk)
{
struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
struct iscsi_hdr *hdr = (struct iscsi_hdr *)pdu_cb->hdr;
struct iscsit_conn *conn = csk->conn;
struct iscsit_cmd *cmd = NULL;
u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
int ret = -EINVAL;
switch (opcode) {
case ISCSI_OP_SCSI_CMD:
cmd = cxgbit_allocate_cmd(csk);
if (!cmd)
goto reject;
ret = cxgbit_handle_scsi_cmd(csk, cmd);
break;
case ISCSI_OP_SCSI_DATA_OUT:
ret = cxgbit_handle_iscsi_dataout(csk);
break;
case ISCSI_OP_NOOP_OUT:
if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
cmd = cxgbit_allocate_cmd(csk);
if (!cmd)
goto reject;
}
ret = cxgbit_handle_nop_out(csk, cmd);
break;
case ISCSI_OP_SCSI_TMFUNC:
cmd = cxgbit_allocate_cmd(csk);
if (!cmd)
goto reject;
ret = iscsit_handle_task_mgt_cmd(conn, cmd,
(unsigned char *)hdr);
break;
case ISCSI_OP_TEXT:
if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
if (!cmd)
goto reject;
} else {
cmd = cxgbit_allocate_cmd(csk);
if (!cmd)
goto reject;
}
ret = cxgbit_handle_text_cmd(csk, cmd);
break;
case ISCSI_OP_LOGOUT:
cmd = cxgbit_allocate_cmd(csk);
if (!cmd)
goto reject;
ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
if (ret > 0)
wait_for_completion_timeout(&conn->conn_logout_comp,
SECONDS_FOR_LOGOUT_COMP
* HZ);
break;
case ISCSI_OP_SNACK:
ret = iscsit_handle_snack(conn, (unsigned char *)hdr);
break;
default:
pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
dump_stack();
break;
}
return ret;
reject:
	return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES,
				 (unsigned char *)hdr);
}
static int cxgbit_rx_opcode(struct cxgbit_sock *csk)
{
struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
struct iscsit_conn *conn = csk->conn;
struct iscsi_hdr *hdr = pdu_cb->hdr;
u8 opcode;
if (pdu_cb->flags & PDUCBF_RX_HCRC_ERR) {
atomic_long_inc(&conn->sess->conn_digest_errors);
goto transport_err;
}
if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
goto transport_err;
opcode = hdr->opcode & ISCSI_OPCODE_MASK;
if (conn->sess->sess_ops->SessionType &&
((!(opcode & ISCSI_OP_TEXT)) ||
(!(opcode & ISCSI_OP_LOGOUT)))) {
pr_err("Received illegal iSCSI Opcode: 0x%02x"
" while in Discovery Session, rejecting.\n", opcode);
iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
(unsigned char *)hdr);
goto transport_err;
}
if (cxgbit_target_rx_opcode(csk) < 0)
goto transport_err;
return 0;
transport_err:
return -1;
}
static int cxgbit_rx_login_pdu(struct cxgbit_sock *csk)
{
struct iscsit_conn *conn = csk->conn;
struct iscsi_login *login = conn->login;
struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
struct iscsi_login_req *login_req;
login_req = (struct iscsi_login_req *)login->req;
memcpy(login_req, pdu_cb->hdr, sizeof(*login_req));
pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
" CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
login_req->flags, login_req->itt, login_req->cmdsn,
login_req->exp_statsn, login_req->cid, pdu_cb->dlen);
/*
* Setup the initial iscsi_login values from the leading
* login request PDU.
*/
if (login->first_request) {
login_req = (struct iscsi_login_req *)login->req;
login->leading_connection = (!login_req->tsih) ? 1 : 0;
login->current_stage = ISCSI_LOGIN_CURRENT_STAGE(
login_req->flags);
login->version_min = login_req->min_version;
login->version_max = login_req->max_version;
memcpy(login->isid, login_req->isid, 6);
login->cmd_sn = be32_to_cpu(login_req->cmdsn);
login->init_task_tag = login_req->itt;
login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
login->cid = be16_to_cpu(login_req->cid);
login->tsih = be16_to_cpu(login_req->tsih);
}
if (iscsi_target_check_login_request(conn, login) < 0)
return -1;
memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
skb_copy_bits(csk->skb, pdu_cb->doffset, login->req_buf, pdu_cb->dlen);
return 0;
}
static int
cxgbit_process_iscsi_pdu(struct cxgbit_sock *csk, struct sk_buff *skb, int idx)
{
struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, idx);
int ret;
cxgbit_rx_pdu_cb(skb) = pdu_cb;
csk->skb = skb;
if (!test_bit(CSK_LOGIN_DONE, &csk->com.flags)) {
ret = cxgbit_rx_login_pdu(csk);
set_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
} else {
ret = cxgbit_rx_opcode(csk);
}
return ret;
}
static void cxgbit_lro_skb_dump(struct sk_buff *skb)
{
struct skb_shared_info *ssi = skb_shinfo(skb);
struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
u8 i;
pr_info("skb 0x%p, head 0x%p, 0x%p, len %u,%u, frags %u.\n",
skb, skb->head, skb->data, skb->len, skb->data_len,
ssi->nr_frags);
pr_info("skb 0x%p, lro_cb, csk 0x%p, pdu %u, %u.\n",
skb, lro_cb->csk, lro_cb->pdu_idx, lro_cb->pdu_totallen);
for (i = 0; i < lro_cb->pdu_idx; i++, pdu_cb++)
pr_info("skb 0x%p, pdu %d, %u, f 0x%x, seq 0x%x, dcrc 0x%x, "
"frags %u.\n",
skb, i, pdu_cb->pdulen, pdu_cb->flags, pdu_cb->seq,
pdu_cb->ddigest, pdu_cb->frags);
for (i = 0; i < ssi->nr_frags; i++)
pr_info("skb 0x%p, frag %d, off %u, sz %u.\n",
skb, i, skb_frag_off(&ssi->frags[i]),
skb_frag_size(&ssi->frags[i]));
}
static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk)
{
struct sk_buff *skb = csk->lro_hskb;
struct skb_shared_info *ssi = skb_shinfo(skb);
u8 i;
memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
for (i = 0; i < ssi->nr_frags; i++)
put_page(skb_frag_page(&ssi->frags[i]));
ssi->nr_frags = 0;
skb->data_len = 0;
skb->truesize -= skb->len;
skb->len = 0;
}
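/*
 * Note (added for clarity): a single iSCSI PDU can be split across two LRO
 * skbs (header CPL in one, data/status CPLs in the next). The pieces are
 * accumulated in the per-connection csk->lro_hskb by copying the relevant
 * page fragments and pdu_cb state over; cxgbit_lro_hskb_reset() above drops
 * the borrowed page references once the merged PDU has been processed.
 */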
static void
cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx)
{
struct sk_buff *hskb = csk->lro_hskb;
struct cxgbit_lro_pdu_cb *hpdu_cb = cxgbit_skb_lro_pdu_cb(hskb, 0);
struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, pdu_idx);
struct skb_shared_info *hssi = skb_shinfo(hskb);
struct skb_shared_info *ssi = skb_shinfo(skb);
unsigned int len = 0;
if (pdu_cb->flags & PDUCBF_RX_HDR) {
u8 hfrag_idx = hssi->nr_frags;
hpdu_cb->flags |= pdu_cb->flags;
hpdu_cb->seq = pdu_cb->seq;
hpdu_cb->hdr = pdu_cb->hdr;
hpdu_cb->hlen = pdu_cb->hlen;
memcpy(&hssi->frags[hfrag_idx], &ssi->frags[pdu_cb->hfrag_idx],
sizeof(skb_frag_t));
get_page(skb_frag_page(&hssi->frags[hfrag_idx]));
hssi->nr_frags++;
hpdu_cb->frags++;
hpdu_cb->hfrag_idx = hfrag_idx;
len = skb_frag_size(&hssi->frags[hfrag_idx]);
hskb->len += len;
hskb->data_len += len;
hskb->truesize += len;
}
if (pdu_cb->flags & PDUCBF_RX_DATA) {
u8 dfrag_idx = hssi->nr_frags, i;
hpdu_cb->flags |= pdu_cb->flags;
hpdu_cb->dfrag_idx = dfrag_idx;
len = 0;
for (i = 0; i < pdu_cb->nr_dfrags; dfrag_idx++, i++) {
memcpy(&hssi->frags[dfrag_idx],
&ssi->frags[pdu_cb->dfrag_idx + i],
sizeof(skb_frag_t));
get_page(skb_frag_page(&hssi->frags[dfrag_idx]));
len += skb_frag_size(&hssi->frags[dfrag_idx]);
hssi->nr_frags++;
hpdu_cb->frags++;
}
hpdu_cb->dlen = pdu_cb->dlen;
hpdu_cb->doffset = hpdu_cb->hlen;
hpdu_cb->nr_dfrags = pdu_cb->nr_dfrags;
hskb->len += len;
hskb->data_len += len;
hskb->truesize += len;
}
if (pdu_cb->flags & PDUCBF_RX_STATUS) {
hpdu_cb->flags |= pdu_cb->flags;
if (hpdu_cb->flags & PDUCBF_RX_DATA)
hpdu_cb->flags &= ~PDUCBF_RX_DATA_DDPD;
hpdu_cb->ddigest = pdu_cb->ddigest;
hpdu_cb->pdulen = pdu_cb->pdulen;
}
}
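/*
 * Note (added for clarity): walks every PDU described by the LRO control
 * block. An incomplete leading PDU is first merged with whatever was carried
 * over in csk->lro_hskb and processed from there once its status arrives;
 * complete PDUs are processed in place; an incomplete trailing PDU is merged
 * into lro_hskb to wait for the next LRO skb.
 */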
static int cxgbit_process_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
u8 pdu_idx = 0, last_idx = 0;
int ret = 0;
if (!pdu_cb->complete) {
cxgbit_lro_skb_merge(csk, skb, 0);
if (pdu_cb->flags & PDUCBF_RX_STATUS) {
struct sk_buff *hskb = csk->lro_hskb;
ret = cxgbit_process_iscsi_pdu(csk, hskb, 0);
cxgbit_lro_hskb_reset(csk);
if (ret < 0)
goto out;
}
pdu_idx = 1;
}
if (lro_cb->pdu_idx)
last_idx = lro_cb->pdu_idx - 1;
for (; pdu_idx <= last_idx; pdu_idx++) {
ret = cxgbit_process_iscsi_pdu(csk, skb, pdu_idx);
if (ret < 0)
goto out;
}
if ((!lro_cb->complete) && lro_cb->pdu_idx)
cxgbit_lro_skb_merge(csk, skb, lro_cb->pdu_idx);
out:
return ret;
}
static int cxgbit_t5_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
int ret = -1;
if ((pdu_cb->flags & PDUCBF_RX_HDR) &&
(pdu_cb->seq != csk->rcv_nxt)) {
pr_info("csk 0x%p, tid 0x%x, seq 0x%x != 0x%x.\n",
csk, csk->tid, pdu_cb->seq, csk->rcv_nxt);
cxgbit_lro_skb_dump(skb);
return ret;
}
csk->rcv_nxt += lro_cb->pdu_totallen;
ret = cxgbit_process_lro_skb(csk, skb);
csk->rx_credits += lro_cb->pdu_totallen;
if (csk->rx_credits >= (csk->rcv_win / 4))
cxgbit_rx_data_ack(csk);
return ret;
}
static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
int ret;
ret = cxgbit_process_lro_skb(csk, skb);
if (ret)
return ret;
csk->rx_credits += lro_cb->pdu_totallen;
if (csk->rx_credits >= csk->rcv_win) {
csk->rx_credits = 0;
cxgbit_rx_data_ack(csk);
}
return 0;
}
static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
int ret = -1;
if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) {
if (is_t5(lldi->adapter_type))
ret = cxgbit_t5_rx_lro_skb(csk, skb);
else
ret = cxgbit_rx_lro_skb(csk, skb);
}
__kfree_skb(skb);
return ret;
}
static bool cxgbit_rxq_len(struct cxgbit_sock *csk, struct sk_buff_head *rxq)
{
spin_lock_bh(&csk->rxq.lock);
if (skb_queue_len(&csk->rxq)) {
skb_queue_splice_init(&csk->rxq, rxq);
spin_unlock_bh(&csk->rxq.lock);
return true;
}
spin_unlock_bh(&csk->rxq.lock);
return false;
}
static int cxgbit_wait_rxq(struct cxgbit_sock *csk)
{
struct sk_buff *skb;
struct sk_buff_head rxq;
skb_queue_head_init(&rxq);
wait_event_interruptible(csk->waitq, cxgbit_rxq_len(csk, &rxq));
if (signal_pending(current))
goto out;
while ((skb = __skb_dequeue(&rxq))) {
if (cxgbit_rx_skb(csk, skb))
goto out;
}
return 0;
out:
__skb_queue_purge(&rxq);
return -1;
}
int cxgbit_get_login_rx(struct iscsit_conn *conn, struct iscsi_login *login)
{
struct cxgbit_sock *csk = conn->context;
int ret = -1;
while (!test_and_clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags)) {
ret = cxgbit_wait_rxq(csk);
if (ret) {
clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
break;
}
}
return ret;
}
void cxgbit_get_rx_pdu(struct iscsit_conn *conn)
{
struct cxgbit_sock *csk = conn->context;
while (!kthread_should_stop()) {
iscsit_thread_check_cpumask(conn, current, 0);
if (cxgbit_wait_rxq(csk))
return;
}
}
|
linux-master
|
drivers/target/iscsi/cxgbit/cxgbit_target.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016 Chelsio Communications, Inc.
*/
#include "cxgbit.h"
static void
cxgbit_set_one_ppod(struct cxgbi_pagepod *ppod,
struct cxgbi_task_tag_info *ttinfo,
struct scatterlist **sg_pp, unsigned int *sg_off)
{
struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
unsigned int offset = sg_off ? *sg_off : 0;
dma_addr_t addr = 0UL;
unsigned int len = 0;
int i;
memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));
if (sg) {
addr = sg_dma_address(sg);
len = sg_dma_len(sg);
}
for (i = 0; i < PPOD_PAGES_MAX; i++) {
if (sg) {
ppod->addr[i] = cpu_to_be64(addr + offset);
offset += PAGE_SIZE;
if (offset == (len + sg->offset)) {
offset = 0;
sg = sg_next(sg);
if (sg) {
addr = sg_dma_address(sg);
len = sg_dma_len(sg);
}
}
} else {
ppod->addr[i] = 0ULL;
}
}
/*
* the fifth address needs to be repeated in the next ppod, so do
* not move sg
*/
if (sg_pp) {
*sg_pp = sg;
*sg_off = offset;
}
if (offset == len) {
offset = 0;
if (sg) {
sg = sg_next(sg);
if (sg)
addr = sg_dma_address(sg);
}
}
ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
}
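/*
 * Note (added for clarity): each page pod (ppod) starts with the shared
 * cxgbi_pagepod_hdr and carries PPOD_PAGES_MAX page addresses plus one extra
 * slot; that extra slot is filled with the address that also becomes the
 * first entry of the next ppod, which is what the "do not move sg" comment
 * above refers to. The idata work request built below writes npods such pods
 * into adapter memory at (idx << PPOD_SIZE_SHIFT) + ppm->llimit.
 */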
static struct sk_buff *
cxgbit_ppod_init_idata(struct cxgbit_device *cdev, struct cxgbi_ppm *ppm,
unsigned int idx, unsigned int npods, unsigned int tid)
{
struct ulp_mem_io *req;
struct ulptx_idata *idata;
unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
unsigned int dlen = npods << PPOD_SIZE_SHIFT;
unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
sizeof(struct ulptx_idata) + dlen, 16);
struct sk_buff *skb;
skb = alloc_skb(wr_len, GFP_KERNEL);
if (!skb)
return NULL;
req = __skb_put(skb, wr_len);
INIT_ULPTX_WR(req, wr_len, 0, tid);
req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
FW_WR_ATOMIC_V(0));
req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
ULP_MEMIO_ORDER_V(0) |
T5_ULP_MEMIO_IMM_V(1));
req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
idata = (struct ulptx_idata *)(req + 1);
idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
idata->len = htonl(dlen);
return skb;
}
static int
cxgbit_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
struct cxgbi_task_tag_info *ttinfo, unsigned int idx,
unsigned int npods, struct scatterlist **sg_pp,
unsigned int *sg_off)
{
struct cxgbit_device *cdev = csk->com.cdev;
struct sk_buff *skb;
struct ulp_mem_io *req;
struct ulptx_idata *idata;
struct cxgbi_pagepod *ppod;
unsigned int i;
skb = cxgbit_ppod_init_idata(cdev, ppm, idx, npods, csk->tid);
if (!skb)
return -ENOMEM;
req = (struct ulp_mem_io *)skb->data;
idata = (struct ulptx_idata *)(req + 1);
ppod = (struct cxgbi_pagepod *)(idata + 1);
for (i = 0; i < npods; i++, ppod++)
cxgbit_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);
__skb_queue_tail(&csk->ppodq, skb);
return 0;
}
static int
cxgbit_ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
struct cxgbi_task_tag_info *ttinfo)
{
unsigned int pidx = ttinfo->idx;
unsigned int npods = ttinfo->npods;
unsigned int i, cnt;
struct scatterlist *sg = ttinfo->sgl;
unsigned int offset = 0;
int ret = 0;
for (i = 0; i < npods; i += cnt, pidx += cnt) {
cnt = npods - i;
if (cnt > ULPMEM_IDATA_MAX_NPPODS)
cnt = ULPMEM_IDATA_MAX_NPPODS;
ret = cxgbit_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
&sg, &offset);
if (ret < 0)
break;
}
return ret;
}
static int cxgbit_ddp_sgl_check(struct scatterlist *sg,
unsigned int nents)
{
unsigned int last_sgidx = nents - 1;
unsigned int i;
for (i = 0; i < nents; i++, sg = sg_next(sg)) {
unsigned int len = sg->length + sg->offset;
if ((sg->offset & 0x3) || (i && sg->offset) ||
((i != last_sgidx) && (len != PAGE_SIZE))) {
return -EINVAL;
}
}
return 0;
}
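/*
 * Note (added for clarity): DDP is only attempted for transfers of at least
 * DDP_THRESHOLD bytes whose scatterlist passes the check above: the first
 * offset 4-byte aligned, no offset on later entries, and every entry except
 * the last ending exactly on a page boundary. On success the reserved tag
 * doubles as the TTT placed in the outgoing R2T, so the adapter can place
 * Data-Out payloads directly into the command buffers.
 */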
static int
cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo,
unsigned int xferlen)
{
struct cxgbit_device *cdev = csk->com.cdev;
struct cxgbi_ppm *ppm = cdev2ppm(cdev);
struct scatterlist *sgl = ttinfo->sgl;
unsigned int sgcnt = ttinfo->nents;
unsigned int sg_offset = sgl->offset;
int ret;
if ((xferlen < DDP_THRESHOLD) || (!sgcnt)) {
pr_debug("ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
ppm, ppm->tformat.pgsz_idx_dflt,
xferlen, ttinfo->nents);
return -EINVAL;
}
if (cxgbit_ddp_sgl_check(sgl, sgcnt) < 0)
return -EINVAL;
ttinfo->nr_pages = (xferlen + sgl->offset +
(1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
/*
* the ddp tag will be used for the ttt in the outgoing r2t pdu
*/
ret = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
&ttinfo->tag, 0);
if (ret < 0)
return ret;
ttinfo->npods = ret;
sgl->offset = 0;
ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
sgl->offset = sg_offset;
if (!ret) {
pr_debug("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
__func__, 0, xferlen, sgcnt);
goto rel_ppods;
}
cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
xferlen, &ttinfo->hdr);
ret = cxgbit_ddp_set_map(ppm, csk, ttinfo);
if (ret < 0) {
__skb_queue_purge(&csk->ppodq);
dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
goto rel_ppods;
}
return 0;
rel_ppods:
cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
return -EINVAL;
}
void
cxgbit_get_r2t_ttt(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
struct iscsi_r2t *r2t)
{
struct cxgbit_sock *csk = conn->context;
struct cxgbit_device *cdev = csk->com.cdev;
struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
int ret;
if ((!ccmd->setup_ddp) ||
(!test_bit(CSK_DDP_ENABLE, &csk->com.flags)))
goto out;
ccmd->setup_ddp = false;
ttinfo->sgl = cmd->se_cmd.t_data_sg;
ttinfo->nents = cmd->se_cmd.t_data_nents;
ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length);
if (ret < 0) {
pr_debug("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);
ttinfo->sgl = NULL;
ttinfo->nents = 0;
} else {
ccmd->release = true;
}
out:
pr_debug("cdev 0x%p, cmd 0x%p, tag 0x%x\n", cdev, cmd, ttinfo->tag);
r2t->targ_xfer_tag = ttinfo->tag;
}
void cxgbit_unmap_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
{
struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
if (ccmd->release) {
if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
put_page(sg_page(&ccmd->sg));
} else {
struct cxgbit_sock *csk = conn->context;
struct cxgbit_device *cdev = csk->com.cdev;
struct cxgbi_ppm *ppm = cdev2ppm(cdev);
struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
/* Abort the TCP conn if DDP is not complete to
* avoid any possibility of DDP after freeing
* the cmd.
*/
if (unlikely(cmd->write_data_done !=
cmd->se_cmd.data_length))
cxgbit_abort_conn(csk);
if (unlikely(ttinfo->sgl)) {
dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
ttinfo->nents, DMA_FROM_DEVICE);
ttinfo->nents = 0;
ttinfo->sgl = NULL;
}
cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
}
ccmd->release = false;
}
}
int cxgbit_ddp_init(struct cxgbit_device *cdev)
{
struct cxgb4_lld_info *lldi = &cdev->lldi;
struct net_device *ndev = cdev->lldi.ports[0];
struct cxgbi_tag_format tformat;
int ret, i;
if (!lldi->vr->iscsi.size) {
pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
return -EACCES;
}
memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
for (i = 0; i < 4; i++)
tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
& 0xF;
cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);
ret = cxgbi_ppm_init(lldi->iscsi_ppm, cdev->lldi.ports[0],
cdev->lldi.pdev, &cdev->lldi, &tformat,
lldi->vr->iscsi.size, lldi->iscsi_llimit,
lldi->vr->iscsi.start, 2,
lldi->vr->ppod_edram.start,
lldi->vr->ppod_edram.size);
if (ret >= 0) {
struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*lldi->iscsi_ppm);
if ((ppm->tformat.pgsz_idx_dflt < DDP_PGIDX_MAX) &&
(ppm->ppmax >= 1024))
set_bit(CDEV_DDP_ENABLE, &cdev->flags);
ret = 0;
}
return ret;
}
|
linux-master
|
drivers/target/iscsi/cxgbit/cxgbit_ddp.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016 Chelsio Communications, Inc.
*/
#define DRV_NAME "cxgbit"
#define DRV_VERSION "1.0.0-ko"
#define pr_fmt(fmt) DRV_NAME ": " fmt
#include "cxgbit.h"
#ifdef CONFIG_CHELSIO_T4_DCB
#include <net/dcbevent.h>
#include "cxgb4_dcb.h"
#endif
LIST_HEAD(cdev_list_head);
/* cdev list lock */
DEFINE_MUTEX(cdev_list_lock);
void _cxgbit_free_cdev(struct kref *kref)
{
struct cxgbit_device *cdev;
cdev = container_of(kref, struct cxgbit_device, kref);
cxgbi_ppm_release(cdev2ppm(cdev));
kfree(cdev);
}
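/*
 * Note (added for clarity): mdsl is the maximum iSCSI data segment length
 * advertised for this adapter. A worked example, assuming 4 KiB pages,
 * MAX_SKB_FRAGS == 17 and an iscsi_iolen large enough not to be the limiting
 * term: on T5 the PDU cap gives 16224 - 312 = 15912 bytes, which survives
 * the round_down(, 4) and both min_t() clamps (16 KiB and 16 pages), so
 * cdev->mdsl ends up as 15912. The exact figure depends on the
 * firmware-reported iscsi_iolen and on the architecture's PAGE_SIZE.
 */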
static void cxgbit_set_mdsl(struct cxgbit_device *cdev)
{
struct cxgb4_lld_info *lldi = &cdev->lldi;
u32 mdsl;
#define CXGBIT_T5_MAX_PDU_LEN 16224
#define CXGBIT_PDU_NONPAYLOAD_LEN 312 /* 48(BHS) + 256(AHS) + 8(Digest) */
if (is_t5(lldi->adapter_type)) {
mdsl = min_t(u32, lldi->iscsi_iolen - CXGBIT_PDU_NONPAYLOAD_LEN,
CXGBIT_T5_MAX_PDU_LEN - CXGBIT_PDU_NONPAYLOAD_LEN);
} else {
mdsl = lldi->iscsi_iolen - CXGBIT_PDU_NONPAYLOAD_LEN;
mdsl = min(mdsl, 16384U);
}
mdsl = round_down(mdsl, 4);
mdsl = min_t(u32, mdsl, 4 * PAGE_SIZE);
mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE);
cdev->mdsl = mdsl;
}
static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi)
{
struct cxgbit_device *cdev;
if (is_t4(lldi->adapter_type))
return ERR_PTR(-ENODEV);
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
return ERR_PTR(-ENOMEM);
kref_init(&cdev->kref);
spin_lock_init(&cdev->np_lock);
cdev->lldi = *lldi;
cxgbit_set_mdsl(cdev);
if (cxgbit_ddp_init(cdev) < 0) {
kfree(cdev);
return ERR_PTR(-EINVAL);
}
if (!test_bit(CDEV_DDP_ENABLE, &cdev->flags))
pr_info("cdev %s ddp init failed\n",
pci_name(lldi->pdev));
if (lldi->fw_vers >= 0x10d2b00)
set_bit(CDEV_ISO_ENABLE, &cdev->flags);
spin_lock_init(&cdev->cskq.lock);
INIT_LIST_HEAD(&cdev->cskq.list);
mutex_lock(&cdev_list_lock);
list_add_tail(&cdev->list, &cdev_list_head);
mutex_unlock(&cdev_list_lock);
pr_info("cdev %s added for iSCSI target transport\n",
pci_name(lldi->pdev));
return cdev;
}
static void cxgbit_close_conn(struct cxgbit_device *cdev)
{
struct cxgbit_sock *csk;
struct sk_buff *skb;
bool wakeup_thread = false;
spin_lock_bh(&cdev->cskq.lock);
list_for_each_entry(csk, &cdev->cskq.list, list) {
skb = alloc_skb(0, GFP_ATOMIC);
if (!skb)
continue;
spin_lock_bh(&csk->rxq.lock);
__skb_queue_tail(&csk->rxq, skb);
if (skb_queue_len(&csk->rxq) == 1)
wakeup_thread = true;
spin_unlock_bh(&csk->rxq.lock);
if (wakeup_thread) {
wake_up(&csk->waitq);
wakeup_thread = false;
}
}
spin_unlock_bh(&cdev->cskq.lock);
}
static void cxgbit_detach_cdev(struct cxgbit_device *cdev)
{
bool free_cdev = false;
spin_lock_bh(&cdev->cskq.lock);
if (list_empty(&cdev->cskq.list))
free_cdev = true;
spin_unlock_bh(&cdev->cskq.lock);
if (free_cdev) {
mutex_lock(&cdev_list_lock);
list_del(&cdev->list);
mutex_unlock(&cdev_list_lock);
cxgbit_put_cdev(cdev);
} else {
cxgbit_close_conn(cdev);
}
}
static int cxgbit_uld_state_change(void *handle, enum cxgb4_state state)
{
struct cxgbit_device *cdev = handle;
switch (state) {
case CXGB4_STATE_UP:
set_bit(CDEV_STATE_UP, &cdev->flags);
pr_info("cdev %s state UP.\n", pci_name(cdev->lldi.pdev));
break;
case CXGB4_STATE_START_RECOVERY:
clear_bit(CDEV_STATE_UP, &cdev->flags);
cxgbit_close_conn(cdev);
pr_info("cdev %s state RECOVERY.\n", pci_name(cdev->lldi.pdev));
break;
case CXGB4_STATE_DOWN:
pr_info("cdev %s state DOWN.\n", pci_name(cdev->lldi.pdev));
break;
case CXGB4_STATE_DETACH:
clear_bit(CDEV_STATE_UP, &cdev->flags);
pr_info("cdev %s state DETACH.\n", pci_name(cdev->lldi.pdev));
cxgbit_detach_cdev(cdev);
break;
default:
pr_info("cdev %s unknown state %d.\n",
pci_name(cdev->lldi.pdev), state);
break;
}
return 0;
}
static void
cxgbit_process_ddpvld(struct cxgbit_sock *csk, struct cxgbit_lro_pdu_cb *pdu_cb,
u32 ddpvld)
{
if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) {
pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", csk->tid, ddpvld);
pdu_cb->flags |= PDUCBF_RX_HCRC_ERR;
}
if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) {
pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", csk->tid, ddpvld);
pdu_cb->flags |= PDUCBF_RX_DCRC_ERR;
}
if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT))
pr_info("tid 0x%x, status 0x%x, pad bad.\n", csk->tid, ddpvld);
if ((ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) &&
(!(pdu_cb->flags & PDUCBF_RX_DATA))) {
pdu_cb->flags |= PDUCBF_RX_DATA_DDPD;
}
}
static void
cxgbit_lro_add_packet_rsp(struct sk_buff *skb, u8 op, const __be64 *rsp)
{
struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
lro_cb->pdu_idx);
struct cpl_rx_iscsi_ddp *cpl = (struct cpl_rx_iscsi_ddp *)(rsp + 1);
cxgbit_process_ddpvld(lro_cb->csk, pdu_cb, be32_to_cpu(cpl->ddpvld));
pdu_cb->flags |= PDUCBF_RX_STATUS;
pdu_cb->ddigest = ntohl(cpl->ulp_crc);
pdu_cb->pdulen = ntohs(cpl->len);
if (pdu_cb->flags & PDUCBF_RX_HDR)
pdu_cb->complete = true;
lro_cb->pdu_totallen += pdu_cb->pdulen;
lro_cb->complete = true;
lro_cb->pdu_idx++;
}
static void
cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl,
unsigned int offset)
{
u8 skb_frag_idx = skb_shinfo(skb)->nr_frags;
u8 i;
/* usually there's just one frag */
__skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page,
gl->frags[0].offset + offset,
gl->frags[0].size - offset);
for (i = 1; i < gl->nfrags; i++)
__skb_fill_page_desc(skb, skb_frag_idx + i,
gl->frags[i].page,
gl->frags[i].offset,
gl->frags[i].size);
skb_shinfo(skb)->nr_frags += gl->nfrags;
/* get a reference to the last page, we don't own it */
get_page(gl->frags[gl->nfrags - 1].page);
}
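/*
 * Note (added for clarity): CPLs arriving for an LRO session are folded into
 * the session skb here. CPL_ISCSI_HDR records the BHS location and sequence
 * number, CPL_ISCSI_DATA records the payload fragments, and CPL_RX_ISCSI_CMP
 * (header plus completion in one message) additionally carries the data
 * digest and DDP status, marking the PDU complete when its payload was
 * either received inline or fully placed by DDP.
 */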
static void
cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl)
{
struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
lro_cb->pdu_idx);
u32 len, offset;
if (op == CPL_ISCSI_HDR) {
struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)gl->va;
offset = sizeof(struct cpl_iscsi_hdr);
pdu_cb->flags |= PDUCBF_RX_HDR;
pdu_cb->seq = ntohl(cpl->seq);
len = ntohs(cpl->len);
pdu_cb->hdr = gl->va + offset;
pdu_cb->hlen = len;
pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;
if (unlikely(gl->nfrags > 1))
cxgbit_skcb_flags(skb) = 0;
lro_cb->complete = false;
} else if (op == CPL_ISCSI_DATA) {
struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va;
offset = sizeof(struct cpl_iscsi_data);
pdu_cb->flags |= PDUCBF_RX_DATA;
len = ntohs(cpl->len);
pdu_cb->dlen = len;
pdu_cb->doffset = lro_cb->offset;
pdu_cb->nr_dfrags = gl->nfrags;
pdu_cb->dfrag_idx = skb_shinfo(skb)->nr_frags;
lro_cb->complete = false;
} else {
struct cpl_rx_iscsi_cmp *cpl;
cpl = (struct cpl_rx_iscsi_cmp *)gl->va;
offset = sizeof(struct cpl_rx_iscsi_cmp);
pdu_cb->flags |= (PDUCBF_RX_HDR | PDUCBF_RX_STATUS);
len = be16_to_cpu(cpl->len);
pdu_cb->hdr = gl->va + offset;
pdu_cb->hlen = len;
pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;
pdu_cb->ddigest = be32_to_cpu(cpl->ulp_crc);
pdu_cb->pdulen = ntohs(cpl->len);
if (unlikely(gl->nfrags > 1))
cxgbit_skcb_flags(skb) = 0;
cxgbit_process_ddpvld(lro_cb->csk, pdu_cb,
be32_to_cpu(cpl->ddpvld));
if (pdu_cb->flags & PDUCBF_RX_DATA_DDPD) {
pdu_cb->flags |= PDUCBF_RX_DDP_CMP;
pdu_cb->complete = true;
} else if (pdu_cb->flags & PDUCBF_RX_DATA) {
pdu_cb->complete = true;
}
lro_cb->pdu_totallen += pdu_cb->hlen + pdu_cb->dlen;
lro_cb->complete = true;
lro_cb->pdu_idx++;
}
cxgbit_copy_frags(skb, gl, offset);
pdu_cb->frags += gl->nfrags;
lro_cb->offset += len;
skb->len += len;
skb->data_len += len;
skb->truesize += len;
}
static struct sk_buff *
cxgbit_lro_init_skb(struct cxgbit_sock *csk, u8 op, const struct pkt_gl *gl,
const __be64 *rsp, struct napi_struct *napi)
{
struct sk_buff *skb;
struct cxgbit_lro_cb *lro_cb;
skb = napi_alloc_skb(napi, LRO_SKB_MAX_HEADROOM);
if (unlikely(!skb))
return NULL;
memset(skb->data, 0, LRO_SKB_MAX_HEADROOM);
cxgbit_skcb_flags(skb) |= SKCBF_RX_LRO;
lro_cb = cxgbit_skb_lro_cb(skb);
cxgbit_get_csk(csk);
lro_cb->csk = csk;
return skb;
}
static void cxgbit_queue_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
bool wakeup_thread = false;
spin_lock(&csk->rxq.lock);
__skb_queue_tail(&csk->rxq, skb);
if (skb_queue_len(&csk->rxq) == 1)
wakeup_thread = true;
spin_unlock(&csk->rxq.lock);
if (wakeup_thread)
wake_up(&csk->waitq);
}
static void cxgbit_lro_flush(struct t4_lro_mgr *lro_mgr, struct sk_buff *skb)
{
struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
struct cxgbit_sock *csk = lro_cb->csk;
csk->lro_skb = NULL;
__skb_unlink(skb, &lro_mgr->lroq);
cxgbit_queue_lro_skb(csk, skb);
cxgbit_put_csk(csk);
lro_mgr->lro_pkts++;
lro_mgr->lro_session_cnt--;
}
static void cxgbit_uld_lro_flush(struct t4_lro_mgr *lro_mgr)
{
struct sk_buff *skb;
while ((skb = skb_peek(&lro_mgr->lroq)))
cxgbit_lro_flush(lro_mgr, skb);
}
static int
cxgbit_lro_receive(struct cxgbit_sock *csk, u8 op, const __be64 *rsp,
const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
struct napi_struct *napi)
{
struct sk_buff *skb;
struct cxgbit_lro_cb *lro_cb;
if (!csk) {
pr_err("%s: csk NULL, op 0x%x.\n", __func__, op);
goto out;
}
if (csk->lro_skb)
goto add_packet;
start_lro:
if (lro_mgr->lro_session_cnt >= MAX_LRO_SESSIONS) {
cxgbit_uld_lro_flush(lro_mgr);
goto start_lro;
}
skb = cxgbit_lro_init_skb(csk, op, gl, rsp, napi);
if (unlikely(!skb))
goto out;
csk->lro_skb = skb;
__skb_queue_tail(&lro_mgr->lroq, skb);
lro_mgr->lro_session_cnt++;
add_packet:
skb = csk->lro_skb;
lro_cb = cxgbit_skb_lro_cb(skb);
if ((gl && (((skb_shinfo(skb)->nr_frags + gl->nfrags) >
MAX_SKB_FRAGS) || (lro_cb->pdu_totallen >= LRO_FLUSH_LEN_MAX))) ||
(lro_cb->pdu_idx >= MAX_SKB_FRAGS)) {
cxgbit_lro_flush(lro_mgr, skb);
goto start_lro;
}
if (gl)
cxgbit_lro_add_packet_gl(skb, op, gl);
else
cxgbit_lro_add_packet_rsp(skb, op, rsp);
lro_mgr->lro_merged++;
return 0;
out:
return -1;
}
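/*
 * Note (added for clarity): main ULD receive hook. iSCSI CPLs and FW ACKs
 * are kept inside the per-connection LRO session; connection-management CPLs
 * force a flush first so ordering is preserved. Anything not absorbed by
 * cxgbit_lro_receive() is turned into a regular skb and dispatched through
 * the cxgbit_cplhandlers[] table.
 */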
static int
cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
struct napi_struct *napi)
{
struct cxgbit_device *cdev = hndl;
struct cxgb4_lld_info *lldi = &cdev->lldi;
struct cpl_tx_data *rpl = NULL;
struct cxgbit_sock *csk = NULL;
unsigned int tid = 0;
struct sk_buff *skb;
unsigned int op = *(u8 *)rsp;
bool lro_flush = true;
switch (op) {
case CPL_ISCSI_HDR:
case CPL_ISCSI_DATA:
case CPL_RX_ISCSI_CMP:
case CPL_RX_ISCSI_DDP:
case CPL_FW4_ACK:
lro_flush = false;
fallthrough;
case CPL_ABORT_RPL_RSS:
case CPL_PASS_ESTABLISH:
case CPL_PEER_CLOSE:
case CPL_CLOSE_CON_RPL:
case CPL_ABORT_REQ_RSS:
case CPL_SET_TCB_RPL:
case CPL_RX_DATA:
rpl = gl ? (struct cpl_tx_data *)gl->va :
(struct cpl_tx_data *)(rsp + 1);
tid = GET_TID(rpl);
csk = lookup_tid(lldi->tids, tid);
break;
default:
break;
}
if (csk && csk->lro_skb && lro_flush)
cxgbit_lro_flush(lro_mgr, csk->lro_skb);
if (!gl) {
unsigned int len;
if (op == CPL_RX_ISCSI_DDP) {
if (!cxgbit_lro_receive(csk, op, rsp, NULL, lro_mgr,
napi))
return 0;
}
len = 64 - sizeof(struct rsp_ctrl) - 8;
skb = napi_alloc_skb(napi, len);
if (!skb)
goto nomem;
__skb_put(skb, len);
skb_copy_to_linear_data(skb, &rsp[1], len);
} else {
if (unlikely(op != *(u8 *)gl->va)) {
pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
gl->va, be64_to_cpu(*rsp),
get_unaligned_be64(gl->va),
gl->tot_len);
return 0;
}
if ((op == CPL_ISCSI_HDR) || (op == CPL_ISCSI_DATA) ||
(op == CPL_RX_ISCSI_CMP)) {
if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr,
napi))
return 0;
}
#define RX_PULL_LEN 128
skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
if (unlikely(!skb))
goto nomem;
}
rpl = (struct cpl_tx_data *)skb->data;
op = rpl->ot.opcode;
cxgbit_skcb_rx_opcode(skb) = op;
pr_debug("cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
cdev, op, rpl->ot.opcode_tid,
ntohl(rpl->ot.opcode_tid), skb);
if (op < NUM_CPL_CMDS && cxgbit_cplhandlers[op]) {
cxgbit_cplhandlers[op](cdev, skb);
} else {
pr_err("No handler for opcode 0x%x.\n", op);
__kfree_skb(skb);
}
return 0;
nomem:
pr_err("%s OOM bailing out.\n", __func__);
return 1;
}
#ifdef CONFIG_CHELSIO_T4_DCB
struct cxgbit_dcb_work {
struct dcb_app_type dcb_app;
struct work_struct work;
};
static void
cxgbit_update_dcb_priority(struct cxgbit_device *cdev, u8 port_id,
u8 dcb_priority, u16 port_num)
{
struct cxgbit_sock *csk;
struct sk_buff *skb;
u16 local_port;
bool wakeup_thread = false;
spin_lock_bh(&cdev->cskq.lock);
list_for_each_entry(csk, &cdev->cskq.list, list) {
if (csk->port_id != port_id)
continue;
if (csk->com.local_addr.ss_family == AF_INET6) {
struct sockaddr_in6 *sock_in6;
sock_in6 = (struct sockaddr_in6 *)&csk->com.local_addr;
local_port = ntohs(sock_in6->sin6_port);
} else {
struct sockaddr_in *sock_in;
sock_in = (struct sockaddr_in *)&csk->com.local_addr;
local_port = ntohs(sock_in->sin_port);
}
if (local_port != port_num)
continue;
if (csk->dcb_priority == dcb_priority)
continue;
skb = alloc_skb(0, GFP_ATOMIC);
if (!skb)
continue;
spin_lock(&csk->rxq.lock);
__skb_queue_tail(&csk->rxq, skb);
if (skb_queue_len(&csk->rxq) == 1)
wakeup_thread = true;
spin_unlock(&csk->rxq.lock);
if (wakeup_thread) {
wake_up(&csk->waitq);
wakeup_thread = false;
}
}
spin_unlock_bh(&cdev->cskq.lock);
}
static void cxgbit_dcb_workfn(struct work_struct *work)
{
struct cxgbit_dcb_work *dcb_work;
struct net_device *ndev;
struct cxgbit_device *cdev = NULL;
struct dcb_app_type *iscsi_app;
u8 priority, port_id = 0xff;
dcb_work = container_of(work, struct cxgbit_dcb_work, work);
iscsi_app = &dcb_work->dcb_app;
if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
if ((iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_STREAM) &&
(iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY))
goto out;
priority = iscsi_app->app.priority;
} else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
goto out;
if (!iscsi_app->app.priority)
goto out;
priority = ffs(iscsi_app->app.priority) - 1;
} else {
goto out;
}
pr_debug("priority for ifid %d is %u\n",
iscsi_app->ifindex, priority);
ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
if (!ndev)
goto out;
mutex_lock(&cdev_list_lock);
cdev = cxgbit_find_device(ndev, &port_id);
dev_put(ndev);
if (!cdev) {
mutex_unlock(&cdev_list_lock);
goto out;
}
cxgbit_update_dcb_priority(cdev, port_id, priority,
iscsi_app->app.protocol);
mutex_unlock(&cdev_list_lock);
out:
kfree(dcb_work);
}
static int
cxgbit_dcbevent_notify(struct notifier_block *nb, unsigned long action,
void *data)
{
struct cxgbit_dcb_work *dcb_work;
struct dcb_app_type *dcb_app = data;
dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC);
if (!dcb_work)
return NOTIFY_DONE;
dcb_work->dcb_app = *dcb_app;
INIT_WORK(&dcb_work->work, cxgbit_dcb_workfn);
schedule_work(&dcb_work->work);
return NOTIFY_OK;
}
#endif
static enum target_prot_op cxgbit_get_sup_prot_ops(struct iscsit_conn *conn)
{
return TARGET_PROT_NORMAL;
}
static struct iscsit_transport cxgbit_transport = {
.name = DRV_NAME,
.transport_type = ISCSI_CXGBIT,
.rdma_shutdown = false,
.priv_size = sizeof(struct cxgbit_cmd),
.owner = THIS_MODULE,
.iscsit_setup_np = cxgbit_setup_np,
.iscsit_accept_np = cxgbit_accept_np,
.iscsit_free_np = cxgbit_free_np,
.iscsit_free_conn = cxgbit_free_conn,
.iscsit_get_login_rx = cxgbit_get_login_rx,
.iscsit_put_login_tx = cxgbit_put_login_tx,
.iscsit_immediate_queue = iscsit_immediate_queue,
.iscsit_response_queue = iscsit_response_queue,
.iscsit_get_dataout = iscsit_build_r2ts_for_cmd,
.iscsit_queue_data_in = iscsit_queue_rsp,
.iscsit_queue_status = iscsit_queue_rsp,
.iscsit_xmit_pdu = cxgbit_xmit_pdu,
.iscsit_get_r2t_ttt = cxgbit_get_r2t_ttt,
.iscsit_get_rx_pdu = cxgbit_get_rx_pdu,
.iscsit_validate_params = cxgbit_validate_params,
.iscsit_unmap_cmd = cxgbit_unmap_cmd,
.iscsit_aborted_task = iscsit_aborted_task,
.iscsit_get_sup_prot_ops = cxgbit_get_sup_prot_ops,
};
static struct cxgb4_uld_info cxgbit_uld_info = {
.name = DRV_NAME,
.nrxq = MAX_ULD_QSETS,
.ntxq = MAX_ULD_QSETS,
.rxq_size = 1024,
.lro = true,
.add = cxgbit_uld_add,
.state_change = cxgbit_uld_state_change,
.lro_rx_handler = cxgbit_uld_lro_rx_handler,
.lro_flush = cxgbit_uld_lro_flush,
};
#ifdef CONFIG_CHELSIO_T4_DCB
static struct notifier_block cxgbit_dcbevent_nb = {
.notifier_call = cxgbit_dcbevent_notify,
};
#endif
static int __init cxgbit_init(void)
{
cxgb4_register_uld(CXGB4_ULD_ISCSIT, &cxgbit_uld_info);
iscsit_register_transport(&cxgbit_transport);
#ifdef CONFIG_CHELSIO_T4_DCB
pr_info("%s dcb enabled.\n", DRV_NAME);
register_dcbevent_notifier(&cxgbit_dcbevent_nb);
#endif
BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) <
sizeof(union cxgbit_skb_cb));
return 0;
}
static void __exit cxgbit_exit(void)
{
struct cxgbit_device *cdev, *tmp;
#ifdef CONFIG_CHELSIO_T4_DCB
unregister_dcbevent_notifier(&cxgbit_dcbevent_nb);
#endif
mutex_lock(&cdev_list_lock);
list_for_each_entry_safe(cdev, tmp, &cdev_list_head, list) {
list_del(&cdev->list);
cxgbit_put_cdev(cdev);
}
mutex_unlock(&cdev_list_lock);
iscsit_unregister_transport(&cxgbit_transport);
cxgb4_unregister_uld(CXGB4_ULD_ISCSIT);
}
module_init(cxgbit_init);
module_exit(cxgbit_exit);
MODULE_DESCRIPTION("Chelsio iSCSI target offload driver");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/target/iscsi/cxgbit/cxgbit_main.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016 Chelsio Communications, Inc.
*/
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <libcxgb_cm.h>
#include "cxgbit.h"
#include "clip_tbl.h"
static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp)
{
wr_waitp->ret = 0;
reinit_completion(&wr_waitp->completion);
}
static void
cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret)
{
if (ret == CPL_ERR_NONE)
wr_waitp->ret = 0;
else
wr_waitp->ret = -EIO;
if (wr_waitp->ret)
pr_err("%s: err:%u", func, ret);
complete(&wr_waitp->completion);
}
static int
cxgbit_wait_for_reply(struct cxgbit_device *cdev,
struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout,
const char *func)
{
int ret;
if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
wr_waitp->ret = -EIO;
goto out;
}
ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ);
if (!ret) {
pr_info("%s - Device %s not responding tid %u\n",
func, pci_name(cdev->lldi.pdev), tid);
wr_waitp->ret = -ETIMEDOUT;
}
out:
if (wr_waitp->ret)
pr_info("%s: FW reply %d tid %u\n",
pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
return wr_waitp->ret;
}
static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
{
return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
}
static struct np_info *
cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
unsigned int stid)
{
struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL);
if (p) {
int bucket = cxgbit_np_hashfn(cnp);
p->cnp = cnp;
p->stid = stid;
spin_lock(&cdev->np_lock);
p->next = cdev->np_hash_tab[bucket];
cdev->np_hash_tab[bucket] = p;
spin_unlock(&cdev->np_lock);
}
return p;
}
static int
cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
int stid = -1, bucket = cxgbit_np_hashfn(cnp);
struct np_info *p;
spin_lock(&cdev->np_lock);
for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
if (p->cnp == cnp) {
stid = p->stid;
break;
}
}
spin_unlock(&cdev->np_lock);
return stid;
}
static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
int stid = -1, bucket = cxgbit_np_hashfn(cnp);
struct np_info *p, **prev = &cdev->np_hash_tab[bucket];
spin_lock(&cdev->np_lock);
for (p = *prev; p; prev = &p->next, p = p->next) {
if (p->cnp == cnp) {
stid = p->stid;
*prev = p->next;
kfree(p);
break;
}
}
spin_unlock(&cdev->np_lock);
return stid;
}
void _cxgbit_free_cnp(struct kref *kref)
{
struct cxgbit_np *cnp;
cnp = container_of(kref, struct cxgbit_np, kref);
kfree(cnp);
}
static int
cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
struct cxgbit_np *cnp)
{
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
&cnp->com.local_addr;
int addr_type;
int ret;
pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n",
__func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);
addr_type = ipv6_addr_type((const struct in6_addr *)
&sin6->sin6_addr);
if (addr_type != IPV6_ADDR_ANY) {
ret = cxgb4_clip_get(cdev->lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
if (ret) {
pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n",
sin6->sin6_addr.s6_addr, ret);
return -ENOMEM;
}
}
cxgbit_get_cnp(cnp);
cxgbit_init_wr_wait(&cnp->com.wr_wait);
ret = cxgb4_create_server6(cdev->lldi.ports[0],
stid, &sin6->sin6_addr,
sin6->sin6_port,
cdev->lldi.rxq_ids[0]);
if (!ret)
ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
0, 10, __func__);
else if (ret > 0)
ret = net_xmit_errno(ret);
else
cxgbit_put_cnp(cnp);
if (ret) {
if (ret != -ETIMEDOUT)
cxgb4_clip_release(cdev->lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n",
ret, stid, sin6->sin6_addr.s6_addr,
ntohs(sin6->sin6_port));
}
return ret;
}
static int
cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
struct cxgbit_np *cnp)
{
struct sockaddr_in *sin = (struct sockaddr_in *)
&cnp->com.local_addr;
int ret;
pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n",
__func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);
cxgbit_get_cnp(cnp);
cxgbit_init_wr_wait(&cnp->com.wr_wait);
ret = cxgb4_create_server(cdev->lldi.ports[0],
stid, sin->sin_addr.s_addr,
sin->sin_port, 0,
cdev->lldi.rxq_ids[0]);
if (!ret)
ret = cxgbit_wait_for_reply(cdev,
&cnp->com.wr_wait,
0, 10, __func__);
else if (ret > 0)
ret = net_xmit_errno(ret);
else
cxgbit_put_cnp(cnp);
if (ret)
pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n",
ret, stid, &sin->sin_addr, ntohs(sin->sin_port));
return ret;
}
struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id)
{
struct cxgbit_device *cdev;
u8 i;
list_for_each_entry(cdev, &cdev_list_head, list) {
struct cxgb4_lld_info *lldi = &cdev->lldi;
for (i = 0; i < lldi->nports; i++) {
if (lldi->ports[i] == ndev) {
if (port_id)
*port_id = i;
return cdev;
}
}
}
return NULL;
}
static struct net_device *cxgbit_get_real_dev(struct net_device *ndev)
{
if (ndev->priv_flags & IFF_BONDING) {
pr_err("Bond devices are not supported. Interface:%s\n",
ndev->name);
return NULL;
}
if (is_vlan_dev(ndev))
return vlan_dev_real_dev(ndev);
return ndev;
}
static struct net_device *cxgbit_ipv4_netdev(__be32 saddr)
{
struct net_device *ndev;
ndev = __ip_dev_find(&init_net, saddr, false);
if (!ndev)
return NULL;
return cxgbit_get_real_dev(ndev);
}
static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6)
{
struct net_device *ndev = NULL;
bool found = false;
if (IS_ENABLED(CONFIG_IPV6)) {
for_each_netdev_rcu(&init_net, ndev)
if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) {
found = true;
break;
}
}
if (!found)
return NULL;
return cxgbit_get_real_dev(ndev);
}
static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp)
{
struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
int ss_family = sockaddr->ss_family;
struct net_device *ndev = NULL;
struct cxgbit_device *cdev = NULL;
rcu_read_lock();
if (ss_family == AF_INET) {
struct sockaddr_in *sin;
sin = (struct sockaddr_in *)sockaddr;
ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr);
} else if (ss_family == AF_INET6) {
struct sockaddr_in6 *sin6;
sin6 = (struct sockaddr_in6 *)sockaddr;
ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr);
}
if (!ndev)
goto out;
cdev = cxgbit_find_device(ndev, NULL);
out:
rcu_read_unlock();
return cdev;
}
static bool cxgbit_inaddr_any(struct cxgbit_np *cnp)
{
struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
int ss_family = sockaddr->ss_family;
int addr_type;
if (ss_family == AF_INET) {
struct sockaddr_in *sin;
sin = (struct sockaddr_in *)sockaddr;
if (sin->sin_addr.s_addr == htonl(INADDR_ANY))
return true;
} else if (ss_family == AF_INET6) {
struct sockaddr_in6 *sin6;
sin6 = (struct sockaddr_in6 *)sockaddr;
addr_type = ipv6_addr_type((const struct in6_addr *)
&sin6->sin6_addr);
if (addr_type == IPV6_ADDR_ANY)
return true;
}
return false;
}
static int
__cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
int stid, ret;
int ss_family = cnp->com.local_addr.ss_family;
if (!test_bit(CDEV_STATE_UP, &cdev->flags))
return -EINVAL;
stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
if (stid < 0)
return -EINVAL;
if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
return -EINVAL;
}
if (ss_family == AF_INET)
ret = cxgbit_create_server4(cdev, stid, cnp);
else
ret = cxgbit_create_server6(cdev, stid, cnp);
if (ret) {
if (ret != -ETIMEDOUT)
cxgb4_free_stid(cdev->lldi.tids, stid,
ss_family);
cxgbit_np_hash_del(cdev, cnp);
return ret;
}
return ret;
}
static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp)
{
struct cxgbit_device *cdev;
int ret = -1;
mutex_lock(&cdev_list_lock);
cdev = cxgbit_find_np_cdev(cnp);
if (!cdev)
goto out;
if (cxgbit_np_hash_find(cdev, cnp) >= 0)
goto out;
if (__cxgbit_setup_cdev_np(cdev, cnp))
goto out;
cnp->com.cdev = cdev;
ret = 0;
out:
mutex_unlock(&cdev_list_lock);
return ret;
}
static int cxgbit_setup_all_np(struct cxgbit_np *cnp)
{
struct cxgbit_device *cdev;
int ret;
u32 count = 0;
mutex_lock(&cdev_list_lock);
list_for_each_entry(cdev, &cdev_list_head, list) {
if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
mutex_unlock(&cdev_list_lock);
return -1;
}
}
list_for_each_entry(cdev, &cdev_list_head, list) {
ret = __cxgbit_setup_cdev_np(cdev, cnp);
if (ret == -ETIMEDOUT)
break;
if (ret != 0)
continue;
count++;
}
mutex_unlock(&cdev_list_lock);
return count ? 0 : -1;
}
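/*
 * Note (added for clarity): a wildcard (INADDR_ANY / in6addr_any) listener is
 * instantiated on every registered adapter via cxgbit_setup_all_np() and is
 * considered successful if at least one adapter accepted it; a specific
 * address is resolved to the owning adapter and set up only there.
 */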
int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)
{
struct cxgbit_np *cnp;
int ret;
if ((ksockaddr->ss_family != AF_INET) &&
(ksockaddr->ss_family != AF_INET6))
return -EINVAL;
cnp = kzalloc(sizeof(*cnp), GFP_KERNEL);
if (!cnp)
return -ENOMEM;
init_waitqueue_head(&cnp->accept_wait);
init_completion(&cnp->com.wr_wait.completion);
init_completion(&cnp->accept_comp);
INIT_LIST_HEAD(&cnp->np_accept_list);
spin_lock_init(&cnp->np_accept_lock);
kref_init(&cnp->kref);
memcpy(&np->np_sockaddr, ksockaddr,
sizeof(struct sockaddr_storage));
memcpy(&cnp->com.local_addr, &np->np_sockaddr,
sizeof(cnp->com.local_addr));
cnp->np = np;
cnp->com.cdev = NULL;
if (cxgbit_inaddr_any(cnp))
ret = cxgbit_setup_all_np(cnp);
else
ret = cxgbit_setup_cdev_np(cnp);
if (ret) {
cxgbit_put_cnp(cnp);
return -EINVAL;
}
np->np_context = cnp;
cnp->com.state = CSK_STATE_LISTEN;
return 0;
}
static void
cxgbit_set_conn_info(struct iscsi_np *np, struct iscsit_conn *conn,
struct cxgbit_sock *csk)
{
conn->login_family = np->np_sockaddr.ss_family;
conn->login_sockaddr = csk->com.remote_addr;
conn->local_sockaddr = csk->com.local_addr;
}
int cxgbit_accept_np(struct iscsi_np *np, struct iscsit_conn *conn)
{
struct cxgbit_np *cnp = np->np_context;
struct cxgbit_sock *csk;
int ret = 0;
accept_wait:
ret = wait_for_completion_interruptible(&cnp->accept_comp);
if (ret)
return -ENODEV;
spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
spin_unlock_bh(&np->np_thread_lock);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 */
return -ENODEV;
}
spin_unlock_bh(&np->np_thread_lock);
spin_lock_bh(&cnp->np_accept_lock);
if (list_empty(&cnp->np_accept_list)) {
spin_unlock_bh(&cnp->np_accept_lock);
goto accept_wait;
}
csk = list_first_entry(&cnp->np_accept_list,
struct cxgbit_sock,
accept_node);
list_del_init(&csk->accept_node);
spin_unlock_bh(&cnp->np_accept_lock);
conn->context = csk;
csk->conn = conn;
cxgbit_set_conn_info(np, conn, csk);
return 0;
}
static int
__cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
int stid, ret;
bool ipv6 = false;
stid = cxgbit_np_hash_del(cdev, cnp);
if (stid < 0)
return -EINVAL;
if (!test_bit(CDEV_STATE_UP, &cdev->flags))
return -EINVAL;
if (cnp->np->np_sockaddr.ss_family == AF_INET6)
ipv6 = true;
cxgbit_get_cnp(cnp);
cxgbit_init_wr_wait(&cnp->com.wr_wait);
ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
cdev->lldi.rxq_ids[0], ipv6);
if (ret > 0)
ret = net_xmit_errno(ret);
if (ret) {
cxgbit_put_cnp(cnp);
return ret;
}
ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
0, 10, __func__);
if (ret == -ETIMEDOUT)
return ret;
if (ipv6 && cnp->com.cdev) {
struct sockaddr_in6 *sin6;
sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr;
cxgb4_clip_release(cdev->lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr,
1);
}
cxgb4_free_stid(cdev->lldi.tids, stid,
cnp->com.local_addr.ss_family);
return 0;
}
static void cxgbit_free_all_np(struct cxgbit_np *cnp)
{
struct cxgbit_device *cdev;
int ret;
mutex_lock(&cdev_list_lock);
list_for_each_entry(cdev, &cdev_list_head, list) {
ret = __cxgbit_free_cdev_np(cdev, cnp);
if (ret == -ETIMEDOUT)
break;
}
mutex_unlock(&cdev_list_lock);
}
static void cxgbit_free_cdev_np(struct cxgbit_np *cnp)
{
struct cxgbit_device *cdev;
bool found = false;
mutex_lock(&cdev_list_lock);
list_for_each_entry(cdev, &cdev_list_head, list) {
if (cdev == cnp->com.cdev) {
found = true;
break;
}
}
if (!found)
goto out;
__cxgbit_free_cdev_np(cdev, cnp);
out:
mutex_unlock(&cdev_list_lock);
}
static void __cxgbit_free_conn(struct cxgbit_sock *csk);
void cxgbit_free_np(struct iscsi_np *np)
{
struct cxgbit_np *cnp = np->np_context;
struct cxgbit_sock *csk, *tmp;
cnp->com.state = CSK_STATE_DEAD;
if (cnp->com.cdev)
cxgbit_free_cdev_np(cnp);
else
cxgbit_free_all_np(cnp);
spin_lock_bh(&cnp->np_accept_lock);
list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
list_del_init(&csk->accept_node);
__cxgbit_free_conn(csk);
}
spin_unlock_bh(&cnp->np_accept_lock);
np->np_context = NULL;
cxgbit_put_cnp(cnp);
}
static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
{
struct sk_buff *skb;
u32 len = roundup(sizeof(struct cpl_close_con_req), 16);
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb)
return;
cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
NULL, NULL);
cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
__skb_queue_tail(&csk->txq, skb);
cxgbit_push_tx_frames(csk);
}
static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
{
struct cxgbit_sock *csk = handle;
pr_debug("%s cxgbit_device %p\n", __func__, handle);
kfree_skb(skb);
cxgbit_put_csk(csk);
}
static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
{
struct cxgbit_device *cdev = handle;
struct cpl_abort_req *req = cplhdr(skb);
pr_debug("%s cdev %p\n", __func__, cdev);
req->cmd = CPL_ABORT_NO_RST;
cxgbit_ofld_send(cdev, skb);
}
static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
{
struct sk_buff *skb;
u32 len = roundup(sizeof(struct cpl_abort_req), 16);
pr_debug("%s: csk %p tid %u; state %d\n",
__func__, csk, csk->tid, csk->com.state);
__skb_queue_purge(&csk->txq);
if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
cxgbit_send_tx_flowc_wr(csk);
skb = __skb_dequeue(&csk->skbq);
cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx,
csk->com.cdev, cxgbit_abort_arp_failure);
return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}
static void
__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
{
__kfree_skb(skb);
if (csk->com.state != CSK_STATE_ESTABLISHED)
goto no_abort;
set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
csk->com.state = CSK_STATE_ABORTING;
cxgbit_send_abort_req(csk);
return;
no_abort:
cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
cxgbit_put_csk(csk);
}
void cxgbit_abort_conn(struct cxgbit_sock *csk)
{
struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);
cxgbit_get_csk(csk);
cxgbit_init_wr_wait(&csk->com.wr_wait);
spin_lock_bh(&csk->lock);
if (csk->lock_owner) {
cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
__skb_queue_tail(&csk->backlogq, skb);
} else {
__cxgbit_abort_conn(csk, skb);
}
spin_unlock_bh(&csk->lock);
cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
csk->tid, 600, __func__);
}
static void __cxgbit_free_conn(struct cxgbit_sock *csk)
{
struct iscsit_conn *conn = csk->conn;
bool release = false;
pr_debug("%s: state %d\n",
__func__, csk->com.state);
spin_lock_bh(&csk->lock);
switch (csk->com.state) {
case CSK_STATE_ESTABLISHED:
if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
csk->com.state = CSK_STATE_CLOSING;
cxgbit_send_halfclose(csk);
} else {
csk->com.state = CSK_STATE_ABORTING;
cxgbit_send_abort_req(csk);
}
break;
case CSK_STATE_CLOSING:
csk->com.state = CSK_STATE_MORIBUND;
cxgbit_send_halfclose(csk);
break;
case CSK_STATE_DEAD:
release = true;
break;
default:
pr_err("%s: csk %p; state %d\n",
__func__, csk, csk->com.state);
}
spin_unlock_bh(&csk->lock);
if (release)
cxgbit_put_csk(csk);
}
void cxgbit_free_conn(struct iscsit_conn *conn)
{
__cxgbit_free_conn(conn->context);
}
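/*
 * Note (added for clarity): emss is the effective MSS used for TX sizing:
 * the negotiated hardware MTU entry minus IP and TCP headers, minus the TCP
 * timestamp option when negotiated, floored at 128 bytes. For example,
 * assuming an IPv4 connection with a 1500-byte MTU entry and timestamps on,
 * this works out to 1500 - 20 - 20 - 12 = 1448 bytes.
 */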
static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
{
csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
((csk->com.remote_addr.ss_family == AF_INET) ?
sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
sizeof(struct tcphdr);
csk->mss = csk->emss;
if (TCPOPT_TSTAMP_G(opt))
csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
if (csk->emss < 128)
csk->emss = 128;
if (csk->emss & 7)
pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n",
TCPOPT_MSS_G(opt), csk->mss, csk->emss);
pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
csk->mss, csk->emss);
}
static void cxgbit_free_skb(struct cxgbit_sock *csk)
{
struct sk_buff *skb;
__skb_queue_purge(&csk->txq);
__skb_queue_purge(&csk->rxq);
__skb_queue_purge(&csk->backlogq);
__skb_queue_purge(&csk->ppodq);
__skb_queue_purge(&csk->skbq);
while ((skb = cxgbit_sock_dequeue_wr(csk)))
kfree_skb(skb);
__kfree_skb(csk->lro_hskb);
}
void _cxgbit_free_csk(struct kref *kref)
{
struct cxgbit_sock *csk;
struct cxgbit_device *cdev;
csk = container_of(kref, struct cxgbit_sock, kref);
pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);
if (csk->com.local_addr.ss_family == AF_INET6) {
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
&csk->com.local_addr;
cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
(const u32 *)
&sin6->sin6_addr.s6_addr, 1);
}
cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid,
csk->com.local_addr.ss_family);
dst_release(csk->dst);
cxgb4_l2t_release(csk->l2t);
cdev = csk->com.cdev;
spin_lock_bh(&cdev->cskq.lock);
list_del(&csk->list);
spin_unlock_bh(&cdev->cskq.lock);
cxgbit_free_skb(csk);
cxgbit_put_cnp(csk->cnp);
cxgbit_put_cdev(cdev);
kfree(csk);
}
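/*
 * Scale the default 256KB send/receive windows by the link speed in
 * 10Gbps units, capping the receive window at what the RCV_BUFSIZ field
 * can express and the send window at 512KB.
 */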
static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
{
unsigned int linkspeed;
u8 scale;
linkspeed = pi->link_cfg.speed;
scale = linkspeed / SPEED_10000;
#define CXGBIT_10G_RCV_WIN (256 * 1024)
csk->rcv_win = CXGBIT_10G_RCV_WIN;
if (scale)
csk->rcv_win *= scale;
csk->rcv_win = min(csk->rcv_win, RCV_BUFSIZ_M << 10);
#define CXGBIT_10G_SND_WIN (256 * 1024)
csk->snd_win = CXGBIT_10G_SND_WIN;
if (scale)
csk->snd_win *= scale;
csk->snd_win = min(csk->snd_win, 512U * 1024);
pr_debug("%s snd_win %d rcv_win %d\n",
__func__, csk->snd_win, csk->rcv_win);
}
#ifdef CONFIG_CHELSIO_T4_DCB
static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev)
{
return ndev->dcbnl_ops->getstate(ndev);
}
static int cxgbit_select_priority(int pri_mask)
{
if (!pri_mask)
return 0;
return (ffs(pri_mask) - 1);
}
static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
{
int ret;
u8 caps;
struct dcb_app iscsi_dcb_app = {
.protocol = local_port
};
ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
if (ret)
return 0;
if (caps & DCB_CAP_DCBX_VER_IEEE) {
iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM;
ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
if (!ret) {
iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
}
} else if (caps & DCB_CAP_DCBX_VER_CEE) {
iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
ret = dcb_getapp(ndev, &iscsi_dcb_app);
}
pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret));
return cxgbit_select_priority(ret);
}
#endif
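/*
 * Resolve the neighbour entry for the peer and initialise the offload
 * state of a new connection: pick the egress netdev (handling loopback
 * separately), allocate an L2T entry (with the DCB priority when
 * enabled) and fill in the MTU, channel, queue indices and TCP window
 * parameters.
 */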
static int
cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
u16 local_port, struct dst_entry *dst,
struct cxgbit_device *cdev)
{
struct neighbour *n;
int ret, step;
struct net_device *ndev;
u16 rxq_idx, port_id;
#ifdef CONFIG_CHELSIO_T4_DCB
u8 priority = 0;
#endif
n = dst_neigh_lookup(dst, peer_ip);
if (!n)
return -ENODEV;
rcu_read_lock();
if (!(n->nud_state & NUD_VALID))
neigh_event_send(n, NULL);
ret = -ENOMEM;
if (n->dev->flags & IFF_LOOPBACK) {
if (iptype == 4)
ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip);
else if (IS_ENABLED(CONFIG_IPV6))
ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip);
else
ndev = NULL;
if (!ndev) {
ret = -ENODEV;
goto out;
}
csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
n, ndev, 0);
if (!csk->l2t)
goto out;
csk->mtu = ndev->mtu;
csk->tx_chan = cxgb4_port_chan(ndev);
csk->smac_idx =
((struct port_info *)netdev_priv(ndev))->smt_idx;
step = cdev->lldi.ntxq /
cdev->lldi.nchan;
csk->txq_idx = cxgb4_port_idx(ndev) * step;
step = cdev->lldi.nrxq /
cdev->lldi.nchan;
csk->ctrlq_idx = cxgb4_port_idx(ndev);
csk->rss_qid = cdev->lldi.rxq_ids[
cxgb4_port_idx(ndev) * step];
csk->port_id = cxgb4_port_idx(ndev);
cxgbit_set_tcp_window(csk,
(struct port_info *)netdev_priv(ndev));
} else {
ndev = cxgbit_get_real_dev(n->dev);
if (!ndev) {
ret = -ENODEV;
goto out;
}
#ifdef CONFIG_CHELSIO_T4_DCB
if (cxgbit_get_iscsi_dcb_state(ndev))
priority = cxgbit_get_iscsi_dcb_priority(ndev,
local_port);
csk->dcb_priority = priority;
csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
#else
csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
#endif
if (!csk->l2t)
goto out;
port_id = cxgb4_port_idx(ndev);
csk->mtu = dst_mtu(dst);
csk->tx_chan = cxgb4_port_chan(ndev);
csk->smac_idx =
((struct port_info *)netdev_priv(ndev))->smt_idx;
step = cdev->lldi.ntxq /
cdev->lldi.nports;
csk->txq_idx = (port_id * step) +
(cdev->selectq[port_id][0]++ % step);
csk->ctrlq_idx = cxgb4_port_idx(ndev);
step = cdev->lldi.nrxq /
cdev->lldi.nports;
rxq_idx = (port_id * step) +
(cdev->selectq[port_id][1]++ % step);
csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
csk->port_id = port_id;
cxgbit_set_tcp_window(csk,
(struct port_info *)netdev_priv(ndev));
}
ret = 0;
out:
rcu_read_unlock();
neigh_release(n);
return ret;
}
int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
{
int ret = 0;
if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
kfree_skb(skb);
pr_err("%s - device not up - dropping\n", __func__);
return -EIO;
}
ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
if (ret < 0)
kfree_skb(skb);
return ret < 0 ? ret : 0;
}
static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
{
u32 len = roundup(sizeof(struct cpl_tid_release), 16);
struct sk_buff *skb;
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb)
return;
cxgb_mk_tid_release(skb, len, tid, 0);
cxgbit_ofld_send(cdev, skb);
}
int
cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
struct l2t_entry *l2e)
{
int ret = 0;
if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
kfree_skb(skb);
pr_err("%s - device not up - dropping\n", __func__);
return -EIO;
}
ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
if (ret < 0)
kfree_skb(skb);
return ret < 0 ? ret : 0;
}
static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
{
if (csk->com.state != CSK_STATE_ESTABLISHED) {
__kfree_skb(skb);
return;
}
cxgbit_ofld_send(csk->com.cdev, skb);
}
/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message.
 * Returns 0 on success (the send may be deferred to the backlog queue),
 * or -1 if the skb allocation fails.
 */
int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
{
struct sk_buff *skb;
u32 len = roundup(sizeof(struct cpl_rx_data_ack), 16);
u32 credit_dack;
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
return -1;
credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(3) |
RX_CREDITS_V(csk->rx_credits);
cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
credit_dack);
csk->rx_credits = 0;
spin_lock_bh(&csk->lock);
if (csk->lock_owner) {
cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits;
__skb_queue_tail(&csk->backlogq, skb);
spin_unlock_bh(&csk->lock);
return 0;
}
cxgbit_send_rx_credits(csk, skb);
spin_unlock_bh(&csk->lock);
return 0;
}
#define FLOWC_WR_NPARAMS_MIN 9
#define FLOWC_WR_NPARAMS_MAX 11
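/*
 * Pre-allocate the per-connection control skbs: three buffers large
 * enough for a flowc work request or an abort req/rpl, plus one skb
 * used to assemble LRO headers, so these messages can later be sent
 * without a fresh allocation.
 */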
static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
{
struct sk_buff *skb;
u32 len, flowclen;
u8 i;
flowclen = offsetof(struct fw_flowc_wr,
mnemval[FLOWC_WR_NPARAMS_MAX]);
len = max_t(u32, sizeof(struct cpl_abort_req),
sizeof(struct cpl_abort_rpl));
len = max(len, flowclen);
len = roundup(len, 16);
for (i = 0; i < 3; i++) {
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb)
goto out;
__skb_queue_tail(&csk->skbq, skb);
}
skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC);
if (!skb)
goto out;
memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
csk->lro_hskb = skb;
return 0;
out:
__skb_queue_purge(&csk->skbq);
return -ENOMEM;
}
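/*
 * Build and send the CPL_PASS_ACCEPT_RPL for a new connection,
 * encoding window scale, MSS index, L2T index, TX channel, congestion
 * control/ECN settings and the iSCSI ULP mode in opt0/opt2.
 */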
static void
cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
{
struct sk_buff *skb;
const struct tcphdr *tcph;
struct cpl_t5_pass_accept_rpl *rpl5;
struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
unsigned int len = roundup(sizeof(*rpl5), 16);
unsigned int mtu_idx;
u64 opt0;
u32 opt2, hlen;
u32 wscale;
u32 win;
pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb) {
cxgbit_put_csk(csk);
return;
}
rpl5 = __skb_put_zero(skb, len);
INIT_TP_WR(rpl5, csk->tid);
OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
csk->tid));
cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
req->tcpopt.tstamp,
(csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
wscale = cxgb_compute_wscale(csk->rcv_win);
/*
* Specify the largest window that will fit in opt0. The
* remainder will be specified in the rx_data_ack.
*/
win = csk->rcv_win >> 10;
if (win > RCV_BUFSIZ_M)
win = RCV_BUFSIZ_M;
opt0 = TCAM_BYPASS_F |
WND_SCALE_V(wscale) |
MSS_IDX_V(mtu_idx) |
L2T_IDX_V(csk->l2t->idx) |
TX_CHAN_V(csk->tx_chan) |
SMAC_SEL_V(csk->smac_idx) |
DSCP_V(csk->tos >> 2) |
ULP_MODE_V(ULP_MODE_ISCSI) |
RCV_BUFSIZ_V(win);
opt2 = RX_CHANNEL_V(0) |
RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);
if (!is_t5(lldi->adapter_type))
opt2 |= RX_FC_DISABLE_F;
if (req->tcpopt.tstamp)
opt2 |= TSTAMPS_EN_F;
if (req->tcpopt.sack)
opt2 |= SACK_EN_F;
if (wscale)
opt2 |= WND_SCALE_EN_F;
hlen = ntohl(req->hdr_len);
if (is_t5(lldi->adapter_type))
tcph = (struct tcphdr *)((u8 *)(req + 1) +
ETH_HDR_LEN_G(hlen) + IP_HDR_LEN_G(hlen));
else
tcph = (struct tcphdr *)((u8 *)(req + 1) +
T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));
if (tcph->ece && tcph->cwr)
opt2 |= CCTRL_ECN_V(1);
opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
opt2 |= T5_ISS_F;
rpl5->iss = cpu_to_be32((get_random_u32() & ~7UL) - 1);
opt2 |= T5_OPT_2_VALID_F;
rpl5->opt0 = cpu_to_be64(opt0);
rpl5->opt2 = cpu_to_be32(opt2);
set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard);
cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}
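/*
 * Handle an incoming connection request (CPL_PASS_ACCEPT_REQ): look up
 * the listening endpoint, find a route to the peer, allocate and
 * initialise a cxgbit_sock, insert its TID and send the accept reply,
 * releasing the TID when the request must be rejected.
 */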
static void
cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
{
struct cxgbit_sock *csk = NULL;
struct cxgbit_np *cnp;
struct cpl_pass_accept_req *req = cplhdr(skb);
unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
struct tid_info *t = cdev->lldi.tids;
unsigned int tid = GET_TID(req);
u16 peer_mss = ntohs(req->tcpopt.mss);
unsigned short hdrs;
struct dst_entry *dst;
__u8 local_ip[16], peer_ip[16];
__be16 local_port, peer_port;
int ret;
int iptype;
pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
__func__, cdev, stid, tid);
cnp = lookup_stid(t, stid);
if (!cnp) {
pr_err("%s connect request on invalid stid %d\n",
__func__, stid);
goto rel_skb;
}
if (cnp->com.state != CSK_STATE_LISTEN) {
pr_err("%s - listening parent not in CSK_STATE_LISTEN\n",
__func__);
goto reject;
}
csk = lookup_tid(t, tid);
if (csk) {
pr_err("%s csk not null tid %u\n",
__func__, tid);
goto rel_skb;
}
cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
peer_ip, &local_port, &peer_port);
/* Find output route */
if (iptype == 4) {
pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 "
"lport %d rport %d peer_mss %d\n"
, __func__, cnp, tid,
local_ip, peer_ip, ntohs(local_port),
ntohs(peer_port), peer_mss);
dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev,
*(__be32 *)local_ip,
*(__be32 *)peer_ip,
local_port, peer_port,
PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
} else {
pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
"lport %d rport %d peer_mss %d\n"
, __func__, cnp, tid,
local_ip, peer_ip, ntohs(local_port),
ntohs(peer_port), peer_mss);
dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev,
local_ip, peer_ip,
local_port, peer_port,
PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
((struct sockaddr_in6 *)
&cnp->com.local_addr)->sin6_scope_id);
}
if (!dst) {
pr_err("%s - failed to find dst entry!\n",
__func__);
goto reject;
}
csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
if (!csk) {
dst_release(dst);
goto rel_skb;
}
ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
dst, cdev);
if (ret) {
pr_err("%s - failed to allocate l2t entry!\n",
__func__);
dst_release(dst);
kfree(csk);
goto reject;
}
kref_init(&csk->kref);
init_completion(&csk->com.wr_wait.completion);
INIT_LIST_HEAD(&csk->accept_node);
hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
sizeof(struct tcphdr) + (req->tcpopt.tstamp ? 12 : 0);
if (peer_mss && csk->mtu > (peer_mss + hdrs))
csk->mtu = peer_mss + hdrs;
csk->com.state = CSK_STATE_CONNECTING;
csk->com.cdev = cdev;
csk->cnp = cnp;
csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
csk->dst = dst;
csk->tid = tid;
csk->wr_cred = cdev->lldi.wr_cred -
DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
csk->wr_max_cred = csk->wr_cred;
csk->wr_una_cred = 0;
if (iptype == 4) {
struct sockaddr_in *sin = (struct sockaddr_in *)
&csk->com.local_addr;
sin->sin_family = AF_INET;
sin->sin_port = local_port;
sin->sin_addr.s_addr = *(__be32 *)local_ip;
sin = (struct sockaddr_in *)&csk->com.remote_addr;
sin->sin_family = AF_INET;
sin->sin_port = peer_port;
sin->sin_addr.s_addr = *(__be32 *)peer_ip;
} else {
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
&csk->com.local_addr;
sin6->sin6_family = PF_INET6;
sin6->sin6_port = local_port;
memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
cxgb4_clip_get(cdev->lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr,
1);
sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
sin6->sin6_family = PF_INET6;
sin6->sin6_port = peer_port;
memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
}
skb_queue_head_init(&csk->rxq);
skb_queue_head_init(&csk->txq);
skb_queue_head_init(&csk->ppodq);
skb_queue_head_init(&csk->backlogq);
skb_queue_head_init(&csk->skbq);
cxgbit_sock_reset_wr_list(csk);
spin_lock_init(&csk->lock);
init_waitqueue_head(&csk->waitq);
csk->lock_owner = false;
if (cxgbit_alloc_csk_skb(csk)) {
dst_release(dst);
kfree(csk);
goto rel_skb;
}
cxgbit_get_cnp(cnp);
cxgbit_get_cdev(cdev);
spin_lock(&cdev->cskq.lock);
list_add_tail(&csk->list, &cdev->cskq.list);
spin_unlock(&cdev->cskq.lock);
cxgb4_insert_tid(t, csk, tid, csk->com.local_addr.ss_family);
cxgbit_pass_accept_rpl(csk, req);
goto rel_skb;
reject:
cxgbit_release_tid(cdev, tid);
rel_skb:
__kfree_skb(skb);
}
static u32
cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
u32 *flowclenp)
{
u32 nparams, flowclen16, flowclen;
nparams = FLOWC_WR_NPARAMS_MIN;
if (csk->snd_wscale)
nparams++;
#ifdef CONFIG_CHELSIO_T4_DCB
nparams++;
#endif
flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
flowclen16 = DIV_ROUND_UP(flowclen, 16);
flowclen = flowclen16 * 16;
/*
* Return the number of 16-byte credits used by the flowc request.
* Pass back the nparams and actual flowc length if requested.
*/
if (nparamsp)
*nparamsp = nparams;
if (flowclenp)
*flowclenp = flowclen;
return flowclen16;
}
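/*
 * Send the FW_FLOWC_WR that describes this connection to the firmware
 * (channel, queues, sequence numbers, windows, MSS, maximum TX data
 * length and, optionally, the receive window scale and DCB priority),
 * using one of the pre-allocated skbs.  Returns the number of 16-byte
 * credits the request consumes.
 */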
u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
{
struct cxgbit_device *cdev = csk->com.cdev;
struct fw_flowc_wr *flowc;
u32 nparams, flowclen16, flowclen;
struct sk_buff *skb;
u8 index;
#ifdef CONFIG_CHELSIO_T4_DCB
u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif
flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);
skb = __skb_dequeue(&csk->skbq);
flowc = __skb_put_zero(skb, flowclen);
flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
FW_FLOWC_WR_NPARAMS_V(nparams));
flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
FW_WR_FLOWID_V(csk->tid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
(csk->com.cdev->lldi.pf));
flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
flowc->mnemval[7].val = cpu_to_be32(csk->emss);
flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD);
else
flowc->mnemval[8].val = cpu_to_be32(16384);
index = 9;
if (csk->snd_wscale) {
flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
index++;
}
#ifdef CONFIG_CHELSIO_T4_DCB
flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
if (vlan == VLAN_NONE) {
pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
flowc->mnemval[index].val = cpu_to_be32(0);
} else
flowc->mnemval[index].val = cpu_to_be32(
(vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
#endif
pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;"
" rcv_seq = %u; snd_win = %u; emss = %u\n",
__func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
csk->rcv_nxt, csk->snd_win, csk->emss);
set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
cxgbit_ofld_send(csk->com.cdev, skb);
return flowclen16;
}
static int
cxgbit_send_tcb_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
spin_lock_bh(&csk->lock);
if (unlikely(csk->com.state != CSK_STATE_ESTABLISHED)) {
spin_unlock_bh(&csk->lock);
pr_err("%s: csk 0x%p, tid %u, state %u\n",
__func__, csk, csk->tid, csk->com.state);
__kfree_skb(skb);
return -1;
}
cxgbit_get_csk(csk);
cxgbit_init_wr_wait(&csk->com.wr_wait);
cxgbit_ofld_send(csk->com.cdev, skb);
spin_unlock_bh(&csk->lock);
return 0;
}
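/*
 * Enable header and/or data digest (CRC) offload for the connection by
 * programming the ULP submode bits of the TCB via CPL_SET_TCB_FIELD,
 * then wait for the firmware reply.
 */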
int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
{
struct sk_buff *skb;
struct cpl_set_tcb_field *req;
u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
unsigned int len = roundup(sizeof(*req), 16);
int ret;
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
/* set up ulp submode */
req = __skb_put_zero(skb, len);
INIT_TP_WR(req, csk->tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
req->word_cookie = htons(0);
req->mask = cpu_to_be64(0x3 << 4);
req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
(dcrc ? ULP_CRC_DATA : 0)) << 4);
set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
if (cxgbit_send_tcb_skb(csk, skb))
return -1;
ret = cxgbit_wait_for_reply(csk->com.cdev,
&csk->com.wr_wait,
csk->tid, 5, __func__);
if (ret)
return -1;
return 0;
}
int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
{
struct sk_buff *skb;
struct cpl_set_tcb_field *req;
unsigned int len = roundup(sizeof(*req), 16);
int ret;
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
req = __skb_put_zero(skb, len);
INIT_TP_WR(req, csk->tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
req->word_cookie = htons(0);
req->mask = cpu_to_be64(0x3 << 8);
req->val = cpu_to_be64(pg_idx << 8);
set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
if (cxgbit_send_tcb_skb(csk, skb))
return -1;
ret = cxgbit_wait_for_reply(csk->com.cdev,
&csk->com.wr_wait,
csk->tid, 5, __func__);
if (ret)
return -1;
return 0;
}
static void
cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
struct cpl_pass_open_rpl *rpl = cplhdr(skb);
struct tid_info *t = cdev->lldi.tids;
unsigned int stid = GET_TID(rpl);
struct cxgbit_np *cnp = lookup_stid(t, stid);
pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
__func__, cnp, stid, rpl->status);
if (!cnp) {
pr_info("%s stid %d lookup failure\n", __func__, stid);
goto rel_skb;
}
cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
cxgbit_put_cnp(cnp);
rel_skb:
__kfree_skb(skb);
}
static void
cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
struct tid_info *t = cdev->lldi.tids;
unsigned int stid = GET_TID(rpl);
struct cxgbit_np *cnp = lookup_stid(t, stid);
pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
__func__, cnp, stid, rpl->status);
if (!cnp) {
pr_info("%s stid %d lookup failure\n", __func__, stid);
goto rel_skb;
}
cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
cxgbit_put_cnp(cnp);
rel_skb:
__kfree_skb(skb);
}
static void
cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
{
struct cpl_pass_establish *req = cplhdr(skb);
struct tid_info *t = cdev->lldi.tids;
unsigned int tid = GET_TID(req);
struct cxgbit_sock *csk;
struct cxgbit_np *cnp;
u16 tcp_opt = be16_to_cpu(req->tcp_opt);
u32 snd_isn = be32_to_cpu(req->snd_isn);
u32 rcv_isn = be32_to_cpu(req->rcv_isn);
csk = lookup_tid(t, tid);
if (unlikely(!csk)) {
pr_err("can't find connection for tid %u.\n", tid);
goto rel_skb;
}
cnp = csk->cnp;
pr_debug("%s: csk %p; tid %u; cnp %p\n",
__func__, csk, tid, cnp);
csk->write_seq = snd_isn;
csk->snd_una = snd_isn;
csk->snd_nxt = snd_isn;
csk->rcv_nxt = rcv_isn;
csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
cxgbit_set_emss(csk, tcp_opt);
dst_confirm(csk->dst);
csk->com.state = CSK_STATE_ESTABLISHED;
spin_lock_bh(&cnp->np_accept_lock);
list_add_tail(&csk->accept_node, &cnp->np_accept_list);
spin_unlock_bh(&cnp->np_accept_lock);
complete(&cnp->accept_comp);
rel_skb:
__kfree_skb(skb);
}
static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
cxgbit_skcb_flags(skb) = 0;
spin_lock_bh(&csk->rxq.lock);
__skb_queue_tail(&csk->rxq, skb);
spin_unlock_bh(&csk->rxq.lock);
wake_up(&csk->waitq);
}
static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
{
pr_debug("%s: csk %p; tid %u; state %d\n",
__func__, csk, csk->tid, csk->com.state);
switch (csk->com.state) {
case CSK_STATE_ESTABLISHED:
csk->com.state = CSK_STATE_CLOSING;
cxgbit_queue_rx_skb(csk, skb);
return;
case CSK_STATE_CLOSING:
/* simultaneous close */
csk->com.state = CSK_STATE_MORIBUND;
break;
case CSK_STATE_MORIBUND:
csk->com.state = CSK_STATE_DEAD;
cxgbit_put_csk(csk);
break;
case CSK_STATE_ABORTING:
break;
default:
pr_info("%s: cpl_peer_close in bad state %d\n",
__func__, csk->com.state);
}
__kfree_skb(skb);
}
static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
pr_debug("%s: csk %p; tid %u; state %d\n",
__func__, csk, csk->tid, csk->com.state);
switch (csk->com.state) {
case CSK_STATE_CLOSING:
csk->com.state = CSK_STATE_MORIBUND;
break;
case CSK_STATE_MORIBUND:
csk->com.state = CSK_STATE_DEAD;
cxgbit_put_csk(csk);
break;
case CSK_STATE_ABORTING:
case CSK_STATE_DEAD:
break;
default:
pr_info("%s: cpl_close_con_rpl in bad state %d\n",
__func__, csk->com.state);
}
__kfree_skb(skb);
}
static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
struct cpl_abort_req_rss *hdr = cplhdr(skb);
unsigned int tid = GET_TID(hdr);
struct sk_buff *rpl_skb;
bool release = false;
bool wakeup_thread = false;
u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
pr_debug("%s: csk %p; tid %u; state %d\n",
__func__, csk, tid, csk->com.state);
if (cxgb_is_neg_adv(hdr->status)) {
pr_err("%s: got neg advise %d on tid %u\n",
__func__, hdr->status, tid);
goto rel_skb;
}
switch (csk->com.state) {
case CSK_STATE_CONNECTING:
case CSK_STATE_MORIBUND:
csk->com.state = CSK_STATE_DEAD;
release = true;
break;
case CSK_STATE_ESTABLISHED:
csk->com.state = CSK_STATE_DEAD;
wakeup_thread = true;
break;
case CSK_STATE_CLOSING:
csk->com.state = CSK_STATE_DEAD;
if (!csk->conn)
release = true;
break;
case CSK_STATE_ABORTING:
break;
default:
pr_info("%s: cpl_abort_req_rss in bad state %d\n",
__func__, csk->com.state);
csk->com.state = CSK_STATE_DEAD;
}
__skb_queue_purge(&csk->txq);
if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
cxgbit_send_tx_flowc_wr(csk);
rpl_skb = __skb_dequeue(&csk->skbq);
cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx);
cxgbit_ofld_send(csk->com.cdev, rpl_skb);
if (wakeup_thread) {
cxgbit_queue_rx_skb(csk, skb);
return;
}
if (release)
cxgbit_put_csk(csk);
rel_skb:
__kfree_skb(skb);
}
static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
pr_debug("%s: csk %p; tid %u; state %d\n",
__func__, csk, csk->tid, csk->com.state);
switch (csk->com.state) {
case CSK_STATE_ABORTING:
csk->com.state = CSK_STATE_DEAD;
if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
cxgbit_wake_up(&csk->com.wr_wait, __func__,
rpl->status);
cxgbit_put_csk(csk);
break;
default:
pr_info("%s: cpl_abort_rpl_rss in state %d\n",
__func__, csk->com.state);
}
__kfree_skb(skb);
}
static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
{
const struct sk_buff *skb = csk->wr_pending_head;
u32 credit = 0;
if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
pr_err("csk 0x%p, tid %u, credit %u > %u\n",
csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
return true;
}
while (skb) {
credit += (__force u32)skb->csum;
skb = cxgbit_skcb_tx_wr_next(skb);
}
if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
csk, csk->tid, csk->wr_cred,
credit, csk->wr_max_cred);
return true;
}
return false;
}
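/*
 * Process a CPL_FW4_ACK: return TX credits, free the pending work
 * request skbs they complete, sanity-check the credit accounting,
 * advance snd_una and push any queued TX data.
 */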
static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
{
struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb);
u32 credits = rpl->credits;
u32 snd_una = ntohl(rpl->snd_una);
csk->wr_cred += credits;
if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;
while (credits) {
struct sk_buff *p = cxgbit_sock_peek_wr(csk);
u32 csum;
if (unlikely(!p)) {
pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
csk, csk->tid, credits,
csk->wr_cred, csk->wr_una_cred);
break;
}
csum = (__force u32)p->csum;
if (unlikely(credits < csum)) {
pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
csk, csk->tid,
credits, csk->wr_cred, csk->wr_una_cred,
csum);
p->csum = (__force __wsum)(csum - credits);
break;
}
cxgbit_sock_dequeue_wr(csk);
credits -= csum;
kfree_skb(p);
}
if (unlikely(cxgbit_credit_err(csk))) {
cxgbit_queue_rx_skb(csk, skb);
return;
}
if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
if (unlikely(before(snd_una, csk->snd_una))) {
pr_warn("csk 0x%p,%u, snd_una %u/%u.",
csk, csk->tid, snd_una,
csk->snd_una);
goto rel_skb;
}
if (csk->snd_una != snd_una) {
csk->snd_una = snd_una;
dst_confirm(csk->dst);
}
}
if (skb_queue_len(&csk->txq))
cxgbit_push_tx_frames(csk);
rel_skb:
__kfree_skb(skb);
}
static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
struct cxgbit_sock *csk;
struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
unsigned int tid = GET_TID(rpl);
struct cxgb4_lld_info *lldi = &cdev->lldi;
struct tid_info *t = lldi->tids;
csk = lookup_tid(t, tid);
if (unlikely(!csk)) {
pr_err("can't find connection for tid %u.\n", tid);
goto rel_skb;
} else {
cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
}
cxgbit_put_csk(csk);
rel_skb:
__kfree_skb(skb);
}
static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
{
struct cxgbit_sock *csk;
struct cpl_rx_data *cpl = cplhdr(skb);
unsigned int tid = GET_TID(cpl);
struct cxgb4_lld_info *lldi = &cdev->lldi;
struct tid_info *t = lldi->tids;
csk = lookup_tid(t, tid);
if (unlikely(!csk)) {
pr_err("can't find conn. for tid %u.\n", tid);
goto rel_skb;
}
cxgbit_queue_rx_skb(csk, skb);
return;
rel_skb:
__kfree_skb(skb);
}
static void
__cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
spin_lock(&csk->lock);
if (csk->lock_owner) {
__skb_queue_tail(&csk->backlogq, skb);
spin_unlock(&csk->lock);
return;
}
cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
spin_unlock(&csk->lock);
}
static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
cxgbit_get_csk(csk);
__cxgbit_process_rx_cpl(csk, skb);
cxgbit_put_csk(csk);
}
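/*
 * Dispatch a connection-scoped CPL message to its handler, deferring it
 * to the per-csk backlog when the socket lock is currently owned.  All
 * opcodes except CPL_FW4_ACK are processed under an extra csk reference.
 */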
static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
struct cxgbit_sock *csk;
struct cpl_tx_data *cpl = cplhdr(skb);
struct cxgb4_lld_info *lldi = &cdev->lldi;
struct tid_info *t = lldi->tids;
unsigned int tid = GET_TID(cpl);
u8 opcode = cxgbit_skcb_rx_opcode(skb);
bool ref = true;
switch (opcode) {
case CPL_FW4_ACK:
cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack;
ref = false;
break;
case CPL_PEER_CLOSE:
cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close;
break;
case CPL_CLOSE_CON_RPL:
cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl;
break;
case CPL_ABORT_REQ_RSS:
cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss;
break;
case CPL_ABORT_RPL_RSS:
cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss;
break;
default:
goto rel_skb;
}
csk = lookup_tid(t, tid);
if (unlikely(!csk)) {
pr_err("can't find conn. for tid %u.\n", tid);
goto rel_skb;
}
if (ref)
cxgbit_process_rx_cpl(csk, skb);
else
__cxgbit_process_rx_cpl(csk, skb);
return;
rel_skb:
__kfree_skb(skb);
}
cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = {
[CPL_PASS_OPEN_RPL] = cxgbit_pass_open_rpl,
[CPL_CLOSE_LISTSRV_RPL] = cxgbit_close_listsrv_rpl,
[CPL_PASS_ACCEPT_REQ] = cxgbit_pass_accept_req,
[CPL_PASS_ESTABLISH] = cxgbit_pass_establish,
[CPL_SET_TCB_RPL] = cxgbit_set_tcb_rpl,
[CPL_RX_DATA] = cxgbit_rx_data,
[CPL_FW4_ACK] = cxgbit_rx_cpl,
[CPL_PEER_CLOSE] = cxgbit_rx_cpl,
[CPL_CLOSE_CON_RPL] = cxgbit_rx_cpl,
[CPL_ABORT_REQ_RSS] = cxgbit_rx_cpl,
[CPL_ABORT_RPL_RSS] = cxgbit_rx_cpl,
};
| linux-master | drivers/target/iscsi/cxgbit/cxgbit_cm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SBP2 target driver (SCSI over IEEE1394 in target mode)
*
* Copyright (C) 2011 Chris Boot <[email protected]>
*/
#define KMSG_COMPONENT "sbp_target"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <asm/unaligned.h>
#include "sbp_target.h"
/* FireWire address region for management and command block address handlers */
static const struct fw_address_region sbp_register_region = {
.start = CSR_REGISTER_BASE + 0x10000,
.end = 0x1000000000000ULL,
};
static const u32 sbp_unit_directory_template[] = {
0x1200609e, /* unit_specifier_id: NCITS/T10 */
0x13010483, /* unit_sw_version: 1155D Rev 4 */
0x3800609e, /* command_set_specifier_id: NCITS/T10 */
0x390104d8, /* command_set: SPC-2 */
0x3b000000, /* command_set_revision: 0 */
0x3c000001, /* firmware_revision: 1 */
};
#define SESSION_MAINTENANCE_INTERVAL HZ
static atomic_t login_id = ATOMIC_INIT(0);
static void session_maintenance_work(struct work_struct *);
static int sbp_run_transaction(struct fw_card *, int, int, int, int,
unsigned long long, void *, size_t);
static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
{
int ret;
__be32 high, low;
ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
req->node_addr, req->generation, req->speed,
(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
&high, sizeof(high));
if (ret != RCODE_COMPLETE)
return ret;
ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
req->node_addr, req->generation, req->speed,
(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
&low, sizeof(low));
if (ret != RCODE_COMPLETE)
return ret;
*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);
return RCODE_COMPLETE;
}
static struct sbp_session *sbp_session_find_by_guid(
struct sbp_tpg *tpg, u64 guid)
{
struct se_session *se_sess;
struct sbp_session *sess, *found = NULL;
spin_lock_bh(&tpg->se_tpg.session_lock);
list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
sess = se_sess->fabric_sess_ptr;
if (sess->guid == guid)
found = sess;
}
spin_unlock_bh(&tpg->se_tpg.session_lock);
return found;
}
static struct sbp_login_descriptor *sbp_login_find_by_lun(
struct sbp_session *session, u32 unpacked_lun)
{
struct sbp_login_descriptor *login, *found = NULL;
spin_lock_bh(&session->lock);
list_for_each_entry(login, &session->login_list, link) {
if (login->login_lun == unpacked_lun)
found = login;
}
spin_unlock_bh(&session->lock);
return found;
}
static int sbp_login_count_all_by_lun(
struct sbp_tpg *tpg,
u32 unpacked_lun,
int exclusive)
{
struct se_session *se_sess;
struct sbp_session *sess;
struct sbp_login_descriptor *login;
int count = 0;
spin_lock_bh(&tpg->se_tpg.session_lock);
list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
sess = se_sess->fabric_sess_ptr;
spin_lock_bh(&sess->lock);
list_for_each_entry(login, &sess->login_list, link) {
if (login->login_lun != unpacked_lun)
continue;
if (!exclusive || login->exclusive)
count++;
}
spin_unlock_bh(&sess->lock);
}
spin_unlock_bh(&tpg->se_tpg.session_lock);
return count;
}
static struct sbp_login_descriptor *sbp_login_find_by_id(
struct sbp_tpg *tpg, int login_id)
{
struct se_session *se_sess;
struct sbp_session *sess;
struct sbp_login_descriptor *login, *found = NULL;
spin_lock_bh(&tpg->se_tpg.session_lock);
list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
sess = se_sess->fabric_sess_ptr;
spin_lock_bh(&sess->lock);
list_for_each_entry(login, &sess->login_list, link) {
if (login->login_id == login_id)
found = login;
}
spin_unlock_bh(&sess->lock);
}
spin_unlock_bh(&tpg->se_tpg.session_lock);
return found;
}
static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
{
struct se_portal_group *se_tpg = &tpg->se_tpg;
struct se_lun *se_lun;
rcu_read_lock();
hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
if (se_lun->unpacked_lun == login_lun) {
rcu_read_unlock();
*err = 0;
return login_lun;
}
}
rcu_read_unlock();
*err = -ENODEV;
return login_lun;
}
static struct sbp_session *sbp_session_create(
struct sbp_tpg *tpg,
u64 guid)
{
struct sbp_session *sess;
int ret;
char guid_str[17];
snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
sess = kmalloc(sizeof(*sess), GFP_KERNEL);
if (!sess)
return ERR_PTR(-ENOMEM);
spin_lock_init(&sess->lock);
INIT_LIST_HEAD(&sess->login_list);
INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
sess->guid = guid;
sess->se_sess = target_setup_session(&tpg->se_tpg, 128,
sizeof(struct sbp_target_request),
TARGET_PROT_NORMAL, guid_str,
sess, NULL);
if (IS_ERR(sess->se_sess)) {
pr_err("failed to init se_session\n");
ret = PTR_ERR(sess->se_sess);
kfree(sess);
return ERR_PTR(ret);
}
return sess;
}
static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
{
spin_lock_bh(&sess->lock);
if (!list_empty(&sess->login_list)) {
spin_unlock_bh(&sess->lock);
return;
}
spin_unlock_bh(&sess->lock);
if (cancel_work)
cancel_delayed_work_sync(&sess->maint_work);
target_remove_session(sess->se_sess);
if (sess->card)
fw_card_put(sess->card);
kfree(sess);
}
static void sbp_target_agent_unregister(struct sbp_target_agent *);
static void sbp_login_release(struct sbp_login_descriptor *login,
bool cancel_work)
{
struct sbp_session *sess = login->sess;
/* FIXME: abort/wait on tasks */
sbp_target_agent_unregister(login->tgt_agt);
if (sess) {
spin_lock_bh(&sess->lock);
list_del(&login->link);
spin_unlock_bh(&sess->lock);
sbp_session_release(sess, cancel_work);
}
kfree(login);
}
static struct sbp_target_agent *sbp_target_agent_register(
struct sbp_login_descriptor *);
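/*
 * Handle a LOGIN management ORB: validate the requested LUN, read the
 * initiator's GUID, enforce the exclusive-login and per-LUN login
 * limits, create session and login state as needed and write the login
 * response block back to the initiator.
 */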
static void sbp_management_request_login(
struct sbp_management_agent *agent, struct sbp_management_request *req,
int *status_data_size)
{
struct sbp_tport *tport = agent->tport;
struct sbp_tpg *tpg = tport->tpg;
struct sbp_session *sess;
struct sbp_login_descriptor *login;
struct sbp_login_response_block *response;
u64 guid;
u32 unpacked_lun;
int login_response_len, ret;
unpacked_lun = sbp_get_lun_from_tpg(tpg,
LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
if (ret) {
pr_notice("login to unknown LUN: %d\n",
LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
return;
}
ret = read_peer_guid(&guid, req);
if (ret != RCODE_COMPLETE) {
pr_warn("failed to read peer GUID: %d\n", ret);
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
return;
}
pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
unpacked_lun, guid);
sess = sbp_session_find_by_guid(tpg, guid);
if (sess) {
login = sbp_login_find_by_lun(sess, unpacked_lun);
if (login) {
pr_notice("initiator already logged-in\n");
/*
* SBP-2 R4 says we should return access denied, but
* that can confuse initiators. Instead we need to
* treat this like a reconnect, but send the login
* response block like a fresh login.
*
* This is required particularly in the case of Apple
* devices booting off the FireWire target, where
* the firmware has an active login to the target. When
* the OS takes control of the session it issues its own
* LOGIN rather than a RECONNECT. To avoid the machine
* waiting until the reconnect_hold expires, we can skip
* the ACCESS_DENIED errors to speed things up.
*/
goto already_logged_in;
}
}
/*
* check exclusive bit in login request
* reject with access_denied if any logins present
*/
if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
pr_warn("refusing exclusive login with other active logins\n");
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
return;
}
/*
* check exclusive bit in any existing login descriptor
* reject with access_denied if any exclusive logins present
*/
if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
pr_warn("refusing login while another exclusive login present\n");
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
return;
}
/*
* check we haven't exceeded the number of allowed logins
* reject with resources_unavailable if we have
*/
if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
tport->max_logins_per_lun) {
pr_warn("max number of logins reached\n");
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
return;
}
if (!sess) {
sess = sbp_session_create(tpg, guid);
if (IS_ERR(sess)) {
switch (PTR_ERR(sess)) {
case -EPERM:
ret = SBP_STATUS_ACCESS_DENIED;
break;
default:
ret = SBP_STATUS_RESOURCES_UNAVAIL;
break;
}
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(
STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_SBP_STATUS(ret));
return;
}
sess->node_id = req->node_addr;
sess->card = fw_card_get(req->card);
sess->generation = req->generation;
sess->speed = req->speed;
schedule_delayed_work(&sess->maint_work,
SESSION_MAINTENANCE_INTERVAL);
}
/* only take the latest reconnect_hold into account */
sess->reconnect_hold = min(
1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
tport->max_reconnect_timeout) - 1;
login = kmalloc(sizeof(*login), GFP_KERNEL);
if (!login) {
pr_err("failed to allocate login descriptor\n");
sbp_session_release(sess, true);
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
return;
}
login->sess = sess;
login->login_lun = unpacked_lun;
login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
login->login_id = atomic_inc_return(&login_id);
login->tgt_agt = sbp_target_agent_register(login);
if (IS_ERR(login->tgt_agt)) {
ret = PTR_ERR(login->tgt_agt);
pr_err("failed to map command block handler: %d\n", ret);
sbp_session_release(sess, true);
kfree(login);
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
return;
}
spin_lock_bh(&sess->lock);
list_add_tail(&login->link, &sess->login_list);
spin_unlock_bh(&sess->lock);
already_logged_in:
response = kzalloc(sizeof(*response), GFP_KERNEL);
if (!response) {
pr_err("failed to allocate login response block\n");
sbp_login_release(login, true);
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
return;
}
login_response_len = clamp_val(
LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
12, sizeof(*response));
response->misc = cpu_to_be32(
((login_response_len & 0xffff) << 16) |
(login->login_id & 0xffff));
response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
&response->command_block_agent);
ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
sess->node_id, sess->generation, sess->speed,
sbp2_pointer_to_addr(&req->orb.ptr2), response,
login_response_len);
if (ret != RCODE_COMPLETE) {
pr_debug("failed to write login response block: %x\n", ret);
kfree(response);
sbp_login_release(login, true);
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
return;
}
kfree(response);
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
static void sbp_management_request_query_logins(
struct sbp_management_agent *agent, struct sbp_management_request *req,
int *status_data_size)
{
pr_notice("QUERY LOGINS not implemented\n");
/* FIXME: implement */
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
}
static void sbp_management_request_reconnect(
struct sbp_management_agent *agent, struct sbp_management_request *req,
int *status_data_size)
{
struct sbp_tport *tport = agent->tport;
struct sbp_tpg *tpg = tport->tpg;
int ret;
u64 guid;
struct sbp_login_descriptor *login;
ret = read_peer_guid(&guid, req);
if (ret != RCODE_COMPLETE) {
pr_warn("failed to read peer GUID: %d\n", ret);
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
return;
}
pr_notice("mgt_agent RECONNECT from %016llx\n", guid);
login = sbp_login_find_by_id(tpg,
RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));
if (!login) {
pr_err("mgt_agent RECONNECT unknown login ID\n");
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
return;
}
if (login->sess->guid != guid) {
pr_err("mgt_agent RECONNECT login GUID doesn't match\n");
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
return;
}
spin_lock_bh(&login->sess->lock);
if (login->sess->card)
fw_card_put(login->sess->card);
/* update the node details */
login->sess->generation = req->generation;
login->sess->node_id = req->node_addr;
login->sess->card = fw_card_get(req->card);
login->sess->speed = req->speed;
spin_unlock_bh(&login->sess->lock);
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
static void sbp_management_request_logout(
struct sbp_management_agent *agent, struct sbp_management_request *req,
int *status_data_size)
{
struct sbp_tport *tport = agent->tport;
struct sbp_tpg *tpg = tport->tpg;
int id;
struct sbp_login_descriptor *login;
id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
login = sbp_login_find_by_id(tpg, id);
if (!login) {
pr_warn("cannot find login: %d\n", id);
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
return;
}
pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
login->login_lun, login->login_id);
if (req->node_addr != login->sess->node_id) {
pr_warn("logout from different node ID\n");
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
return;
}
sbp_login_release(login, true);
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
static void session_check_for_reset(struct sbp_session *sess)
{
bool card_valid = false;
spin_lock_bh(&sess->lock);
if (sess->card) {
spin_lock_irq(&sess->card->lock);
card_valid = (sess->card->local_node != NULL);
spin_unlock_irq(&sess->card->lock);
if (!card_valid) {
fw_card_put(sess->card);
sess->card = NULL;
}
}
if (!card_valid || (sess->generation != sess->card->generation)) {
pr_info("Waiting for reconnect from node: %016llx\n",
sess->guid);
sess->node_id = -1;
sess->reconnect_expires = get_jiffies_64() +
((sess->reconnect_hold + 1) * HZ);
}
spin_unlock_bh(&sess->lock);
}
static void session_reconnect_expired(struct sbp_session *sess)
{
struct sbp_login_descriptor *login, *temp;
LIST_HEAD(login_list);
pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);
spin_lock_bh(&sess->lock);
list_for_each_entry_safe(login, temp, &sess->login_list, link) {
login->sess = NULL;
list_move_tail(&login->link, &login_list);
}
spin_unlock_bh(&sess->lock);
list_for_each_entry_safe(login, temp, &login_list, link) {
list_del(&login->link);
sbp_login_release(login, false);
}
sbp_session_release(sess, false);
}
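/*
 * Periodic per-session maintenance: while logins exist, watch for bus
 * resets (invalidating the node ID) and tear the session down once the
 * reconnect timeout expires without a RECONNECT from the initiator.
 */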
static void session_maintenance_work(struct work_struct *work)
{
struct sbp_session *sess = container_of(work, struct sbp_session,
maint_work.work);
/* could be called while tearing down the session */
spin_lock_bh(&sess->lock);
if (list_empty(&sess->login_list)) {
spin_unlock_bh(&sess->lock);
return;
}
spin_unlock_bh(&sess->lock);
if (sess->node_id != -1) {
/* check for bus reset and make node_id invalid */
session_check_for_reset(sess);
schedule_delayed_work(&sess->maint_work,
SESSION_MAINTENANCE_INTERVAL);
} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
/* still waiting for reconnect */
schedule_delayed_work(&sess->maint_work,
SESSION_MAINTENANCE_INTERVAL);
} else {
/* reconnect timeout has expired */
session_reconnect_expired(sess);
}
}
static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
struct sbp_target_agent *agent)
{
int state;
switch (tcode) {
case TCODE_READ_QUADLET_REQUEST:
pr_debug("tgt_agent AGENT_STATE READ\n");
spin_lock_bh(&agent->lock);
state = agent->state;
spin_unlock_bh(&agent->lock);
*(__be32 *)data = cpu_to_be32(state);
return RCODE_COMPLETE;
case TCODE_WRITE_QUADLET_REQUEST:
/* ignored */
return RCODE_COMPLETE;
default:
return RCODE_TYPE_ERROR;
}
}
static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
struct sbp_target_agent *agent)
{
switch (tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
pr_debug("tgt_agent AGENT_RESET\n");
spin_lock_bh(&agent->lock);
agent->state = AGENT_STATE_RESET;
spin_unlock_bh(&agent->lock);
return RCODE_COMPLETE;
default:
return RCODE_TYPE_ERROR;
}
}
static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
struct sbp_target_agent *agent)
{
struct sbp2_pointer *ptr = data;
switch (tcode) {
case TCODE_WRITE_BLOCK_REQUEST:
spin_lock_bh(&agent->lock);
if (agent->state != AGENT_STATE_SUSPENDED &&
agent->state != AGENT_STATE_RESET) {
spin_unlock_bh(&agent->lock);
pr_notice("Ignoring ORB_POINTER write while active.\n");
return RCODE_CONFLICT_ERROR;
}
agent->state = AGENT_STATE_ACTIVE;
spin_unlock_bh(&agent->lock);
agent->orb_pointer = sbp2_pointer_to_addr(ptr);
agent->doorbell = false;
pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
agent->orb_pointer);
queue_work(system_unbound_wq, &agent->work);
return RCODE_COMPLETE;
case TCODE_READ_BLOCK_REQUEST:
pr_debug("tgt_agent ORB_POINTER READ\n");
spin_lock_bh(&agent->lock);
addr_to_sbp2_pointer(agent->orb_pointer, ptr);
spin_unlock_bh(&agent->lock);
return RCODE_COMPLETE;
default:
return RCODE_TYPE_ERROR;
}
}
static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
struct sbp_target_agent *agent)
{
switch (tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
spin_lock_bh(&agent->lock);
if (agent->state != AGENT_STATE_SUSPENDED) {
spin_unlock_bh(&agent->lock);
pr_debug("Ignoring DOORBELL while active.\n");
return RCODE_CONFLICT_ERROR;
}
agent->state = AGENT_STATE_ACTIVE;
spin_unlock_bh(&agent->lock);
agent->doorbell = true;
pr_debug("tgt_agent DOORBELL\n");
queue_work(system_unbound_wq, &agent->work);
return RCODE_COMPLETE;
case TCODE_READ_QUADLET_REQUEST:
return RCODE_COMPLETE;
default:
return RCODE_TYPE_ERROR;
}
}
static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
int tcode, void *data, struct sbp_target_agent *agent)
{
switch (tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
/* ignored as we don't send unsolicited status */
return RCODE_COMPLETE;
case TCODE_READ_QUADLET_REQUEST:
return RCODE_COMPLETE;
default:
return RCODE_TYPE_ERROR;
}
}
static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source, int generation,
unsigned long long offset, void *data, size_t length,
void *callback_data)
{
struct sbp_target_agent *agent = callback_data;
struct sbp_session *sess = agent->login->sess;
int sess_gen, sess_node, rcode;
spin_lock_bh(&sess->lock);
sess_gen = sess->generation;
sess_node = sess->node_id;
spin_unlock_bh(&sess->lock);
if (generation != sess_gen) {
pr_notice("ignoring request with wrong generation\n");
rcode = RCODE_TYPE_ERROR;
goto out;
}
if (source != sess_node) {
pr_notice("ignoring request from foreign node (%x != %x)\n",
source, sess_node);
rcode = RCODE_TYPE_ERROR;
goto out;
}
/* turn offset into the offset from the start of the block */
offset -= agent->handler.offset;
if (offset == 0x00 && length == 4) {
/* AGENT_STATE */
rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
} else if (offset == 0x04 && length == 4) {
/* AGENT_RESET */
rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
} else if (offset == 0x08 && length == 8) {
/* ORB_POINTER */
rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
} else if (offset == 0x10 && length == 4) {
/* DOORBELL */
rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
} else if (offset == 0x14 && length == 4) {
/* UNSOLICITED_STATUS_ENABLE */
rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
data, agent);
} else {
rcode = RCODE_ADDRESS_ERROR;
}
out:
fw_send_response(card, request, rcode);
}
static void sbp_handle_command(struct sbp_target_request *);
static int sbp_send_status(struct sbp_target_request *);
static void sbp_free_request(struct sbp_target_request *);
static void tgt_agent_process_work(struct work_struct *work)
{
struct sbp_target_request *req =
container_of(work, struct sbp_target_request, work);
pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
req->orb_pointer,
sbp2_pointer_to_addr(&req->orb.next_orb),
sbp2_pointer_to_addr(&req->orb.data_descriptor),
be32_to_cpu(req->orb.misc));
if (req->orb_pointer >> 32)
pr_debug("ORB with high bits set\n");
switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
case 0: /* Format specified by this standard */
sbp_handle_command(req);
return;
case 1: /* Reserved for future standardization */
case 2: /* Vendor-dependent */
req->status.status |= cpu_to_be32(
STATUS_BLOCK_RESP(
STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_DEAD(0) |
STATUS_BLOCK_LEN(1) |
STATUS_BLOCK_SBP_STATUS(
SBP_STATUS_REQ_TYPE_NOTSUPP));
sbp_send_status(req);
return;
case 3: /* Dummy ORB */
req->status.status |= cpu_to_be32(
STATUS_BLOCK_RESP(
STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_DEAD(0) |
STATUS_BLOCK_LEN(1) |
STATUS_BLOCK_SBP_STATUS(
SBP_STATUS_DUMMY_ORB_COMPLETE));
sbp_send_status(req);
return;
default:
BUG();
}
}
/* used to double-check we haven't been issued an AGENT_RESET */
static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
{
bool active;
spin_lock_bh(&agent->lock);
active = (agent->state == AGENT_STATE_ACTIVE);
spin_unlock_bh(&agent->lock);
return active;
}
static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
struct fw_card *card, u64 next_orb)
{
struct se_session *se_sess = sess->se_sess;
struct sbp_target_request *req;
int tag, cpu;
tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
return ERR_PTR(-ENOMEM);
req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
memset(req, 0, sizeof(*req));
req->se_cmd.map_tag = tag;
req->se_cmd.map_cpu = cpu;
req->se_cmd.tag = next_orb;
return req;
}
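/*
 * Fetch agent work item: starting at the current ORB pointer, read each
 * ORB from the initiator, queue it for processing and follow the
 * next_ORB chain; after a doorbell the current ORB is re-read only to
 * pick up its next_ORB link.  The agent is suspended when the chain
 * ends and marked dead on allocation or fetch failure.
 */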
static void tgt_agent_fetch_work(struct work_struct *work)
{
struct sbp_target_agent *agent =
container_of(work, struct sbp_target_agent, work);
struct sbp_session *sess = agent->login->sess;
struct sbp_target_request *req;
int ret;
bool doorbell = agent->doorbell;
u64 next_orb = agent->orb_pointer;
while (next_orb && tgt_agent_check_active(agent)) {
req = sbp_mgt_get_req(sess, sess->card, next_orb);
if (IS_ERR(req)) {
spin_lock_bh(&agent->lock);
agent->state = AGENT_STATE_DEAD;
spin_unlock_bh(&agent->lock);
return;
}
req->login = agent->login;
req->orb_pointer = next_orb;
req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
req->orb_pointer >> 32));
req->status.orb_low = cpu_to_be32(
req->orb_pointer & 0xfffffffc);
/* read in the ORB */
ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
sess->node_id, sess->generation, sess->speed,
req->orb_pointer, &req->orb, sizeof(req->orb));
if (ret != RCODE_COMPLETE) {
pr_debug("tgt_orb fetch failed: %x\n", ret);
req->status.status |= cpu_to_be32(
STATUS_BLOCK_SRC(
STATUS_SRC_ORB_FINISHED) |
STATUS_BLOCK_RESP(
STATUS_RESP_TRANSPORT_FAILURE) |
STATUS_BLOCK_DEAD(1) |
STATUS_BLOCK_LEN(1) |
STATUS_BLOCK_SBP_STATUS(
SBP_STATUS_UNSPECIFIED_ERROR));
spin_lock_bh(&agent->lock);
agent->state = AGENT_STATE_DEAD;
spin_unlock_bh(&agent->lock);
sbp_send_status(req);
return;
}
/* check the next_ORB field */
if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
next_orb = 0;
req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
STATUS_SRC_ORB_FINISHED));
} else {
next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
STATUS_SRC_ORB_CONTINUING));
}
if (tgt_agent_check_active(agent) && !doorbell) {
INIT_WORK(&req->work, tgt_agent_process_work);
queue_work(system_unbound_wq, &req->work);
} else {
/* don't process this request, just check next_ORB */
sbp_free_request(req);
}
spin_lock_bh(&agent->lock);
doorbell = agent->doorbell = false;
/* check if we should carry on processing */
if (next_orb)
agent->orb_pointer = next_orb;
else
agent->state = AGENT_STATE_SUSPENDED;
spin_unlock_bh(&agent->lock);
}
}
static struct sbp_target_agent *sbp_target_agent_register(
struct sbp_login_descriptor *login)
{
struct sbp_target_agent *agent;
int ret;
agent = kmalloc(sizeof(*agent), GFP_KERNEL);
if (!agent)
return ERR_PTR(-ENOMEM);
spin_lock_init(&agent->lock);
agent->handler.length = 0x20;
agent->handler.address_callback = tgt_agent_rw;
agent->handler.callback_data = agent;
agent->login = login;
agent->state = AGENT_STATE_RESET;
INIT_WORK(&agent->work, tgt_agent_fetch_work);
agent->orb_pointer = 0;
agent->doorbell = false;
ret = fw_core_add_address_handler(&agent->handler,
&sbp_register_region);
if (ret < 0) {
kfree(agent);
return ERR_PTR(ret);
}
return agent;
}
static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
{
fw_core_remove_address_handler(&agent->handler);
cancel_work_sync(&agent->work);
kfree(agent);
}
/*
 * Simple wrapper around fw_run_transaction that retries the transaction
 * up to five times on failure, with an increasing (quadratic) delay
 * between attempts.
 */
static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
int generation, int speed, unsigned long long offset,
void *payload, size_t length)
{
int attempt, ret, delay;
for (attempt = 1; attempt <= 5; attempt++) {
ret = fw_run_transaction(card, tcode, destination_id,
generation, speed, offset, payload, length);
switch (ret) {
case RCODE_COMPLETE:
case RCODE_TYPE_ERROR:
case RCODE_ADDRESS_ERROR:
case RCODE_GENERATION:
return ret;
default:
delay = 5 * attempt * attempt;
usleep_range(delay, delay * 2);
}
}
return ret;
}
/*
* Wrapper around sbp_run_transaction that gets the card, destination,
* generation and speed out of the request's session.
*/
static int sbp_run_request_transaction(struct sbp_target_request *req,
int tcode, unsigned long long offset, void *payload,
size_t length)
{
struct sbp_login_descriptor *login = req->login;
struct sbp_session *sess = login->sess;
struct fw_card *card;
int node_id, generation, speed, ret;
spin_lock_bh(&sess->lock);
card = fw_card_get(sess->card);
node_id = sess->node_id;
generation = sess->generation;
speed = sess->speed;
spin_unlock_bh(&sess->lock);
ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
offset, payload, length);
fw_card_put(card);
return ret;
}
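/*
 * Copy the CDB out of the ORB; if it is longer than the inline
 * command_block field, fetch the remainder from the initiator with an
 * additional block read.
 */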
static int sbp_fetch_command(struct sbp_target_request *req)
{
int ret, cmd_len, copy_len;
cmd_len = scsi_command_size(req->orb.command_block);
req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
if (!req->cmd_buf)
return -ENOMEM;
memcpy(req->cmd_buf, req->orb.command_block,
min_t(int, cmd_len, sizeof(req->orb.command_block)));
if (cmd_len > sizeof(req->orb.command_block)) {
pr_debug("sbp_fetch_command: filling in long command\n");
copy_len = cmd_len - sizeof(req->orb.command_block);
ret = sbp_run_request_transaction(req,
TCODE_READ_BLOCK_REQUEST,
req->orb_pointer + sizeof(req->orb),
req->cmd_buf + sizeof(req->orb.command_block),
copy_len);
if (ret != RCODE_COMPLETE)
return -EIO;
}
return 0;
}
static int sbp_fetch_page_table(struct sbp_target_request *req)
{
int pg_tbl_sz, ret;
struct sbp_page_table_entry *pg_tbl;
if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
return 0;
pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
sizeof(struct sbp_page_table_entry);
pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
if (!pg_tbl)
return -ENOMEM;
ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
sbp2_pointer_to_addr(&req->orb.data_descriptor),
pg_tbl, pg_tbl_sz);
if (ret != RCODE_COMPLETE) {
kfree(pg_tbl);
return -EIO;
}
req->pg_tbl = pg_tbl;
return 0;
}
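/*
 * Work out the total transfer length and DMA direction from the ORB misc
 * field. With a page table the length is the sum of all segment lengths;
 * otherwise the data_size field is the length itself.
 */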
static void sbp_calc_data_length_direction(struct sbp_target_request *req,
u32 *data_len, enum dma_data_direction *data_dir)
{
int data_size, direction, idx;
data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));
if (!data_size) {
*data_len = 0;
*data_dir = DMA_NONE;
return;
}
*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
if (req->pg_tbl) {
*data_len = 0;
for (idx = 0; idx < data_size; idx++) {
*data_len += be16_to_cpu(
req->pg_tbl[idx].segment_length);
}
} else {
*data_len = data_size;
}
}
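/*
 * Top half of command handling: fetch the CDB and page table, derive the
 * transfer length and direction, then submit the command to the target core.
 */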
static void sbp_handle_command(struct sbp_target_request *req)
{
struct sbp_login_descriptor *login = req->login;
struct sbp_session *sess = login->sess;
int ret, unpacked_lun;
u32 data_length;
enum dma_data_direction data_dir;
ret = sbp_fetch_command(req);
if (ret) {
pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
goto err;
}
ret = sbp_fetch_page_table(req);
if (ret) {
pr_debug("sbp_handle_command: fetch page table failed: %d\n",
ret);
goto err;
}
unpacked_lun = req->login->login_lun;
sbp_calc_data_length_direction(req, &data_length, &data_dir);
pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
req->orb_pointer, unpacked_lun, data_length, data_dir);
/* only used for printk until we do TMRs */
req->se_cmd.tag = req->orb_pointer;
target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
req->sense_buf, unpacked_lun, data_length,
TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF);
return;
err:
req->status.status |= cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
STATUS_BLOCK_DEAD(0) |
STATUS_BLOCK_LEN(1) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
sbp_send_status(req);
}
/*
* DMA_TO_DEVICE = read from initiator (SCSI WRITE)
* DMA_FROM_DEVICE = write to initiator (SCSI READ)
*/
static int sbp_rw_data(struct sbp_target_request *req)
{
struct sbp_session *sess = req->login->sess;
int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
generation, num_pte, length, tfr_length,
rcode = RCODE_COMPLETE;
struct sbp_page_table_entry *pte;
unsigned long long offset;
struct fw_card *card;
struct sg_mapping_iter iter;
if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
tcode = TCODE_WRITE_BLOCK_REQUEST;
sg_miter_flags = SG_MITER_FROM_SG;
} else {
tcode = TCODE_READ_BLOCK_REQUEST;
sg_miter_flags = SG_MITER_TO_SG;
}
max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));
pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
if (pg_size) {
pr_err("sbp_run_transaction: page size ignored\n");
}
spin_lock_bh(&sess->lock);
card = fw_card_get(sess->card);
node_id = sess->node_id;
generation = sess->generation;
spin_unlock_bh(&sess->lock);
if (req->pg_tbl) {
pte = req->pg_tbl;
num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
offset = 0;
length = 0;
} else {
pte = NULL;
num_pte = 0;
offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
length = req->se_cmd.data_length;
}
sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
sg_miter_flags);
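/*
 * Walk the command's scatterlist, taking segments either from the page
 * table or from the single direct descriptor, and split each transfer at
 * the smallest of the remaining segment, max_payload and the current SG
 * element length.
 */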
while (length || num_pte) {
if (!length) {
offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
be32_to_cpu(pte->segment_base_lo);
length = be16_to_cpu(pte->segment_length);
pte++;
num_pte--;
}
sg_miter_next(&iter);
tfr_length = min3(length, max_payload, (int)iter.length);
/* FIXME: take page_size into account */
rcode = sbp_run_transaction(card, tcode, node_id,
generation, speed,
offset, iter.addr, tfr_length);
if (rcode != RCODE_COMPLETE)
break;
length -= tfr_length;
offset += tfr_length;
iter.consumed = tfr_length;
}
sg_miter_stop(&iter);
fw_card_put(card);
if (rcode == RCODE_COMPLETE) {
WARN_ON(length != 0);
return 0;
} else {
return -EIO;
}
}
static int sbp_send_status(struct sbp_target_request *req)
{
int rc, ret = 0, length;
struct sbp_login_descriptor *login = req->login;
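/*
 * STATUS_BLOCK_LEN holds one less than the number of quadlets written to
 * the status FIFO; recover the byte count for the write below.
 */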
length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
login->status_fifo_addr, &req->status, length);
if (rc != RCODE_COMPLETE) {
pr_debug("sbp_send_status: write failed: 0x%x\n", rc);
ret = -EIO;
goto put_ref;
}
pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
req->orb_pointer);
/*
* Drop the extra ACK_KREF reference taken by target_submit_cmd()
* ahead of sbp_check_stop_free() -> transport_generic_free_cmd()
* final se_cmd->cmd_kref put.
*/
put_ref:
target_put_sess_cmd(&req->se_cmd);
return ret;
}
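/*
 * Repack fixed-format SCSI sense data into the sense fields of the SBP-2
 * status block. Descriptor-format sense is not supported and results in a
 * REQUEST ABORTED status.
 */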
static void sbp_sense_mangle(struct sbp_target_request *req)
{
struct se_cmd *se_cmd = &req->se_cmd;
u8 *sense = req->sense_buf;
u8 *status = req->status.data;
WARN_ON(se_cmd->scsi_sense_length < 18);
switch (sense[0] & 0x7f) { /* sfmt */
case 0x70: /* current, fixed */
status[0] = 0 << 6;
break;
case 0x71: /* deferred, fixed */
status[0] = 1 << 6;
break;
case 0x72: /* current, descriptor */
case 0x73: /* deferred, descriptor */
default:
/*
* TODO: SBP-3 specifies what we should do with descriptor
* format sense data
*/
pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
sense[0]);
req->status.status |= cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_DEAD(0) |
STATUS_BLOCK_LEN(1) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
return;
}
status[0] |= se_cmd->scsi_status & 0x3f;/* status */
status[1] =
(sense[0] & 0x80) | /* valid */
((sense[2] & 0xe0) >> 1) | /* mark, eom, ili */
(sense[2] & 0x0f); /* sense_key */
status[2] = 0; /* XXX sense_code */
status[3] = 0; /* XXX sense_qualifier */
/* information */
status[4] = sense[3];
status[5] = sense[4];
status[6] = sense[5];
status[7] = sense[6];
/* CDB-dependent */
status[8] = sense[8];
status[9] = sense[9];
status[10] = sense[10];
status[11] = sense[11];
/* fru */
status[12] = sense[14];
/* sense_key-dependent */
status[13] = sense[15];
status[14] = sense[16];
status[15] = sense[17];
req->status.status |= cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_DEAD(0) |
STATUS_BLOCK_LEN(5) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
static int sbp_send_sense(struct sbp_target_request *req)
{
struct se_cmd *se_cmd = &req->se_cmd;
if (se_cmd->scsi_sense_length) {
sbp_sense_mangle(req);
} else {
req->status.status |= cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_DEAD(0) |
STATUS_BLOCK_LEN(1) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
return sbp_send_status(req);
}
static void sbp_free_request(struct sbp_target_request *req)
{
struct se_cmd *se_cmd = &req->se_cmd;
struct se_session *se_sess = se_cmd->se_sess;
kfree(req->pg_tbl);
kfree(req->cmd_buf);
target_free_tag(se_sess, se_cmd);
}
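/*
 * Management agent work item: fetch the management ORB from the initiator,
 * dispatch the requested function (login, query logins, reconnect, logout,
 * ...) and write the resulting status block back to the status FIFO.
 */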
static void sbp_mgt_agent_process(struct work_struct *work)
{
struct sbp_management_agent *agent =
container_of(work, struct sbp_management_agent, work);
struct sbp_management_request *req = agent->request;
int ret;
int status_data_len = 0;
/* fetch the ORB from the initiator */
ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
req->node_addr, req->generation, req->speed,
agent->orb_offset, &req->orb, sizeof(req->orb));
if (ret != RCODE_COMPLETE) {
pr_debug("mgt_orb fetch failed: %x\n", ret);
goto out;
}
pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
sbp2_pointer_to_addr(&req->orb.ptr1),
sbp2_pointer_to_addr(&req->orb.ptr2),
be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
sbp2_pointer_to_addr(&req->orb.status_fifo));
if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
pr_err("mgt_orb bad request\n");
goto out;
}
switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
case MANAGEMENT_ORB_FUNCTION_LOGIN:
sbp_management_request_login(agent, req, &status_data_len);
break;
case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
sbp_management_request_query_logins(agent, req,
&status_data_len);
break;
case MANAGEMENT_ORB_FUNCTION_RECONNECT:
sbp_management_request_reconnect(agent, req, &status_data_len);
break;
case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
pr_notice("SET PASSWORD not implemented\n");
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
break;
case MANAGEMENT_ORB_FUNCTION_LOGOUT:
sbp_management_request_logout(agent, req, &status_data_len);
break;
case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
pr_notice("ABORT TASK not implemented\n");
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
break;
case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
pr_notice("ABORT TASK SET not implemented\n");
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
break;
case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
pr_notice("LOGICAL UNIT RESET not implemented\n");
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
break;
case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
pr_notice("TARGET RESET not implemented\n");
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
break;
default:
pr_notice("unknown management function 0x%x\n",
MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
break;
}
req->status.status |= cpu_to_be32(
STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
req->status.orb_low = cpu_to_be32(agent->orb_offset);
/* write the status block back to the initiator */
ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
req->node_addr, req->generation, req->speed,
sbp2_pointer_to_addr(&req->orb.status_fifo),
&req->status, 8 + status_data_len);
if (ret != RCODE_COMPLETE) {
pr_debug("mgt_orb status write failed: %x\n", ret);
goto out;
}
out:
fw_card_put(req->card);
kfree(req);
spin_lock_bh(&agent->lock);
agent->state = MANAGEMENT_AGENT_STATE_IDLE;
spin_unlock_bh(&agent->lock);
}
static void sbp_mgt_agent_rw(struct fw_card *card,
struct fw_request *request, int tcode, int destination, int source,
int generation, unsigned long long offset, void *data, size_t length,
void *callback_data)
{
struct sbp_management_agent *agent = callback_data;
struct sbp2_pointer *ptr = data;
int rcode = RCODE_ADDRESS_ERROR;
if (!agent->tport->enable)
goto out;
if ((offset != agent->handler.offset) || (length != 8))
goto out;
if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
struct sbp_management_request *req;
int prev_state;
spin_lock_bh(&agent->lock);
prev_state = agent->state;
agent->state = MANAGEMENT_AGENT_STATE_BUSY;
spin_unlock_bh(&agent->lock);
if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
pr_notice("ignoring management request while busy\n");
rcode = RCODE_CONFLICT_ERROR;
goto out;
}
req = kzalloc(sizeof(*req), GFP_ATOMIC);
if (!req) {
rcode = RCODE_CONFLICT_ERROR;
goto out;
}
req->card = fw_card_get(card);
req->generation = generation;
req->node_addr = source;
req->speed = fw_get_request_speed(request);
agent->orb_offset = sbp2_pointer_to_addr(ptr);
agent->request = req;
queue_work(system_unbound_wq, &agent->work);
rcode = RCODE_COMPLETE;
} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
addr_to_sbp2_pointer(agent->orb_offset, ptr);
rcode = RCODE_COMPLETE;
} else {
rcode = RCODE_TYPE_ERROR;
}
out:
fw_send_response(card, request, rcode);
}
static struct sbp_management_agent *sbp_management_agent_register(
struct sbp_tport *tport)
{
int ret;
struct sbp_management_agent *agent;
agent = kmalloc(sizeof(*agent), GFP_KERNEL);
if (!agent)
return ERR_PTR(-ENOMEM);
spin_lock_init(&agent->lock);
agent->tport = tport;
agent->handler.length = 0x08;
agent->handler.address_callback = sbp_mgt_agent_rw;
agent->handler.callback_data = agent;
agent->state = MANAGEMENT_AGENT_STATE_IDLE;
INIT_WORK(&agent->work, sbp_mgt_agent_process);
agent->orb_offset = 0;
agent->request = NULL;
ret = fw_core_add_address_handler(&agent->handler,
&sbp_register_region);
if (ret < 0) {
kfree(agent);
return ERR_PTR(ret);
}
return agent;
}
static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
{
fw_core_remove_address_handler(&agent->handler);
cancel_work_sync(&agent->work);
kfree(agent);
}
static int sbp_check_true(struct se_portal_group *se_tpg)
{
return 1;
}
static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
{
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
struct sbp_tport *tport = tpg->tport;
return &tport->tport_name[0];
}
static u16 sbp_get_tag(struct se_portal_group *se_tpg)
{
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
return tpg->tport_tpgt;
}
static void sbp_release_cmd(struct se_cmd *se_cmd)
{
struct sbp_target_request *req = container_of(se_cmd,
struct sbp_target_request, se_cmd);
sbp_free_request(req);
}
static int sbp_write_pending(struct se_cmd *se_cmd)
{
struct sbp_target_request *req = container_of(se_cmd,
struct sbp_target_request, se_cmd);
int ret;
ret = sbp_rw_data(req);
if (ret) {
req->status.status |= cpu_to_be32(
STATUS_BLOCK_RESP(
STATUS_RESP_TRANSPORT_FAILURE) |
STATUS_BLOCK_DEAD(0) |
STATUS_BLOCK_LEN(1) |
STATUS_BLOCK_SBP_STATUS(
SBP_STATUS_UNSPECIFIED_ERROR));
sbp_send_status(req);
return ret;
}
target_execute_cmd(se_cmd);
return 0;
}
static int sbp_queue_data_in(struct se_cmd *se_cmd)
{
struct sbp_target_request *req = container_of(se_cmd,
struct sbp_target_request, se_cmd);
int ret;
ret = sbp_rw_data(req);
if (ret) {
req->status.status |= cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
STATUS_BLOCK_DEAD(0) |
STATUS_BLOCK_LEN(1) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
sbp_send_status(req);
return ret;
}
return sbp_send_sense(req);
}
/*
* Called after command (no data transfer) or after the write (to device)
* operation is completed
*/
static int sbp_queue_status(struct se_cmd *se_cmd)
{
struct sbp_target_request *req = container_of(se_cmd,
struct sbp_target_request, se_cmd);
return sbp_send_sense(req);
}
static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
{
}
static void sbp_aborted_task(struct se_cmd *se_cmd)
{
return;
}
static int sbp_check_stop_free(struct se_cmd *se_cmd)
{
struct sbp_target_request *req = container_of(se_cmd,
struct sbp_target_request, se_cmd);
return transport_generic_free_cmd(&req->se_cmd, 0);
}
static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
{
struct se_lun *lun;
int count = 0;
rcu_read_lock();
hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
count++;
rcu_read_unlock();
return count;
}
static int sbp_update_unit_directory(struct sbp_tport *tport)
{
struct se_lun *lun;
int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
u32 *data;
if (tport->unit_directory.data) {
fw_core_remove_descriptor(&tport->unit_directory);
kfree(tport->unit_directory.data);
tport->unit_directory.data = NULL;
}
if (!tport->enable || !tport->tpg)
return 0;
num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);
/*
* Number of entries in the final unit directory:
* - all of those in the template
* - management_agent
* - unit_characteristics
* - reconnect_timeout
* - unit unique ID
* - one for each LUN
*
* MUST NOT include leaf or sub-directory entries
*/
num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;
if (tport->directory_id != -1)
num_entries++;
/* allocate num_entries + 4 for the header and unique ID leaf */
data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
if (!data)
return -ENOMEM;
/* directory_length */
data[idx++] = num_entries << 16;
/* directory_id */
if (tport->directory_id != -1)
data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;
/* unit directory template */
memcpy(&data[idx], sbp_unit_directory_template,
sizeof(sbp_unit_directory_template));
idx += ARRAY_SIZE(sbp_unit_directory_template);
/* management_agent */
mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);
/* unit_characteristics */
data[idx++] = 0x3a000000 |
(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
SBP_ORB_FETCH_SIZE;
/* reconnect_timeout */
data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);
/* unit unique ID (leaf is just after LUNs) */
data[idx++] = 0x8d000000 | (num_luns + 1);
rcu_read_lock();
hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
struct se_device *dev;
int type;
/*
* rcu_dereference_raw protected by se_lun->lun_group symlink
* reference to se_device->dev_group.
*/
dev = rcu_dereference_raw(lun->lun_se_dev);
type = dev->transport->get_device_type(dev);
/* logical_unit_number */
data[idx++] = 0x14000000 |
((type << 16) & 0x1f0000) |
(lun->unpacked_lun & 0xffff);
}
rcu_read_unlock();
/* unit unique ID leaf */
data[idx++] = 2 << 16;
data[idx++] = tport->guid >> 32;
data[idx++] = tport->guid;
tport->unit_directory.length = idx;
tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
tport->unit_directory.data = data;
ret = fw_core_add_descriptor(&tport->unit_directory);
if (ret < 0) {
kfree(tport->unit_directory.data);
tport->unit_directory.data = NULL;
}
return ret;
}
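/*
 * Parse a GUID given as 16 hex digits, optionally followed by a trailing
 * newline. Returns the number of characters consumed, or -1 on error.
 */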
static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
{
const char *cp;
char c, nibble;
int pos = 0, err;
*wwn = 0;
for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
c = *cp;
if (c == '\n' && cp[1] == '\0')
continue;
if (c == '\0') {
err = 2;
if (pos != 16)
goto fail;
return cp - name;
}
err = 3;
if (isdigit(c))
nibble = c - '0';
else if (isxdigit(c))
nibble = tolower(c) - 'a' + 10;
else
goto fail;
*wwn = (*wwn << 4) | nibble;
pos++;
}
err = 4;
fail:
printk(KERN_INFO "err %u len %zu pos %u\n",
err, cp - name, pos);
return -1;
}
static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
{
return snprintf(buf, len, "%016llx", wwn);
}
static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
u64 guid = 0;
if (sbp_parse_wwn(name, &guid) < 0)
return -EINVAL;
return 0;
}
static int sbp_post_link_lun(
struct se_portal_group *se_tpg,
struct se_lun *se_lun)
{
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
return sbp_update_unit_directory(tpg->tport);
}
static void sbp_pre_unlink_lun(
struct se_portal_group *se_tpg,
struct se_lun *se_lun)
{
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
struct sbp_tport *tport = tpg->tport;
int ret;
if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
tport->enable = 0;
ret = sbp_update_unit_directory(tport);
if (ret < 0)
pr_err("unlink LUN: failed to update unit directory\n");
}
static struct se_portal_group *sbp_make_tpg(struct se_wwn *wwn,
const char *name)
{
struct sbp_tport *tport =
container_of(wwn, struct sbp_tport, tport_wwn);
struct sbp_tpg *tpg;
unsigned long tpgt;
int ret;
if (strstr(name, "tpgt_") != name)
return ERR_PTR(-EINVAL);
if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
return ERR_PTR(-EINVAL);
if (tport->tpg) {
pr_err("Only one TPG per Unit is possible.\n");
return ERR_PTR(-EBUSY);
}
tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
if (!tpg)
return ERR_PTR(-ENOMEM);
tpg->tport = tport;
tpg->tport_tpgt = tpgt;
tport->tpg = tpg;
/* default attribute values */
tport->enable = 0;
tport->directory_id = -1;
tport->mgt_orb_timeout = 15;
tport->max_reconnect_timeout = 5;
tport->max_logins_per_lun = 1;
tport->mgt_agt = sbp_management_agent_register(tport);
if (IS_ERR(tport->mgt_agt)) {
ret = PTR_ERR(tport->mgt_agt);
goto out_free_tpg;
}
ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
if (ret < 0)
goto out_unreg_mgt_agt;
return &tpg->se_tpg;
out_unreg_mgt_agt:
sbp_management_agent_unregister(tport->mgt_agt);
out_free_tpg:
tport->tpg = NULL;
kfree(tpg);
return ERR_PTR(ret);
}
static void sbp_drop_tpg(struct se_portal_group *se_tpg)
{
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
struct sbp_tport *tport = tpg->tport;
core_tpg_deregister(se_tpg);
sbp_management_agent_unregister(tport->mgt_agt);
tport->tpg = NULL;
kfree(tpg);
}
static struct se_wwn *sbp_make_tport(
struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
struct sbp_tport *tport;
u64 guid = 0;
if (sbp_parse_wwn(name, &guid) < 0)
return ERR_PTR(-EINVAL);
tport = kzalloc(sizeof(*tport), GFP_KERNEL);
if (!tport)
return ERR_PTR(-ENOMEM);
tport->guid = guid;
sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);
return &tport->tport_wwn;
}
static void sbp_drop_tport(struct se_wwn *wwn)
{
struct sbp_tport *tport =
container_of(wwn, struct sbp_tport, tport_wwn);
kfree(tport);
}
static ssize_t sbp_wwn_version_show(struct config_item *item, char *page)
{
return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
}
CONFIGFS_ATTR_RO(sbp_wwn_, version);
static struct configfs_attribute *sbp_wwn_attrs[] = {
&sbp_wwn_attr_version,
NULL,
};
static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page)
{
struct se_portal_group *se_tpg = to_tpg(item);
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
struct sbp_tport *tport = tpg->tport;
if (tport->directory_id == -1)
return sprintf(page, "implicit\n");
else
return sprintf(page, "%06x\n", tport->directory_id);
}
static ssize_t sbp_tpg_directory_id_store(struct config_item *item,
const char *page, size_t count)
{
struct se_portal_group *se_tpg = to_tpg(item);
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
struct sbp_tport *tport = tpg->tport;
unsigned long val;
if (tport->enable) {
pr_err("Cannot change the directory_id on an active target.\n");
return -EBUSY;
}
if (strstr(page, "implicit") == page) {
tport->directory_id = -1;
} else {
if (kstrtoul(page, 16, &val) < 0)
return -EINVAL;
if (val > 0xffffff)
return -EINVAL;
tport->directory_id = val;
}
return count;
}
static int sbp_enable_tpg(struct se_portal_group *se_tpg, bool enable)
{
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
struct sbp_tport *tport = tpg->tport;
int ret;
if (enable) {
if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
pr_err("Cannot enable a target with no LUNs!\n");
return -EINVAL;
}
} else {
/* XXX: force-shutdown sessions instead? */
spin_lock_bh(&se_tpg->session_lock);
if (!list_empty(&se_tpg->tpg_sess_list)) {
spin_unlock_bh(&se_tpg->session_lock);
return -EBUSY;
}
spin_unlock_bh(&se_tpg->session_lock);
}
tport->enable = enable;
ret = sbp_update_unit_directory(tport);
if (ret < 0) {
pr_err("Could not update Config ROM\n");
return ret;
}
return 0;
}
CONFIGFS_ATTR(sbp_tpg_, directory_id);
static struct configfs_attribute *sbp_tpg_base_attrs[] = {
&sbp_tpg_attr_directory_id,
NULL,
};
static ssize_t sbp_tpg_attrib_mgt_orb_timeout_show(struct config_item *item,
char *page)
{
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
struct sbp_tport *tport = tpg->tport;
return sprintf(page, "%d\n", tport->mgt_orb_timeout);
}
static ssize_t sbp_tpg_attrib_mgt_orb_timeout_store(struct config_item *item,
const char *page, size_t count)
{
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
struct sbp_tport *tport = tpg->tport;
unsigned long val;
int ret;
if (kstrtoul(page, 0, &val) < 0)
return -EINVAL;
if ((val < 1) || (val > 127))
return -EINVAL;
if (tport->mgt_orb_timeout == val)
return count;
tport->mgt_orb_timeout = val;
ret = sbp_update_unit_directory(tport);
if (ret < 0)
return ret;
return count;
}
static ssize_t sbp_tpg_attrib_max_reconnect_timeout_show(struct config_item *item,
char *page)
{
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
struct sbp_tport *tport = tpg->tport;
return sprintf(page, "%d\n", tport->max_reconnect_timeout);
}
static ssize_t sbp_tpg_attrib_max_reconnect_timeout_store(struct config_item *item,
const char *page, size_t count)
{
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
struct sbp_tport *tport = tpg->tport;
unsigned long val;
int ret;
if (kstrtoul(page, 0, &val) < 0)
return -EINVAL;
if ((val < 1) || (val > 32767))
return -EINVAL;
if (tport->max_reconnect_timeout == val)
return count;
tport->max_reconnect_timeout = val;
ret = sbp_update_unit_directory(tport);
if (ret < 0)
return ret;
return count;
}
static ssize_t sbp_tpg_attrib_max_logins_per_lun_show(struct config_item *item,
char *page)
{
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
struct sbp_tport *tport = tpg->tport;
return sprintf(page, "%d\n", tport->max_logins_per_lun);
}
static ssize_t sbp_tpg_attrib_max_logins_per_lun_store(struct config_item *item,
const char *page, size_t count)
{
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
struct sbp_tport *tport = tpg->tport;
unsigned long val;
if (kstrtoul(page, 0, &val) < 0)
return -EINVAL;
if ((val < 1) || (val > 127))
return -EINVAL;
/* XXX: also check against current count? */
tport->max_logins_per_lun = val;
return count;
}
CONFIGFS_ATTR(sbp_tpg_attrib_, mgt_orb_timeout);
CONFIGFS_ATTR(sbp_tpg_attrib_, max_reconnect_timeout);
CONFIGFS_ATTR(sbp_tpg_attrib_, max_logins_per_lun);
static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
&sbp_tpg_attrib_attr_mgt_orb_timeout,
&sbp_tpg_attrib_attr_max_reconnect_timeout,
&sbp_tpg_attrib_attr_max_logins_per_lun,
NULL,
};
static const struct target_core_fabric_ops sbp_ops = {
.module = THIS_MODULE,
.fabric_name = "sbp",
.tpg_get_wwn = sbp_get_fabric_wwn,
.tpg_get_tag = sbp_get_tag,
.tpg_check_demo_mode = sbp_check_true,
.tpg_check_demo_mode_cache = sbp_check_true,
.release_cmd = sbp_release_cmd,
.write_pending = sbp_write_pending,
.queue_data_in = sbp_queue_data_in,
.queue_status = sbp_queue_status,
.queue_tm_rsp = sbp_queue_tm_rsp,
.aborted_task = sbp_aborted_task,
.check_stop_free = sbp_check_stop_free,
.fabric_make_wwn = sbp_make_tport,
.fabric_drop_wwn = sbp_drop_tport,
.fabric_make_tpg = sbp_make_tpg,
.fabric_enable_tpg = sbp_enable_tpg,
.fabric_drop_tpg = sbp_drop_tpg,
.fabric_post_link = sbp_post_link_lun,
.fabric_pre_unlink = sbp_pre_unlink_lun,
.fabric_make_np = NULL,
.fabric_drop_np = NULL,
.fabric_init_nodeacl = sbp_init_nodeacl,
.tfc_wwn_attrs = sbp_wwn_attrs,
.tfc_tpg_base_attrs = sbp_tpg_base_attrs,
.tfc_tpg_attrib_attrs = sbp_tpg_attrib_attrs,
};
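/*
 * Rough configuration sketch via the target configfs layout registered by
 * this template (illustrative only; placeholder names in angle brackets,
 * exact paths depend on the local backstore setup):
 *
 *   mkdir -p /sys/kernel/config/target/sbp/<16-hex-digit guid>/tpgt_1
 *   ln -s /sys/kernel/config/target/core/<hba>/<dev> \
 *         /sys/kernel/config/target/sbp/<guid>/tpgt_1/lun/lun_0/<link>
 *   echo 1 > /sys/kernel/config/target/sbp/<guid>/tpgt_1/enable
 */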
static int __init sbp_init(void)
{
return target_register_template(&sbp_ops);
}
static void __exit sbp_exit(void)
{
target_unregister_template(&sbp_ops);
}
MODULE_DESCRIPTION("FireWire SBP fabric driver");
MODULE_LICENSE("GPL");
module_init(sbp_init);
module_exit(sbp_exit);
| linux-master | drivers/target/sbp/sbp_target.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2010 Cisco Systems, Inc.
*/
/* XXX TBD some includes may be extraneous */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kref.h>
#include <asm/unaligned.h>
#include <scsi/libfc.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "tcm_fc.h"
#define TFC_SESS_DBG(lport, fmt, args...) \
pr_debug("host%u: rport %6.6x: " fmt, \
(lport)->host->host_no, \
(lport)->port_id, ##args )
static void ft_sess_delete_all(struct ft_tport *);
/*
* Lookup or allocate target local port.
* Caller holds ft_lport_lock.
*/
static struct ft_tport *ft_tport_get(struct fc_lport *lport)
{
struct ft_tpg *tpg;
struct ft_tport *tport;
int i;
tport = rcu_dereference_protected(lport->prov[FC_TYPE_FCP],
lockdep_is_held(&ft_lport_lock));
if (tport && tport->tpg)
return tport;
tpg = ft_lport_find_tpg(lport);
if (!tpg)
return NULL;
if (tport) {
tport->tpg = tpg;
tpg->tport = tport;
return tport;
}
tport = kzalloc(sizeof(*tport), GFP_KERNEL);
if (!tport)
return NULL;
tport->lport = lport;
tport->tpg = tpg;
tpg->tport = tport;
for (i = 0; i < FT_SESS_HASH_SIZE; i++)
INIT_HLIST_HEAD(&tport->hash[i]);
rcu_assign_pointer(lport->prov[FC_TYPE_FCP], tport);
return tport;
}
/*
* Delete a target local port.
* Caller holds ft_lport_lock.
*/
static void ft_tport_delete(struct ft_tport *tport)
{
struct fc_lport *lport;
struct ft_tpg *tpg;
ft_sess_delete_all(tport);
lport = tport->lport;
lport->service_params &= ~FCP_SPPF_TARG_FCN;
BUG_ON(tport != lport->prov[FC_TYPE_FCP]);
RCU_INIT_POINTER(lport->prov[FC_TYPE_FCP], NULL);
tpg = tport->tpg;
if (tpg) {
tpg->tport = NULL;
tport->tpg = NULL;
}
kfree_rcu(tport, rcu);
}
/*
* Add local port.
* Called thru fc_lport_iterate().
*/
void ft_lport_add(struct fc_lport *lport, void *arg)
{
mutex_lock(&ft_lport_lock);
ft_tport_get(lport);
lport->service_params |= FCP_SPPF_TARG_FCN;
mutex_unlock(&ft_lport_lock);
}
/*
* Delete local port.
* Called thru fc_lport_iterate().
*/
void ft_lport_del(struct fc_lport *lport, void *arg)
{
struct ft_tport *tport;
mutex_lock(&ft_lport_lock);
tport = lport->prov[FC_TYPE_FCP];
if (tport)
ft_tport_delete(tport);
mutex_unlock(&ft_lport_lock);
}
/*
* Notification of local port change from libfc.
* Create or delete local port and associated tport.
*/
int ft_lport_notify(struct notifier_block *nb, unsigned long event, void *arg)
{
struct fc_lport *lport = arg;
switch (event) {
case FC_LPORT_EV_ADD:
ft_lport_add(lport, NULL);
break;
case FC_LPORT_EV_DEL:
ft_lport_del(lport, NULL);
break;
}
return NOTIFY_DONE;
}
/*
* Hash function for FC_IDs.
*/
static u32 ft_sess_hash(u32 port_id)
{
return hash_32(port_id, FT_SESS_HASH_BITS);
}
/*
* Find session in local port.
* Sessions and hash lists are RCU-protected.
* A reference is taken which must be eventually freed.
*/
static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
{
struct ft_tport *tport;
struct hlist_head *head;
struct ft_sess *sess;
char *reason = "no session created";
rcu_read_lock();
tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
if (!tport) {
reason = "not an FCP port";
goto out;
}
head = &tport->hash[ft_sess_hash(port_id)];
hlist_for_each_entry_rcu(sess, head, hash) {
if (sess->port_id == port_id) {
kref_get(&sess->kref);
rcu_read_unlock();
TFC_SESS_DBG(lport, "port_id %x found %p\n",
port_id, sess);
return sess;
}
}
out:
rcu_read_unlock();
TFC_SESS_DBG(lport, "port_id %x not found, %s\n",
port_id, reason);
return NULL;
}
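/*
 * Callback from target_setup_session(): add the new session to the tport
 * hash. Runs while ft_sess_create()'s caller still holds ft_lport_lock.
 */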
static int ft_sess_alloc_cb(struct se_portal_group *se_tpg,
struct se_session *se_sess, void *p)
{
struct ft_sess *sess = p;
struct ft_tport *tport = sess->tport;
struct hlist_head *head = &tport->hash[ft_sess_hash(sess->port_id)];
TFC_SESS_DBG(tport->lport, "port_id %x sess %p\n", sess->port_id, sess);
hlist_add_head_rcu(&sess->hash, head);
tport->sess_count++;
return 0;
}
/*
* Allocate session and enter it in the hash for the local port.
* Caller holds ft_lport_lock.
*/
static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
struct fc_rport_priv *rdata)
{
struct se_portal_group *se_tpg = &tport->tpg->se_tpg;
struct ft_sess *sess;
struct hlist_head *head;
unsigned char initiatorname[TRANSPORT_IQN_LEN];
ft_format_wwn(&initiatorname[0], TRANSPORT_IQN_LEN, rdata->ids.port_name);
head = &tport->hash[ft_sess_hash(port_id)];
hlist_for_each_entry_rcu(sess, head, hash)
if (sess->port_id == port_id)
return sess;
sess = kzalloc(sizeof(*sess), GFP_KERNEL);
if (!sess)
return ERR_PTR(-ENOMEM);
kref_init(&sess->kref); /* ref for table entry */
sess->tport = tport;
sess->port_id = port_id;
sess->se_sess = target_setup_session(se_tpg, TCM_FC_DEFAULT_TAGS,
sizeof(struct ft_cmd),
TARGET_PROT_NORMAL, &initiatorname[0],
sess, ft_sess_alloc_cb);
if (IS_ERR(sess->se_sess)) {
int rc = PTR_ERR(sess->se_sess);
kfree(sess);
sess = ERR_PTR(rc);
}
return sess;
}
/*
* Unhash the session.
* Caller holds ft_lport_lock.
*/
static void ft_sess_unhash(struct ft_sess *sess)
{
struct ft_tport *tport = sess->tport;
hlist_del_rcu(&sess->hash);
BUG_ON(!tport->sess_count);
tport->sess_count--;
sess->port_id = -1;
sess->params = 0;
}
/*
* Delete session from hash.
* Caller holds ft_lport_lock.
*/
static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
{
struct hlist_head *head;
struct ft_sess *sess;
head = &tport->hash[ft_sess_hash(port_id)];
hlist_for_each_entry_rcu(sess, head, hash) {
if (sess->port_id == port_id) {
ft_sess_unhash(sess);
return sess;
}
}
return NULL;
}
static void ft_close_sess(struct ft_sess *sess)
{
target_stop_session(sess->se_sess);
target_wait_for_sess_cmds(sess->se_sess);
ft_sess_put(sess);
}
/*
* Delete all sessions from tport.
* Caller holds ft_lport_lock.
*/
static void ft_sess_delete_all(struct ft_tport *tport)
{
struct hlist_head *head;
struct ft_sess *sess;
for (head = tport->hash;
head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
hlist_for_each_entry_rcu(sess, head, hash) {
ft_sess_unhash(sess);
ft_close_sess(sess); /* release from table */
}
}
}
/*
* TCM ops for sessions.
*/
/*
* Remove session and send PRLO.
* This is called when the ACL is being deleted or queue depth is changing.
*/
void ft_sess_close(struct se_session *se_sess)
{
struct ft_sess *sess = se_sess->fabric_sess_ptr;
u32 port_id;
mutex_lock(&ft_lport_lock);
port_id = sess->port_id;
if (port_id == -1) {
mutex_unlock(&ft_lport_lock);
return;
}
TFC_SESS_DBG(sess->tport->lport, "port_id %x close session\n", port_id);
ft_sess_unhash(sess);
mutex_unlock(&ft_lport_lock);
ft_close_sess(sess);
/* XXX Send LOGO or PRLO */
synchronize_rcu(); /* let transport deregister happen */
}
u32 ft_sess_get_index(struct se_session *se_sess)
{
struct ft_sess *sess = se_sess->fabric_sess_ptr;
return sess->port_id; /* XXX TBD probably not what is needed */
}
u32 ft_sess_get_port_name(struct se_session *se_sess,
unsigned char *buf, u32 len)
{
struct ft_sess *sess = se_sess->fabric_sess_ptr;
return ft_format_wwn(buf, len, sess->port_name);
}
/*
* libfc ops involving sessions.
*/
static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
const struct fc_els_spp *rspp, struct fc_els_spp *spp)
{
struct ft_tport *tport;
struct ft_sess *sess;
u32 fcp_parm;
tport = ft_tport_get(rdata->local_port);
if (!tport)
goto not_target; /* not a target for this local port */
if (!rspp)
goto fill;
if (rspp->spp_flags & (FC_SPP_OPA_VAL | FC_SPP_RPA_VAL))
return FC_SPP_RESP_NO_PA;
/*
* If both target and initiator bits are off, the SPP is invalid.
*/
fcp_parm = ntohl(rspp->spp_params);
if (!(fcp_parm & (FCP_SPPF_INIT_FCN | FCP_SPPF_TARG_FCN)))
return FC_SPP_RESP_INVL;
/*
* Create session (image pair) only if requested by
* EST_IMG_PAIR flag and if the requestor is an initiator.
*/
if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR) {
spp->spp_flags |= FC_SPP_EST_IMG_PAIR;
if (!(fcp_parm & FCP_SPPF_INIT_FCN))
return FC_SPP_RESP_CONF;
sess = ft_sess_create(tport, rdata->ids.port_id, rdata);
if (IS_ERR(sess)) {
if (PTR_ERR(sess) == -EACCES) {
spp->spp_flags &= ~FC_SPP_EST_IMG_PAIR;
return FC_SPP_RESP_CONF;
} else
return FC_SPP_RESP_RES;
}
if (!sess->params)
rdata->prli_count++;
sess->params = fcp_parm;
sess->port_name = rdata->ids.port_name;
sess->max_frame = rdata->maxframe_size;
/* XXX TBD - clearing actions. unit attn, see 4.10 */
}
/*
* OR in our service parameters with other provider (initiator), if any.
*/
fill:
fcp_parm = ntohl(spp->spp_params);
fcp_parm &= ~FCP_SPPF_RETRY;
spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN);
return FC_SPP_RESP_ACK;
not_target:
fcp_parm = ntohl(spp->spp_params);
fcp_parm &= ~FCP_SPPF_TARG_FCN;
spp->spp_params = htonl(fcp_parm);
return 0;
}
/**
* ft_prli() - Handle incoming or outgoing PRLI for the FCP target
* @rdata: remote port private
* @spp_len: service parameter page length
* @rspp: received service parameter page (NULL for outgoing PRLI)
* @spp: response service parameter page
*
* Returns spp response code.
*/
static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
const struct fc_els_spp *rspp, struct fc_els_spp *spp)
{
int ret;
mutex_lock(&ft_lport_lock);
ret = ft_prli_locked(rdata, spp_len, rspp, spp);
mutex_unlock(&ft_lport_lock);
TFC_SESS_DBG(rdata->local_port, "port_id %x flags %x ret %x\n",
rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
return ret;
}
static void ft_sess_free(struct kref *kref)
{
struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
target_remove_session(sess->se_sess);
kfree_rcu(sess, rcu);
}
void ft_sess_put(struct ft_sess *sess)
{
int sess_held = kref_read(&sess->kref);
BUG_ON(!sess_held);
kref_put(&sess->kref, ft_sess_free);
}
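/*
 * Handle an incoming PRLO: tear down the session for the remote port,
 * if one exists.
 */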
static void ft_prlo(struct fc_rport_priv *rdata)
{
struct ft_sess *sess;
struct ft_tport *tport;
mutex_lock(&ft_lport_lock);
tport = rcu_dereference_protected(rdata->local_port->prov[FC_TYPE_FCP],
lockdep_is_held(&ft_lport_lock));
if (!tport) {
mutex_unlock(&ft_lport_lock);
return;
}
sess = ft_sess_delete(tport, rdata->ids.port_id);
if (!sess) {
mutex_unlock(&ft_lport_lock);
return;
}
mutex_unlock(&ft_lport_lock);
ft_close_sess(sess); /* release from table */
rdata->prli_count--;
/* XXX TBD - clearing actions. unit attn, see 4.10 */
}
/*
* Handle incoming FCP request.
* Caller has verified that the frame is type FCP.
*/
static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
{
struct ft_sess *sess;
u32 sid = fc_frame_sid(fp);
TFC_SESS_DBG(lport, "recv sid %x\n", sid);
sess = ft_sess_get(lport, sid);
if (!sess) {
TFC_SESS_DBG(lport, "sid %x sess lookup failed\n", sid);
/* TBD XXX - if FCP_CMND, send PRLO */
fc_frame_free(fp);
return;
}
ft_recv_req(sess, fp); /* must do ft_sess_put() */
}
/*
* Provider ops for libfc.
*/
struct fc4_prov ft_prov = {
.prli = ft_prli,
.prlo = ft_prlo,
.recv = ft_recv,
.module = THIS_MODULE,
};
| linux-master | drivers/target/tcm_fc/tfc_sess.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: tcm_fc.c
*
* This file contains the configfs implementation for TCM_fc fabric node.
* Based on tcm_loop_configfs.c
*
* Copyright (c) 2010 Cisco Systems, Inc.
* Copyright (c) 2009,2010 Rising Tide, Inc.
* Copyright (c) 2009,2010 Linux-iSCSI.org
*
* Copyright (c) 2009,2010 Nicholas A. Bellinger <[email protected]>
*
****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <asm/unaligned.h>
#include <scsi/libfc.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "tcm_fc.h"
static LIST_HEAD(ft_wwn_list);
DEFINE_MUTEX(ft_lport_lock);
unsigned int ft_debug_logging;
module_param_named(debug_logging, ft_debug_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
/*
* Parse WWN.
* If strict, we require lower-case hex and colon separators to be sure
* the name is the same as what would be generated by ft_format_wwn()
* so the name and wwn are mapped one-to-one.
*/
static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
{
const char *cp;
char c;
u32 byte = 0;
u32 pos = 0;
u32 err;
int val;
*wwn = 0;
for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) {
c = *cp;
if (c == '\n' && cp[1] == '\0')
continue;
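/*
 * strict: after every two hex digits (for the first seven byte
 * boundaries) the next character must be a ':' separator.
 */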
if (strict && pos++ == 2 && byte++ < 7) {
pos = 0;
if (c == ':')
continue;
err = 1;
goto fail;
}
if (c == '\0') {
err = 2;
if (strict && byte != 8)
goto fail;
return cp - name;
}
err = 3;
val = hex_to_bin(c);
if (val < 0 || (strict && isupper(c)))
goto fail;
*wwn = (*wwn << 4) | val;
}
err = 4;
fail:
pr_debug("err %u len %zu pos %u byte %u\n",
err, cp - name, pos, byte);
return -1;
}
ssize_t ft_format_wwn(char *buf, size_t len, u64 wwn)
{
u8 b[8];
put_unaligned_be64(wwn, b);
return snprintf(buf, len,
"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
}
static ssize_t ft_wwn_show(void *arg, char *buf)
{
u64 *wwn = arg;
ssize_t len;
len = ft_format_wwn(buf, PAGE_SIZE - 2, *wwn);
buf[len++] = '\n';
return len;
}
static ssize_t ft_wwn_store(void *arg, const char *buf, size_t len)
{
ssize_t ret;
u64 wwn;
ret = ft_parse_wwn(buf, &wwn, 0);
if (ret > 0)
*(u64 *)arg = wwn;
return ret;
}
/*
* ACL auth ops.
*/
static ssize_t ft_nacl_port_name_show(struct config_item *item, char *page)
{
struct se_node_acl *se_nacl = acl_to_nacl(item);
struct ft_node_acl *acl = container_of(se_nacl,
struct ft_node_acl, se_node_acl);
return ft_wwn_show(&acl->node_auth.port_name, page);
}
static ssize_t ft_nacl_port_name_store(struct config_item *item,
const char *page, size_t count)
{
struct se_node_acl *se_nacl = acl_to_nacl(item);
struct ft_node_acl *acl = container_of(se_nacl,
struct ft_node_acl, se_node_acl);
return ft_wwn_store(&acl->node_auth.port_name, page, count);
}
static ssize_t ft_nacl_node_name_show(struct config_item *item,
char *page)
{
struct se_node_acl *se_nacl = acl_to_nacl(item);
struct ft_node_acl *acl = container_of(se_nacl,
struct ft_node_acl, se_node_acl);
return ft_wwn_show(&acl->node_auth.node_name, page);
}
static ssize_t ft_nacl_node_name_store(struct config_item *item,
const char *page, size_t count)
{
struct se_node_acl *se_nacl = acl_to_nacl(item);
struct ft_node_acl *acl = container_of(se_nacl,
struct ft_node_acl, se_node_acl);
return ft_wwn_store(&acl->node_auth.node_name, page, count);
}
CONFIGFS_ATTR(ft_nacl_, node_name);
CONFIGFS_ATTR(ft_nacl_, port_name);
static ssize_t ft_nacl_tag_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%s", acl_to_nacl(item)->acl_tag);
}
static ssize_t ft_nacl_tag_store(struct config_item *item,
const char *page, size_t count)
{
struct se_node_acl *se_nacl = acl_to_nacl(item);
int ret;
ret = core_tpg_set_initiator_node_tag(se_nacl->se_tpg, se_nacl, page);
if (ret < 0)
return ret;
return count;
}
CONFIGFS_ATTR(ft_nacl_, tag);
static struct configfs_attribute *ft_nacl_base_attrs[] = {
&ft_nacl_attr_port_name,
&ft_nacl_attr_node_name,
&ft_nacl_attr_tag,
NULL,
};
/*
* ACL ops.
*/
/*
* Add ACL for an initiator. The ACL is named arbitrarily.
* The port_name and/or node_name are attributes.
*/
static int ft_init_nodeacl(struct se_node_acl *nacl, const char *name)
{
struct ft_node_acl *acl =
container_of(nacl, struct ft_node_acl, se_node_acl);
u64 wwpn;
if (ft_parse_wwn(name, &wwpn, 1) < 0)
return -EINVAL;
acl->node_auth.port_name = wwpn;
return 0;
}
/*
* local_port port_group (tpg) ops.
*/
static struct se_portal_group *ft_add_tpg(struct se_wwn *wwn, const char *name)
{
struct ft_lport_wwn *ft_wwn;
struct ft_tpg *tpg;
struct workqueue_struct *wq;
unsigned long index;
int ret;
pr_debug("tcm_fc: add tpg %s\n", name);
/*
* Name must be "tpgt_" followed by the index.
*/
if (strstr(name, "tpgt_") != name)
return NULL;
ret = kstrtoul(name + 5, 10, &index);
if (ret)
return NULL;
if (index > UINT_MAX)
return NULL;
if ((index != 1)) {
pr_err("Error, a single TPG=1 is used for HW port mappings\n");
return ERR_PTR(-ENOSYS);
}
ft_wwn = container_of(wwn, struct ft_lport_wwn, se_wwn);
tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
if (!tpg)
return NULL;
tpg->index = index;
tpg->lport_wwn = ft_wwn;
INIT_LIST_HEAD(&tpg->lun_list);
wq = alloc_workqueue("tcm_fc", 0, 1);
if (!wq) {
kfree(tpg);
return NULL;
}
ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);
if (ret < 0) {
destroy_workqueue(wq);
kfree(tpg);
return NULL;
}
tpg->workqueue = wq;
mutex_lock(&ft_lport_lock);
ft_wwn->tpg = tpg;
mutex_unlock(&ft_lport_lock);
return &tpg->se_tpg;
}
static void ft_del_tpg(struct se_portal_group *se_tpg)
{
struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
struct ft_lport_wwn *ft_wwn = tpg->lport_wwn;
pr_debug("del tpg %s\n",
config_item_name(&tpg->se_tpg.tpg_group.cg_item));
destroy_workqueue(tpg->workqueue);
/* Wait for sessions to be freed thru RCU, for BUG_ON below */
synchronize_rcu();
mutex_lock(&ft_lport_lock);
ft_wwn->tpg = NULL;
if (tpg->tport) {
tpg->tport->tpg = NULL;
tpg->tport = NULL;
}
mutex_unlock(&ft_lport_lock);
core_tpg_deregister(se_tpg);
kfree(tpg);
}
/*
* Verify that an lport is configured to use the tcm_fc module, and return
* the target port group that should be used.
*
* The caller holds ft_lport_lock.
*/
struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
{
struct ft_lport_wwn *ft_wwn;
list_for_each_entry(ft_wwn, &ft_wwn_list, ft_wwn_node) {
if (ft_wwn->wwpn == lport->wwpn)
return ft_wwn->tpg;
}
return NULL;
}
/*
* target config instance ops.
*/
/*
* Add lport to allowed config.
* The name is the WWPN in lower-case ASCII, colon-separated bytes.
*/
static struct se_wwn *ft_add_wwn(
struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
struct ft_lport_wwn *ft_wwn;
struct ft_lport_wwn *old_ft_wwn;
u64 wwpn;
pr_debug("add wwn %s\n", name);
if (ft_parse_wwn(name, &wwpn, 1) < 0)
return NULL;
ft_wwn = kzalloc(sizeof(*ft_wwn), GFP_KERNEL);
if (!ft_wwn)
return NULL;
ft_wwn->wwpn = wwpn;
mutex_lock(&ft_lport_lock);
list_for_each_entry(old_ft_wwn, &ft_wwn_list, ft_wwn_node) {
if (old_ft_wwn->wwpn == wwpn) {
mutex_unlock(&ft_lport_lock);
kfree(ft_wwn);
return NULL;
}
}
list_add_tail(&ft_wwn->ft_wwn_node, &ft_wwn_list);
ft_format_wwn(ft_wwn->name, sizeof(ft_wwn->name), wwpn);
mutex_unlock(&ft_lport_lock);
return &ft_wwn->se_wwn;
}
static void ft_del_wwn(struct se_wwn *wwn)
{
struct ft_lport_wwn *ft_wwn = container_of(wwn,
struct ft_lport_wwn, se_wwn);
pr_debug("del wwn %s\n", ft_wwn->name);
mutex_lock(&ft_lport_lock);
list_del(&ft_wwn->ft_wwn_node);
mutex_unlock(&ft_lport_lock);
kfree(ft_wwn);
}
static ssize_t ft_wwn_version_show(struct config_item *item, char *page)
{
return sprintf(page, "TCM FC " FT_VERSION " on %s/%s on "
""UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
}
CONFIGFS_ATTR_RO(ft_wwn_, version);
static struct configfs_attribute *ft_wwn_attrs[] = {
&ft_wwn_attr_version,
NULL,
};
static inline struct ft_tpg *ft_tpg(struct se_portal_group *se_tpg)
{
return container_of(se_tpg, struct ft_tpg, se_tpg);
}
static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
{
return ft_tpg(se_tpg)->lport_wwn->name;
}
static u16 ft_get_tag(struct se_portal_group *se_tpg)
{
/*
* This tag is used when forming SCSI Name identifier in EVPD=1 0x83
* to represent the SCSI Target Port.
*/
return ft_tpg(se_tpg)->index;
}
static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
return ft_tpg(se_tpg)->index;
}
static const struct target_core_fabric_ops ft_fabric_ops = {
.module = THIS_MODULE,
.fabric_name = "fc",
.node_acl_size = sizeof(struct ft_node_acl),
.tpg_get_wwn = ft_get_fabric_wwn,
.tpg_get_tag = ft_get_tag,
.tpg_get_inst_index = ft_tpg_get_inst_index,
.check_stop_free = ft_check_stop_free,
.release_cmd = ft_release_cmd,
.close_session = ft_sess_close,
.sess_get_index = ft_sess_get_index,
.sess_get_initiator_sid = NULL,
.write_pending = ft_write_pending,
.queue_data_in = ft_queue_data_in,
.queue_status = ft_queue_status,
.queue_tm_rsp = ft_queue_tm_resp,
.aborted_task = ft_aborted_task,
/*
* Setup function pointers for generic logic in
* target_core_fabric_configfs.c
*/
.fabric_make_wwn = &ft_add_wwn,
.fabric_drop_wwn = &ft_del_wwn,
.fabric_make_tpg = &ft_add_tpg,
.fabric_drop_tpg = &ft_del_tpg,
.fabric_init_nodeacl = &ft_init_nodeacl,
.tfc_wwn_attrs = ft_wwn_attrs,
.tfc_tpg_nacl_base_attrs = ft_nacl_base_attrs,
};
static struct notifier_block ft_notifier = {
.notifier_call = ft_lport_notify
};
static int __init ft_init(void)
{
int ret;
ret = target_register_template(&ft_fabric_ops);
if (ret)
goto out;
ret = fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov);
if (ret)
goto out_unregister_template;
blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier);
fc_lport_iterate(ft_lport_add, NULL);
return 0;
out_unregister_template:
target_unregister_template(&ft_fabric_ops);
out:
return ret;
}
static void __exit ft_exit(void)
{
blocking_notifier_chain_unregister(&fc_lport_notifier_head,
&ft_notifier);
fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov);
fc_lport_iterate(ft_lport_del, NULL);
target_unregister_template(&ft_fabric_ops);
synchronize_rcu();
}
MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION);
MODULE_LICENSE("GPL");
module_init(ft_init);
module_exit(ft_exit);
| linux-master | drivers/target/tcm_fc/tfc_conf.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2010 Cisco Systems, Inc.
*/
/* XXX TBD some includes may be extraneous */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <asm/unaligned.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "tcm_fc.h"
/*
* Dump cmd state for debugging.
*/
static void _ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
struct fc_exch *ep;
struct fc_seq *sp;
struct se_cmd *se_cmd;
struct scatterlist *sg;
int count;
se_cmd = &cmd->se_cmd;
pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n",
caller, cmd, cmd->sess, cmd->seq, se_cmd);
pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
caller, cmd, se_cmd->t_data_nents,
se_cmd->data_length, se_cmd->se_cmd_flags);
for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count)
pr_debug("%s: cmd %p sg %p page %p "
"len 0x%x off 0x%x\n",
caller, cmd, sg,
sg_page(sg), sg->length, sg->offset);
sp = cmd->seq;
if (sp) {
ep = fc_seq_exch(sp);
pr_debug("%s: cmd %p sid %x did %x "
"ox_id %x rx_id %x seq_id %x e_stat %x\n",
caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
sp->id, ep->esb_stat);
}
}
void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
if (unlikely(ft_debug_logging))
_ft_dump_cmd(cmd, caller);
}
static void ft_free_cmd(struct ft_cmd *cmd)
{
struct fc_frame *fp;
struct ft_sess *sess;
if (!cmd)
return;
sess = cmd->sess;
fp = cmd->req_frame;
if (fr_seq(fp))
fc_seq_release(fr_seq(fp));
fc_frame_free(fp);
target_free_tag(sess->se_sess, &cmd->se_cmd);
ft_sess_put(sess); /* undo get from lookup at recv */
}
void ft_release_cmd(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
ft_free_cmd(cmd);
}
int ft_check_stop_free(struct se_cmd *se_cmd)
{
return transport_generic_free_cmd(se_cmd, 0);
}
/*
* Send response.
*/
int ft_queue_status(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
struct fc_frame *fp;
struct fcp_resp_with_ext *fcp;
struct fc_lport *lport;
struct fc_exch *ep;
size_t len;
int rc;
if (cmd->aborted)
return 0;
ft_dump_cmd(cmd, __func__);
ep = fc_seq_exch(cmd->seq);
lport = ep->lp;
len = sizeof(*fcp) + se_cmd->scsi_sense_length;
fp = fc_frame_alloc(lport, len);
if (!fp) {
se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
return -ENOMEM;
}
fcp = fc_frame_payload_get(fp, len);
memset(fcp, 0, len);
fcp->resp.fr_status = se_cmd->scsi_status;
len = se_cmd->scsi_sense_length;
if (len) {
fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
fcp->ext.fr_sns_len = htonl(len);
memcpy((fcp + 1), se_cmd->sense_buffer, len);
}
/*
* Test underflow and overflow with one mask. Usually both are off.
* Bidirectional commands are not handled yet.
*/
if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
fcp->resp.fr_flags |= FCP_RESID_OVER;
else
fcp->resp.fr_flags |= FCP_RESID_UNDER;
fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
}
/*
* Send response.
*/
cmd->seq = fc_seq_start_next(cmd->seq);
fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
rc = fc_seq_send(lport, cmd->seq, fp);
if (rc) {
pr_info_ratelimited("%s: Failed to send response frame %p, "
"xid <0x%x>\n", __func__, fp, ep->xid);
/*
* Generate a TASK_SET_FULL status to notify the initiator
 * to reduce its queue_depth after the se_cmd response has
* been re-queued by target-core.
*/
se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
return -ENOMEM;
}
fc_exch_done(cmd->seq);
/*
* Drop the extra ACK_KREF reference taken by target_submit_cmd()
* ahead of ft_check_stop_free() -> transport_generic_free_cmd()
* final se_cmd->cmd_kref put.
*/
target_put_sess_cmd(&cmd->se_cmd);
return 0;
}
/*
* Send TX_RDY (transfer ready).
*/
int ft_write_pending(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
struct fc_frame *fp;
struct fcp_txrdy *txrdy;
struct fc_lport *lport;
struct fc_exch *ep;
struct fc_frame_header *fh;
u32 f_ctl;
ft_dump_cmd(cmd, __func__);
if (cmd->aborted)
return 0;
ep = fc_seq_exch(cmd->seq);
lport = ep->lp;
fp = fc_frame_alloc(lport, sizeof(*txrdy));
if (!fp)
return -ENOMEM; /* Signal QUEUE_FULL */
txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
memset(txrdy, 0, sizeof(*txrdy));
txrdy->ft_burst_len = htonl(se_cmd->data_length);
cmd->seq = fc_seq_start_next(cmd->seq);
fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
fh = fc_frame_header_get(fp);
f_ctl = ntoh24(fh->fh_f_ctl);
/* Only if it is 'Exchange Responder' */
if (f_ctl & FC_FC_EX_CTX) {
/* Target is 'exchange responder' and sending XFER_READY
* to 'exchange initiator (initiator)'
*/
if ((ep->xid <= lport->lro_xid) &&
(fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
lport->tt.ddp_target(lport, ep->xid,
se_cmd->t_data_sg,
se_cmd->t_data_nents))
cmd->was_ddp_setup = 1;
}
}
fc_seq_send(lport, cmd->seq, fp);
return 0;
}
/*
* FC sequence response handler for follow-on sequences (data) and aborts.
*/
static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
struct ft_cmd *cmd = arg;
struct fc_frame_header *fh;
if (IS_ERR(fp)) {
/* XXX need to find cmd if queued */
cmd->seq = NULL;
cmd->aborted = true;
return;
}
fh = fc_frame_header_get(fp);
switch (fh->fh_r_ctl) {
case FC_RCTL_DD_SOL_DATA: /* write data */
ft_recv_write_data(cmd, fp);
break;
case FC_RCTL_DD_UNSOL_CTL: /* command */
case FC_RCTL_DD_SOL_CTL: /* transfer ready */
case FC_RCTL_DD_DATA_DESC: /* transfer ready */
default:
pr_debug("%s: unhandled frame r_ctl %x\n",
__func__, fh->fh_r_ctl);
ft_invl_hw_context(cmd);
fc_frame_free(fp);
transport_generic_free_cmd(&cmd->se_cmd, 0);
break;
}
}
/*
* Send a FCP response including SCSI status and optional FCP rsp_code.
* status is SAM_STAT_GOOD (zero) iff code is valid.
* This is used in error cases, such as allocation failures.
*/
static void ft_send_resp_status(struct fc_lport *lport,
const struct fc_frame *rx_fp,
u32 status, enum fcp_resp_rsp_codes code)
{
struct fc_frame *fp;
struct fc_seq *sp;
const struct fc_frame_header *fh;
size_t len;
struct fcp_resp_with_ext *fcp;
struct fcp_resp_rsp_info *info;
fh = fc_frame_header_get(rx_fp);
pr_debug("FCP error response: did %x oxid %x status %x code %x\n",
ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
len = sizeof(*fcp);
if (status == SAM_STAT_GOOD)
len += sizeof(*info);
fp = fc_frame_alloc(lport, len);
if (!fp)
return;
fcp = fc_frame_payload_get(fp, len);
memset(fcp, 0, len);
fcp->resp.fr_status = status;
if (status == SAM_STAT_GOOD) {
fcp->ext.fr_rsp_len = htonl(sizeof(*info));
fcp->resp.fr_flags |= FCP_RSP_LEN_VAL;
info = (struct fcp_resp_rsp_info *)(fcp + 1);
info->rsp_code = code;
}
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
sp = fr_seq(fp);
if (sp) {
fc_seq_send(lport, sp, fp);
fc_exch_done(sp);
} else {
lport->tt.frame_send(lport, fp);
}
}
/*
* Send error or task management response.
*/
static void ft_send_resp_code(struct ft_cmd *cmd,
enum fcp_resp_rsp_codes code)
{
ft_send_resp_status(cmd->sess->tport->lport,
cmd->req_frame, SAM_STAT_GOOD, code);
}
/*
* Send error or task management response.
* Always frees the cmd and associated state.
*/
static void ft_send_resp_code_and_free(struct ft_cmd *cmd,
enum fcp_resp_rsp_codes code)
{
ft_send_resp_code(cmd, code);
ft_free_cmd(cmd);
}
/*
* Handle Task Management Request.
*/
static void ft_send_tm(struct ft_cmd *cmd)
{
struct fcp_cmnd *fcp;
int rc;
u8 tm_func;
fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
switch (fcp->fc_tm_flags) {
case FCP_TMF_LUN_RESET:
tm_func = TMR_LUN_RESET;
break;
case FCP_TMF_TGT_RESET:
tm_func = TMR_TARGET_WARM_RESET;
break;
case FCP_TMF_CLR_TASK_SET:
tm_func = TMR_CLEAR_TASK_SET;
break;
case FCP_TMF_ABT_TASK_SET:
tm_func = TMR_ABORT_TASK_SET;
break;
case FCP_TMF_CLR_ACA:
tm_func = TMR_CLEAR_ACA;
break;
default:
/*
 * FCP-4 r01 indicates that having a combination of
 * tm_flags set is invalid.
*/
pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
return;
}
/* FIXME: Add referenced task tag for ABORT_TASK */
rc = target_submit_tmr(&cmd->se_cmd, cmd->sess->se_sess,
&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
cmd, tm_func, GFP_KERNEL, 0, TARGET_SCF_ACK_KREF);
if (rc < 0)
ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
}
/*
* Send status from completed task management request.
*/
void ft_queue_tm_resp(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
struct se_tmr_req *tmr = se_cmd->se_tmr_req;
enum fcp_resp_rsp_codes code;
if (cmd->aborted)
return;
switch (tmr->response) {
case TMR_FUNCTION_COMPLETE:
code = FCP_TMF_CMPL;
break;
case TMR_LUN_DOES_NOT_EXIST:
code = FCP_TMF_INVALID_LUN;
break;
case TMR_FUNCTION_REJECTED:
code = FCP_TMF_REJECTED;
break;
case TMR_TASK_DOES_NOT_EXIST:
case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
default:
code = FCP_TMF_FAILED;
break;
}
pr_debug("tmr fn %d resp %d fcp code %d\n",
tmr->function, tmr->response, code);
ft_send_resp_code(cmd, code);
/*
* Drop the extra ACK_KREF reference taken by target_submit_tmr()
* ahead of ft_check_stop_free() -> transport_generic_free_cmd()
* final se_cmd->cmd_kref put.
*/
target_put_sess_cmd(&cmd->se_cmd);
}
void ft_aborted_task(struct se_cmd *se_cmd)
{
return;
}
static void ft_send_work(struct work_struct *work);
/*
* Handle incoming FCP command.
*/
static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
{
struct ft_cmd *cmd;
struct fc_lport *lport = sess->tport->lport;
struct se_session *se_sess = sess->se_sess;
int tag, cpu;
tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
goto busy;
cmd = &((struct ft_cmd *)se_sess->sess_cmd_map)[tag];
memset(cmd, 0, sizeof(struct ft_cmd));
cmd->se_cmd.map_tag = tag;
cmd->se_cmd.map_cpu = cpu;
cmd->sess = sess;
cmd->seq = fc_seq_assign(lport, fp);
if (!cmd->seq) {
target_free_tag(se_sess, &cmd->se_cmd);
goto busy;
}
cmd->req_frame = fp; /* hold frame during cmd */
INIT_WORK(&cmd->work, ft_send_work);
queue_work(sess->tport->tpg->workqueue, &cmd->work);
return;
busy:
pr_debug("cmd or seq allocation failure - sending BUSY\n");
ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
fc_frame_free(fp);
ft_sess_put(sess); /* undo get from lookup */
}
/*
* Handle incoming FCP frame.
* Caller has verified that the frame is type FCP.
*/
void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
switch (fh->fh_r_ctl) {
case FC_RCTL_DD_UNSOL_CMD: /* command */
ft_recv_cmd(sess, fp);
break;
case FC_RCTL_DD_SOL_DATA: /* write data */
case FC_RCTL_DD_UNSOL_CTL:
case FC_RCTL_DD_SOL_CTL:
case FC_RCTL_DD_DATA_DESC: /* transfer ready */
case FC_RCTL_ELS4_REQ: /* SRR, perhaps */
default:
pr_debug("%s: unhandled frame r_ctl %x\n",
__func__, fh->fh_r_ctl);
fc_frame_free(fp);
ft_sess_put(sess); /* undo get from lookup */
break;
}
}
/*
* Send new command to target.
*/
static void ft_send_work(struct work_struct *work)
{
struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
struct fcp_cmnd *fcp;
int data_dir = 0;
int task_attr;
fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
if (!fcp)
goto err;
if (fcp->fc_flags & FCP_CFL_LEN_MASK)
goto err; /* not handling longer CDBs yet */
/*
* Check for FCP task management flags
*/
if (fcp->fc_tm_flags) {
ft_send_tm(cmd);
return;
}
switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
case 0:
data_dir = DMA_NONE;
break;
case FCP_CFL_RDDATA:
data_dir = DMA_FROM_DEVICE;
break;
case FCP_CFL_WRDATA:
data_dir = DMA_TO_DEVICE;
break;
case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
goto err; /* TBD not supported by tcm_fc yet */
}
/*
* Locate the SAM Task Attr from fc_pri_ta
*/
switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
case FCP_PTA_HEADQ:
task_attr = TCM_HEAD_TAG;
break;
case FCP_PTA_ORDERED:
task_attr = TCM_ORDERED_TAG;
break;
case FCP_PTA_ACA:
task_attr = TCM_ACA_TAG;
break;
case FCP_PTA_SIMPLE:
default:
task_attr = TCM_SIMPLE_TAG;
}
fc_seq_set_resp(cmd->seq, ft_recv_seq, cmd);
cmd->se_cmd.tag = fc_seq_exch(cmd->seq)->rxid;
/*
* Use a single se_cmd->cmd_kref as we expect to release se_cmd
* directly from ft_check_stop_free callback in response path.
*/
if (target_init_cmd(&cmd->se_cmd, cmd->sess->se_sess,
&cmd->ft_sense_buffer[0],
scsilun_to_int(&fcp->fc_lun), ntohl(fcp->fc_dl),
task_attr, data_dir, TARGET_SCF_ACK_KREF))
goto err;
if (target_submit_prep(&cmd->se_cmd, fcp->fc_cdb, NULL, 0, NULL, 0,
NULL, 0, GFP_KERNEL))
return;
target_submit(&cmd->se_cmd);
pr_debug("r_ctl %x target_submit_cmd %p\n", fh->fh_r_ctl, cmd);
return;
err:
ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
}
|
linux-master
|
drivers/target/tcm_fc/tfc_cmd.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2010 Cisco Systems, Inc.
*
* Portions based on tcm_loop_fabric_scsi.c and libfc/fc_fcp.c
*
* Copyright (c) 2007 Intel Corporation. All rights reserved.
* Copyright (c) 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2008 Mike Christie
* Copyright (c) 2009 Rising Tide, Inc.
* Copyright (c) 2009 Linux-iSCSI.org
* Copyright (c) 2009 Nicholas A. Bellinger <[email protected]>
*/
/* XXX TBD some includes may be extraneous */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/libfc.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "tcm_fc.h"
/*
* Deliver read data back to initiator.
* XXX TBD handle resource problems later.
*/
int ft_queue_data_in(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
struct fc_frame *fp = NULL;
struct fc_exch *ep;
struct fc_lport *lport;
struct scatterlist *sg = NULL;
size_t remaining;
u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
u32 mem_off = 0;
u32 fh_off = 0;
u32 frame_off = 0;
size_t frame_len = 0;
size_t mem_len = 0;
size_t tlen;
size_t off_in_page;
struct page *page = NULL;
int use_sg;
int error;
void *page_addr;
void *from;
void *to = NULL;
if (cmd->aborted)
return 0;
if (se_cmd->scsi_status == SAM_STAT_TASK_SET_FULL)
goto queue_status;
ep = fc_seq_exch(cmd->seq);
lport = ep->lp;
cmd->seq = fc_seq_start_next(cmd->seq);
remaining = se_cmd->data_length;
/*
* Setup to use first mem list entry, unless no data.
*/
BUG_ON(remaining && !se_cmd->t_data_sg);
if (remaining) {
sg = se_cmd->t_data_sg;
mem_len = sg->length;
mem_off = sg->offset;
page = sg_page(sg);
}
/* no scatter/gather in skb for odd word length due to fc_seq_send() */
use_sg = !(remaining % 4);
while (remaining) {
struct fc_seq *seq = cmd->seq;
if (!seq) {
pr_debug("%s: Command aborted, xid 0x%x\n",
__func__, ep->xid);
break;
}
if (!mem_len) {
sg = sg_next(sg);
mem_len = min((size_t)sg->length, remaining);
mem_off = sg->offset;
page = sg_page(sg);
}
if (!frame_len) {
/*
 * If the lport has Large Send Offload (LSO) capability,
 * allow 'frame_len' to be as large as 'lso_max'; otherwise
 * cap it at the session's negotiated max_frame size.
*/
frame_len = (lport->seq_offload) ? lport->lso_max :
cmd->sess->max_frame;
frame_len = min(frame_len, remaining);
fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
if (!fp)
return -ENOMEM;
to = fc_frame_payload_get(fp, 0);
fh_off = frame_off;
frame_off += frame_len;
/*
 * Set the frame's max payload, which the base driver uses
 * to tell the HW the maximum frame size so that the HW can
 * fragment appropriately based on the "gso_max_size" of the
 * underlying netdev.
*/
fr_max_payload(fp) = cmd->sess->max_frame;
}
tlen = min(mem_len, frame_len);
if (use_sg) {
off_in_page = mem_off;
BUG_ON(!page);
get_page(page);
skb_fill_page_desc(fp_skb(fp),
skb_shinfo(fp_skb(fp))->nr_frags,
page, off_in_page, tlen);
fr_len(fp) += tlen;
fp_skb(fp)->data_len += tlen;
fp_skb(fp)->truesize += page_size(page);
} else {
BUG_ON(!page);
from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
page_addr = from;
from += offset_in_page(mem_off);
tlen = min(tlen, (size_t)(PAGE_SIZE -
offset_in_page(mem_off)));
memcpy(to, from, tlen);
kunmap_atomic(page_addr);
to += tlen;
}
mem_off += tlen;
mem_len -= tlen;
frame_len -= tlen;
remaining -= tlen;
if (frame_len &&
(skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN))
continue;
if (!remaining)
f_ctl |= FC_FC_END_SEQ;
fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
FC_TYPE_FCP, f_ctl, fh_off);
error = fc_seq_send(lport, seq, fp);
if (error) {
pr_info_ratelimited("%s: Failed to send frame %p, "
"xid <0x%x>, remaining %zu, "
"lso_max <0x%x>\n",
__func__, fp, ep->xid,
remaining, lport->lso_max);
/*
* Go ahead and set TASK_SET_FULL status ignoring the
* rest of the DataIN, and immediately attempt to
* send the response via ft_queue_status() in order
 * to notify the initiator that it should reduce its
 * per-LUN queue_depth.
*/
se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
break;
}
}
queue_status:
return ft_queue_status(se_cmd);
}
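/*
 * Illustrative sketch (editorial addition, not part of the original
 * driver): the per-frame length selection used by ft_queue_data_in()
 * above, pulled out as a standalone helper for clarity.  When the lport
 * supports sequence offload (LSO), a single outgoing "frame" may be as
 * large as lport->lso_max; otherwise it is capped at the session's
 * negotiated max_frame.  Either way it never exceeds the remaining
 * transfer length.  The helper name and the __maybe_unused annotation
 * are editorial and do not exist in the original file.
 */
static size_t __maybe_unused ft_frame_len_sketch(const struct fc_lport *lport,
						 const struct ft_sess *sess,
						 size_t remaining)
{
	size_t frame_len;

	/* LSO-capable ports can send one large sequence-offloaded frame */
	frame_len = lport->seq_offload ? lport->lso_max : sess->max_frame;

	/* never ask for more than is left to transfer */
	return min(frame_len, remaining);
}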
static void ft_execute_work(struct work_struct *work)
{
struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
target_execute_cmd(&cmd->se_cmd);
}
/*
* Receive write data frame.
*/
void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct fc_seq *seq = cmd->seq;
struct fc_exch *ep;
struct fc_lport *lport;
struct fc_frame_header *fh;
struct scatterlist *sg = NULL;
u32 mem_off = 0;
u32 rel_off;
size_t frame_len;
size_t mem_len = 0;
size_t tlen;
struct page *page = NULL;
void *page_addr;
void *from;
void *to;
u32 f_ctl;
void *buf;
fh = fc_frame_header_get(fp);
if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
goto drop;
f_ctl = ntoh24(fh->fh_f_ctl);
ep = fc_seq_exch(seq);
lport = ep->lp;
if (cmd->was_ddp_setup) {
BUG_ON(!lport);
/*
 * Since DDP (large receive offload) was set up for this request,
 * the payload is expected to be copied directly to user buffers.
*/
buf = fc_frame_payload_get(fp, 1);
if (buf)
pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
"cmd->sg_cnt 0x%x. DDP was setup"
" hence not expected to receive frame with "
"payload, Frame will be dropped if"
"'Sequence Initiative' bit in f_ctl is"
"not set\n", __func__, ep->xid, f_ctl,
se_cmd->t_data_sg, se_cmd->t_data_nents);
/*
 * Invalidate the HW DDP context if it was set up for this
 * command. Invalidation of the HW DDP context is required in
 * both situations (success and error).
*/
ft_invl_hw_context(cmd);
/*
* If "Sequence Initiative (TSI)" bit set in f_ctl, means last
* write data frame is received successfully where payload is
* posted directly to user buffer and only the last frame's
* header is posted in receive queue.
*
* If "Sequence Initiative (TSI)" bit is not set, means error
* condition w.r.t. DDP, hence drop the packet and let explict
* ABORTS from other end of exchange timer trigger the recovery.
*/
if (f_ctl & FC_FC_SEQ_INIT)
goto last_frame;
else
goto drop;
}
rel_off = ntohl(fh->fh_parm_offset);
frame_len = fr_len(fp);
if (frame_len <= sizeof(*fh))
goto drop;
frame_len -= sizeof(*fh);
from = fc_frame_payload_get(fp, 0);
if (rel_off >= se_cmd->data_length)
goto drop;
if (frame_len + rel_off > se_cmd->data_length)
frame_len = se_cmd->data_length - rel_off;
/*
* Setup to use first mem list entry, unless no data.
*/
BUG_ON(frame_len && !se_cmd->t_data_sg);
if (frame_len) {
sg = se_cmd->t_data_sg;
mem_len = sg->length;
mem_off = sg->offset;
page = sg_page(sg);
}
while (frame_len) {
if (!mem_len) {
sg = sg_next(sg);
mem_len = sg->length;
mem_off = sg->offset;
page = sg_page(sg);
}
if (rel_off >= mem_len) {
rel_off -= mem_len;
mem_len = 0;
continue;
}
mem_off += rel_off;
mem_len -= rel_off;
rel_off = 0;
tlen = min(mem_len, frame_len);
to = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
page_addr = to;
to += offset_in_page(mem_off);
tlen = min(tlen, (size_t)(PAGE_SIZE -
offset_in_page(mem_off)));
memcpy(to, from, tlen);
kunmap_atomic(page_addr);
from += tlen;
frame_len -= tlen;
mem_off += tlen;
mem_len -= tlen;
cmd->write_data_len += tlen;
}
last_frame:
if (cmd->write_data_len == se_cmd->data_length) {
INIT_WORK(&cmd->work, ft_execute_work);
queue_work(cmd->sess->tport->tpg->workqueue, &cmd->work);
}
drop:
fc_frame_free(fp);
}
/*
 * Handle and clean up any HW-specific resources on
 * received ABORTs, errors, or timeouts.
*/
void ft_invl_hw_context(struct ft_cmd *cmd)
{
struct fc_seq *seq;
struct fc_exch *ep = NULL;
struct fc_lport *lport = NULL;
BUG_ON(!cmd);
seq = cmd->seq;
/* Cleanup the DDP context in HW if DDP was setup */
if (cmd->was_ddp_setup && seq) {
ep = fc_seq_exch(seq);
if (ep) {
lport = ep->lp;
if (lport && (ep->xid <= lport->lro_xid)) {
/*
* "ddp_done" trigger invalidation of HW
* specific DDP context
*/
cmd->write_data_len = lport->tt.ddp_done(lport,
ep->xid);
/*
 * Reset the flag to indicate that the HW DDP
 * context has been invalidated, to avoid
 * re-invalidation of the same context (the
 * context is identified by ep->xid).
*/
cmd->was_ddp_setup = 0;
}
}
}
}
|
linux-master
|
drivers/target/tcm_fc/tfc_io.c
|
/*******************************************************************************
*
* This file contains the Linux/SCSI LLD virtual SCSI initiator driver
* for emulated SAS initiator ports
*
* © Copyright 2011-2013 Datera, Inc.
*
* Licensed to the Linux Foundation under the General Public License (GPL) version 2.
*
* Author: Nicholas A. Bellinger <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "tcm_loop.h"
#define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev)
static struct kmem_cache *tcm_loop_cmd_cache;
static int tcm_loop_hba_no_cnt;
static int tcm_loop_queue_status(struct se_cmd *se_cmd);
static unsigned int tcm_loop_nr_hw_queues = 1;
module_param_named(nr_hw_queues, tcm_loop_nr_hw_queues, uint, 0644);
static unsigned int tcm_loop_can_queue = 1024;
module_param_named(can_queue, tcm_loop_can_queue, uint, 0644);
static unsigned int tcm_loop_cmd_per_lun = 1024;
module_param_named(cmd_per_lun, tcm_loop_cmd_per_lun, uint, 0644);
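/*
 * Usage sketch (editorial addition, not part of the original driver):
 * the three parameters above can be set at load time, e.g.
 *
 *	modprobe tcm_loop nr_hw_queues=4 can_queue=2048 cmd_per_lun=256
 *
 * and, since they are registered with mode 0644, they also appear under
 * /sys/module/tcm_loop/parameters/ afterwards.  The values shown are
 * arbitrary examples; changes made after load typically only affect
 * emulated SCSI hosts created subsequently.
 */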
/*
* Called from struct target_core_fabric_ops->check_stop_free()
*/
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
return transport_generic_free_cmd(se_cmd, 0);
}
static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);
struct scsi_cmnd *sc = tl_cmd->sc;
if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
else
scsi_done(sc);
}
static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
seq_puts(m, "tcm_loop_proc_info()\n");
return 0;
}
static int tcm_loop_driver_probe(struct device *);
static void tcm_loop_driver_remove(struct device *);
static struct bus_type tcm_loop_lld_bus = {
.name = "tcm_loop_bus",
.probe = tcm_loop_driver_probe,
.remove = tcm_loop_driver_remove,
};
static struct device_driver tcm_loop_driverfs = {
.name = "tcm_loop",
.bus = &tcm_loop_lld_bus,
};
/*
* Used with root_device_register() in tcm_loop_alloc_core_bus() below
*/
static struct device *tcm_loop_primary;
static void tcm_loop_target_queue_cmd(struct tcm_loop_cmd *tl_cmd)
{
struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
struct scsi_cmnd *sc = tl_cmd->sc;
struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_hba *tl_hba;
struct tcm_loop_tpg *tl_tpg;
struct scatterlist *sgl_bidi = NULL;
u32 sgl_bidi_count = 0, transfer_length;
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
/*
* Ensure that this tl_tpg reference from the incoming sc->device->id
* has already been configured via tcm_loop_make_naa_tpg().
*/
if (!tl_tpg->tl_hba) {
set_host_byte(sc, DID_NO_CONNECT);
goto out_done;
}
if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
goto out_done;
}
tl_nexus = tl_tpg->tl_nexus;
if (!tl_nexus) {
scmd_printk(KERN_ERR, sc,
"TCM_Loop I_T Nexus does not exist\n");
set_host_byte(sc, DID_ERROR);
goto out_done;
}
transfer_length = scsi_transfer_length(sc);
if (!scsi_prot_sg_count(sc) &&
scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
se_cmd->prot_pto = true;
/*
		 * The loopback transport doesn't support the
		 * WRITE_GENERATE and READ_STRIP protection
		 * information operations, so go ahead unprotected.
*/
transfer_length = scsi_bufflen(sc);
}
se_cmd->tag = tl_cmd->sc_cmd_tag;
target_init_cmd(se_cmd, tl_nexus->se_sess, &tl_cmd->tl_sense_buf[0],
tl_cmd->sc->device->lun, transfer_length,
TCM_SIMPLE_TAG, sc->sc_data_direction, 0);
if (target_submit_prep(se_cmd, sc->cmnd, scsi_sglist(sc),
scsi_sg_count(sc), sgl_bidi, sgl_bidi_count,
scsi_prot_sglist(sc), scsi_prot_sg_count(sc),
GFP_ATOMIC))
return;
target_queue_submission(se_cmd);
return;
out_done:
scsi_done(sc);
}
/*
* ->queuecommand can be and usually is called from interrupt context, so
* defer the actual submission to a workqueue.
*/
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
struct tcm_loop_cmd *tl_cmd = scsi_cmd_priv(sc);
pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n",
__func__, sc->device->host->host_no, sc->device->id,
sc->device->channel, sc->device->lun, sc->cmnd[0],
scsi_bufflen(sc));
memset(tl_cmd, 0, sizeof(*tl_cmd));
tl_cmd->sc = sc;
tl_cmd->sc_cmd_tag = scsi_cmd_to_rq(sc)->tag;
tcm_loop_target_queue_cmd(tl_cmd);
return 0;
}
/*
 * Called from SCSI EH process context to issue a TMR
 * to a struct scsi_device
*/
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
u64 lun, int task, enum tcm_tmreq_table tmr)
{
struct se_cmd *se_cmd;
struct se_session *se_sess;
struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_cmd *tl_cmd;
int ret = TMR_FUNCTION_FAILED, rc;
/*
* Locate the tl_nexus and se_sess pointers
*/
tl_nexus = tl_tpg->tl_nexus;
if (!tl_nexus) {
pr_err("Unable to perform device reset without active I_T Nexus\n");
return ret;
}
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
if (!tl_cmd)
return ret;
init_completion(&tl_cmd->tmr_done);
se_cmd = &tl_cmd->tl_se_cmd;
se_sess = tl_tpg->tl_nexus->se_sess;
rc = target_submit_tmr(se_cmd, se_sess, tl_cmd->tl_sense_buf, lun,
NULL, tmr, GFP_KERNEL, task,
TARGET_SCF_ACK_KREF);
if (rc < 0)
goto release;
wait_for_completion(&tl_cmd->tmr_done);
ret = se_cmd->se_tmr_req->response;
target_put_sess_cmd(se_cmd);
out:
return ret;
release:
kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
goto out;
}
static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
struct tcm_loop_hba *tl_hba;
struct tcm_loop_tpg *tl_tpg;
int ret;
/*
 * Locate the struct tcm_loop_hba pointer
*/
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
scsi_cmd_to_rq(sc)->tag, TMR_ABORT_TASK);
return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
/*
* Called from SCSI EH process context to issue a LUN_RESET TMR
* to struct scsi_device
*/
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
struct tcm_loop_hba *tl_hba;
struct tcm_loop_tpg *tl_tpg;
int ret;
/*
 * Locate the struct tcm_loop_hba pointer
*/
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
0, TMR_LUN_RESET);
return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
static int tcm_loop_target_reset(struct scsi_cmnd *sc)
{
struct tcm_loop_hba *tl_hba;
struct tcm_loop_tpg *tl_tpg;
/*
 * Locate the struct tcm_loop_hba pointer
*/
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
if (!tl_hba) {
pr_err("Unable to perform device reset without active I_T Nexus\n");
return FAILED;
}
/*
* Locate the tl_tpg pointer from TargetID in sc->device->id
*/
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
if (tl_tpg) {
tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
return SUCCESS;
}
return FAILED;
}
static const struct scsi_host_template tcm_loop_driver_template = {
.show_info = tcm_loop_show_info,
.proc_name = "tcm_loopback",
.name = "TCM_Loopback",
.queuecommand = tcm_loop_queuecommand,
.change_queue_depth = scsi_change_queue_depth,
.eh_abort_handler = tcm_loop_abort_task,
.eh_device_reset_handler = tcm_loop_device_reset,
.eh_target_reset_handler = tcm_loop_target_reset,
.this_id = -1,
.sg_tablesize = 256,
.max_sectors = 0xFFFF,
.dma_boundary = PAGE_SIZE - 1,
.module = THIS_MODULE,
.track_queue_depth = 1,
.cmd_size = sizeof(struct tcm_loop_cmd),
};
static int tcm_loop_driver_probe(struct device *dev)
{
struct tcm_loop_hba *tl_hba;
struct Scsi_Host *sh;
int error, host_prot;
tl_hba = to_tcm_loop_hba(dev);
sh = scsi_host_alloc(&tcm_loop_driver_template,
sizeof(struct tcm_loop_hba));
if (!sh) {
pr_err("Unable to allocate struct scsi_host\n");
return -ENODEV;
}
tl_hba->sh = sh;
/*
* Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
*/
*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
/*
 * Set up a single ID, Channel and LUN for now.
*/
sh->max_id = 2;
sh->max_lun = 0;
sh->max_channel = 0;
sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
sh->nr_hw_queues = tcm_loop_nr_hw_queues;
sh->can_queue = tcm_loop_can_queue;
sh->cmd_per_lun = tcm_loop_cmd_per_lun;
host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
scsi_host_set_prot(sh, host_prot);
scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
error = scsi_add_host(sh, &tl_hba->dev);
if (error) {
pr_err("%s: scsi_add_host failed\n", __func__);
scsi_host_put(sh);
return -ENODEV;
}
return 0;
}
static void tcm_loop_driver_remove(struct device *dev)
{
struct tcm_loop_hba *tl_hba;
struct Scsi_Host *sh;
tl_hba = to_tcm_loop_hba(dev);
sh = tl_hba->sh;
scsi_remove_host(sh);
scsi_host_put(sh);
}
static void tcm_loop_release_adapter(struct device *dev)
{
struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
kfree(tl_hba);
}
/*
 * Called from tcm_loop_make_scsi_hba() below
*/
static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
{
int ret;
tl_hba->dev.bus = &tcm_loop_lld_bus;
tl_hba->dev.parent = tcm_loop_primary;
tl_hba->dev.release = &tcm_loop_release_adapter;
dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
ret = device_register(&tl_hba->dev);
if (ret) {
pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
put_device(&tl_hba->dev);
return -ENODEV;
}
return 0;
}
/*
 * Called from tcm_loop_fabric_init() to load the emulated
 * tcm_loop SCSI bus.
*/
static int tcm_loop_alloc_core_bus(void)
{
int ret;
tcm_loop_primary = root_device_register("tcm_loop_0");
if (IS_ERR(tcm_loop_primary)) {
pr_err("Unable to allocate tcm_loop_primary\n");
return PTR_ERR(tcm_loop_primary);
}
ret = bus_register(&tcm_loop_lld_bus);
if (ret) {
pr_err("bus_register() failed for tcm_loop_lld_bus\n");
goto dev_unreg;
}
ret = driver_register(&tcm_loop_driverfs);
if (ret) {
pr_err("driver_register() failed for tcm_loop_driverfs\n");
goto bus_unreg;
}
pr_debug("Initialized TCM Loop Core Bus\n");
return ret;
bus_unreg:
bus_unregister(&tcm_loop_lld_bus);
dev_unreg:
root_device_unregister(tcm_loop_primary);
return ret;
}
static void tcm_loop_release_core_bus(void)
{
driver_unregister(&tcm_loop_driverfs);
bus_unregister(&tcm_loop_lld_bus);
root_device_unregister(tcm_loop_primary);
pr_debug("Releasing TCM Loop Core BUS\n");
}
static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
{
return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
}
static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
/*
* Return the passed NAA identifier for the Target Port
*/
return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
}
static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
/*
* This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
* to represent the SCSI Target Port.
*/
return tl_tpg(se_tpg)->tl_tpgt;
}
/*
 * Returning 1 here allows target_core_mod to generate a struct se_node_acl
 * based upon the incoming fabric-dependent SCSI Initiator Port
*/
static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
{
return 1;
}
static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
tl_se_tpg);
return tl_tpg->tl_fabric_prot_type;
}
static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
{
return 1;
}
static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);
return tl_cmd->sc_cmd_state;
}
static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
/*
 * Linux/SCSI has already sent down a struct scsi_cmnd with
 * sc->sc_data_direction of DMA_TO_DEVICE, and its scatterlist memory
 * has already been mapped to the se_cmd during command submission.
*
* We now tell TCM to add this WRITE CDB directly into the TCM storage
* object execution queue.
*/
target_execute_cmd(se_cmd);
return 0;
}
static int tcm_loop_queue_data_or_status(const char *func,
struct se_cmd *se_cmd, u8 scsi_status)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);
struct scsi_cmnd *sc = tl_cmd->sc;
pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
func, sc, sc->cmnd[0]);
if (se_cmd->sense_buffer &&
((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
(se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
memcpy(sc->sense_buffer, se_cmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
sc->result = SAM_STAT_CHECK_CONDITION;
} else
sc->result = scsi_status;
set_host_byte(sc, DID_OK);
if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
(se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
scsi_set_resid(sc, se_cmd->residual_count);
return 0;
}
static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
return tcm_loop_queue_data_or_status(__func__, se_cmd, SAM_STAT_GOOD);
}
static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
return tcm_loop_queue_data_or_status(__func__,
se_cmd, se_cmd->scsi_status);
}
static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);
/* Wake up tcm_loop_issue_tmr(). */
complete(&tl_cmd->tmr_done);
}
static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
{
return;
}
static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
switch (tl_hba->tl_proto_id) {
case SCSI_PROTOCOL_SAS:
return "SAS";
case SCSI_PROTOCOL_FCP:
return "FCP";
case SCSI_PROTOCOL_ISCSI:
return "iSCSI";
default:
break;
}
return "Unknown";
}
/* Start items for tcm_loop_port_cit */
static int tcm_loop_port_link(
struct se_portal_group *se_tpg,
struct se_lun *lun)
{
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
/*
* Add Linux/SCSI struct scsi_device by HCTL
*/
scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
return 0;
}
static void tcm_loop_port_unlink(
struct se_portal_group *se_tpg,
struct se_lun *se_lun)
{
struct scsi_device *sd;
struct tcm_loop_hba *tl_hba;
struct tcm_loop_tpg *tl_tpg;
tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
tl_hba = tl_tpg->tl_hba;
sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
se_lun->unpacked_lun);
if (!sd) {
pr_err("Unable to locate struct scsi_device for %d:%d:%llu\n",
0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
return;
}
/*
* Remove Linux/SCSI struct scsi_device by HCTL
*/
scsi_remove_device(sd);
scsi_device_put(sd);
atomic_dec_mb(&tl_tpg->tl_tpg_port_count);
pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}
/* End items for tcm_loop_port_cit */
static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
struct config_item *item, char *page)
{
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
tl_se_tpg);
return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
}
static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
struct config_item *item, const char *page, size_t count)
{
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
tl_se_tpg);
unsigned long val;
int ret = kstrtoul(page, 0, &val);
if (ret) {
pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
return ret;
}
if (val != 0 && val != 1 && val != 3) {
pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val);
return -EINVAL;
}
tl_tpg->tl_fabric_prot_type = val;
return count;
}
CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);
static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
&tcm_loop_tpg_attrib_attr_fabric_prot_type,
NULL,
};
/* Start items for tcm_loop_nexus_cit */
static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
struct se_session *se_sess, void *p)
{
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);
tl_tpg->tl_nexus = p;
return 0;
}
static int tcm_loop_make_nexus(
struct tcm_loop_tpg *tl_tpg,
const char *name)
{
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
struct tcm_loop_nexus *tl_nexus;
int ret;
if (tl_tpg->tl_nexus) {
pr_debug("tl_tpg->tl_nexus already exists\n");
return -EEXIST;
}
tl_nexus = kzalloc(sizeof(*tl_nexus), GFP_KERNEL);
if (!tl_nexus)
return -ENOMEM;
tl_nexus->se_sess = target_setup_session(&tl_tpg->tl_se_tpg, 0, 0,
TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
name, tl_nexus, tcm_loop_alloc_sess_cb);
if (IS_ERR(tl_nexus->se_sess)) {
ret = PTR_ERR(tl_nexus->se_sess);
kfree(tl_nexus);
return ret;
}
pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated %s Initiator Port: %s\n",
tcm_loop_dump_proto_id(tl_hba), name);
return 0;
}
static int tcm_loop_drop_nexus(
struct tcm_loop_tpg *tpg)
{
struct se_session *se_sess;
struct tcm_loop_nexus *tl_nexus;
tl_nexus = tpg->tl_nexus;
if (!tl_nexus)
return -ENODEV;
se_sess = tl_nexus->se_sess;
if (!se_sess)
return -ENODEV;
if (atomic_read(&tpg->tl_tpg_port_count)) {
pr_err("Unable to remove TCM_Loop I_T Nexus with active TPG port count: %d\n",
atomic_read(&tpg->tl_tpg_port_count));
return -EPERM;
}
pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated %s Initiator Port: %s\n",
tcm_loop_dump_proto_id(tpg->tl_hba),
tl_nexus->se_sess->se_node_acl->initiatorname);
/*
* Release the SCSI I_T Nexus to the emulated Target Port
*/
target_remove_session(se_sess);
tpg->tl_nexus = NULL;
kfree(tl_nexus);
return 0;
}
/* End items for tcm_loop_nexus_cit */
static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
{
struct se_portal_group *se_tpg = to_tpg(item);
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);
struct tcm_loop_nexus *tl_nexus;
ssize_t ret;
tl_nexus = tl_tpg->tl_nexus;
if (!tl_nexus)
return -ENODEV;
ret = snprintf(page, PAGE_SIZE, "%s\n",
tl_nexus->se_sess->se_node_acl->initiatorname);
return ret;
}
static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
const char *page, size_t count)
{
struct se_portal_group *se_tpg = to_tpg(item);
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
int ret;
/*
 * Shut down the active I_T nexus if "NULL" is passed.
*/
if (!strncmp(page, "NULL", 4)) {
ret = tcm_loop_drop_nexus(tl_tpg);
return (!ret) ? count : ret;
}
/*
* Otherwise make sure the passed virtual Initiator port WWN matches
* the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
* tcm_loop_make_nexus()
*/
if (strlen(page) >= TL_WWN_ADDR_LEN) {
pr_err("Emulated NAA Sas Address: %s, exceeds max: %d\n",
page, TL_WWN_ADDR_LEN);
return -EINVAL;
}
snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
ptr = strstr(i_port, "naa.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
i_port, tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
}
port_ptr = &i_port[0];
goto check_newline;
}
ptr = strstr(i_port, "fc.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
i_port, tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
}
port_ptr = &i_port[3]; /* Skip over "fc." */
goto check_newline;
}
ptr = strstr(i_port, "iqn.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
i_port, tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
}
port_ptr = &i_port[0];
goto check_newline;
}
pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
i_port);
return -EINVAL;
/*
* Clear any trailing newline for the NAA WWN
*/
check_newline:
if (i_port[strlen(i_port)-1] == '\n')
i_port[strlen(i_port)-1] = '\0';
ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
if (ret < 0)
return ret;
return count;
}
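/*
 * Usage sketch (editorial addition, not part of the original driver):
 * the nexus attribute implemented above is driven from configfs.
 * Assuming the standard configfs mount point and a SAS-style WWN, the
 * flow looks roughly like:
 *
 *	cd /sys/kernel/config/target/loopback
 *	mkdir -p naa.60014050088ff1d8/tpgt_0
 *	echo naa.60014051133ff1d9 > naa.60014050088ff1d8/tpgt_0/nexus
 *	# tear the nexus down again:
 *	echo NULL > naa.60014050088ff1d8/tpgt_0/nexus
 *
 * The WWNs are arbitrary examples; "fc." and "iqn." prefixed initiator
 * names are accepted as well, provided they match the protocol chosen
 * when the emulated HBA was created.
 */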
static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
char *page)
{
struct se_portal_group *se_tpg = to_tpg(item);
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);
const char *status = NULL;
ssize_t ret = -EINVAL;
switch (tl_tpg->tl_transport_status) {
case TCM_TRANSPORT_ONLINE:
status = "online";
break;
case TCM_TRANSPORT_OFFLINE:
status = "offline";
break;
default:
break;
}
if (status)
ret = snprintf(page, PAGE_SIZE, "%s\n", status);
return ret;
}
static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
const char *page, size_t count)
{
struct se_portal_group *se_tpg = to_tpg(item);
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);
if (!strncmp(page, "online", 6)) {
tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
return count;
}
if (!strncmp(page, "offline", 7)) {
tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
if (tl_tpg->tl_nexus) {
struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;
core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
}
return count;
}
return -EINVAL;
}
static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
char *page)
{
struct se_portal_group *se_tpg = to_tpg(item);
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
tl_hba->sh->host_no, tl_tpg->tl_tpgt);
}
CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);
static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
&tcm_loop_tpg_attr_nexus,
&tcm_loop_tpg_attr_transport_status,
&tcm_loop_tpg_attr_address,
NULL,
};
/* Start items for tcm_loop_naa_cit */
static struct se_portal_group *tcm_loop_make_naa_tpg(struct se_wwn *wwn,
const char *name)
{
struct tcm_loop_hba *tl_hba = container_of(wwn,
struct tcm_loop_hba, tl_hba_wwn);
struct tcm_loop_tpg *tl_tpg;
int ret;
unsigned long tpgt;
if (strstr(name, "tpgt_") != name) {
pr_err("Unable to locate \"tpgt_#\" directory group\n");
return ERR_PTR(-EINVAL);
}
if (kstrtoul(name+5, 10, &tpgt))
return ERR_PTR(-EINVAL);
if (tpgt >= TL_TPGS_PER_HBA) {
pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n",
tpgt, TL_TPGS_PER_HBA);
return ERR_PTR(-EINVAL);
}
tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
tl_tpg->tl_hba = tl_hba;
tl_tpg->tl_tpgt = tpgt;
/*
 * Register the tl_tpg as an emulated TCM Target Endpoint
*/
ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
if (ret < 0)
return ERR_PTR(-ENOMEM);
pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n",
tcm_loop_dump_proto_id(tl_hba),
config_item_name(&wwn->wwn_group.cg_item), tpgt);
return &tl_tpg->tl_se_tpg;
}
static void tcm_loop_drop_naa_tpg(
struct se_portal_group *se_tpg)
{
struct se_wwn *wwn = se_tpg->se_tpg_wwn;
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);
struct tcm_loop_hba *tl_hba;
unsigned short tpgt;
tl_hba = tl_tpg->tl_hba;
tpgt = tl_tpg->tl_tpgt;
/*
* Release the I_T Nexus for the Virtual target link if present
*/
tcm_loop_drop_nexus(tl_tpg);
/*
 * Deregister the tl_tpg as an emulated TCM Target Endpoint
*/
core_tpg_deregister(se_tpg);
tl_tpg->tl_hba = NULL;
tl_tpg->tl_tpgt = 0;
pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n",
tcm_loop_dump_proto_id(tl_hba),
config_item_name(&wwn->wwn_group.cg_item), tpgt);
}
/* End items for tcm_loop_naa_cit */
/* Start items for tcm_loop_cit */
static struct se_wwn *tcm_loop_make_scsi_hba(
struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
struct tcm_loop_hba *tl_hba;
struct Scsi_Host *sh;
char *ptr;
int ret, off = 0;
tl_hba = kzalloc(sizeof(*tl_hba), GFP_KERNEL);
if (!tl_hba)
return ERR_PTR(-ENOMEM);
/*
* Determine the emulated Protocol Identifier and Target Port Name
* based on the incoming configfs directory name.
*/
ptr = strstr(name, "naa.");
if (ptr) {
tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
goto check_len;
}
ptr = strstr(name, "fc.");
if (ptr) {
tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
off = 3; /* Skip over "fc." */
goto check_len;
}
ptr = strstr(name, "iqn.");
if (!ptr) {
pr_err("Unable to locate prefix for emulated Target Port: %s\n",
name);
ret = -EINVAL;
goto out;
}
tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
check_len:
if (strlen(name) >= TL_WWN_ADDR_LEN) {
pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n",
			tcm_loop_dump_proto_id(tl_hba), name, TL_WWN_ADDR_LEN);
ret = -EINVAL;
goto out;
}
snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
/*
 * Call device_register() on tl_hba->dev to register the emulated
 * Linux/SCSI LLD; the struct Scsi_Host at tl_hba->sh is set up by
 * the device_register() probe callback, tcm_loop_driver_probe().
*/
ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
if (ret)
return ERR_PTR(ret);
sh = tl_hba->sh;
tcm_loop_hba_no_cnt++;
pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
return &tl_hba->tl_hba_wwn;
out:
kfree(tl_hba);
return ERR_PTR(ret);
}
static void tcm_loop_drop_scsi_hba(
struct se_wwn *wwn)
{
struct tcm_loop_hba *tl_hba = container_of(wwn,
struct tcm_loop_hba, tl_hba_wwn);
pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
tl_hba->sh->host_no);
/*
* Call device_unregister() on the original tl_hba->dev.
 * tcm_loop_release_adapter() will then release *tl_hba.
*/
device_unregister(&tl_hba->dev);
}
/* Start items for tcm_loop_cit */
static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
{
return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
}
CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);
static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
&tcm_loop_wwn_attr_version,
NULL,
};
/* End items for tcm_loop_cit */
static const struct target_core_fabric_ops loop_ops = {
.module = THIS_MODULE,
.fabric_name = "loopback",
.tpg_get_wwn = tcm_loop_get_endpoint_wwn,
.tpg_get_tag = tcm_loop_get_tag,
.tpg_check_demo_mode = tcm_loop_check_demo_mode,
.tpg_check_prot_fabric_only = tcm_loop_check_prot_fabric_only,
.check_stop_free = tcm_loop_check_stop_free,
.release_cmd = tcm_loop_release_cmd,
.sess_get_index = tcm_loop_sess_get_index,
.write_pending = tcm_loop_write_pending,
.get_cmd_state = tcm_loop_get_cmd_state,
.queue_data_in = tcm_loop_queue_data_in,
.queue_status = tcm_loop_queue_status,
.queue_tm_rsp = tcm_loop_queue_tm_rsp,
.aborted_task = tcm_loop_aborted_task,
.fabric_make_wwn = tcm_loop_make_scsi_hba,
.fabric_drop_wwn = tcm_loop_drop_scsi_hba,
.fabric_make_tpg = tcm_loop_make_naa_tpg,
.fabric_drop_tpg = tcm_loop_drop_naa_tpg,
.fabric_post_link = tcm_loop_port_link,
.fabric_pre_unlink = tcm_loop_port_unlink,
.tfc_wwn_attrs = tcm_loop_wwn_attrs,
.tfc_tpg_base_attrs = tcm_loop_tpg_attrs,
.tfc_tpg_attrib_attrs = tcm_loop_tpg_attrib_attrs,
};
static int __init tcm_loop_fabric_init(void)
{
int ret = -ENOMEM;
tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
sizeof(struct tcm_loop_cmd),
__alignof__(struct tcm_loop_cmd),
0, NULL);
if (!tcm_loop_cmd_cache) {
pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n");
goto out;
}
ret = tcm_loop_alloc_core_bus();
if (ret)
goto out_destroy_cache;
ret = target_register_template(&loop_ops);
if (ret)
goto out_release_core_bus;
return 0;
out_release_core_bus:
tcm_loop_release_core_bus();
out_destroy_cache:
kmem_cache_destroy(tcm_loop_cmd_cache);
out:
return ret;
}
static void __exit tcm_loop_fabric_exit(void)
{
target_unregister_template(&loop_ops);
tcm_loop_release_core_bus();
kmem_cache_destroy(tcm_loop_cmd_cache);
}
MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
MODULE_AUTHOR("Nicholas A. Bellinger <[email protected]>");
MODULE_LICENSE("GPL");
module_init(tcm_loop_fabric_init);
module_exit(tcm_loop_fabric_exit);
|
linux-master
|
drivers/target/loopback/tcm_loop.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "tcm_remote.h"
static inline struct tcm_remote_tpg *remote_tpg(struct se_portal_group *se_tpg)
{
return container_of(se_tpg, struct tcm_remote_tpg, remote_se_tpg);
}
static char *tcm_remote_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
/*
* Return the passed NAA identifier for the Target Port
*/
return &remote_tpg(se_tpg)->remote_hba->remote_wwn_address[0];
}
static u16 tcm_remote_get_tag(struct se_portal_group *se_tpg)
{
/*
* This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
* to represent the SCSI Target Port.
*/
return remote_tpg(se_tpg)->remote_tpgt;
}
static int tcm_remote_dummy_cmd_fn(struct se_cmd *se_cmd)
{
return 0;
}
static void tcm_remote_dummy_cmd_void_fn(struct se_cmd *se_cmd)
{
}
static char *tcm_remote_dump_proto_id(struct tcm_remote_hba *remote_hba)
{
switch (remote_hba->remote_proto_id) {
case SCSI_PROTOCOL_SAS:
return "SAS";
case SCSI_PROTOCOL_SRP:
return "SRP";
case SCSI_PROTOCOL_FCP:
return "FCP";
case SCSI_PROTOCOL_ISCSI:
return "iSCSI";
default:
break;
}
return "Unknown";
}
static int tcm_remote_port_link(
struct se_portal_group *se_tpg,
struct se_lun *lun)
{
pr_debug("TCM_Remote_ConfigFS: Port Link LUN %lld Successful\n",
lun->unpacked_lun);
return 0;
}
static void tcm_remote_port_unlink(
struct se_portal_group *se_tpg,
struct se_lun *lun)
{
pr_debug("TCM_Remote_ConfigFS: Port Unlink LUN %lld Successful\n",
lun->unpacked_lun);
}
static struct se_portal_group *tcm_remote_make_tpg(
struct se_wwn *wwn,
const char *name)
{
struct tcm_remote_hba *remote_hba = container_of(wwn,
struct tcm_remote_hba, remote_hba_wwn);
struct tcm_remote_tpg *remote_tpg;
unsigned long tpgt;
int ret;
if (strstr(name, "tpgt_") != name) {
pr_err("Unable to locate \"tpgt_#\" directory group\n");
return ERR_PTR(-EINVAL);
}
if (kstrtoul(name + 5, 10, &tpgt))
return ERR_PTR(-EINVAL);
if (tpgt >= TL_TPGS_PER_HBA) {
pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n",
tpgt, TL_TPGS_PER_HBA);
return ERR_PTR(-EINVAL);
}
remote_tpg = &remote_hba->remote_hba_tpgs[tpgt];
remote_tpg->remote_hba = remote_hba;
remote_tpg->remote_tpgt = tpgt;
/*
 * Register the remote_tpg as an emulated TCM Target Endpoint
*/
ret = core_tpg_register(wwn, &remote_tpg->remote_se_tpg,
remote_hba->remote_proto_id);
if (ret < 0)
return ERR_PTR(-ENOMEM);
pr_debug("TCM_Remote_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n",
tcm_remote_dump_proto_id(remote_hba),
config_item_name(&wwn->wwn_group.cg_item), tpgt);
return &remote_tpg->remote_se_tpg;
}
static void tcm_remote_drop_tpg(struct se_portal_group *se_tpg)
{
struct se_wwn *wwn = se_tpg->se_tpg_wwn;
struct tcm_remote_tpg *remote_tpg = container_of(se_tpg,
struct tcm_remote_tpg, remote_se_tpg);
struct tcm_remote_hba *remote_hba;
unsigned short tpgt;
remote_hba = remote_tpg->remote_hba;
tpgt = remote_tpg->remote_tpgt;
/*
 * Deregister the remote_tpg as an emulated TCM Target Endpoint
*/
core_tpg_deregister(se_tpg);
remote_tpg->remote_hba = NULL;
remote_tpg->remote_tpgt = 0;
pr_debug("TCM_Remote_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n",
tcm_remote_dump_proto_id(remote_hba),
config_item_name(&wwn->wwn_group.cg_item), tpgt);
}
static struct se_wwn *tcm_remote_make_wwn(
struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
struct tcm_remote_hba *remote_hba;
char *ptr;
int ret, off = 0;
remote_hba = kzalloc(sizeof(*remote_hba), GFP_KERNEL);
if (!remote_hba)
return ERR_PTR(-ENOMEM);
/*
* Determine the emulated Protocol Identifier and Target Port Name
* based on the incoming configfs directory name.
*/
ptr = strstr(name, "naa.");
if (ptr) {
remote_hba->remote_proto_id = SCSI_PROTOCOL_SAS;
goto check_len;
}
ptr = strstr(name, "fc.");
if (ptr) {
remote_hba->remote_proto_id = SCSI_PROTOCOL_FCP;
off = 3; /* Skip over "fc." */
goto check_len;
}
ptr = strstr(name, "0x");
if (ptr) {
remote_hba->remote_proto_id = SCSI_PROTOCOL_SRP;
off = 2; /* Skip over "0x" */
goto check_len;
}
ptr = strstr(name, "iqn.");
if (!ptr) {
pr_err("Unable to locate prefix for emulated Target Port: %s\n",
name);
ret = -EINVAL;
goto out;
}
remote_hba->remote_proto_id = SCSI_PROTOCOL_ISCSI;
check_len:
if (strlen(name) >= TL_WWN_ADDR_LEN) {
pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n",
			tcm_remote_dump_proto_id(remote_hba), name, TL_WWN_ADDR_LEN);
ret = -EINVAL;
goto out;
}
snprintf(&remote_hba->remote_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
pr_debug("TCM_Remote_ConfigFS: Allocated emulated Target %s Address: %s\n",
tcm_remote_dump_proto_id(remote_hba), name);
return &remote_hba->remote_hba_wwn;
out:
kfree(remote_hba);
return ERR_PTR(ret);
}
static void tcm_remote_drop_wwn(struct se_wwn *wwn)
{
struct tcm_remote_hba *remote_hba = container_of(wwn,
struct tcm_remote_hba, remote_hba_wwn);
pr_debug("TCM_Remote_ConfigFS: Deallocating emulated Target %s Address: %s\n",
tcm_remote_dump_proto_id(remote_hba),
remote_hba->remote_wwn_address);
kfree(remote_hba);
}
static ssize_t tcm_remote_wwn_version_show(struct config_item *item, char *page)
{
return sprintf(page, "TCM Remote Fabric module %s\n", TCM_REMOTE_VERSION);
}
CONFIGFS_ATTR_RO(tcm_remote_wwn_, version);
static struct configfs_attribute *tcm_remote_wwn_attrs[] = {
&tcm_remote_wwn_attr_version,
NULL,
};
static const struct target_core_fabric_ops remote_ops = {
.module = THIS_MODULE,
.fabric_name = "remote",
.tpg_get_wwn = tcm_remote_get_endpoint_wwn,
.tpg_get_tag = tcm_remote_get_tag,
.check_stop_free = tcm_remote_dummy_cmd_fn,
.release_cmd = tcm_remote_dummy_cmd_void_fn,
.write_pending = tcm_remote_dummy_cmd_fn,
.queue_data_in = tcm_remote_dummy_cmd_fn,
.queue_status = tcm_remote_dummy_cmd_fn,
.queue_tm_rsp = tcm_remote_dummy_cmd_void_fn,
.aborted_task = tcm_remote_dummy_cmd_void_fn,
.fabric_make_wwn = tcm_remote_make_wwn,
.fabric_drop_wwn = tcm_remote_drop_wwn,
.fabric_make_tpg = tcm_remote_make_tpg,
.fabric_drop_tpg = tcm_remote_drop_tpg,
.fabric_post_link = tcm_remote_port_link,
.fabric_pre_unlink = tcm_remote_port_unlink,
.tfc_wwn_attrs = tcm_remote_wwn_attrs,
};
static int __init tcm_remote_fabric_init(void)
{
return target_register_template(&remote_ops);
}
static void __exit tcm_remote_fabric_exit(void)
{
target_unregister_template(&remote_ops);
}
MODULE_DESCRIPTION("TCM virtual remote target");
MODULE_AUTHOR("Dmitry Bogdanov <[email protected]>");
MODULE_LICENSE("GPL");
module_init(tcm_remote_fabric_init);
module_exit(tcm_remote_fabric_exit);
|
linux-master
|
drivers/target/tcm_remote/tcm_remote.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* APM X-Gene SLIMpro MailBox Driver
*
* Copyright (c) 2015, Applied Micro Circuits Corporation
* Author: Feng Kan [email protected]
*/
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#define MBOX_CON_NAME "slimpro-mbox"
#define MBOX_REG_SET_OFFSET 0x1000
#define MBOX_CNT 8
#define MBOX_STATUS_AVAIL_MASK BIT(16)
#define MBOX_STATUS_ACK_MASK BIT(0)
/* Configuration and Status Registers */
#define REG_DB_IN 0x00
#define REG_DB_DIN0 0x04
#define REG_DB_DIN1 0x08
#define REG_DB_OUT 0x10
#define REG_DB_DOUT0 0x14
#define REG_DB_DOUT1 0x18
#define REG_DB_STAT 0x20
#define REG_DB_STATMASK 0x24
/**
 * struct slimpro_mbox_chan - X-Gene SlimPRO mailbox channel information
*
* @dev: Device to which it is attached
* @chan: Pointer to mailbox communication channel
* @reg: Base address to access channel registers
* @irq: Interrupt number of the channel
* @rx_msg: Received message storage
*/
struct slimpro_mbox_chan {
struct device *dev;
struct mbox_chan *chan;
void __iomem *reg;
int irq;
u32 rx_msg[3];
};
/**
 * struct slimpro_mbox - X-Gene SlimPRO Mailbox controller data
*
* X-Gene SlimPRO Mailbox controller has 8 communication channels.
* Each channel has a separate IRQ number assigned to it.
*
* @mb_ctrl: Representation of the communication channel controller
* @mc: Array of SlimPRO mailbox channels of the controller
* @chans: Array of mailbox communication channels
*
*/
struct slimpro_mbox {
struct mbox_controller mb_ctrl;
struct slimpro_mbox_chan mc[MBOX_CNT];
struct mbox_chan chans[MBOX_CNT];
};
static void mb_chan_send_msg(struct slimpro_mbox_chan *mb_chan, u32 *msg)
{
writel(msg[1], mb_chan->reg + REG_DB_DOUT0);
writel(msg[2], mb_chan->reg + REG_DB_DOUT1);
writel(msg[0], mb_chan->reg + REG_DB_OUT);
}
static void mb_chan_recv_msg(struct slimpro_mbox_chan *mb_chan)
{
mb_chan->rx_msg[1] = readl(mb_chan->reg + REG_DB_DIN0);
mb_chan->rx_msg[2] = readl(mb_chan->reg + REG_DB_DIN1);
mb_chan->rx_msg[0] = readl(mb_chan->reg + REG_DB_IN);
}
static int mb_chan_status_ack(struct slimpro_mbox_chan *mb_chan)
{
u32 val = readl(mb_chan->reg + REG_DB_STAT);
if (val & MBOX_STATUS_ACK_MASK) {
writel(MBOX_STATUS_ACK_MASK, mb_chan->reg + REG_DB_STAT);
return 1;
}
return 0;
}
static int mb_chan_status_avail(struct slimpro_mbox_chan *mb_chan)
{
u32 val = readl(mb_chan->reg + REG_DB_STAT);
if (val & MBOX_STATUS_AVAIL_MASK) {
mb_chan_recv_msg(mb_chan);
writel(MBOX_STATUS_AVAIL_MASK, mb_chan->reg + REG_DB_STAT);
return 1;
}
return 0;
}
static irqreturn_t slimpro_mbox_irq(int irq, void *id)
{
struct slimpro_mbox_chan *mb_chan = id;
if (mb_chan_status_ack(mb_chan))
mbox_chan_txdone(mb_chan->chan, 0);
if (mb_chan_status_avail(mb_chan))
mbox_chan_received_data(mb_chan->chan, mb_chan->rx_msg);
return IRQ_HANDLED;
}
static int slimpro_mbox_send_data(struct mbox_chan *chan, void *msg)
{
struct slimpro_mbox_chan *mb_chan = chan->con_priv;
mb_chan_send_msg(mb_chan, msg);
return 0;
}
static int slimpro_mbox_startup(struct mbox_chan *chan)
{
struct slimpro_mbox_chan *mb_chan = chan->con_priv;
int rc;
u32 val;
rc = devm_request_irq(mb_chan->dev, mb_chan->irq, slimpro_mbox_irq, 0,
MBOX_CON_NAME, mb_chan);
if (unlikely(rc)) {
dev_err(mb_chan->dev, "failed to register mailbox interrupt %d\n",
mb_chan->irq);
return rc;
}
/* Enable HW interrupt */
writel(MBOX_STATUS_ACK_MASK | MBOX_STATUS_AVAIL_MASK,
mb_chan->reg + REG_DB_STAT);
/* Unmask doorbell status interrupt */
val = readl(mb_chan->reg + REG_DB_STATMASK);
val &= ~(MBOX_STATUS_ACK_MASK | MBOX_STATUS_AVAIL_MASK);
writel(val, mb_chan->reg + REG_DB_STATMASK);
return 0;
}
static void slimpro_mbox_shutdown(struct mbox_chan *chan)
{
struct slimpro_mbox_chan *mb_chan = chan->con_priv;
u32 val;
/* Mask doorbell status interrupt */
val = readl(mb_chan->reg + REG_DB_STATMASK);
val |= (MBOX_STATUS_ACK_MASK | MBOX_STATUS_AVAIL_MASK);
writel(val, mb_chan->reg + REG_DB_STATMASK);
devm_free_irq(mb_chan->dev, mb_chan->irq, mb_chan);
}
static const struct mbox_chan_ops slimpro_mbox_ops = {
.send_data = slimpro_mbox_send_data,
.startup = slimpro_mbox_startup,
.shutdown = slimpro_mbox_shutdown,
};
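/*
 * Client-side sketch (editorial addition, not part of this driver): a
 * consumer of one of these doorbell channels would typically use the
 * generic mailbox client API from <linux/mailbox_client.h>, roughly:
 *
 *	static void my_rx(struct mbox_client *cl, void *msg)
 *	{
 *		u32 *words = msg;	// rx_msg[0..2] from mb_chan_recv_msg()
 *
 *		dev_info(cl->dev, "db 0x%x data 0x%x 0x%x\n",
 *			 words[0], words[1], words[2]);
 *	}
 *
 *	struct mbox_client cl = {
 *		.dev		= &pdev->dev,	// hypothetical client device
 *		.rx_callback	= my_rx,
 *		.tx_block	= true,
 *		.tx_tout	= 500,
 *	};
 *	struct mbox_chan *chan = mbox_request_channel(&cl, 0);
 *	u32 msg[3] = { cmd, data0, data1 };	// placeholder payload
 *
 *	if (!IS_ERR(chan)) {
 *		mbox_send_message(chan, msg);	// lands in REG_DB_OUT/DOUT0/1
 *		mbox_free_channel(chan);
 *	}
 *
 * The channel index and the payload layout are firmware defined; the
 * names my_rx, cmd, data0 and data1 are placeholders.
 */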
static int slimpro_mbox_probe(struct platform_device *pdev)
{
struct slimpro_mbox *ctx;
void __iomem *mb_base;
int rc;
int i;
ctx = devm_kzalloc(&pdev->dev, sizeof(struct slimpro_mbox), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
platform_set_drvdata(pdev, ctx);
mb_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mb_base))
return PTR_ERR(mb_base);
/* Setup mailbox links */
for (i = 0; i < MBOX_CNT; i++) {
ctx->mc[i].irq = platform_get_irq(pdev, i);
if (ctx->mc[i].irq < 0) {
if (i == 0) {
dev_err(&pdev->dev, "no available IRQ\n");
return -EINVAL;
}
dev_info(&pdev->dev, "no IRQ for channel %d\n", i);
break;
}
ctx->mc[i].dev = &pdev->dev;
ctx->mc[i].reg = mb_base + i * MBOX_REG_SET_OFFSET;
ctx->mc[i].chan = &ctx->chans[i];
ctx->chans[i].con_priv = &ctx->mc[i];
}
/* Setup mailbox controller */
ctx->mb_ctrl.dev = &pdev->dev;
ctx->mb_ctrl.chans = ctx->chans;
ctx->mb_ctrl.txdone_irq = true;
ctx->mb_ctrl.ops = &slimpro_mbox_ops;
ctx->mb_ctrl.num_chans = i;
rc = devm_mbox_controller_register(&pdev->dev, &ctx->mb_ctrl);
if (rc) {
dev_err(&pdev->dev,
"APM X-Gene SLIMpro MailBox register failed:%d\n", rc);
return rc;
}
dev_info(&pdev->dev, "APM X-Gene SLIMpro MailBox registered\n");
return 0;
}
static const struct of_device_id slimpro_of_match[] = {
{.compatible = "apm,xgene-slimpro-mbox" },
{ },
};
MODULE_DEVICE_TABLE(of, slimpro_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id slimpro_acpi_ids[] = {
{"APMC0D01", 0},
{}
};
MODULE_DEVICE_TABLE(acpi, slimpro_acpi_ids);
#endif
static struct platform_driver slimpro_mbox_driver = {
.probe = slimpro_mbox_probe,
.driver = {
.name = "xgene-slimpro-mbox",
.of_match_table = of_match_ptr(slimpro_of_match),
.acpi_match_table = ACPI_PTR(slimpro_acpi_ids)
},
};
static int __init slimpro_mbox_init(void)
{
return platform_driver_register(&slimpro_mbox_driver);
}
static void __exit slimpro_mbox_exit(void)
{
platform_driver_unregister(&slimpro_mbox_driver);
}
subsys_initcall(slimpro_mbox_init);
module_exit(slimpro_mbox_exit);
MODULE_DESCRIPTION("APM X-Gene SLIMpro Mailbox Driver");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/mailbox/mailbox-xgene-slimpro.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Microchip PolarFire SoC (MPFS) system controller/mailbox controller driver
*
* Copyright (c) 2020-2022 Microchip Corporation. All rights reserved.
*
* Author: Conor Dooley <[email protected]>
*
*/
#include <linux/io.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
#include <soc/microchip/mpfs.h>
#define SERVICES_CR_OFFSET 0x50u
#define SERVICES_SR_OFFSET 0x54u
#define MAILBOX_REG_OFFSET 0x800u
#define MSS_SYS_MAILBOX_DATA_OFFSET 0u
#define SCB_MASK_WIDTH 16u
/* SCBCTRL service control register */
#define SCB_CTRL_REQ (0)
#define SCB_CTRL_REQ_MASK BIT(SCB_CTRL_REQ)
#define SCB_CTRL_BUSY (1)
#define SCB_CTRL_BUSY_MASK BIT(SCB_CTRL_BUSY)
#define SCB_CTRL_ABORT (2)
#define SCB_CTRL_ABORT_MASK BIT(SCB_CTRL_ABORT)
#define SCB_CTRL_NOTIFY (3)
#define SCB_CTRL_NOTIFY_MASK BIT(SCB_CTRL_NOTIFY)
#define SCB_CTRL_POS (16)
#define SCB_CTRL_MASK GENMASK(SCB_CTRL_POS + SCB_MASK_WIDTH - 1, SCB_CTRL_POS)
/* SCBCTRL service status register */
#define SCB_STATUS_REQ (0)
#define SCB_STATUS_REQ_MASK BIT(SCB_STATUS_REQ)
#define SCB_STATUS_BUSY (1)
#define SCB_STATUS_BUSY_MASK BIT(SCB_STATUS_BUSY)
#define SCB_STATUS_ABORT (2)
#define SCB_STATUS_ABORT_MASK BIT(SCB_STATUS_ABORT)
#define SCB_STATUS_NOTIFY (3)
#define SCB_STATUS_NOTIFY_MASK BIT(SCB_STATUS_NOTIFY)
#define SCB_STATUS_POS (16)
#define SCB_STATUS_MASK GENMASK(SCB_STATUS_POS + SCB_MASK_WIDTH - 1, SCB_STATUS_POS)
struct mpfs_mbox {
struct mbox_controller controller;
struct device *dev;
int irq;
void __iomem *ctrl_base;
void __iomem *mbox_base;
void __iomem *int_reg;
struct mbox_chan chans[1];
struct mpfs_mss_response *response;
u16 resp_offset;
};
static bool mpfs_mbox_busy(struct mpfs_mbox *mbox)
{
u32 status;
status = readl_relaxed(mbox->ctrl_base + SERVICES_SR_OFFSET);
return status & SCB_STATUS_BUSY_MASK;
}
static bool mpfs_mbox_last_tx_done(struct mbox_chan *chan)
{
struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
struct mpfs_mss_response *response = mbox->response;
u32 val;
if (mpfs_mbox_busy(mbox))
return false;
/*
* The service status is stored in bits 31:16 of the SERVICES_SR
* register & is only valid when the system controller is not busy.
* Failed services are intended to generate interrupts, but in reality
* this does not happen, so the status must be checked here.
*/
val = readl_relaxed(mbox->ctrl_base + SERVICES_SR_OFFSET);
response->resp_status = (val & SCB_STATUS_MASK) >> SCB_STATUS_POS;
return true;
}
static int mpfs_mbox_send_data(struct mbox_chan *chan, void *data)
{
struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
struct mpfs_mss_msg *msg = data;
u32 tx_trigger;
u16 opt_sel;
u32 val = 0u;
mbox->response = msg->response;
mbox->resp_offset = msg->resp_offset;
if (mpfs_mbox_busy(mbox))
return -EBUSY;
if (msg->cmd_data_size) {
u32 index;
u8 extra_bits = msg->cmd_data_size & 3;
u32 *word_buf = (u32 *)msg->cmd_data;
for (index = 0; index < (msg->cmd_data_size / 4); index++)
writel_relaxed(word_buf[index],
mbox->mbox_base + msg->mbox_offset + index * 0x4);
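/*
 * Handle a payload that is not a multiple of 4 bytes: the trailing
 * bytes are merged into the next mailbox word with a read-modify-write
 * so the untouched byte lanes keep their previous contents.
 */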
if (extra_bits) {
u8 i;
u8 byte_off = ALIGN_DOWN(msg->cmd_data_size, 4);
u8 *byte_buf = msg->cmd_data + byte_off;
val = readl_relaxed(mbox->mbox_base + msg->mbox_offset + index * 0x4);
for (i = 0u; i < extra_bits; i++) {
val &= ~(0xffu << (i * 8u));
val |= (byte_buf[i] << (i * 8u));
}
writel_relaxed(val, mbox->mbox_base + msg->mbox_offset + index * 0x4);
}
}
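/*
 * Pack the mailbox offset and the 7-bit opcode into the upper half of
 * the service control register, then set the request and notify bits
 * to start the service.
 */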
opt_sel = ((msg->mbox_offset << 7u) | (msg->cmd_opcode & 0x7fu));
tx_trigger = (opt_sel << SCB_CTRL_POS) & SCB_CTRL_MASK;
tx_trigger |= SCB_CTRL_REQ_MASK | SCB_STATUS_NOTIFY_MASK;
writel_relaxed(tx_trigger, mbox->ctrl_base + SERVICES_CR_OFFSET);
return 0;
}
static void mpfs_mbox_rx_data(struct mbox_chan *chan)
{
struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
struct mpfs_mss_response *response = mbox->response;
u16 num_words = ALIGN((response->resp_size), (4)) / 4U;
u32 i;
if (!response->resp_msg) {
dev_err(mbox->dev, "failed to assign memory for response %d\n", -ENOMEM);
return;
}
/*
* We should *never* get an interrupt while the controller is
* still in the busy state. If we do, something has gone badly
* wrong & the content of the mailbox would not be valid.
*/
if (mpfs_mbox_busy(mbox)) {
dev_err(mbox->dev, "got an interrupt but system controller is busy\n");
response->resp_status = 0xDEAD;
return;
}
for (i = 0; i < num_words; i++) {
response->resp_msg[i] =
readl_relaxed(mbox->mbox_base
+ mbox->resp_offset + i * 0x4);
}
mbox_chan_received_data(chan, response);
}
static irqreturn_t mpfs_mbox_inbox_isr(int irq, void *data)
{
struct mbox_chan *chan = data;
struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
writel_relaxed(0, mbox->int_reg);
mpfs_mbox_rx_data(chan);
return IRQ_HANDLED;
}
static int mpfs_mbox_startup(struct mbox_chan *chan)
{
struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
int ret = 0;
if (!mbox)
return -EINVAL;
ret = devm_request_irq(mbox->dev, mbox->irq, mpfs_mbox_inbox_isr, 0, "mpfs-mailbox", chan);
if (ret)
dev_err(mbox->dev, "failed to register mailbox interrupt:%d\n", ret);
return ret;
}
static void mpfs_mbox_shutdown(struct mbox_chan *chan)
{
struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
devm_free_irq(mbox->dev, mbox->irq, chan);
}
static const struct mbox_chan_ops mpfs_mbox_ops = {
.send_data = mpfs_mbox_send_data,
.startup = mpfs_mbox_startup,
.shutdown = mpfs_mbox_shutdown,
.last_tx_done = mpfs_mbox_last_tx_done,
};
static int mpfs_mbox_probe(struct platform_device *pdev)
{
struct mpfs_mbox *mbox;
struct resource *regs;
int ret;
mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
if (!mbox)
return -ENOMEM;
mbox->ctrl_base = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
if (IS_ERR(mbox->ctrl_base))
return PTR_ERR(mbox->ctrl_base);
mbox->int_reg = devm_platform_get_and_ioremap_resource(pdev, 1, &regs);
if (IS_ERR(mbox->int_reg))
return PTR_ERR(mbox->int_reg);
mbox->mbox_base = devm_platform_get_and_ioremap_resource(pdev, 2, &regs);
if (IS_ERR(mbox->mbox_base)) // account for the old dt-binding w/ 2 regs
mbox->mbox_base = mbox->ctrl_base + MAILBOX_REG_OFFSET;
mbox->irq = platform_get_irq(pdev, 0);
if (mbox->irq < 0)
return mbox->irq;
mbox->dev = &pdev->dev;
mbox->chans[0].con_priv = mbox;
mbox->controller.dev = mbox->dev;
mbox->controller.num_chans = 1;
mbox->controller.chans = mbox->chans;
mbox->controller.ops = &mpfs_mbox_ops;
mbox->controller.txdone_poll = true;
mbox->controller.txpoll_period = 10u;
ret = devm_mbox_controller_register(&pdev->dev, &mbox->controller);
if (ret) {
dev_err(&pdev->dev, "Registering MPFS mailbox controller failed\n");
return ret;
}
dev_info(&pdev->dev, "Registered MPFS mailbox controller driver\n");
return 0;
}
static const struct of_device_id mpfs_mbox_of_match[] = {
{.compatible = "microchip,mpfs-mailbox", },
{},
};
MODULE_DEVICE_TABLE(of, mpfs_mbox_of_match);
static struct platform_driver mpfs_mbox_driver = {
.driver = {
.name = "mpfs-mailbox",
.of_match_table = mpfs_mbox_of_match,
},
.probe = mpfs_mbox_probe,
};
module_platform_driver(mpfs_mbox_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Conor Dooley <[email protected]>");
MODULE_DESCRIPTION("MPFS mailbox controller driver");
|
linux-master
|
drivers/mailbox/mailbox-mpfs.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013-2015 Fujitsu Semiconductor Ltd.
* Copyright (C) 2015 Linaro Ltd.
* Author: Jassi Brar <[email protected]>
*/
#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#define INTR_STAT_OFS 0x0
#define INTR_SET_OFS 0x8
#define INTR_CLR_OFS 0x10
#define MHU_LP_OFFSET 0x0
#define MHU_HP_OFFSET 0x20
#define MHU_SEC_OFFSET 0x200
#define TX_REG_OFFSET 0x100
#define MHU_CHANS 3
struct mhu_link {
unsigned irq;
void __iomem *tx_reg;
void __iomem *rx_reg;
};
struct arm_mhu {
void __iomem *base;
struct mhu_link mlink[MHU_CHANS];
struct mbox_chan chan[MHU_CHANS];
struct mbox_controller mbox;
};
static irqreturn_t mhu_rx_interrupt(int irq, void *p)
{
struct mbox_chan *chan = p;
struct mhu_link *mlink = chan->con_priv;
u32 val;
val = readl_relaxed(mlink->rx_reg + INTR_STAT_OFS);
if (!val)
return IRQ_NONE;
mbox_chan_received_data(chan, (void *)&val);
writel_relaxed(val, mlink->rx_reg + INTR_CLR_OFS);
return IRQ_HANDLED;
}
static bool mhu_last_tx_done(struct mbox_chan *chan)
{
struct mhu_link *mlink = chan->con_priv;
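/*
 * The remote end clears the set bits once it has taken the data, so a
 * zero status means the previous transmission has been consumed.
 */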
u32 val = readl_relaxed(mlink->tx_reg + INTR_STAT_OFS);
return (val == 0);
}
static int mhu_send_data(struct mbox_chan *chan, void *data)
{
struct mhu_link *mlink = chan->con_priv;
u32 *arg = data;
writel_relaxed(*arg, mlink->tx_reg + INTR_SET_OFS);
return 0;
}
static int mhu_startup(struct mbox_chan *chan)
{
struct mhu_link *mlink = chan->con_priv;
u32 val;
int ret;
val = readl_relaxed(mlink->tx_reg + INTR_STAT_OFS);
writel_relaxed(val, mlink->tx_reg + INTR_CLR_OFS);
ret = request_irq(mlink->irq, mhu_rx_interrupt,
IRQF_SHARED, "mhu_link", chan);
if (ret) {
dev_err(chan->mbox->dev,
"Unable to acquire IRQ %d\n", mlink->irq);
return ret;
}
return 0;
}
static void mhu_shutdown(struct mbox_chan *chan)
{
struct mhu_link *mlink = chan->con_priv;
free_irq(mlink->irq, chan);
}
static const struct mbox_chan_ops mhu_ops = {
.send_data = mhu_send_data,
.startup = mhu_startup,
.shutdown = mhu_shutdown,
.last_tx_done = mhu_last_tx_done,
};
static int mhu_probe(struct amba_device *adev, const struct amba_id *id)
{
int i, err;
struct arm_mhu *mhu;
struct device *dev = &adev->dev;
int mhu_reg[MHU_CHANS] = {MHU_LP_OFFSET, MHU_HP_OFFSET, MHU_SEC_OFFSET};
if (!of_device_is_compatible(dev->of_node, "arm,mhu"))
return -ENODEV;
/* Allocate memory for device */
mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
if (!mhu)
return -ENOMEM;
mhu->base = devm_ioremap_resource(dev, &adev->res);
if (IS_ERR(mhu->base))
return PTR_ERR(mhu->base);
for (i = 0; i < MHU_CHANS; i++) {
mhu->chan[i].con_priv = &mhu->mlink[i];
mhu->mlink[i].irq = adev->irq[i];
mhu->mlink[i].rx_reg = mhu->base + mhu_reg[i];
mhu->mlink[i].tx_reg = mhu->mlink[i].rx_reg + TX_REG_OFFSET;
}
mhu->mbox.dev = dev;
mhu->mbox.chans = &mhu->chan[0];
mhu->mbox.num_chans = MHU_CHANS;
mhu->mbox.ops = &mhu_ops;
mhu->mbox.txdone_irq = false;
mhu->mbox.txdone_poll = true;
mhu->mbox.txpoll_period = 1;
amba_set_drvdata(adev, mhu);
err = devm_mbox_controller_register(dev, &mhu->mbox);
if (err) {
dev_err(dev, "Failed to register mailboxes %d\n", err);
return err;
}
dev_info(dev, "ARM MHU Mailbox registered\n");
return 0;
}
static struct amba_id mhu_ids[] = {
{
.id = 0x1bb098,
.mask = 0xffffff,
},
{ 0, 0 },
};
MODULE_DEVICE_TABLE(amba, mhu_ids);
static struct amba_driver arm_mhu_driver = {
.drv = {
.name = "mhu",
},
.id_table = mhu_ids,
.probe = mhu_probe,
};
module_amba_driver(arm_mhu_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ARM MHU Driver");
MODULE_AUTHOR("Jassi Brar <[email protected]>");
|
linux-master
|
drivers/mailbox/arm_mhu.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* STi Mailbox
*
* Copyright (C) 2015 ST Microelectronics
*
* Author: Lee Jones <[email protected]> for ST Microelectronics
*
* Based on the original driver written by;
* Alexandre Torgue, Olivier Lebreton and Loic Pallardy
*/
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "mailbox.h"
#define STI_MBOX_INST_MAX 4 /* RAM saving: Max supported instances */
#define STI_MBOX_CHAN_MAX 20 /* RAM saving: Max supported channels */
#define STI_IRQ_VAL_OFFSET 0x04 /* Read interrupt status */
#define STI_IRQ_SET_OFFSET 0x24 /* Generate a Tx channel interrupt */
#define STI_IRQ_CLR_OFFSET 0x44 /* Clear pending Rx interrupts */
#define STI_ENA_VAL_OFFSET 0x64 /* Read enable status */
#define STI_ENA_SET_OFFSET 0x84 /* Enable a channel */
#define STI_ENA_CLR_OFFSET 0xa4 /* Disable a channel */
#define MBOX_BASE(mdev, inst) ((mdev)->base + ((inst) * 4))
/**
* struct sti_mbox_device - STi Mailbox device data
*
* @dev: Device to which it is attached
* @mbox: Representation of a communication channel controller
* @base: Base address of the register mapping region
* @name: Name of the mailbox
* @enabled: Local copy of enabled channels
* @lock: Mutex protecting enabled status
*
* An IP Mailbox is currently composed of 4 instances
* Each instance is currently composed of 32 channels
* This means that we have 128 channels per Mailbox
* A channel can be used for TX or RX
*/
struct sti_mbox_device {
struct device *dev;
struct mbox_controller *mbox;
void __iomem *base;
const char *name;
u32 enabled[STI_MBOX_INST_MAX];
spinlock_t lock;
};
/**
* struct sti_mbox_pdata - STi Mailbox platform specific configuration
*
* @num_inst: Maximum number of instances in one HW Mailbox
* @num_chan: Maximum number of channel per instance
*/
struct sti_mbox_pdata {
unsigned int num_inst;
unsigned int num_chan;
};
/**
* struct sti_channel - STi Mailbox allocated channel information
*
* @mdev: Pointer to parent Mailbox device
* @instance: Instance number channel resides in
* @channel: Channel number pertaining to this container
*/
struct sti_channel {
struct sti_mbox_device *mdev;
unsigned int instance;
unsigned int channel;
};
static inline bool sti_mbox_channel_is_enabled(struct mbox_chan *chan)
{
struct sti_channel *chan_info = chan->con_priv;
struct sti_mbox_device *mdev = chan_info->mdev;
unsigned int instance = chan_info->instance;
unsigned int channel = chan_info->channel;
return mdev->enabled[instance] & BIT(channel);
}
static inline
struct mbox_chan *sti_mbox_to_channel(struct mbox_controller *mbox,
unsigned int instance,
unsigned int channel)
{
struct sti_channel *chan_info;
int i;
for (i = 0; i < mbox->num_chans; i++) {
chan_info = mbox->chans[i].con_priv;
if (chan_info &&
chan_info->instance == instance &&
chan_info->channel == channel)
return &mbox->chans[i];
}
dev_err(mbox->dev,
"Channel not registered: instance: %d channel: %d\n",
instance, channel);
return NULL;
}
static void sti_mbox_enable_channel(struct mbox_chan *chan)
{
struct sti_channel *chan_info = chan->con_priv;
struct sti_mbox_device *mdev = chan_info->mdev;
unsigned int instance = chan_info->instance;
unsigned int channel = chan_info->channel;
unsigned long flags;
void __iomem *base = MBOX_BASE(mdev, instance);
spin_lock_irqsave(&mdev->lock, flags);
mdev->enabled[instance] |= BIT(channel);
writel_relaxed(BIT(channel), base + STI_ENA_SET_OFFSET);
spin_unlock_irqrestore(&mdev->lock, flags);
}
static void sti_mbox_disable_channel(struct mbox_chan *chan)
{
struct sti_channel *chan_info = chan->con_priv;
struct sti_mbox_device *mdev = chan_info->mdev;
unsigned int instance = chan_info->instance;
unsigned int channel = chan_info->channel;
unsigned long flags;
void __iomem *base = MBOX_BASE(mdev, instance);
spin_lock_irqsave(&mdev->lock, flags);
mdev->enabled[instance] &= ~BIT(channel);
writel_relaxed(BIT(channel), base + STI_ENA_CLR_OFFSET);
spin_unlock_irqrestore(&mdev->lock, flags);
}
static void sti_mbox_clear_irq(struct mbox_chan *chan)
{
struct sti_channel *chan_info = chan->con_priv;
struct sti_mbox_device *mdev = chan_info->mdev;
unsigned int instance = chan_info->instance;
unsigned int channel = chan_info->channel;
void __iomem *base = MBOX_BASE(mdev, instance);
writel_relaxed(BIT(channel), base + STI_IRQ_CLR_OFFSET);
}
static struct mbox_chan *sti_mbox_irq_to_channel(struct sti_mbox_device *mdev,
unsigned int instance)
{
struct mbox_controller *mbox = mdev->mbox;
struct mbox_chan *chan = NULL;
unsigned int channel;
unsigned long bits;
void __iomem *base = MBOX_BASE(mdev, instance);
bits = readl_relaxed(base + STI_IRQ_VAL_OFFSET);
if (!bits)
/* No IRQs fired in specified instance */
return NULL;
/* An IRQ has fired, find the associated channel */
for (channel = 0; bits; channel++) {
if (!test_and_clear_bit(channel, &bits))
continue;
chan = sti_mbox_to_channel(mbox, instance, channel);
if (chan) {
dev_dbg(mbox->dev,
"IRQ fired on instance: %d channel: %d\n",
instance, channel);
break;
}
}
return chan;
}
static irqreturn_t sti_mbox_thread_handler(int irq, void *data)
{
struct sti_mbox_device *mdev = data;
struct sti_mbox_pdata *pdata = dev_get_platdata(mdev->dev);
struct mbox_chan *chan;
unsigned int instance;
for (instance = 0; instance < pdata->num_inst; instance++) {
keep_looking:
chan = sti_mbox_irq_to_channel(mdev, instance);
if (!chan)
continue;
mbox_chan_received_data(chan, NULL);
sti_mbox_clear_irq(chan);
sti_mbox_enable_channel(chan);
goto keep_looking;
}
return IRQ_HANDLED;
}
static irqreturn_t sti_mbox_irq_handler(int irq, void *data)
{
struct sti_mbox_device *mdev = data;
struct sti_mbox_pdata *pdata = dev_get_platdata(mdev->dev);
struct sti_channel *chan_info;
struct mbox_chan *chan;
unsigned int instance;
int ret = IRQ_NONE;
for (instance = 0; instance < pdata->num_inst; instance++) {
chan = sti_mbox_irq_to_channel(mdev, instance);
if (!chan)
continue;
chan_info = chan->con_priv;
if (!sti_mbox_channel_is_enabled(chan)) {
dev_warn(mdev->dev,
"Unexpected IRQ: %s\n"
" instance: %d: channel: %d [enabled: %x]\n",
mdev->name, chan_info->instance,
chan_info->channel, mdev->enabled[instance]);
/* Only handle IRQ if no other valid IRQs were found */
if (ret == IRQ_NONE)
ret = IRQ_HANDLED;
continue;
}
sti_mbox_disable_channel(chan);
ret = IRQ_WAKE_THREAD;
}
if (ret == IRQ_NONE)
dev_err(mdev->dev, "Spurious IRQ - was a channel requested?\n");
return ret;
}
static bool sti_mbox_tx_is_ready(struct mbox_chan *chan)
{
struct sti_channel *chan_info = chan->con_priv;
struct sti_mbox_device *mdev = chan_info->mdev;
unsigned int instance = chan_info->instance;
unsigned int channel = chan_info->channel;
void __iomem *base = MBOX_BASE(mdev, instance);
if (!(readl_relaxed(base + STI_ENA_VAL_OFFSET) & BIT(channel))) {
dev_dbg(mdev->dev, "Mbox: %s: inst: %d, chan: %d disabled\n",
mdev->name, instance, channel);
return false;
}
if (readl_relaxed(base + STI_IRQ_VAL_OFFSET) & BIT(channel)) {
dev_dbg(mdev->dev, "Mbox: %s: inst: %d, chan: %d not ready\n",
mdev->name, instance, channel);
return false;
}
return true;
}
static int sti_mbox_send_data(struct mbox_chan *chan, void *data)
{
struct sti_channel *chan_info = chan->con_priv;
struct sti_mbox_device *mdev = chan_info->mdev;
unsigned int instance = chan_info->instance;
unsigned int channel = chan_info->channel;
void __iomem *base = MBOX_BASE(mdev, instance);
/* Send event to co-processor */
writel_relaxed(BIT(channel), base + STI_IRQ_SET_OFFSET);
dev_dbg(mdev->dev,
"Sent via Mailbox %s: instance: %d channel: %d\n",
mdev->name, instance, channel);
return 0;
}
static int sti_mbox_startup_chan(struct mbox_chan *chan)
{
sti_mbox_clear_irq(chan);
sti_mbox_enable_channel(chan);
return 0;
}
static void sti_mbox_shutdown_chan(struct mbox_chan *chan)
{
struct sti_channel *chan_info = chan->con_priv;
struct mbox_controller *mbox = chan_info->mdev->mbox;
int i;
for (i = 0; i < mbox->num_chans; i++)
if (chan == &mbox->chans[i])
break;
if (mbox->num_chans == i) {
dev_warn(mbox->dev, "Request to free non-existent channel\n");
return;
}
/* Reset channel */
sti_mbox_disable_channel(chan);
sti_mbox_clear_irq(chan);
chan->con_priv = NULL;
}
static struct mbox_chan *sti_mbox_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *spec)
{
struct sti_mbox_device *mdev = dev_get_drvdata(mbox->dev);
struct sti_mbox_pdata *pdata = dev_get_platdata(mdev->dev);
struct sti_channel *chan_info;
struct mbox_chan *chan = NULL;
unsigned int instance = spec->args[0];
unsigned int channel = spec->args[1];
int i;
/* Bounds checking */
if (instance >= pdata->num_inst || channel >= pdata->num_chan) {
dev_err(mbox->dev,
"Invalid channel requested instance: %d channel: %d\n",
instance, channel);
return ERR_PTR(-EINVAL);
}
for (i = 0; i < mbox->num_chans; i++) {
chan_info = mbox->chans[i].con_priv;
/* Is requested channel free? */
if (chan_info &&
mbox->dev == chan_info->mdev->dev &&
instance == chan_info->instance &&
channel == chan_info->channel) {
dev_err(mbox->dev, "Channel in use\n");
return ERR_PTR(-EBUSY);
}
/*
* Find the first free slot, then continue checking
* to see if requested channel is in use
*/
if (!chan && !chan_info)
chan = &mbox->chans[i];
}
if (!chan) {
dev_err(mbox->dev, "No free channels left\n");
return ERR_PTR(-EBUSY);
}
chan_info = devm_kzalloc(mbox->dev, sizeof(*chan_info), GFP_KERNEL);
if (!chan_info)
return ERR_PTR(-ENOMEM);
chan_info->mdev = mdev;
chan_info->instance = instance;
chan_info->channel = channel;
chan->con_priv = chan_info;
dev_info(mbox->dev,
"Mbox: %s: Created channel: instance: %d channel: %d\n",
mdev->name, instance, channel);
return chan;
}
static const struct mbox_chan_ops sti_mbox_ops = {
.startup = sti_mbox_startup_chan,
.shutdown = sti_mbox_shutdown_chan,
.send_data = sti_mbox_send_data,
.last_tx_done = sti_mbox_tx_is_ready,
};
static const struct sti_mbox_pdata mbox_stih407_pdata = {
.num_inst = 4,
.num_chan = 32,
};
static const struct of_device_id sti_mailbox_match[] = {
{
.compatible = "st,stih407-mailbox",
.data = (void *)&mbox_stih407_pdata
},
{ }
};
MODULE_DEVICE_TABLE(of, sti_mailbox_match);
static int sti_mbox_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct mbox_controller *mbox;
struct sti_mbox_device *mdev;
struct device_node *np = pdev->dev.of_node;
struct mbox_chan *chans;
int irq;
int ret;
match = of_match_device(sti_mailbox_match, &pdev->dev);
if (!match) {
dev_err(&pdev->dev, "No configuration found\n");
return -ENODEV;
}
pdev->dev.platform_data = (struct sti_mbox_pdata *) match->data;
mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return -ENOMEM;
platform_set_drvdata(pdev, mdev);
mdev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdev->base))
return PTR_ERR(mdev->base);
ret = of_property_read_string(np, "mbox-name", &mdev->name);
if (ret)
mdev->name = np->full_name;
mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
if (!mbox)
return -ENOMEM;
chans = devm_kcalloc(&pdev->dev,
STI_MBOX_CHAN_MAX, sizeof(*chans), GFP_KERNEL);
if (!chans)
return -ENOMEM;
mdev->dev = &pdev->dev;
mdev->mbox = mbox;
spin_lock_init(&mdev->lock);
/* STi Mailbox does not have a Tx-Done or Tx-Ready IRQ */
mbox->txdone_irq = false;
mbox->txdone_poll = true;
mbox->txpoll_period = 100;
mbox->ops = &sti_mbox_ops;
mbox->dev = mdev->dev;
mbox->of_xlate = sti_mbox_xlate;
mbox->chans = chans;
mbox->num_chans = STI_MBOX_CHAN_MAX;
ret = devm_mbox_controller_register(&pdev->dev, mbox);
if (ret)
return ret;
/* It's okay for Tx Mailboxes to not supply IRQs */
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_info(&pdev->dev,
"%s: Registered Tx only Mailbox\n", mdev->name);
return 0;
}
ret = devm_request_threaded_irq(&pdev->dev, irq,
sti_mbox_irq_handler,
sti_mbox_thread_handler,
IRQF_ONESHOT, mdev->name, mdev);
if (ret) {
dev_err(&pdev->dev, "Can't claim IRQ %d\n", irq);
return -EINVAL;
}
dev_info(&pdev->dev, "%s: Registered Tx/Rx Mailbox\n", mdev->name);
return 0;
}
static struct platform_driver sti_mbox_driver = {
.probe = sti_mbox_probe,
.driver = {
.name = "sti-mailbox",
.of_match_table = sti_mailbox_match,
},
};
module_platform_driver(sti_mbox_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("STMicroelectronics Mailbox Controller");
MODULE_AUTHOR("Lee Jones <[email protected]");
MODULE_ALIAS("platform:mailbox-sti");
|
linux-master
|
drivers/mailbox/mailbox-sti.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017, Linaro Ltd
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mailbox_controller.h>
#define QCOM_APCS_IPC_BITS 32
struct qcom_apcs_ipc {
struct mbox_controller mbox;
struct mbox_chan mbox_chans[QCOM_APCS_IPC_BITS];
struct regmap *regmap;
unsigned long offset;
struct platform_device *clk;
};
struct qcom_apcs_ipc_data {
int offset;
char *clk_name;
};
static const struct qcom_apcs_ipc_data ipq6018_apcs_data = {
.offset = 8, .clk_name = "qcom,apss-ipq6018-clk"
};
static const struct qcom_apcs_ipc_data msm8916_apcs_data = {
.offset = 8, .clk_name = "qcom-apcs-msm8916-clk"
};
static const struct qcom_apcs_ipc_data msm8994_apcs_data = {
.offset = 8, .clk_name = NULL
};
static const struct qcom_apcs_ipc_data msm8996_apcs_data = {
.offset = 16, .clk_name = "qcom-apcs-msm8996-clk"
};
static const struct qcom_apcs_ipc_data apps_shared_apcs_data = {
.offset = 12, .clk_name = NULL
};
static const struct qcom_apcs_ipc_data sdx55_apcs_data = {
.offset = 0x1008, .clk_name = "qcom-sdx55-acps-clk"
};
static const struct regmap_config apcs_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x1008,
.fast_io = true,
};
static int qcom_apcs_ipc_send_data(struct mbox_chan *chan, void *data)
{
struct qcom_apcs_ipc *apcs = container_of(chan->mbox,
struct qcom_apcs_ipc, mbox);
unsigned long idx = (unsigned long)chan->con_priv;
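/* Each channel owns one bit of the IPC register; setting it raises an interrupt on the remote processor. */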
return regmap_write(apcs->regmap, apcs->offset, BIT(idx));
}
static const struct mbox_chan_ops qcom_apcs_ipc_ops = {
.send_data = qcom_apcs_ipc_send_data,
};
static int qcom_apcs_ipc_probe(struct platform_device *pdev)
{
struct qcom_apcs_ipc *apcs;
const struct qcom_apcs_ipc_data *apcs_data;
struct regmap *regmap;
void __iomem *base;
unsigned long i;
int ret;
apcs = devm_kzalloc(&pdev->dev, sizeof(*apcs), GFP_KERNEL);
if (!apcs)
return -ENOMEM;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
regmap = devm_regmap_init_mmio(&pdev->dev, base, &apcs_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
apcs_data = of_device_get_match_data(&pdev->dev);
apcs->regmap = regmap;
apcs->offset = apcs_data->offset;
/* Initialize channel identifiers */
for (i = 0; i < ARRAY_SIZE(apcs->mbox_chans); i++)
apcs->mbox_chans[i].con_priv = (void *)i;
apcs->mbox.dev = &pdev->dev;
apcs->mbox.ops = &qcom_apcs_ipc_ops;
apcs->mbox.chans = apcs->mbox_chans;
apcs->mbox.num_chans = ARRAY_SIZE(apcs->mbox_chans);
ret = devm_mbox_controller_register(&pdev->dev, &apcs->mbox);
if (ret) {
dev_err(&pdev->dev, "failed to register APCS IPC controller\n");
return ret;
}
if (apcs_data->clk_name) {
apcs->clk = platform_device_register_data(&pdev->dev,
apcs_data->clk_name,
PLATFORM_DEVID_AUTO,
NULL, 0);
if (IS_ERR(apcs->clk))
dev_err(&pdev->dev, "failed to register APCS clk\n");
}
platform_set_drvdata(pdev, apcs);
return 0;
}
static int qcom_apcs_ipc_remove(struct platform_device *pdev)
{
struct qcom_apcs_ipc *apcs = platform_get_drvdata(pdev);
struct platform_device *clk = apcs->clk;
platform_device_unregister(clk);
return 0;
}
/* .data is the offset of the ipc register within the global block */
static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,ipq6018-apcs-apps-global", .data = &ipq6018_apcs_data },
{ .compatible = "qcom,msm8916-apcs-kpss-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,msm8939-apcs-kpss-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,msm8953-apcs-kpss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,msm8976-apcs-kpss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,msm8994-apcs-kpss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,msm8996-apcs-hmss-global", .data = &msm8996_apcs_data },
{ .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,qcm2290-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,qcs404-apcs-apps-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,sdm660-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sm4250-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sm6125-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sm6115-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sdx55-apcs-gcc", .data = &sdx55_apcs_data },
/* Do not add any more entries using existing driver data */
{ .compatible = "qcom,ipq5332-apcs-apps-global", .data = &ipq6018_apcs_data },
{ .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq6018_apcs_data },
{ .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sc8180x-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sm8150-apss-shared", .data = &apps_shared_apcs_data },
{}
};
MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
static struct platform_driver qcom_apcs_ipc_driver = {
.probe = qcom_apcs_ipc_probe,
.remove = qcom_apcs_ipc_remove,
.driver = {
.name = "qcom_apcs_ipc",
.of_match_table = qcom_apcs_ipc_of_match,
},
};
static int __init qcom_apcs_ipc_init(void)
{
return platform_driver_register(&qcom_apcs_ipc_driver);
}
postcore_initcall(qcom_apcs_ipc_init);
static void __exit qcom_apcs_ipc_exit(void)
{
platform_driver_unregister(&qcom_apcs_ipc_driver);
}
module_exit(qcom_apcs_ipc_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm APCS IPC driver");
|
linux-master
|
drivers/mailbox/qcom-apcs-ipc-mailbox.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Hisilicon's Hi6220 mailbox driver
*
* Copyright (c) 2015 HiSilicon Limited.
* Copyright (c) 2015 Linaro Limited.
*
* Author: Leo Yan <[email protected]>
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kfifo.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define MBOX_CHAN_MAX 32
#define MBOX_TX 0x1
/* Mailbox message length: 8 words */
#define MBOX_MSG_LEN 8
/* Mailbox Registers */
#define MBOX_OFF(m) (0x40 * (m))
#define MBOX_MODE_REG(m) (MBOX_OFF(m) + 0x0)
#define MBOX_DATA_REG(m) (MBOX_OFF(m) + 0x4)
#define MBOX_STATE_MASK (0xF << 4)
#define MBOX_STATE_IDLE (0x1 << 4)
#define MBOX_STATE_TX (0x2 << 4)
#define MBOX_STATE_RX (0x4 << 4)
#define MBOX_STATE_ACK (0x8 << 4)
#define MBOX_ACK_CONFIG_MASK (0x1 << 0)
#define MBOX_ACK_AUTOMATIC (0x1 << 0)
#define MBOX_ACK_IRQ (0x0 << 0)
/* IPC registers */
#define ACK_INT_RAW_REG(i) ((i) + 0x400)
#define ACK_INT_MSK_REG(i) ((i) + 0x404)
#define ACK_INT_STAT_REG(i) ((i) + 0x408)
#define ACK_INT_CLR_REG(i) ((i) + 0x40c)
#define ACK_INT_ENA_REG(i) ((i) + 0x500)
#define ACK_INT_DIS_REG(i) ((i) + 0x504)
#define DST_INT_RAW_REG(i) ((i) + 0x420)
struct hi6220_mbox_chan {
/*
* Description for channel's hardware info:
* - direction: tx or rx
* - dst irq: peer core's irq number
* - ack irq: local irq number
* - slot number
*/
unsigned int dir, dst_irq, ack_irq;
unsigned int slot;
struct hi6220_mbox *parent;
};
struct hi6220_mbox {
struct device *dev;
int irq;
/* flag of enabling tx's irq mode */
bool tx_irq_mode;
/* region for ipc event */
void __iomem *ipc;
/* region for mailbox */
void __iomem *base;
unsigned int chan_num;
struct hi6220_mbox_chan *mchan;
void *irq_map_chan[MBOX_CHAN_MAX];
struct mbox_chan *chan;
struct mbox_controller controller;
};
static void mbox_set_state(struct hi6220_mbox *mbox,
unsigned int slot, u32 val)
{
u32 status;
status = readl(mbox->base + MBOX_MODE_REG(slot));
status = (status & ~MBOX_STATE_MASK) | val;
writel(status, mbox->base + MBOX_MODE_REG(slot));
}
static void mbox_set_mode(struct hi6220_mbox *mbox,
unsigned int slot, u32 val)
{
u32 mode;
mode = readl(mbox->base + MBOX_MODE_REG(slot));
mode = (mode & ~MBOX_ACK_CONFIG_MASK) | val;
writel(mode, mbox->base + MBOX_MODE_REG(slot));
}
static bool hi6220_mbox_last_tx_done(struct mbox_chan *chan)
{
struct hi6220_mbox_chan *mchan = chan->con_priv;
struct hi6220_mbox *mbox = mchan->parent;
u32 state;
/* Only set idle state for polling mode */
BUG_ON(mbox->tx_irq_mode);
state = readl(mbox->base + MBOX_MODE_REG(mchan->slot));
return ((state & MBOX_STATE_MASK) == MBOX_STATE_IDLE);
}
static int hi6220_mbox_send_data(struct mbox_chan *chan, void *msg)
{
struct hi6220_mbox_chan *mchan = chan->con_priv;
struct hi6220_mbox *mbox = mchan->parent;
unsigned int slot = mchan->slot;
u32 *buf = msg;
int i;
/* indicate as a TX channel */
mchan->dir = MBOX_TX;
mbox_set_state(mbox, slot, MBOX_STATE_TX);
if (mbox->tx_irq_mode)
mbox_set_mode(mbox, slot, MBOX_ACK_IRQ);
else
mbox_set_mode(mbox, slot, MBOX_ACK_AUTOMATIC);
for (i = 0; i < MBOX_MSG_LEN; i++)
writel(buf[i], mbox->base + MBOX_DATA_REG(slot) + i * 4);
/* trigger remote request */
writel(BIT(mchan->dst_irq), DST_INT_RAW_REG(mbox->ipc));
return 0;
}
static irqreturn_t hi6220_mbox_interrupt(int irq, void *p)
{
struct hi6220_mbox *mbox = p;
struct hi6220_mbox_chan *mchan;
struct mbox_chan *chan;
unsigned int state, intr_bit, i;
u32 msg[MBOX_MSG_LEN];
state = readl(ACK_INT_STAT_REG(mbox->ipc));
if (!state) {
dev_warn(mbox->dev, "%s: spurious interrupt\n",
__func__);
return IRQ_HANDLED;
}
while (state) {
intr_bit = __ffs(state);
state &= (state - 1);
chan = mbox->irq_map_chan[intr_bit];
if (!chan) {
dev_warn(mbox->dev, "%s: unexpected irq vector %d\n",
__func__, intr_bit);
continue;
}
mchan = chan->con_priv;
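/*
 * The same ack vector serves both directions: for a TX slot it reports
 * completion, for an RX slot it signals an incoming message that is
 * read out of the slot's data registers.
 */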
if (mchan->dir == MBOX_TX)
mbox_chan_txdone(chan, 0);
else {
for (i = 0; i < MBOX_MSG_LEN; i++)
msg[i] = readl(mbox->base +
MBOX_DATA_REG(mchan->slot) + i * 4);
mbox_chan_received_data(chan, (void *)msg);
}
/* clear IRQ source */
writel(BIT(mchan->ack_irq), ACK_INT_CLR_REG(mbox->ipc));
mbox_set_state(mbox, mchan->slot, MBOX_STATE_IDLE);
}
return IRQ_HANDLED;
}
static int hi6220_mbox_startup(struct mbox_chan *chan)
{
struct hi6220_mbox_chan *mchan = chan->con_priv;
struct hi6220_mbox *mbox = mchan->parent;
mchan->dir = 0;
/* enable interrupt */
writel(BIT(mchan->ack_irq), ACK_INT_ENA_REG(mbox->ipc));
return 0;
}
static void hi6220_mbox_shutdown(struct mbox_chan *chan)
{
struct hi6220_mbox_chan *mchan = chan->con_priv;
struct hi6220_mbox *mbox = mchan->parent;
/* disable interrupt */
writel(BIT(mchan->ack_irq), ACK_INT_DIS_REG(mbox->ipc));
mbox->irq_map_chan[mchan->ack_irq] = NULL;
}
static const struct mbox_chan_ops hi6220_mbox_ops = {
.send_data = hi6220_mbox_send_data,
.startup = hi6220_mbox_startup,
.shutdown = hi6220_mbox_shutdown,
.last_tx_done = hi6220_mbox_last_tx_done,
};
static struct mbox_chan *hi6220_mbox_xlate(struct mbox_controller *controller,
const struct of_phandle_args *spec)
{
struct hi6220_mbox *mbox = dev_get_drvdata(controller->dev);
struct hi6220_mbox_chan *mchan;
struct mbox_chan *chan;
unsigned int i = spec->args[0];
unsigned int dst_irq = spec->args[1];
unsigned int ack_irq = spec->args[2];
/* Bounds checking */
if (i >= mbox->chan_num || dst_irq >= mbox->chan_num ||
ack_irq >= mbox->chan_num) {
dev_err(mbox->dev,
"Invalid channel idx %d dst_irq %d ack_irq %d\n",
i, dst_irq, ack_irq);
return ERR_PTR(-EINVAL);
}
/* Is requested channel free? */
chan = &mbox->chan[i];
if (mbox->irq_map_chan[ack_irq] == (void *)chan) {
dev_err(mbox->dev, "Channel in use\n");
return ERR_PTR(-EBUSY);
}
mchan = chan->con_priv;
mchan->dst_irq = dst_irq;
mchan->ack_irq = ack_irq;
mbox->irq_map_chan[ack_irq] = (void *)chan;
return chan;
}
static const struct of_device_id hi6220_mbox_of_match[] = {
{ .compatible = "hisilicon,hi6220-mbox", },
{},
};
MODULE_DEVICE_TABLE(of, hi6220_mbox_of_match);
static int hi6220_mbox_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct hi6220_mbox *mbox;
int i, err;
mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
if (!mbox)
return -ENOMEM;
mbox->dev = dev;
mbox->chan_num = MBOX_CHAN_MAX;
mbox->mchan = devm_kcalloc(dev,
mbox->chan_num, sizeof(*mbox->mchan), GFP_KERNEL);
if (!mbox->mchan)
return -ENOMEM;
mbox->chan = devm_kcalloc(dev,
mbox->chan_num, sizeof(*mbox->chan), GFP_KERNEL);
if (!mbox->chan)
return -ENOMEM;
mbox->irq = platform_get_irq(pdev, 0);
if (mbox->irq < 0)
return mbox->irq;
mbox->ipc = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->ipc)) {
dev_err(dev, "ioremap ipc failed\n");
return PTR_ERR(mbox->ipc);
}
mbox->base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(mbox->base)) {
dev_err(dev, "ioremap buffer failed\n");
return PTR_ERR(mbox->base);
}
err = devm_request_irq(dev, mbox->irq, hi6220_mbox_interrupt, 0,
dev_name(dev), mbox);
if (err) {
dev_err(dev, "Failed to register a mailbox IRQ handler: %d\n",
err);
return -ENODEV;
}
mbox->controller.dev = dev;
mbox->controller.chans = &mbox->chan[0];
mbox->controller.num_chans = mbox->chan_num;
mbox->controller.ops = &hi6220_mbox_ops;
mbox->controller.of_xlate = hi6220_mbox_xlate;
for (i = 0; i < mbox->chan_num; i++) {
mbox->chan[i].con_priv = &mbox->mchan[i];
mbox->irq_map_chan[i] = NULL;
mbox->mchan[i].parent = mbox;
mbox->mchan[i].slot = i;
}
/* mask and clear all interrupt vectors */
writel(0x0, ACK_INT_MSK_REG(mbox->ipc));
writel(~0x0, ACK_INT_CLR_REG(mbox->ipc));
/* use interrupt for tx's ack */
mbox->tx_irq_mode = !of_property_read_bool(node, "hi6220,mbox-tx-noirq");
if (mbox->tx_irq_mode)
mbox->controller.txdone_irq = true;
else {
mbox->controller.txdone_poll = true;
mbox->controller.txpoll_period = 5;
}
err = devm_mbox_controller_register(dev, &mbox->controller);
if (err) {
dev_err(dev, "Failed to register mailbox %d\n", err);
return err;
}
platform_set_drvdata(pdev, mbox);
dev_info(dev, "Mailbox enabled\n");
return 0;
}
static struct platform_driver hi6220_mbox_driver = {
.driver = {
.name = "hi6220-mbox",
.of_match_table = hi6220_mbox_of_match,
},
.probe = hi6220_mbox_probe,
};
static int __init hi6220_mbox_init(void)
{
return platform_driver_register(&hi6220_mbox_driver);
}
core_initcall(hi6220_mbox_init);
static void __exit hi6220_mbox_exit(void)
{
platform_driver_unregister(&hi6220_mbox_driver);
}
module_exit(hi6220_mbox_exit);
MODULE_AUTHOR("Leo Yan <[email protected]>");
MODULE_DESCRIPTION("Hi6220 mailbox driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/mailbox/hi6220-mailbox.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* OMAP mailbox driver
*
* Copyright (C) 2006-2009 Nokia Corporation. All rights reserved.
* Copyright (C) 2013-2021 Texas Instruments Incorporated - https://www.ti.com
*
* Contact: Hiroshi DOYU <[email protected]>
* Suman Anna <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/omap-mailbox.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
#include "mailbox.h"
#define MAILBOX_REVISION 0x000
#define MAILBOX_MESSAGE(m) (0x040 + 4 * (m))
#define MAILBOX_FIFOSTATUS(m) (0x080 + 4 * (m))
#define MAILBOX_MSGSTATUS(m) (0x0c0 + 4 * (m))
#define OMAP2_MAILBOX_IRQSTATUS(u) (0x100 + 8 * (u))
#define OMAP2_MAILBOX_IRQENABLE(u) (0x104 + 8 * (u))
#define OMAP4_MAILBOX_IRQSTATUS(u) (0x104 + 0x10 * (u))
#define OMAP4_MAILBOX_IRQENABLE(u) (0x108 + 0x10 * (u))
#define OMAP4_MAILBOX_IRQENABLE_CLR(u) (0x10c + 0x10 * (u))
#define MAILBOX_IRQSTATUS(type, u) (type ? OMAP4_MAILBOX_IRQSTATUS(u) : \
OMAP2_MAILBOX_IRQSTATUS(u))
#define MAILBOX_IRQENABLE(type, u) (type ? OMAP4_MAILBOX_IRQENABLE(u) : \
OMAP2_MAILBOX_IRQENABLE(u))
#define MAILBOX_IRQDISABLE(type, u) (type ? OMAP4_MAILBOX_IRQENABLE_CLR(u) \
: OMAP2_MAILBOX_IRQENABLE(u))
#define MAILBOX_IRQ_NEWMSG(m) (1 << (2 * (m)))
#define MAILBOX_IRQ_NOTFULL(m) (1 << (2 * (m) + 1))
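/* Each mailbox m owns two adjacent status bits per user: bit 2m flags a new message, bit 2m+1 flags a not-full FIFO */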
/* Interrupt register configuration types */
#define MBOX_INTR_CFG_TYPE1 0
#define MBOX_INTR_CFG_TYPE2 1
struct omap_mbox_fifo {
unsigned long msg;
unsigned long fifo_stat;
unsigned long msg_stat;
unsigned long irqenable;
unsigned long irqstatus;
unsigned long irqdisable;
u32 intr_bit;
};
struct omap_mbox_queue {
spinlock_t lock;
struct kfifo fifo;
struct work_struct work;
struct omap_mbox *mbox;
bool full;
};
struct omap_mbox_match_data {
u32 intr_type;
};
struct omap_mbox_device {
struct device *dev;
struct mutex cfg_lock;
void __iomem *mbox_base;
u32 *irq_ctx;
u32 num_users;
u32 num_fifos;
u32 intr_type;
struct omap_mbox **mboxes;
struct mbox_controller controller;
struct list_head elem;
};
struct omap_mbox_fifo_info {
int tx_id;
int tx_usr;
int tx_irq;
int rx_id;
int rx_usr;
int rx_irq;
const char *name;
bool send_no_irq;
};
struct omap_mbox {
const char *name;
int irq;
struct omap_mbox_queue *rxq;
struct device *dev;
struct omap_mbox_device *parent;
struct omap_mbox_fifo tx_fifo;
struct omap_mbox_fifo rx_fifo;
u32 intr_type;
struct mbox_chan *chan;
bool send_no_irq;
};
/* global variables for the mailbox devices */
static DEFINE_MUTEX(omap_mbox_devices_lock);
static LIST_HEAD(omap_mbox_devices);
static unsigned int mbox_kfifo_size = CONFIG_OMAP_MBOX_KFIFO_SIZE;
module_param(mbox_kfifo_size, uint, S_IRUGO);
MODULE_PARM_DESC(mbox_kfifo_size, "Size of omap's mailbox kfifo (bytes)");
static struct omap_mbox *mbox_chan_to_omap_mbox(struct mbox_chan *chan)
{
if (!chan || !chan->con_priv)
return NULL;
return (struct omap_mbox *)chan->con_priv;
}
static inline
unsigned int mbox_read_reg(struct omap_mbox_device *mdev, size_t ofs)
{
return __raw_readl(mdev->mbox_base + ofs);
}
static inline
void mbox_write_reg(struct omap_mbox_device *mdev, u32 val, size_t ofs)
{
__raw_writel(val, mdev->mbox_base + ofs);
}
/* Mailbox FIFO handle functions */
static u32 mbox_fifo_read(struct omap_mbox *mbox)
{
struct omap_mbox_fifo *fifo = &mbox->rx_fifo;
return mbox_read_reg(mbox->parent, fifo->msg);
}
static void mbox_fifo_write(struct omap_mbox *mbox, u32 msg)
{
struct omap_mbox_fifo *fifo = &mbox->tx_fifo;
mbox_write_reg(mbox->parent, msg, fifo->msg);
}
static int mbox_fifo_empty(struct omap_mbox *mbox)
{
struct omap_mbox_fifo *fifo = &mbox->rx_fifo;
return (mbox_read_reg(mbox->parent, fifo->msg_stat) == 0);
}
static int mbox_fifo_full(struct omap_mbox *mbox)
{
struct omap_mbox_fifo *fifo = &mbox->tx_fifo;
return mbox_read_reg(mbox->parent, fifo->fifo_stat);
}
/* Mailbox IRQ handle functions */
static void ack_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
&mbox->tx_fifo : &mbox->rx_fifo;
u32 bit = fifo->intr_bit;
u32 irqstatus = fifo->irqstatus;
mbox_write_reg(mbox->parent, bit, irqstatus);
/* Flush posted write for irq status to avoid spurious interrupts */
mbox_read_reg(mbox->parent, irqstatus);
}
static int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
&mbox->tx_fifo : &mbox->rx_fifo;
u32 bit = fifo->intr_bit;
u32 irqenable = fifo->irqenable;
u32 irqstatus = fifo->irqstatus;
u32 enable = mbox_read_reg(mbox->parent, irqenable);
u32 status = mbox_read_reg(mbox->parent, irqstatus);
return (int)(enable & status & bit);
}
static void _omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
u32 l;
struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
&mbox->tx_fifo : &mbox->rx_fifo;
u32 bit = fifo->intr_bit;
u32 irqenable = fifo->irqenable;
l = mbox_read_reg(mbox->parent, irqenable);
l |= bit;
mbox_write_reg(mbox->parent, l, irqenable);
}
static void _omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
&mbox->tx_fifo : &mbox->rx_fifo;
u32 bit = fifo->intr_bit;
u32 irqdisable = fifo->irqdisable;
/*
* Read and update the interrupt configuration register for pre-OMAP4.
* OMAP4 and later SoCs have a dedicated interrupt disabling register.
*/
if (!mbox->intr_type)
bit = mbox_read_reg(mbox->parent, irqdisable) & ~bit;
mbox_write_reg(mbox->parent, bit, irqdisable);
}
void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq)
{
struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
if (WARN_ON(!mbox))
return;
_omap_mbox_enable_irq(mbox, irq);
}
EXPORT_SYMBOL(omap_mbox_enable_irq);
void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq)
{
struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
if (WARN_ON(!mbox))
return;
_omap_mbox_disable_irq(mbox, irq);
}
EXPORT_SYMBOL(omap_mbox_disable_irq);
/*
* Message receiver(workqueue)
*/
static void mbox_rx_work(struct work_struct *work)
{
struct omap_mbox_queue *mq =
container_of(work, struct omap_mbox_queue, work);
mbox_msg_t data;
u32 msg;
int len;
while (kfifo_len(&mq->fifo) >= sizeof(msg)) {
len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
WARN_ON(len != sizeof(msg));
data = msg;
mbox_chan_received_data(mq->mbox->chan, (void *)data);
spin_lock_irq(&mq->lock);
if (mq->full) {
mq->full = false;
_omap_mbox_enable_irq(mq->mbox, IRQ_RX);
}
spin_unlock_irq(&mq->lock);
}
}
/*
* Mailbox interrupt handler
*/
static void __mbox_tx_interrupt(struct omap_mbox *mbox)
{
_omap_mbox_disable_irq(mbox, IRQ_TX);
ack_mbox_irq(mbox, IRQ_TX);
mbox_chan_txdone(mbox->chan, 0);
}
static void __mbox_rx_interrupt(struct omap_mbox *mbox)
{
struct omap_mbox_queue *mq = mbox->rxq;
u32 msg;
int len;
while (!mbox_fifo_empty(mbox)) {
if (unlikely(kfifo_avail(&mq->fifo) < sizeof(msg))) {
_omap_mbox_disable_irq(mbox, IRQ_RX);
mq->full = true;
goto nomem;
}
msg = mbox_fifo_read(mbox);
len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
WARN_ON(len != sizeof(msg));
}
/* no more messages in the fifo. clear IRQ source. */
ack_mbox_irq(mbox, IRQ_RX);
nomem:
schedule_work(&mbox->rxq->work);
}
static irqreturn_t mbox_interrupt(int irq, void *p)
{
struct omap_mbox *mbox = p;
if (is_mbox_irq(mbox, IRQ_TX))
__mbox_tx_interrupt(mbox);
if (is_mbox_irq(mbox, IRQ_RX))
__mbox_rx_interrupt(mbox);
return IRQ_HANDLED;
}
static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
void (*work)(struct work_struct *))
{
struct omap_mbox_queue *mq;
if (!work)
return NULL;
mq = kzalloc(sizeof(*mq), GFP_KERNEL);
if (!mq)
return NULL;
spin_lock_init(&mq->lock);
if (kfifo_alloc(&mq->fifo, mbox_kfifo_size, GFP_KERNEL))
goto error;
INIT_WORK(&mq->work, work);
return mq;
error:
kfree(mq);
return NULL;
}
static void mbox_queue_free(struct omap_mbox_queue *q)
{
kfifo_free(&q->fifo);
kfree(q);
}
static int omap_mbox_startup(struct omap_mbox *mbox)
{
int ret = 0;
struct omap_mbox_queue *mq;
mq = mbox_queue_alloc(mbox, mbox_rx_work);
if (!mq)
return -ENOMEM;
mbox->rxq = mq;
mq->mbox = mbox;
ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
mbox->name, mbox);
if (unlikely(ret)) {
pr_err("failed to register mailbox interrupt:%d\n", ret);
goto fail_request_irq;
}
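/*
 * Without a Tx-ready interrupt the framework cannot detect completion
 * on its own, so switch this channel to client-acknowledged
 * transmission (TXDONE_BY_ACK).
 */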
if (mbox->send_no_irq)
mbox->chan->txdone_method = TXDONE_BY_ACK;
_omap_mbox_enable_irq(mbox, IRQ_RX);
return 0;
fail_request_irq:
mbox_queue_free(mbox->rxq);
return ret;
}
static void omap_mbox_fini(struct omap_mbox *mbox)
{
_omap_mbox_disable_irq(mbox, IRQ_RX);
free_irq(mbox->irq, mbox);
flush_work(&mbox->rxq->work);
mbox_queue_free(mbox->rxq);
}
static struct omap_mbox *omap_mbox_device_find(struct omap_mbox_device *mdev,
const char *mbox_name)
{
struct omap_mbox *_mbox, *mbox = NULL;
struct omap_mbox **mboxes = mdev->mboxes;
int i;
if (!mboxes)
return NULL;
for (i = 0; (_mbox = mboxes[i]); i++) {
if (!strcmp(_mbox->name, mbox_name)) {
mbox = _mbox;
break;
}
}
return mbox;
}
struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
const char *chan_name)
{
struct device *dev = cl->dev;
struct omap_mbox *mbox = NULL;
struct omap_mbox_device *mdev;
int ret;
if (!dev)
return ERR_PTR(-ENODEV);
if (dev->of_node) {
pr_err("%s: please use mbox_request_channel(), this API is supported only for OMAP non-DT usage\n",
__func__);
return ERR_PTR(-ENODEV);
}
mutex_lock(&omap_mbox_devices_lock);
list_for_each_entry(mdev, &omap_mbox_devices, elem) {
mbox = omap_mbox_device_find(mdev, chan_name);
if (mbox)
break;
}
mutex_unlock(&omap_mbox_devices_lock);
if (!mbox || !mbox->chan)
return ERR_PTR(-ENOENT);
ret = mbox_bind_client(mbox->chan, cl);
if (ret)
return ERR_PTR(ret);
return mbox->chan;
}
EXPORT_SYMBOL(omap_mbox_request_channel);
static struct class omap_mbox_class = { .name = "mbox", };
static int omap_mbox_register(struct omap_mbox_device *mdev)
{
int ret;
int i;
struct omap_mbox **mboxes;
if (!mdev || !mdev->mboxes)
return -EINVAL;
mboxes = mdev->mboxes;
for (i = 0; mboxes[i]; i++) {
struct omap_mbox *mbox = mboxes[i];
mbox->dev = device_create(&omap_mbox_class, mdev->dev,
0, mbox, "%s", mbox->name);
if (IS_ERR(mbox->dev)) {
ret = PTR_ERR(mbox->dev);
goto err_out;
}
}
mutex_lock(&omap_mbox_devices_lock);
list_add(&mdev->elem, &omap_mbox_devices);
mutex_unlock(&omap_mbox_devices_lock);
ret = devm_mbox_controller_register(mdev->dev, &mdev->controller);
err_out:
if (ret) {
while (i--)
device_unregister(mboxes[i]->dev);
}
return ret;
}
static int omap_mbox_unregister(struct omap_mbox_device *mdev)
{
int i;
struct omap_mbox **mboxes;
if (!mdev || !mdev->mboxes)
return -EINVAL;
mutex_lock(&omap_mbox_devices_lock);
list_del(&mdev->elem);
mutex_unlock(&omap_mbox_devices_lock);
mboxes = mdev->mboxes;
for (i = 0; mboxes[i]; i++)
device_unregister(mboxes[i]->dev);
return 0;
}
static int omap_mbox_chan_startup(struct mbox_chan *chan)
{
struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
struct omap_mbox_device *mdev = mbox->parent;
int ret = 0;
mutex_lock(&mdev->cfg_lock);
pm_runtime_get_sync(mdev->dev);
ret = omap_mbox_startup(mbox);
if (ret)
pm_runtime_put_sync(mdev->dev);
mutex_unlock(&mdev->cfg_lock);
return ret;
}
static void omap_mbox_chan_shutdown(struct mbox_chan *chan)
{
struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
struct omap_mbox_device *mdev = mbox->parent;
mutex_lock(&mdev->cfg_lock);
omap_mbox_fini(mbox);
pm_runtime_put_sync(mdev->dev);
mutex_unlock(&mdev->cfg_lock);
}
static int omap_mbox_chan_send_noirq(struct omap_mbox *mbox, u32 msg)
{
int ret = -EBUSY;
if (!mbox_fifo_full(mbox)) {
_omap_mbox_enable_irq(mbox, IRQ_RX);
mbox_fifo_write(mbox, msg);
ret = 0;
_omap_mbox_disable_irq(mbox, IRQ_RX);
/* we must read and ack the interrupt directly from here */
mbox_fifo_read(mbox);
ack_mbox_irq(mbox, IRQ_RX);
}
return ret;
}
static int omap_mbox_chan_send(struct omap_mbox *mbox, u32 msg)
{
int ret = -EBUSY;
if (!mbox_fifo_full(mbox)) {
mbox_fifo_write(mbox, msg);
ret = 0;
}
/* always enable the interrupt */
_omap_mbox_enable_irq(mbox, IRQ_TX);
return ret;
}
static int omap_mbox_chan_send_data(struct mbox_chan *chan, void *data)
{
struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
int ret;
u32 msg = omap_mbox_message(data);
if (!mbox)
return -EINVAL;
if (mbox->send_no_irq)
ret = omap_mbox_chan_send_noirq(mbox, msg);
else
ret = omap_mbox_chan_send(mbox, msg);
return ret;
}
static const struct mbox_chan_ops omap_mbox_chan_ops = {
.startup = omap_mbox_chan_startup,
.send_data = omap_mbox_chan_send_data,
.shutdown = omap_mbox_chan_shutdown,
};
#ifdef CONFIG_PM_SLEEP
static int omap_mbox_suspend(struct device *dev)
{
struct omap_mbox_device *mdev = dev_get_drvdata(dev);
u32 usr, fifo, reg;
if (pm_runtime_status_suspended(dev))
return 0;
for (fifo = 0; fifo < mdev->num_fifos; fifo++) {
if (mbox_read_reg(mdev, MAILBOX_MSGSTATUS(fifo))) {
dev_err(mdev->dev, "fifo %d has unexpected unread messages\n",
fifo);
return -EBUSY;
}
}
for (usr = 0; usr < mdev->num_users; usr++) {
reg = MAILBOX_IRQENABLE(mdev->intr_type, usr);
mdev->irq_ctx[usr] = mbox_read_reg(mdev, reg);
}
return 0;
}
static int omap_mbox_resume(struct device *dev)
{
struct omap_mbox_device *mdev = dev_get_drvdata(dev);
u32 usr, reg;
if (pm_runtime_status_suspended(dev))
return 0;
for (usr = 0; usr < mdev->num_users; usr++) {
reg = MAILBOX_IRQENABLE(mdev->intr_type, usr);
mbox_write_reg(mdev, mdev->irq_ctx[usr], reg);
}
return 0;
}
#endif
static const struct dev_pm_ops omap_mbox_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(omap_mbox_suspend, omap_mbox_resume)
};
static const struct omap_mbox_match_data omap2_data = { MBOX_INTR_CFG_TYPE1 };
static const struct omap_mbox_match_data omap4_data = { MBOX_INTR_CFG_TYPE2 };
static const struct of_device_id omap_mailbox_of_match[] = {
{
.compatible = "ti,omap2-mailbox",
.data = &omap2_data,
},
{
.compatible = "ti,omap3-mailbox",
.data = &omap2_data,
},
{
.compatible = "ti,omap4-mailbox",
.data = &omap4_data,
},
{
.compatible = "ti,am654-mailbox",
.data = &omap4_data,
},
{
.compatible = "ti,am64-mailbox",
.data = &omap4_data,
},
{
/* end */
},
};
MODULE_DEVICE_TABLE(of, omap_mailbox_of_match);
static struct mbox_chan *omap_mbox_of_xlate(struct mbox_controller *controller,
const struct of_phandle_args *sp)
{
phandle phandle = sp->args[0];
struct device_node *node;
struct omap_mbox_device *mdev;
struct omap_mbox *mbox;
mdev = container_of(controller, struct omap_mbox_device, controller);
if (WARN_ON(!mdev))
return ERR_PTR(-EINVAL);
node = of_find_node_by_phandle(phandle);
if (!node) {
pr_err("%s: could not find node phandle 0x%x\n",
__func__, phandle);
return ERR_PTR(-ENODEV);
}
mbox = omap_mbox_device_find(mdev, node->name);
of_node_put(node);
return mbox ? mbox->chan : ERR_PTR(-ENOENT);
}
static int omap_mbox_probe(struct platform_device *pdev)
{
int ret;
struct mbox_chan *chnls;
struct omap_mbox **list, *mbox, *mboxblk;
struct omap_mbox_fifo_info *finfo, *finfoblk;
struct omap_mbox_device *mdev;
struct omap_mbox_fifo *fifo;
struct device_node *node = pdev->dev.of_node;
struct device_node *child;
const struct omap_mbox_match_data *match_data;
u32 intr_type, info_count;
u32 num_users, num_fifos;
u32 tmp[3];
u32 l;
int i;
if (!node) {
pr_err("%s: only DT-based devices are supported\n", __func__);
return -ENODEV;
}
match_data = of_device_get_match_data(&pdev->dev);
if (!match_data)
return -ENODEV;
intr_type = match_data->intr_type;
if (of_property_read_u32(node, "ti,mbox-num-users", &num_users))
return -ENODEV;
if (of_property_read_u32(node, "ti,mbox-num-fifos", &num_fifos))
return -ENODEV;
info_count = of_get_available_child_count(node);
if (!info_count) {
dev_err(&pdev->dev, "no available mbox devices found\n");
return -ENODEV;
}
finfoblk = devm_kcalloc(&pdev->dev, info_count, sizeof(*finfoblk),
GFP_KERNEL);
if (!finfoblk)
return -ENOMEM;
finfo = finfoblk;
child = NULL;
for (i = 0; i < info_count; i++, finfo++) {
child = of_get_next_available_child(node, child);
ret = of_property_read_u32_array(child, "ti,mbox-tx", tmp,
ARRAY_SIZE(tmp));
if (ret)
return ret;
finfo->tx_id = tmp[0];
finfo->tx_irq = tmp[1];
finfo->tx_usr = tmp[2];
ret = of_property_read_u32_array(child, "ti,mbox-rx", tmp,
ARRAY_SIZE(tmp));
if (ret)
return ret;
finfo->rx_id = tmp[0];
finfo->rx_irq = tmp[1];
finfo->rx_usr = tmp[2];
finfo->name = child->name;
finfo->send_no_irq = of_property_read_bool(child, "ti,mbox-send-noirq");
if (finfo->tx_id >= num_fifos || finfo->rx_id >= num_fifos ||
finfo->tx_usr >= num_users || finfo->rx_usr >= num_users)
return -EINVAL;
}
mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return -ENOMEM;
mdev->mbox_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdev->mbox_base))
return PTR_ERR(mdev->mbox_base);
mdev->irq_ctx = devm_kcalloc(&pdev->dev, num_users, sizeof(u32),
GFP_KERNEL);
if (!mdev->irq_ctx)
return -ENOMEM;
/* allocate one extra for marking end of list */
list = devm_kcalloc(&pdev->dev, info_count + 1, sizeof(*list),
GFP_KERNEL);
if (!list)
return -ENOMEM;
chnls = devm_kcalloc(&pdev->dev, info_count + 1, sizeof(*chnls),
GFP_KERNEL);
if (!chnls)
return -ENOMEM;
mboxblk = devm_kcalloc(&pdev->dev, info_count, sizeof(*mbox),
GFP_KERNEL);
if (!mboxblk)
return -ENOMEM;
mbox = mboxblk;
finfo = finfoblk;
for (i = 0; i < info_count; i++, finfo++) {
fifo = &mbox->tx_fifo;
fifo->msg = MAILBOX_MESSAGE(finfo->tx_id);
fifo->fifo_stat = MAILBOX_FIFOSTATUS(finfo->tx_id);
fifo->intr_bit = MAILBOX_IRQ_NOTFULL(finfo->tx_id);
fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->tx_usr);
fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->tx_usr);
fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->tx_usr);
fifo = &mbox->rx_fifo;
fifo->msg = MAILBOX_MESSAGE(finfo->rx_id);
fifo->msg_stat = MAILBOX_MSGSTATUS(finfo->rx_id);
fifo->intr_bit = MAILBOX_IRQ_NEWMSG(finfo->rx_id);
fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->rx_usr);
fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->rx_usr);
fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->rx_usr);
mbox->send_no_irq = finfo->send_no_irq;
mbox->intr_type = intr_type;
mbox->parent = mdev;
mbox->name = finfo->name;
mbox->irq = platform_get_irq(pdev, finfo->tx_irq);
if (mbox->irq < 0)
return mbox->irq;
mbox->chan = &chnls[i];
chnls[i].con_priv = mbox;
list[i] = mbox++;
}
mutex_init(&mdev->cfg_lock);
mdev->dev = &pdev->dev;
mdev->num_users = num_users;
mdev->num_fifos = num_fifos;
mdev->intr_type = intr_type;
mdev->mboxes = list;
/*
* OMAP/K3 Mailbox IP does not have a Tx-Done IRQ, but rather a Tx-Ready
* IRQ and is needed to run the Tx state machine
*/
mdev->controller.txdone_irq = true;
mdev->controller.dev = mdev->dev;
mdev->controller.ops = &omap_mbox_chan_ops;
mdev->controller.chans = chnls;
mdev->controller.num_chans = info_count;
mdev->controller.of_xlate = omap_mbox_of_xlate;
ret = omap_mbox_register(mdev);
if (ret)
return ret;
platform_set_drvdata(pdev, mdev);
pm_runtime_enable(mdev->dev);
ret = pm_runtime_resume_and_get(mdev->dev);
if (ret < 0)
goto unregister;
/*
* just print the raw revision register, the format is not
* uniform across all SoCs
*/
l = mbox_read_reg(mdev, MAILBOX_REVISION);
dev_info(mdev->dev, "omap mailbox rev 0x%x\n", l);
ret = pm_runtime_put_sync(mdev->dev);
if (ret < 0 && ret != -ENOSYS)
goto unregister;
devm_kfree(&pdev->dev, finfoblk);
return 0;
unregister:
pm_runtime_disable(mdev->dev);
omap_mbox_unregister(mdev);
return ret;
}
static int omap_mbox_remove(struct platform_device *pdev)
{
struct omap_mbox_device *mdev = platform_get_drvdata(pdev);
pm_runtime_disable(mdev->dev);
omap_mbox_unregister(mdev);
return 0;
}
static struct platform_driver omap_mbox_driver = {
.probe = omap_mbox_probe,
.remove = omap_mbox_remove,
.driver = {
.name = "omap-mailbox",
.pm = &omap_mbox_pm_ops,
.of_match_table = of_match_ptr(omap_mailbox_of_match),
},
};
static int __init omap_mbox_init(void)
{
int err;
err = class_register(&omap_mbox_class);
if (err)
return err;
/* kfifo size sanity check: alignment and minimal size */
mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(u32));
mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size, sizeof(u32));
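/*
 * Illustrative example (input value assumed): a module parameter of
 * mbox_kfifo_size = 5 is rounded up to 8 by ALIGN(5, sizeof(u32)) above,
 * while a value of 0 stays 0 after ALIGN() and is then raised to
 * sizeof(u32) by the max_t() above.
 */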
err = platform_driver_register(&omap_mbox_driver);
if (err)
class_unregister(&omap_mbox_class);
return err;
}
subsys_initcall(omap_mbox_init);
static void __exit omap_mbox_exit(void)
{
platform_driver_unregister(&omap_mbox_driver);
class_unregister(&omap_mbox_class);
}
module_exit(omap_mbox_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("omap mailbox: interrupt driven messaging");
MODULE_AUTHOR("Toshihiro Kobayashi");
MODULE_AUTHOR("Hiroshi DOYU");
|
linux-master
|
drivers/mailbox/omap-mailbox.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010,2015 Broadcom
* Copyright (C) 2013-2014 Lubomir Rintel
* Copyright (C) 2013 Craig McGeachie
*
* Parts of the driver are based on:
* - arch/arm/mach-bcm2708/vcio.c file written by Gray Girling that was
* obtained from branch "rpi-3.6.y" of git://github.com/raspberrypi/
* linux.git
* - drivers/mailbox/bcm2835-ipc.c by Lubomir Rintel at
* https://github.com/hackerspace/rpi-linux/blob/lr-raspberry-pi/drivers/
* mailbox/bcm2835-ipc.c
* - documentation available on the following web site:
* https://github.com/raspberrypi/firmware/wiki/Mailbox-property-interface
*/
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
/* Mailboxes */
#define ARM_0_MAIL0 0x00
#define ARM_0_MAIL1 0x20
/*
 * Mailbox registers. We basically only support mailboxes 0 & 1. We
 * deliver to the VC in mailbox 1; it delivers to us in mailbox 0. See
* BCM2835-ARM-Peripherals.pdf section 1.3 for an explanation about
* the placement of memory barriers.
*/
#define MAIL0_RD (ARM_0_MAIL0 + 0x00)
#define MAIL0_POL (ARM_0_MAIL0 + 0x10)
#define MAIL0_STA (ARM_0_MAIL0 + 0x18)
#define MAIL0_CNF (ARM_0_MAIL0 + 0x1C)
#define MAIL1_WRT (ARM_0_MAIL1 + 0x00)
#define MAIL1_STA (ARM_0_MAIL1 + 0x18)
/* Status register: FIFO state. */
#define ARM_MS_FULL BIT(31)
#define ARM_MS_EMPTY BIT(30)
/* Configuration register: Enable interrupts. */
#define ARM_MC_IHAVEDATAIRQEN BIT(0)
struct bcm2835_mbox {
void __iomem *regs;
spinlock_t lock;
struct mbox_controller controller;
};
static struct bcm2835_mbox *bcm2835_link_mbox(struct mbox_chan *link)
{
return container_of(link->mbox, struct bcm2835_mbox, controller);
}
static irqreturn_t bcm2835_mbox_irq(int irq, void *dev_id)
{
struct bcm2835_mbox *mbox = dev_id;
struct device *dev = mbox->controller.dev;
struct mbox_chan *link = &mbox->controller.chans[0];
while (!(readl(mbox->regs + MAIL0_STA) & ARM_MS_EMPTY)) {
u32 msg = readl(mbox->regs + MAIL0_RD);
dev_dbg(dev, "Reply 0x%08X\n", msg);
mbox_chan_received_data(link, &msg);
}
return IRQ_HANDLED;
}
static int bcm2835_send_data(struct mbox_chan *link, void *data)
{
struct bcm2835_mbox *mbox = bcm2835_link_mbox(link);
u32 msg = *(u32 *)data;
spin_lock(&mbox->lock);
writel(msg, mbox->regs + MAIL1_WRT);
dev_dbg(mbox->controller.dev, "Request 0x%08X\n", msg);
spin_unlock(&mbox->lock);
return 0;
}
static int bcm2835_startup(struct mbox_chan *link)
{
struct bcm2835_mbox *mbox = bcm2835_link_mbox(link);
/* Enable the interrupt on data reception */
writel(ARM_MC_IHAVEDATAIRQEN, mbox->regs + MAIL0_CNF);
return 0;
}
static void bcm2835_shutdown(struct mbox_chan *link)
{
struct bcm2835_mbox *mbox = bcm2835_link_mbox(link);
writel(0, mbox->regs + MAIL0_CNF);
}
static bool bcm2835_last_tx_done(struct mbox_chan *link)
{
struct bcm2835_mbox *mbox = bcm2835_link_mbox(link);
bool ret;
spin_lock(&mbox->lock);
ret = !(readl(mbox->regs + MAIL1_STA) & ARM_MS_FULL);
spin_unlock(&mbox->lock);
return ret;
}
static const struct mbox_chan_ops bcm2835_mbox_chan_ops = {
.send_data = bcm2835_send_data,
.startup = bcm2835_startup,
.shutdown = bcm2835_shutdown,
.last_tx_done = bcm2835_last_tx_done
};
static struct mbox_chan *bcm2835_mbox_index_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *sp)
{
if (sp->args_count != 0)
return ERR_PTR(-EINVAL);
return &mbox->chans[0];
}
static int bcm2835_mbox_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int ret = 0;
struct bcm2835_mbox *mbox;
mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
if (mbox == NULL)
return -ENOMEM;
spin_lock_init(&mbox->lock);
ret = devm_request_irq(dev, irq_of_parse_and_map(dev->of_node, 0),
bcm2835_mbox_irq, 0, dev_name(dev), mbox);
if (ret) {
dev_err(dev, "Failed to register a mailbox IRQ handler: %d\n",
ret);
return -ENODEV;
}
mbox->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->regs)) {
ret = PTR_ERR(mbox->regs);
return ret;
}
mbox->controller.txdone_poll = true;
mbox->controller.txpoll_period = 5;
mbox->controller.ops = &bcm2835_mbox_chan_ops;
mbox->controller.of_xlate = &bcm2835_mbox_index_xlate;
mbox->controller.dev = dev;
mbox->controller.num_chans = 1;
mbox->controller.chans = devm_kzalloc(dev,
sizeof(*mbox->controller.chans), GFP_KERNEL);
if (!mbox->controller.chans)
return -ENOMEM;
ret = devm_mbox_controller_register(dev, &mbox->controller);
if (ret)
return ret;
platform_set_drvdata(pdev, mbox);
dev_info(dev, "mailbox enabled\n");
return ret;
}
static const struct of_device_id bcm2835_mbox_of_match[] = {
{ .compatible = "brcm,bcm2835-mbox", },
{},
};
MODULE_DEVICE_TABLE(of, bcm2835_mbox_of_match);
static struct platform_driver bcm2835_mbox_driver = {
.driver = {
.name = "bcm2835-mbox",
.of_match_table = bcm2835_mbox_of_match,
},
.probe = bcm2835_mbox_probe,
};
module_platform_driver(bcm2835_mbox_driver);
MODULE_AUTHOR("Lubomir Rintel <[email protected]>");
MODULE_DESCRIPTION("BCM2835 mailbox IPC driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/mailbox/bcm2835-mailbox.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright Altera Corporation (C) 2013-2014. All rights reserved
*/
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#define DRIVER_NAME "altera-mailbox"
#define MAILBOX_CMD_REG 0x00
#define MAILBOX_PTR_REG 0x04
#define MAILBOX_STS_REG 0x08
#define MAILBOX_INTMASK_REG 0x0C
#define INT_PENDING_MSK 0x1
#define INT_SPACE_MSK 0x2
#define STS_PENDING_MSK 0x1
#define STS_FULL_MSK 0x2
#define STS_FULL_OFT 0x1
#define MBOX_PENDING(status) (((status) & STS_PENDING_MSK))
#define MBOX_FULL(status) (((status) & STS_FULL_MSK) >> STS_FULL_OFT)
enum altera_mbox_msg {
MBOX_CMD = 0,
MBOX_PTR,
};
#define MBOX_POLLING_MS 5 /* polling interval 5ms */
struct altera_mbox {
bool is_sender; /* 1-sender, 0-receiver */
bool intr_mode;
int irq;
void __iomem *mbox_base;
struct device *dev;
struct mbox_controller controller;
/* If the controller supports only RX polling mode */
struct timer_list rxpoll_timer;
struct mbox_chan *chan;
};
static struct altera_mbox *mbox_chan_to_altera_mbox(struct mbox_chan *chan)
{
if (!chan || !chan->con_priv)
return NULL;
return (struct altera_mbox *)chan->con_priv;
}
static inline int altera_mbox_full(struct altera_mbox *mbox)
{
u32 status;
status = readl_relaxed(mbox->mbox_base + MAILBOX_STS_REG);
return MBOX_FULL(status);
}
static inline int altera_mbox_pending(struct altera_mbox *mbox)
{
u32 status;
status = readl_relaxed(mbox->mbox_base + MAILBOX_STS_REG);
return MBOX_PENDING(status);
}
static void altera_mbox_rx_intmask(struct altera_mbox *mbox, bool enable)
{
u32 mask;
mask = readl_relaxed(mbox->mbox_base + MAILBOX_INTMASK_REG);
if (enable)
mask |= INT_PENDING_MSK;
else
mask &= ~INT_PENDING_MSK;
writel_relaxed(mask, mbox->mbox_base + MAILBOX_INTMASK_REG);
}
static void altera_mbox_tx_intmask(struct altera_mbox *mbox, bool enable)
{
u32 mask;
mask = readl_relaxed(mbox->mbox_base + MAILBOX_INTMASK_REG);
if (enable)
mask |= INT_SPACE_MSK;
else
mask &= ~INT_SPACE_MSK;
writel_relaxed(mask, mbox->mbox_base + MAILBOX_INTMASK_REG);
}
static bool altera_mbox_is_sender(struct altera_mbox *mbox)
{
u32 reg;
/* Write a magic number to the PTR register and read it back.
 * The register is read-write only on the sender side.
*/
#define MBOX_MAGIC 0xA5A5AA55
writel_relaxed(MBOX_MAGIC, mbox->mbox_base + MAILBOX_PTR_REG);
reg = readl_relaxed(mbox->mbox_base + MAILBOX_PTR_REG);
if (reg == MBOX_MAGIC) {
/* Clear to 0 */
writel_relaxed(0, mbox->mbox_base + MAILBOX_PTR_REG);
return true;
}
return false;
}
static void altera_mbox_rx_data(struct mbox_chan *chan)
{
struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
u32 data[2];
if (altera_mbox_pending(mbox)) {
data[MBOX_PTR] =
readl_relaxed(mbox->mbox_base + MAILBOX_PTR_REG);
data[MBOX_CMD] =
readl_relaxed(mbox->mbox_base + MAILBOX_CMD_REG);
mbox_chan_received_data(chan, (void *)data);
}
}
static void altera_mbox_poll_rx(struct timer_list *t)
{
struct altera_mbox *mbox = from_timer(mbox, t, rxpoll_timer);
altera_mbox_rx_data(mbox->chan);
mod_timer(&mbox->rxpoll_timer,
jiffies + msecs_to_jiffies(MBOX_POLLING_MS));
}
static irqreturn_t altera_mbox_tx_interrupt(int irq, void *p)
{
struct mbox_chan *chan = (struct mbox_chan *)p;
struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
altera_mbox_tx_intmask(mbox, false);
mbox_chan_txdone(chan, 0);
return IRQ_HANDLED;
}
static irqreturn_t altera_mbox_rx_interrupt(int irq, void *p)
{
struct mbox_chan *chan = (struct mbox_chan *)p;
altera_mbox_rx_data(chan);
return IRQ_HANDLED;
}
static int altera_mbox_startup_sender(struct mbox_chan *chan)
{
int ret;
struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
if (mbox->intr_mode) {
ret = request_irq(mbox->irq, altera_mbox_tx_interrupt, 0,
DRIVER_NAME, chan);
if (unlikely(ret)) {
dev_err(mbox->dev,
"failed to register mailbox interrupt:%d\n",
ret);
return ret;
}
}
return 0;
}
static int altera_mbox_startup_receiver(struct mbox_chan *chan)
{
int ret;
struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
if (mbox->intr_mode) {
ret = request_irq(mbox->irq, altera_mbox_rx_interrupt, 0,
DRIVER_NAME, chan);
if (unlikely(ret)) {
mbox->intr_mode = false;
goto polling; /* use polling if failed */
}
altera_mbox_rx_intmask(mbox, true);
return 0;
}
polling:
/* Setup polling timer */
mbox->chan = chan;
timer_setup(&mbox->rxpoll_timer, altera_mbox_poll_rx, 0);
mod_timer(&mbox->rxpoll_timer,
jiffies + msecs_to_jiffies(MBOX_POLLING_MS));
return 0;
}
static int altera_mbox_send_data(struct mbox_chan *chan, void *data)
{
struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
u32 *udata = (u32 *)data;
if (!mbox || !data)
return -EINVAL;
if (!mbox->is_sender) {
dev_warn(mbox->dev,
"failed to send. This is receiver mailbox.\n");
return -EINVAL;
}
if (altera_mbox_full(mbox))
return -EBUSY;
/* Enable interrupt before send */
if (mbox->intr_mode)
altera_mbox_tx_intmask(mbox, true);
/* The pointer register must be written before the command register */
writel_relaxed(udata[MBOX_PTR], mbox->mbox_base + MAILBOX_PTR_REG);
writel_relaxed(udata[MBOX_CMD], mbox->mbox_base + MAILBOX_CMD_REG);
return 0;
}
static bool altera_mbox_last_tx_done(struct mbox_chan *chan)
{
struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
/* Return false if mailbox is full */
return !altera_mbox_full(mbox);
}
static bool altera_mbox_peek_data(struct mbox_chan *chan)
{
struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
return altera_mbox_pending(mbox);
}
static int altera_mbox_startup(struct mbox_chan *chan)
{
struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
int ret = 0;
if (!mbox)
return -EINVAL;
if (mbox->is_sender)
ret = altera_mbox_startup_sender(chan);
else
ret = altera_mbox_startup_receiver(chan);
return ret;
}
static void altera_mbox_shutdown(struct mbox_chan *chan)
{
struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
if (mbox->intr_mode) {
/* Unmask all interrupt masks */
writel_relaxed(~0, mbox->mbox_base + MAILBOX_INTMASK_REG);
free_irq(mbox->irq, chan);
} else if (!mbox->is_sender) {
del_timer_sync(&mbox->rxpoll_timer);
}
}
static const struct mbox_chan_ops altera_mbox_ops = {
.send_data = altera_mbox_send_data,
.startup = altera_mbox_startup,
.shutdown = altera_mbox_shutdown,
.last_tx_done = altera_mbox_last_tx_done,
.peek_data = altera_mbox_peek_data,
};
static int altera_mbox_probe(struct platform_device *pdev)
{
struct altera_mbox *mbox;
struct mbox_chan *chans;
int ret;
mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox),
GFP_KERNEL);
if (!mbox)
return -ENOMEM;
/* Allocate one channel */
chans = devm_kzalloc(&pdev->dev, sizeof(*chans), GFP_KERNEL);
if (!chans)
return -ENOMEM;
mbox->mbox_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->mbox_base))
return PTR_ERR(mbox->mbox_base);
/* Check whether this is a sender or a receiver */
mbox->is_sender = altera_mbox_is_sender(mbox);
mbox->irq = platform_get_irq(pdev, 0);
if (mbox->irq >= 0)
mbox->intr_mode = true;
mbox->dev = &pdev->dev;
/* Hardware supports only one channel. */
chans[0].con_priv = mbox;
mbox->controller.dev = mbox->dev;
mbox->controller.num_chans = 1;
mbox->controller.chans = chans;
mbox->controller.ops = &altera_mbox_ops;
if (mbox->is_sender) {
if (mbox->intr_mode) {
mbox->controller.txdone_irq = true;
} else {
mbox->controller.txdone_poll = true;
mbox->controller.txpoll_period = MBOX_POLLING_MS;
}
}
ret = devm_mbox_controller_register(&pdev->dev, &mbox->controller);
if (ret) {
dev_err(&pdev->dev, "Register mailbox failed\n");
goto err;
}
platform_set_drvdata(pdev, mbox);
err:
return ret;
}
static const struct of_device_id altera_mbox_match[] = {
{ .compatible = "altr,mailbox-1.0" },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, altera_mbox_match);
static struct platform_driver altera_mbox_driver = {
.probe = altera_mbox_probe,
.driver = {
.name = DRIVER_NAME,
.of_match_table = altera_mbox_match,
},
};
module_platform_driver(altera_mbox_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Altera mailbox specific functions");
MODULE_AUTHOR("Ley Foon Tan <[email protected]>");
MODULE_ALIAS("platform:altera-mailbox");
|
linux-master
|
drivers/mailbox/mailbox-altera.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2014 Linaro Ltd.
* Author: Ashwin Chaugule <[email protected]>
*
* PCC (Platform Communication Channel) is defined in the ACPI 5.0+
* specification. It is a mailbox like mechanism to allow clients
* such as CPPC (Collaborative Processor Performance Control), RAS
* (Reliability, Availability and Serviceability) and MPST (Memory
* Node Power State Table) to talk to the platform (e.g. BMC) through
* shared memory regions as defined in the PCC table entries. The PCC
* specification supports a Doorbell mechanism for the PCC clients
* to notify the platform about new data. This Doorbell information
* is also specified in each PCC table entry.
*
* Typical high level flow of operation is:
*
* PCC Reads:
* * Client tries to acquire a channel lock.
* * After it is acquired it writes READ cmd in communication region cmd
* address.
* * Client issues mbox_send_message() which rings the PCC doorbell
* for its PCC channel.
* * If command completes, then client has control over channel and
* it can proceed with its reads.
* * Client releases lock.
*
* PCC Writes:
* * Client tries to acquire channel lock.
* * Client writes to its communication region after it acquires a
* channel lock.
* * Client writes WRITE cmd in communication region cmd address.
* * Client issues mbox_send_message() which rings the PCC doorbell
* for its PCC channel.
* * If command completes, then writes have succeeded and it can release
* the channel lock.
*
* There is a Nominal latency defined for each channel which indicates
* how long to wait until a command completes. If command is not complete
* the client needs to retry or assume failure.
*
* For more details about PCC, please see the ACPI specification from
* http://www.uefi.org/ACPIv5.1 Section 14.
*
* This file implements PCC as a Mailbox controller and allows for PCC
* clients to be implemented as its Mailbox Client Channels.
*/
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <acpi/pcc.h>
#include "mailbox.h"
#define MBOX_IRQ_NAME "pcc-mbox"
/**
* struct pcc_chan_reg - PCC register bundle
*
* @vaddr: cached virtual address for this register
* @gas: pointer to the generic address structure for this register
* @preserve_mask: bitmask to preserve when writing to this register
* @set_mask: bitmask to set when writing to this register
* @status_mask: bitmask to determine and/or update the status for this register
*/
struct pcc_chan_reg {
void __iomem *vaddr;
struct acpi_generic_address *gas;
u64 preserve_mask;
u64 set_mask;
u64 status_mask;
};
/**
* struct pcc_chan_info - PCC channel specific information
*
* @chan: PCC channel information with Shared Memory Region info
* @db: PCC register bundle for the doorbell register
* @plat_irq_ack: PCC register bundle for the platform interrupt acknowledge
* register
* @cmd_complete: PCC register bundle for the command complete check register
* @cmd_update: PCC register bundle for the command complete update register
* @error: PCC register bundle for the error status register
* @plat_irq: platform interrupt
*/
struct pcc_chan_info {
struct pcc_mbox_chan chan;
struct pcc_chan_reg db;
struct pcc_chan_reg plat_irq_ack;
struct pcc_chan_reg cmd_complete;
struct pcc_chan_reg cmd_update;
struct pcc_chan_reg error;
int plat_irq;
};
#define to_pcc_chan_info(c) container_of(c, struct pcc_chan_info, chan)
static struct pcc_chan_info *chan_info;
static int pcc_chan_count;
/*
 * PCC can be used by performance-critical drivers such as CPPC,
 * so it makes sense to locally cache the virtual address and
 * use it to access PCC registers such as the doorbell register.
 *
 * The read_register() and write_register() helpers below are used to
 * access such performance-critical registers.
*/
static void read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width)
{
switch (bit_width) {
case 8:
*val = readb(vaddr);
break;
case 16:
*val = readw(vaddr);
break;
case 32:
*val = readl(vaddr);
break;
case 64:
*val = readq(vaddr);
break;
}
}
static void write_register(void __iomem *vaddr, u64 val, unsigned int bit_width)
{
switch (bit_width) {
case 8:
writeb(val, vaddr);
break;
case 16:
writew(val, vaddr);
break;
case 32:
writel(val, vaddr);
break;
case 64:
writeq(val, vaddr);
break;
}
}
static int pcc_chan_reg_read(struct pcc_chan_reg *reg, u64 *val)
{
int ret = 0;
if (!reg->gas) {
*val = 0;
return 0;
}
if (reg->vaddr)
read_register(reg->vaddr, val, reg->gas->bit_width);
else
ret = acpi_read(val, reg->gas);
return ret;
}
static int pcc_chan_reg_write(struct pcc_chan_reg *reg, u64 val)
{
int ret = 0;
if (!reg->gas)
return 0;
if (reg->vaddr)
write_register(reg->vaddr, val, reg->gas->bit_width);
else
ret = acpi_write(val, reg->gas);
return ret;
}
static int pcc_chan_reg_read_modify_write(struct pcc_chan_reg *reg)
{
int ret = 0;
u64 val;
ret = pcc_chan_reg_read(reg, &val);
if (ret)
return ret;
val &= reg->preserve_mask;
val |= reg->set_mask;
return pcc_chan_reg_write(reg, val);
}
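/*
 * Worked example (illustrative, masks assumed): with preserve_mask = 0xff00,
 * set_mask = 0x0001 and a current register value of 0xab12, the value written
 * back is (0xab12 & 0xff00) | 0x0001 = 0xab01, i.e. the preserved bits are
 * kept and the requested set bits are asserted.
 */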
/**
* pcc_map_interrupt - Map a PCC subspace GSI to a linux IRQ number
* @interrupt: GSI number.
* @flags: interrupt flags
*
* Returns: a valid linux IRQ number on success
* 0 or -EINVAL on failure
*/
static int pcc_map_interrupt(u32 interrupt, u32 flags)
{
int trigger, polarity;
if (!interrupt)
return 0;
trigger = (flags & ACPI_PCCT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
: ACPI_LEVEL_SENSITIVE;
polarity = (flags & ACPI_PCCT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
: ACPI_ACTIVE_HIGH;
return acpi_register_gsi(NULL, interrupt, trigger, polarity);
}
/**
* pcc_mbox_irq - PCC mailbox interrupt handler
* @irq: interrupt number
* @p: data/cookie passed from the caller to identify the channel
*
* Returns: IRQ_HANDLED if interrupt is handled or IRQ_NONE if not
*/
static irqreturn_t pcc_mbox_irq(int irq, void *p)
{
struct pcc_chan_info *pchan;
struct mbox_chan *chan = p;
u64 val;
int ret;
pchan = chan->con_priv;
ret = pcc_chan_reg_read(&pchan->cmd_complete, &val);
if (ret)
return IRQ_NONE;
if (val) { /* Ensure GAS exists and value is non-zero */
val &= pchan->cmd_complete.status_mask;
if (!val)
return IRQ_NONE;
}
ret = pcc_chan_reg_read(&pchan->error, &val);
if (ret)
return IRQ_NONE;
val &= pchan->error.status_mask;
if (val) {
val &= ~pchan->error.status_mask;
pcc_chan_reg_write(&pchan->error, val);
return IRQ_NONE;
}
if (pcc_chan_reg_read_modify_write(&pchan->plat_irq_ack))
return IRQ_NONE;
mbox_chan_received_data(chan, NULL);
return IRQ_HANDLED;
}
/**
* pcc_mbox_request_channel - PCC clients call this function to
* request a pointer to their PCC subspace, from which they
* can get the details of communicating with the remote.
* @cl: Pointer to Mailbox client, so we know where to bind the
* Channel.
* @subspace_id: The PCC Subspace index as parsed in the PCC client
* ACPI package. This is used to lookup the array of PCC
* subspaces as parsed by the PCC Mailbox controller.
*
* Return: Pointer to the PCC Mailbox Channel if successful or ERR_PTR.
*/
struct pcc_mbox_chan *
pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
{
struct pcc_chan_info *pchan;
struct mbox_chan *chan;
int rc;
if (subspace_id < 0 || subspace_id >= pcc_chan_count)
return ERR_PTR(-ENOENT);
pchan = chan_info + subspace_id;
chan = pchan->chan.mchan;
if (IS_ERR(chan) || chan->cl) {
pr_err("Channel not found for idx: %d\n", subspace_id);
return ERR_PTR(-EBUSY);
}
rc = mbox_bind_client(chan, cl);
if (rc)
return ERR_PTR(rc);
return &pchan->chan;
}
EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
/**
* pcc_mbox_free_channel - Clients call this to free their Channel.
*
* @pchan: Pointer to the PCC mailbox channel as returned by
* pcc_mbox_request_channel()
*/
void pcc_mbox_free_channel(struct pcc_mbox_chan *pchan)
{
struct mbox_chan *chan = pchan->mchan;
if (!chan || !chan->cl)
return;
mbox_free_channel(chan);
}
EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
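/*
 * Minimal usage sketch (illustrative only, not part of this driver) of the
 * read flow described in the header comment above. The function name
 * example_pcc_read() is hypothetical, and the client is assumed to already
 * serialise access to, and perform the actual reads from, its shared-memory
 * region mapped from pchan->shmem_base_addr.
 */
#if 0	/* example only, never compiled */
static int example_pcc_read(struct mbox_client *cl, int subspace_id)
{
	struct pcc_mbox_chan *pchan;
	int ret;

	pchan = pcc_mbox_request_channel(cl, subspace_id);
	if (IS_ERR(pchan))
		return PTR_ERR(pchan);

	/* After writing the READ command into the shared-memory region,
	 * ring the doorbell for this PCC channel.
	 */
	ret = mbox_send_message(pchan->mchan, NULL);

	pcc_mbox_free_channel(pchan);
	return ret;
}
#endif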
/**
* pcc_send_data - Called from Mailbox Controller code. Used
* here only to ring the channel doorbell. The PCC client
* specific read/write is done in the client driver in
 * order to maintain atomicity over the PCC channel once the
 * OS has control over it. See above for the flow of operations.
* @chan: Pointer to Mailbox channel over which to send data.
* @data: Client specific data written over channel. Used here
* only for debug after PCC transaction completes.
*
* Return: Err if something failed else 0 for success.
*/
static int pcc_send_data(struct mbox_chan *chan, void *data)
{
int ret;
struct pcc_chan_info *pchan = chan->con_priv;
ret = pcc_chan_reg_read_modify_write(&pchan->cmd_update);
if (ret)
return ret;
return pcc_chan_reg_read_modify_write(&pchan->db);
}
/**
* pcc_startup - Called from Mailbox Controller code. Used here
* to request the interrupt.
* @chan: Pointer to Mailbox channel to startup.
*
* Return: Err if something failed else 0 for success.
*/
static int pcc_startup(struct mbox_chan *chan)
{
struct pcc_chan_info *pchan = chan->con_priv;
int rc;
if (pchan->plat_irq > 0) {
rc = devm_request_irq(chan->mbox->dev, pchan->plat_irq, pcc_mbox_irq, 0,
MBOX_IRQ_NAME, chan);
if (unlikely(rc)) {
dev_err(chan->mbox->dev, "failed to register PCC interrupt %d\n",
pchan->plat_irq);
return rc;
}
}
return 0;
}
/**
* pcc_shutdown - Called from Mailbox Controller code. Used here
* to free the interrupt.
* @chan: Pointer to Mailbox channel to shutdown.
*/
static void pcc_shutdown(struct mbox_chan *chan)
{
struct pcc_chan_info *pchan = chan->con_priv;
if (pchan->plat_irq > 0)
devm_free_irq(chan->mbox->dev, pchan->plat_irq, chan);
}
static const struct mbox_chan_ops pcc_chan_ops = {
.send_data = pcc_send_data,
.startup = pcc_startup,
.shutdown = pcc_shutdown,
};
/**
* parse_pcc_subspace - Count PCC subspaces defined
* @header: Pointer to the ACPI subtable header under the PCCT.
* @end: End of subtable entry.
*
* Return: If we find a PCC subspace entry of a valid type, return 0.
* Otherwise, return -EINVAL.
*
* This gets called for each entry in the PCC table.
*/
static int parse_pcc_subspace(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_pcct_subspace *ss = (struct acpi_pcct_subspace *) header;
if (ss->header.type < ACPI_PCCT_TYPE_RESERVED)
return 0;
return -EINVAL;
}
static int
pcc_chan_reg_init(struct pcc_chan_reg *reg, struct acpi_generic_address *gas,
u64 preserve_mask, u64 set_mask, u64 status_mask, char *name)
{
if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
if (!(gas->bit_width >= 8 && gas->bit_width <= 64 &&
is_power_of_2(gas->bit_width))) {
pr_err("Error: Cannot access register of %u bit width",
gas->bit_width);
return -EFAULT;
}
reg->vaddr = acpi_os_ioremap(gas->address, gas->bit_width / 8);
if (!reg->vaddr) {
pr_err("Failed to ioremap PCC %s register\n", name);
return -ENOMEM;
}
}
reg->gas = gas;
reg->preserve_mask = preserve_mask;
reg->set_mask = set_mask;
reg->status_mask = status_mask;
return 0;
}
/**
* pcc_parse_subspace_irq - Parse the PCC IRQ and PCC ACK register
*
* @pchan: Pointer to the PCC channel info structure.
* @pcct_entry: Pointer to the ACPI subtable header.
*
* Return: 0 for Success, else errno.
*
* There should be one entry per PCC channel. This gets called for each
 * entry in the PCC table. This uses the PCCT Type 1 structure for all
 * applicable types (Types 1-4) to fetch the IRQ.
*/
static int pcc_parse_subspace_irq(struct pcc_chan_info *pchan,
struct acpi_subtable_header *pcct_entry)
{
int ret = 0;
struct acpi_pcct_hw_reduced *pcct_ss;
if (pcct_entry->type < ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE ||
pcct_entry->type > ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
return 0;
pcct_ss = (struct acpi_pcct_hw_reduced *)pcct_entry;
pchan->plat_irq = pcc_map_interrupt(pcct_ss->platform_interrupt,
(u32)pcct_ss->flags);
if (pchan->plat_irq <= 0) {
pr_err("PCC GSI %d not registered\n",
pcct_ss->platform_interrupt);
return -EINVAL;
}
if (pcct_ss->header.type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
struct acpi_pcct_hw_reduced_type2 *pcct2_ss = (void *)pcct_ss;
ret = pcc_chan_reg_init(&pchan->plat_irq_ack,
&pcct2_ss->platform_ack_register,
pcct2_ss->ack_preserve_mask,
pcct2_ss->ack_write_mask, 0,
"PLAT IRQ ACK");
} else if (pcct_ss->header.type == ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE ||
pcct_ss->header.type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE) {
struct acpi_pcct_ext_pcc_master *pcct_ext = (void *)pcct_ss;
ret = pcc_chan_reg_init(&pchan->plat_irq_ack,
&pcct_ext->platform_ack_register,
pcct_ext->ack_preserve_mask,
pcct_ext->ack_set_mask, 0,
"PLAT IRQ ACK");
}
return ret;
}
/**
* pcc_parse_subspace_db_reg - Parse the PCC doorbell register
*
* @pchan: Pointer to the PCC channel info structure.
* @pcct_entry: Pointer to the ACPI subtable header.
*
* Return: 0 for Success, else errno.
*/
static int pcc_parse_subspace_db_reg(struct pcc_chan_info *pchan,
struct acpi_subtable_header *pcct_entry)
{
int ret = 0;
if (pcct_entry->type <= ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
struct acpi_pcct_subspace *pcct_ss;
pcct_ss = (struct acpi_pcct_subspace *)pcct_entry;
ret = pcc_chan_reg_init(&pchan->db,
&pcct_ss->doorbell_register,
pcct_ss->preserve_mask,
pcct_ss->write_mask, 0, "Doorbell");
} else {
struct acpi_pcct_ext_pcc_master *pcct_ext;
pcct_ext = (struct acpi_pcct_ext_pcc_master *)pcct_entry;
ret = pcc_chan_reg_init(&pchan->db,
&pcct_ext->doorbell_register,
pcct_ext->preserve_mask,
pcct_ext->write_mask, 0, "Doorbell");
if (ret)
return ret;
ret = pcc_chan_reg_init(&pchan->cmd_complete,
&pcct_ext->cmd_complete_register,
0, 0, pcct_ext->cmd_complete_mask,
"Command Complete Check");
if (ret)
return ret;
ret = pcc_chan_reg_init(&pchan->cmd_update,
&pcct_ext->cmd_update_register,
pcct_ext->cmd_update_preserve_mask,
pcct_ext->cmd_update_set_mask, 0,
"Command Complete Update");
if (ret)
return ret;
ret = pcc_chan_reg_init(&pchan->error,
&pcct_ext->error_status_register,
0, 0, pcct_ext->error_status_mask,
"Error Status");
}
return ret;
}
/**
* pcc_parse_subspace_shmem - Parse the PCC Shared Memory Region information
*
* @pchan: Pointer to the PCC channel info structure.
* @pcct_entry: Pointer to the ACPI subtable header.
*
*/
static void pcc_parse_subspace_shmem(struct pcc_chan_info *pchan,
struct acpi_subtable_header *pcct_entry)
{
if (pcct_entry->type <= ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
struct acpi_pcct_subspace *pcct_ss =
(struct acpi_pcct_subspace *)pcct_entry;
pchan->chan.shmem_base_addr = pcct_ss->base_address;
pchan->chan.shmem_size = pcct_ss->length;
pchan->chan.latency = pcct_ss->latency;
pchan->chan.max_access_rate = pcct_ss->max_access_rate;
pchan->chan.min_turnaround_time = pcct_ss->min_turnaround_time;
} else {
struct acpi_pcct_ext_pcc_master *pcct_ext =
(struct acpi_pcct_ext_pcc_master *)pcct_entry;
pchan->chan.shmem_base_addr = pcct_ext->base_address;
pchan->chan.shmem_size = pcct_ext->length;
pchan->chan.latency = pcct_ext->latency;
pchan->chan.max_access_rate = pcct_ext->max_access_rate;
pchan->chan.min_turnaround_time = pcct_ext->min_turnaround_time;
}
}
/**
* acpi_pcc_probe - Parse the ACPI tree for the PCCT.
*
* Return: 0 for Success, else errno.
*/
static int __init acpi_pcc_probe(void)
{
int count, i, rc = 0;
acpi_status status;
struct acpi_table_header *pcct_tbl;
struct acpi_subtable_proc proc[ACPI_PCCT_TYPE_RESERVED];
status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl);
if (ACPI_FAILURE(status) || !pcct_tbl)
return -ENODEV;
/* Set up the subtable handlers */
for (i = ACPI_PCCT_TYPE_GENERIC_SUBSPACE;
i < ACPI_PCCT_TYPE_RESERVED; i++) {
proc[i].id = i;
proc[i].count = 0;
proc[i].handler = parse_pcc_subspace;
}
count = acpi_table_parse_entries_array(ACPI_SIG_PCCT,
sizeof(struct acpi_table_pcct), proc,
ACPI_PCCT_TYPE_RESERVED, MAX_PCC_SUBSPACES);
if (count <= 0 || count > MAX_PCC_SUBSPACES) {
if (count < 0)
pr_warn("Error parsing PCC subspaces from PCCT\n");
else
pr_warn("Invalid PCCT: %d PCC subspaces\n", count);
rc = -EINVAL;
} else {
pcc_chan_count = count;
}
acpi_put_table(pcct_tbl);
return rc;
}
/**
* pcc_mbox_probe - Called when we find a match for the
* PCCT platform device. This is purely used to represent
* the PCCT as a virtual device for registering with the
* generic Mailbox framework.
*
* @pdev: Pointer to platform device returned when a match
* is found.
*
* Return: 0 for Success, else errno.
*/
static int pcc_mbox_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mbox_controller *pcc_mbox_ctrl;
struct mbox_chan *pcc_mbox_channels;
struct acpi_table_header *pcct_tbl;
struct acpi_subtable_header *pcct_entry;
struct acpi_table_pcct *acpi_pcct_tbl;
acpi_status status = AE_OK;
int i, rc, count = pcc_chan_count;
/* Search for PCCT */
status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl);
if (ACPI_FAILURE(status) || !pcct_tbl)
return -ENODEV;
pcc_mbox_channels = devm_kcalloc(dev, count, sizeof(*pcc_mbox_channels),
GFP_KERNEL);
if (!pcc_mbox_channels) {
rc = -ENOMEM;
goto err;
}
chan_info = devm_kcalloc(dev, count, sizeof(*chan_info), GFP_KERNEL);
if (!chan_info) {
rc = -ENOMEM;
goto err;
}
pcc_mbox_ctrl = devm_kzalloc(dev, sizeof(*pcc_mbox_ctrl), GFP_KERNEL);
if (!pcc_mbox_ctrl) {
rc = -ENOMEM;
goto err;
}
/* Point to the first PCC subspace entry */
pcct_entry = (struct acpi_subtable_header *) (
(unsigned long) pcct_tbl + sizeof(struct acpi_table_pcct));
acpi_pcct_tbl = (struct acpi_table_pcct *) pcct_tbl;
if (acpi_pcct_tbl->flags & ACPI_PCCT_DOORBELL)
pcc_mbox_ctrl->txdone_irq = true;
for (i = 0; i < count; i++) {
struct pcc_chan_info *pchan = chan_info + i;
pcc_mbox_channels[i].con_priv = pchan;
pchan->chan.mchan = &pcc_mbox_channels[i];
if (pcct_entry->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE &&
!pcc_mbox_ctrl->txdone_irq) {
pr_err("Platform Interrupt flag must be set to 1");
rc = -EINVAL;
goto err;
}
if (pcc_mbox_ctrl->txdone_irq) {
rc = pcc_parse_subspace_irq(pchan, pcct_entry);
if (rc < 0)
goto err;
}
rc = pcc_parse_subspace_db_reg(pchan, pcct_entry);
if (rc < 0)
goto err;
pcc_parse_subspace_shmem(pchan, pcct_entry);
pcct_entry = (struct acpi_subtable_header *)
((unsigned long) pcct_entry + pcct_entry->length);
}
pcc_mbox_ctrl->num_chans = count;
pr_info("Detected %d PCC Subspaces\n", pcc_mbox_ctrl->num_chans);
pcc_mbox_ctrl->chans = pcc_mbox_channels;
pcc_mbox_ctrl->ops = &pcc_chan_ops;
pcc_mbox_ctrl->dev = dev;
pr_info("Registering PCC driver as Mailbox controller\n");
rc = mbox_controller_register(pcc_mbox_ctrl);
if (rc)
pr_err("Err registering PCC as Mailbox controller: %d\n", rc);
else
return 0;
err:
acpi_put_table(pcct_tbl);
return rc;
}
static struct platform_driver pcc_mbox_driver = {
.probe = pcc_mbox_probe,
.driver = {
.name = "PCCT",
},
};
static int __init pcc_init(void)
{
int ret;
struct platform_device *pcc_pdev;
if (acpi_disabled)
return -ENODEV;
/* Check if PCC support is available. */
ret = acpi_pcc_probe();
if (ret) {
pr_debug("ACPI PCC probe failed.\n");
return -ENODEV;
}
pcc_pdev = platform_create_bundle(&pcc_mbox_driver,
pcc_mbox_probe, NULL, 0, NULL, 0);
if (IS_ERR(pcc_pdev)) {
pr_debug("Err creating PCC platform bundle\n");
pcc_chan_count = 0;
return PTR_ERR(pcc_pdev);
}
return 0;
}
/*
* Make PCC init postcore so that users of this mailbox
* such as the ACPI Processor driver have it available
* at their init.
*/
postcore_initcall(pcc_init);
|
linux-master
|
drivers/mailbox/pcc.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#define MAILBOX_A2B_INTEN 0x00
#define MAILBOX_A2B_STATUS 0x04
#define MAILBOX_A2B_CMD(x) (0x08 + (x) * 8)
#define MAILBOX_A2B_DAT(x) (0x0c + (x) * 8)
#define MAILBOX_B2A_INTEN 0x28
#define MAILBOX_B2A_STATUS 0x2C
#define MAILBOX_B2A_CMD(x) (0x30 + (x) * 8)
#define MAILBOX_B2A_DAT(x) (0x34 + (x) * 8)
struct rockchip_mbox_msg {
u32 cmd;
int rx_size;
};
struct rockchip_mbox_data {
int num_chans;
};
struct rockchip_mbox_chan {
int idx;
int irq;
struct rockchip_mbox_msg *msg;
struct rockchip_mbox *mb;
};
struct rockchip_mbox {
struct mbox_controller mbox;
struct clk *pclk;
void __iomem *mbox_base;
/* The maximum size of buf for each channel */
u32 buf_size;
struct rockchip_mbox_chan *chans;
};
static int rockchip_mbox_send_data(struct mbox_chan *chan, void *data)
{
struct rockchip_mbox *mb = dev_get_drvdata(chan->mbox->dev);
struct rockchip_mbox_msg *msg = data;
struct rockchip_mbox_chan *chans = mb->chans;
if (!msg)
return -EINVAL;
if (msg->rx_size > mb->buf_size) {
dev_err(mb->mbox.dev, "Transmit size over buf size(%d)\n",
mb->buf_size);
return -EINVAL;
}
dev_dbg(mb->mbox.dev, "Chan[%d]: A2B message, cmd 0x%08x\n",
chans->idx, msg->cmd);
mb->chans[chans->idx].msg = msg;
writel_relaxed(msg->cmd, mb->mbox_base + MAILBOX_A2B_CMD(chans->idx));
writel_relaxed(msg->rx_size, mb->mbox_base +
MAILBOX_A2B_DAT(chans->idx));
return 0;
}
static int rockchip_mbox_startup(struct mbox_chan *chan)
{
struct rockchip_mbox *mb = dev_get_drvdata(chan->mbox->dev);
/* Enable all B2A interrupts */
writel_relaxed((1 << mb->mbox.num_chans) - 1,
mb->mbox_base + MAILBOX_B2A_INTEN);
return 0;
}
static void rockchip_mbox_shutdown(struct mbox_chan *chan)
{
struct rockchip_mbox *mb = dev_get_drvdata(chan->mbox->dev);
struct rockchip_mbox_chan *chans = mb->chans;
/* Disable all B2A interrupts */
writel_relaxed(0, mb->mbox_base + MAILBOX_B2A_INTEN);
mb->chans[chans->idx].msg = NULL;
}
static const struct mbox_chan_ops rockchip_mbox_chan_ops = {
.send_data = rockchip_mbox_send_data,
.startup = rockchip_mbox_startup,
.shutdown = rockchip_mbox_shutdown,
};
static irqreturn_t rockchip_mbox_irq(int irq, void *dev_id)
{
int idx;
struct rockchip_mbox *mb = (struct rockchip_mbox *)dev_id;
u32 status = readl_relaxed(mb->mbox_base + MAILBOX_B2A_STATUS);
for (idx = 0; idx < mb->mbox.num_chans; idx++) {
if ((status & (1 << idx)) && (irq == mb->chans[idx].irq)) {
/* Clear mbox interrupt */
writel_relaxed(1 << idx,
mb->mbox_base + MAILBOX_B2A_STATUS);
return IRQ_WAKE_THREAD;
}
}
return IRQ_NONE;
}
static irqreturn_t rockchip_mbox_isr(int irq, void *dev_id)
{
int idx;
struct rockchip_mbox_msg *msg = NULL;
struct rockchip_mbox *mb = (struct rockchip_mbox *)dev_id;
for (idx = 0; idx < mb->mbox.num_chans; idx++) {
if (irq != mb->chans[idx].irq)
continue;
msg = mb->chans[idx].msg;
if (!msg) {
dev_err(mb->mbox.dev,
"Chan[%d]: B2A message is NULL\n", idx);
break; /* spurious */
}
mbox_chan_received_data(&mb->mbox.chans[idx], msg);
mb->chans[idx].msg = NULL;
dev_dbg(mb->mbox.dev, "Chan[%d]: B2A message, cmd 0x%08x\n",
idx, msg->cmd);
break;
}
return IRQ_HANDLED;
}
static const struct rockchip_mbox_data rk3368_drv_data = {
.num_chans = 4,
};
static const struct of_device_id rockchip_mbox_of_match[] = {
{ .compatible = "rockchip,rk3368-mailbox", .data = &rk3368_drv_data},
{ },
};
MODULE_DEVICE_TABLE(of, rockchip_mbox_of_match);
static int rockchip_mbox_probe(struct platform_device *pdev)
{
struct rockchip_mbox *mb;
const struct rockchip_mbox_data *drv_data;
struct resource *res;
int ret, irq, i;
if (!pdev->dev.of_node)
return -ENODEV;
drv_data = (const struct rockchip_mbox_data *) device_get_match_data(&pdev->dev);
mb = devm_kzalloc(&pdev->dev, sizeof(*mb), GFP_KERNEL);
if (!mb)
return -ENOMEM;
mb->chans = devm_kcalloc(&pdev->dev, drv_data->num_chans,
sizeof(*mb->chans), GFP_KERNEL);
if (!mb->chans)
return -ENOMEM;
mb->mbox.chans = devm_kcalloc(&pdev->dev, drv_data->num_chans,
sizeof(*mb->mbox.chans), GFP_KERNEL);
if (!mb->mbox.chans)
return -ENOMEM;
platform_set_drvdata(pdev, mb);
mb->mbox.dev = &pdev->dev;
mb->mbox.num_chans = drv_data->num_chans;
mb->mbox.ops = &rockchip_mbox_chan_ops;
mb->mbox.txdone_irq = true;
mb->mbox_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(mb->mbox_base))
return PTR_ERR(mb->mbox_base);
/* Each channel has two buffers for A2B and B2A */
mb->buf_size = (size_t)resource_size(res) / (drv_data->num_chans * 2);
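/*
 * Illustrative example (region size assumed): a 4 KiB register/buffer
 * resource with 4 channels gives 4096 / (4 * 2) = 512 bytes per buffer.
 */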
mb->pclk = devm_clk_get(&pdev->dev, "pclk_mailbox");
if (IS_ERR(mb->pclk)) {
ret = PTR_ERR(mb->pclk);
dev_err(&pdev->dev, "failed to get pclk_mailbox clock: %d\n",
ret);
return ret;
}
ret = clk_prepare_enable(mb->pclk);
if (ret) {
dev_err(&pdev->dev, "failed to enable pclk: %d\n", ret);
return ret;
}
for (i = 0; i < mb->mbox.num_chans; i++) {
irq = platform_get_irq(pdev, i);
if (irq < 0)
return irq;
ret = devm_request_threaded_irq(&pdev->dev, irq,
rockchip_mbox_irq,
rockchip_mbox_isr, IRQF_ONESHOT,
dev_name(&pdev->dev), mb);
if (ret < 0)
return ret;
mb->chans[i].idx = i;
mb->chans[i].irq = irq;
mb->chans[i].mb = mb;
mb->chans[i].msg = NULL;
}
ret = devm_mbox_controller_register(&pdev->dev, &mb->mbox);
if (ret < 0)
dev_err(&pdev->dev, "Failed to register mailbox: %d\n", ret);
return ret;
}
static struct platform_driver rockchip_mbox_driver = {
.probe = rockchip_mbox_probe,
.driver = {
.name = "rockchip-mailbox",
.of_match_table = rockchip_mbox_of_match,
},
};
module_platform_driver(rockchip_mbox_driver);
MODULE_DESCRIPTION("Rockchip mailbox: communicate between CPU cores and MCU");
MODULE_AUTHOR("Addy Ke <[email protected]>");
MODULE_AUTHOR("Caesar Wang <[email protected]>");
|
linux-master
|
drivers/mailbox/rockchip-mailbox.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics 2018 - All Rights Reserved
* Authors: Ludovic Barre <[email protected]> for STMicroelectronics.
* Fabien Dessenne <[email protected]> for STMicroelectronics.
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#define IPCC_XCR 0x000
#define XCR_RXOIE BIT(0)
#define XCR_TXOIE BIT(16)
#define IPCC_XMR 0x004
#define IPCC_XSCR 0x008
#define IPCC_XTOYSR 0x00c
#define IPCC_PROC_OFFST 0x010
#define IPCC_HWCFGR 0x3f0
#define IPCFGR_CHAN_MASK GENMASK(7, 0)
#define IPCC_VER 0x3f4
#define VER_MINREV_MASK GENMASK(3, 0)
#define VER_MAJREV_MASK GENMASK(7, 4)
#define RX_BIT_MASK GENMASK(15, 0)
#define RX_BIT_CHAN(chan) BIT(chan)
#define TX_BIT_SHIFT 16
#define TX_BIT_MASK GENMASK(31, 16)
#define TX_BIT_CHAN(chan) BIT(TX_BIT_SHIFT + (chan))
#define STM32_MAX_PROCS 2
enum {
IPCC_IRQ_RX,
IPCC_IRQ_TX,
IPCC_IRQ_NUM,
};
struct stm32_ipcc {
struct mbox_controller controller;
void __iomem *reg_base;
void __iomem *reg_proc;
struct clk *clk;
spinlock_t lock; /* protect access to IPCC registers */
int irqs[IPCC_IRQ_NUM];
u32 proc_id;
u32 n_chans;
u32 xcr;
u32 xmr;
};
static inline void stm32_ipcc_set_bits(spinlock_t *lock, void __iomem *reg,
u32 mask)
{
unsigned long flags;
spin_lock_irqsave(lock, flags);
writel_relaxed(readl_relaxed(reg) | mask, reg);
spin_unlock_irqrestore(lock, flags);
}
static inline void stm32_ipcc_clr_bits(spinlock_t *lock, void __iomem *reg,
u32 mask)
{
unsigned long flags;
spin_lock_irqsave(lock, flags);
writel_relaxed(readl_relaxed(reg) & ~mask, reg);
spin_unlock_irqrestore(lock, flags);
}
static irqreturn_t stm32_ipcc_rx_irq(int irq, void *data)
{
struct stm32_ipcc *ipcc = data;
struct device *dev = ipcc->controller.dev;
u32 status, mr, tosr, chan;
irqreturn_t ret = IRQ_NONE;
int proc_offset;
/* read 'channel occupied' status from other proc */
proc_offset = ipcc->proc_id ? -IPCC_PROC_OFFST : IPCC_PROC_OFFST;
tosr = readl_relaxed(ipcc->reg_proc + proc_offset + IPCC_XTOYSR);
mr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);
/* search for unmasked 'channel occupied' */
status = tosr & FIELD_GET(RX_BIT_MASK, ~mr);
for (chan = 0; chan < ipcc->n_chans; chan++) {
if (!(status & (1 << chan)))
continue;
dev_dbg(dev, "%s: chan:%d rx\n", __func__, chan);
mbox_chan_received_data(&ipcc->controller.chans[chan], NULL);
stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XSCR,
RX_BIT_CHAN(chan));
ret = IRQ_HANDLED;
}
return ret;
}
static irqreturn_t stm32_ipcc_tx_irq(int irq, void *data)
{
struct stm32_ipcc *ipcc = data;
struct device *dev = ipcc->controller.dev;
u32 status, mr, tosr, chan;
irqreturn_t ret = IRQ_NONE;
tosr = readl_relaxed(ipcc->reg_proc + IPCC_XTOYSR);
mr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);
/* search for unmasked 'channel free' */
status = ~tosr & FIELD_GET(TX_BIT_MASK, ~mr);
for (chan = 0; chan < ipcc->n_chans ; chan++) {
if (!(status & (1 << chan)))
continue;
dev_dbg(dev, "%s: chan:%d tx\n", __func__, chan);
/* mask 'tx channel free' interrupt */
stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
TX_BIT_CHAN(chan));
mbox_chan_txdone(&ipcc->controller.chans[chan], 0);
ret = IRQ_HANDLED;
}
return ret;
}
static int stm32_ipcc_send_data(struct mbox_chan *link, void *data)
{
unsigned long chan = (unsigned long)link->con_priv;
struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
controller);
dev_dbg(ipcc->controller.dev, "%s: chan:%lu\n", __func__, chan);
/* set channel n occupied */
stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XSCR,
TX_BIT_CHAN(chan));
/* unmask 'tx channel free' interrupt */
stm32_ipcc_clr_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
TX_BIT_CHAN(chan));
return 0;
}
static int stm32_ipcc_startup(struct mbox_chan *link)
{
unsigned long chan = (unsigned long)link->con_priv;
struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
controller);
int ret;
ret = clk_prepare_enable(ipcc->clk);
if (ret) {
dev_err(ipcc->controller.dev, "can not enable the clock\n");
return ret;
}
/* unmask 'rx channel occupied' interrupt */
stm32_ipcc_clr_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
RX_BIT_CHAN(chan));
return 0;
}
static void stm32_ipcc_shutdown(struct mbox_chan *link)
{
unsigned long chan = (unsigned long)link->con_priv;
struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
controller);
/* mask rx/tx interrupt */
stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
RX_BIT_CHAN(chan) | TX_BIT_CHAN(chan));
clk_disable_unprepare(ipcc->clk);
}
static const struct mbox_chan_ops stm32_ipcc_ops = {
.send_data = stm32_ipcc_send_data,
.startup = stm32_ipcc_startup,
.shutdown = stm32_ipcc_shutdown,
};
static int stm32_ipcc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct stm32_ipcc *ipcc;
unsigned long i;
int ret;
u32 ip_ver;
static const char * const irq_name[] = {"rx", "tx"};
irq_handler_t irq_thread[] = {stm32_ipcc_rx_irq, stm32_ipcc_tx_irq};
if (!np) {
dev_err(dev, "No DT found\n");
return -ENODEV;
}
ipcc = devm_kzalloc(dev, sizeof(*ipcc), GFP_KERNEL);
if (!ipcc)
return -ENOMEM;
spin_lock_init(&ipcc->lock);
/* proc_id */
if (of_property_read_u32(np, "st,proc-id", &ipcc->proc_id)) {
dev_err(dev, "Missing st,proc-id\n");
return -ENODEV;
}
if (ipcc->proc_id >= STM32_MAX_PROCS) {
dev_err(dev, "Invalid proc_id (%d)\n", ipcc->proc_id);
return -EINVAL;
}
/* regs */
ipcc->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ipcc->reg_base))
return PTR_ERR(ipcc->reg_base);
ipcc->reg_proc = ipcc->reg_base + ipcc->proc_id * IPCC_PROC_OFFST;
/* clock */
ipcc->clk = devm_clk_get(dev, NULL);
if (IS_ERR(ipcc->clk))
return PTR_ERR(ipcc->clk);
ret = clk_prepare_enable(ipcc->clk);
if (ret) {
dev_err(dev, "can not enable the clock\n");
return ret;
}
/* irq */
for (i = 0; i < IPCC_IRQ_NUM; i++) {
ipcc->irqs[i] = platform_get_irq_byname(pdev, irq_name[i]);
if (ipcc->irqs[i] < 0) {
ret = ipcc->irqs[i];
goto err_clk;
}
ret = devm_request_threaded_irq(dev, ipcc->irqs[i], NULL,
irq_thread[i], IRQF_ONESHOT,
dev_name(dev), ipcc);
if (ret) {
dev_err(dev, "failed to request irq %lu (%d)\n", i, ret);
goto err_clk;
}
}
/* mask and enable rx/tx irq */
stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
RX_BIT_MASK | TX_BIT_MASK);
stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XCR,
XCR_RXOIE | XCR_TXOIE);
/* wakeup */
if (of_property_read_bool(np, "wakeup-source")) {
device_set_wakeup_capable(dev, true);
ret = dev_pm_set_wake_irq(dev, ipcc->irqs[IPCC_IRQ_RX]);
if (ret) {
dev_err(dev, "Failed to set wake up irq\n");
goto err_init_wkp;
}
}
/* mailbox controller */
ipcc->n_chans = readl_relaxed(ipcc->reg_base + IPCC_HWCFGR);
ipcc->n_chans &= IPCFGR_CHAN_MASK;
ipcc->controller.dev = dev;
ipcc->controller.txdone_irq = true;
ipcc->controller.ops = &stm32_ipcc_ops;
ipcc->controller.num_chans = ipcc->n_chans;
ipcc->controller.chans = devm_kcalloc(dev, ipcc->controller.num_chans,
sizeof(*ipcc->controller.chans),
GFP_KERNEL);
if (!ipcc->controller.chans) {
ret = -ENOMEM;
goto err_irq_wkp;
}
for (i = 0; i < ipcc->controller.num_chans; i++)
ipcc->controller.chans[i].con_priv = (void *)i;
ret = devm_mbox_controller_register(dev, &ipcc->controller);
if (ret)
goto err_irq_wkp;
platform_set_drvdata(pdev, ipcc);
ip_ver = readl_relaxed(ipcc->reg_base + IPCC_VER);
dev_info(dev, "ipcc rev:%ld.%ld enabled, %d chans, proc %d\n",
FIELD_GET(VER_MAJREV_MASK, ip_ver),
FIELD_GET(VER_MINREV_MASK, ip_ver),
ipcc->controller.num_chans, ipcc->proc_id);
clk_disable_unprepare(ipcc->clk);
return 0;
err_irq_wkp:
if (of_property_read_bool(np, "wakeup-source"))
dev_pm_clear_wake_irq(dev);
err_init_wkp:
device_set_wakeup_capable(dev, false);
err_clk:
clk_disable_unprepare(ipcc->clk);
return ret;
}
static int stm32_ipcc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
if (of_property_read_bool(dev->of_node, "wakeup-source"))
dev_pm_clear_wake_irq(&pdev->dev);
device_set_wakeup_capable(dev, false);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int stm32_ipcc_suspend(struct device *dev)
{
struct stm32_ipcc *ipcc = dev_get_drvdata(dev);
ipcc->xmr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);
ipcc->xcr = readl_relaxed(ipcc->reg_proc + IPCC_XCR);
return 0;
}
static int stm32_ipcc_resume(struct device *dev)
{
struct stm32_ipcc *ipcc = dev_get_drvdata(dev);
writel_relaxed(ipcc->xmr, ipcc->reg_proc + IPCC_XMR);
writel_relaxed(ipcc->xcr, ipcc->reg_proc + IPCC_XCR);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(stm32_ipcc_pm_ops,
stm32_ipcc_suspend, stm32_ipcc_resume);
static const struct of_device_id stm32_ipcc_of_match[] = {
{ .compatible = "st,stm32mp1-ipcc" },
{},
};
MODULE_DEVICE_TABLE(of, stm32_ipcc_of_match);
static struct platform_driver stm32_ipcc_driver = {
.driver = {
.name = "stm32-ipcc",
.pm = &stm32_ipcc_pm_ops,
.of_match_table = stm32_ipcc_of_match,
},
.probe = stm32_ipcc_probe,
.remove = stm32_ipcc_remove,
};
module_platform_driver(stm32_ipcc_driver);
MODULE_AUTHOR("Ludovic Barre <[email protected]>");
MODULE_AUTHOR("Fabien Dessenne <[email protected]>");
MODULE_DESCRIPTION("STM32 IPCC driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/mailbox/stm32-ipcc.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 BayLibre SAS.
* Author: Neil Armstrong <[email protected]>
* Synchronised with arm_mhu.c from :
* Copyright (C) 2013-2015 Fujitsu Semiconductor Ltd.
* Copyright (C) 2015 Linaro Ltd.
* Author: Jassi Brar <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
#define INTR_SET_OFS 0x0
#define INTR_STAT_OFS 0x4
#define INTR_CLR_OFS 0x8
#define MHU_SEC_OFFSET 0x0
#define MHU_LP_OFFSET 0xc
#define MHU_HP_OFFSET 0x18
#define TX_REG_OFFSET 0x24
#define MHU_CHANS 3
struct platform_mhu_link {
int irq;
void __iomem *tx_reg;
void __iomem *rx_reg;
};
struct platform_mhu {
void __iomem *base;
struct platform_mhu_link mlink[MHU_CHANS];
struct mbox_chan chan[MHU_CHANS];
struct mbox_controller mbox;
};
static irqreturn_t platform_mhu_rx_interrupt(int irq, void *p)
{
struct mbox_chan *chan = p;
struct platform_mhu_link *mlink = chan->con_priv;
u32 val;
val = readl_relaxed(mlink->rx_reg + INTR_STAT_OFS);
if (!val)
return IRQ_NONE;
mbox_chan_received_data(chan, (void *)&val);
writel_relaxed(val, mlink->rx_reg + INTR_CLR_OFS);
return IRQ_HANDLED;
}
static bool platform_mhu_last_tx_done(struct mbox_chan *chan)
{
struct platform_mhu_link *mlink = chan->con_priv;
u32 val = readl_relaxed(mlink->tx_reg + INTR_STAT_OFS);
return (val == 0);
}
static int platform_mhu_send_data(struct mbox_chan *chan, void *data)
{
struct platform_mhu_link *mlink = chan->con_priv;
u32 *arg = data;
writel_relaxed(*arg, mlink->tx_reg + INTR_SET_OFS);
return 0;
}
static int platform_mhu_startup(struct mbox_chan *chan)
{
struct platform_mhu_link *mlink = chan->con_priv;
u32 val;
int ret;
val = readl_relaxed(mlink->tx_reg + INTR_STAT_OFS);
writel_relaxed(val, mlink->tx_reg + INTR_CLR_OFS);
ret = request_irq(mlink->irq, platform_mhu_rx_interrupt,
IRQF_SHARED, "platform_mhu_link", chan);
if (ret) {
dev_err(chan->mbox->dev,
"Unable to acquire IRQ %d\n", mlink->irq);
return ret;
}
return 0;
}
static void platform_mhu_shutdown(struct mbox_chan *chan)
{
struct platform_mhu_link *mlink = chan->con_priv;
free_irq(mlink->irq, chan);
}
static const struct mbox_chan_ops platform_mhu_ops = {
.send_data = platform_mhu_send_data,
.startup = platform_mhu_startup,
.shutdown = platform_mhu_shutdown,
.last_tx_done = platform_mhu_last_tx_done,
};
static int platform_mhu_probe(struct platform_device *pdev)
{
int i, err;
struct platform_mhu *mhu;
struct device *dev = &pdev->dev;
int platform_mhu_reg[MHU_CHANS] = {
MHU_SEC_OFFSET, MHU_LP_OFFSET, MHU_HP_OFFSET
};
/* Allocate memory for device */
mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
if (!mhu)
return -ENOMEM;
mhu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mhu->base)) {
dev_err(dev, "ioremap failed\n");
return PTR_ERR(mhu->base);
}
for (i = 0; i < MHU_CHANS; i++) {
mhu->chan[i].con_priv = &mhu->mlink[i];
mhu->mlink[i].irq = platform_get_irq(pdev, i);
if (mhu->mlink[i].irq < 0)
return mhu->mlink[i].irq;
mhu->mlink[i].rx_reg = mhu->base + platform_mhu_reg[i];
mhu->mlink[i].tx_reg = mhu->mlink[i].rx_reg + TX_REG_OFFSET;
}
mhu->mbox.dev = dev;
mhu->mbox.chans = &mhu->chan[0];
mhu->mbox.num_chans = MHU_CHANS;
mhu->mbox.ops = &platform_mhu_ops;
mhu->mbox.txdone_irq = false;
mhu->mbox.txdone_poll = true;
mhu->mbox.txpoll_period = 1;
platform_set_drvdata(pdev, mhu);
err = devm_mbox_controller_register(dev, &mhu->mbox);
if (err) {
dev_err(dev, "Failed to register mailboxes %d\n", err);
return err;
}
dev_info(dev, "Platform MHU Mailbox registered\n");
return 0;
}
static const struct of_device_id platform_mhu_dt_ids[] = {
{ .compatible = "amlogic,meson-gxbb-mhu", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, platform_mhu_dt_ids);
static struct platform_driver platform_mhu_driver = {
.probe = platform_mhu_probe,
.driver = {
.name = "platform-mhu",
.of_match_table = platform_mhu_dt_ids,
},
};
module_platform_driver(platform_mhu_driver);
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:platform-mhu");
MODULE_DESCRIPTION("Platform MHU Driver");
MODULE_AUTHOR("Neil Armstrong <[email protected]>");
|
linux-master
|
drivers/mailbox/platform_mhu.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Spreadtrum mailbox driver
*
* Copyright (c) 2020 Spreadtrum Communications Inc.
*/
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#define SPRD_MBOX_ID 0x0
#define SPRD_MBOX_MSG_LOW 0x4
#define SPRD_MBOX_MSG_HIGH 0x8
#define SPRD_MBOX_TRIGGER 0xc
#define SPRD_MBOX_FIFO_RST 0x10
#define SPRD_MBOX_FIFO_STS 0x14
#define SPRD_MBOX_IRQ_STS 0x18
#define SPRD_MBOX_IRQ_MSK 0x1c
#define SPRD_MBOX_LOCK 0x20
#define SPRD_MBOX_FIFO_DEPTH 0x24
/* Bit and mask definition for inbox's SPRD_MBOX_FIFO_STS register */
#define SPRD_INBOX_FIFO_DELIVER_MASK GENMASK(23, 16)
#define SPRD_INBOX_FIFO_OVERLOW_MASK GENMASK(15, 8)
#define SPRD_INBOX_FIFO_DELIVER_SHIFT 16
#define SPRD_INBOX_FIFO_BUSY_MASK GENMASK(7, 0)
/* Bit and mask definition for SPRD_MBOX_IRQ_STS register */
#define SPRD_MBOX_IRQ_CLR BIT(0)
/* Bit and mask definition for outbox's SPRD_MBOX_FIFO_STS register */
#define SPRD_OUTBOX_FIFO_FULL BIT(2)
#define SPRD_OUTBOX_FIFO_WR_SHIFT 16
#define SPRD_OUTBOX_FIFO_RD_SHIFT 24
#define SPRD_OUTBOX_FIFO_POS_MASK GENMASK(7, 0)
/* Bit and mask definition for inbox's SPRD_MBOX_IRQ_MSK register */
#define SPRD_INBOX_FIFO_BLOCK_IRQ BIT(0)
#define SPRD_INBOX_FIFO_OVERFLOW_IRQ BIT(1)
#define SPRD_INBOX_FIFO_DELIVER_IRQ BIT(2)
#define SPRD_INBOX_FIFO_IRQ_MASK GENMASK(2, 0)
/* Bit and mask definition for outbox's SPRD_MBOX_IRQ_MSK register */
#define SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ BIT(0)
#define SPRD_OUTBOX_FIFO_IRQ_MASK GENMASK(4, 0)
#define SPRD_OUTBOX_BASE_SPAN 0x1000
#define SPRD_MBOX_CHAN_MAX 8
#define SPRD_SUPP_INBOX_ID_SC9863A 7
struct sprd_mbox_priv {
struct mbox_controller mbox;
struct device *dev;
void __iomem *inbox_base;
void __iomem *outbox_base;
/* Base register address for supplementary outbox */
void __iomem *supp_base;
struct clk *clk;
u32 outbox_fifo_depth;
struct mutex lock;
u32 refcnt;
struct mbox_chan chan[SPRD_MBOX_CHAN_MAX];
};
static struct sprd_mbox_priv *to_sprd_mbox_priv(struct mbox_controller *mbox)
{
return container_of(mbox, struct sprd_mbox_priv, mbox);
}
static u32 sprd_mbox_get_fifo_len(struct sprd_mbox_priv *priv, u32 fifo_sts)
{
u32 wr_pos = (fifo_sts >> SPRD_OUTBOX_FIFO_WR_SHIFT) &
SPRD_OUTBOX_FIFO_POS_MASK;
u32 rd_pos = (fifo_sts >> SPRD_OUTBOX_FIFO_RD_SHIFT) &
SPRD_OUTBOX_FIFO_POS_MASK;
u32 fifo_len;
/*
 * If the read pointer equals the write pointer, the FIFO is either
 * full or empty.
*/
if (wr_pos == rd_pos) {
if (fifo_sts & SPRD_OUTBOX_FIFO_FULL)
fifo_len = priv->outbox_fifo_depth;
else
fifo_len = 0;
} else if (wr_pos > rd_pos) {
fifo_len = wr_pos - rd_pos;
} else {
fifo_len = priv->outbox_fifo_depth - rd_pos + wr_pos;
}
return fifo_len;
}
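/*
 * Worked example (illustrative, values assumed): with outbox_fifo_depth = 64,
 * rd_pos = 60 and wr_pos = 4 the write pointer has wrapped around, so
 * fifo_len = 64 - 60 + 4 = 8 messages are pending in the outbox FIFO.
 */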
static irqreturn_t do_outbox_isr(void __iomem *base, struct sprd_mbox_priv *priv)
{
struct mbox_chan *chan;
u32 fifo_sts, fifo_len, msg[2];
int i, id;
fifo_sts = readl(base + SPRD_MBOX_FIFO_STS);
fifo_len = sprd_mbox_get_fifo_len(priv, fifo_sts);
if (!fifo_len) {
dev_warn_ratelimited(priv->dev, "spurious outbox interrupt\n");
return IRQ_NONE;
}
for (i = 0; i < fifo_len; i++) {
msg[0] = readl(base + SPRD_MBOX_MSG_LOW);
msg[1] = readl(base + SPRD_MBOX_MSG_HIGH);
id = readl(base + SPRD_MBOX_ID);
chan = &priv->chan[id];
if (chan->cl)
mbox_chan_received_data(chan, (void *)msg);
else
dev_warn_ratelimited(priv->dev,
"message's been dropped at ch[%d]\n", id);
/* Trigger to update outbox FIFO pointer */
writel(0x1, base + SPRD_MBOX_TRIGGER);
}
	/* Clear irq status after reading all messages. */
writel(SPRD_MBOX_IRQ_CLR, base + SPRD_MBOX_IRQ_STS);
return IRQ_HANDLED;
}
static irqreturn_t sprd_mbox_outbox_isr(int irq, void *data)
{
struct sprd_mbox_priv *priv = data;
return do_outbox_isr(priv->outbox_base, priv);
}
static irqreturn_t sprd_mbox_supp_isr(int irq, void *data)
{
struct sprd_mbox_priv *priv = data;
return do_outbox_isr(priv->supp_base, priv);
}
static irqreturn_t sprd_mbox_inbox_isr(int irq, void *data)
{
struct sprd_mbox_priv *priv = data;
struct mbox_chan *chan;
u32 fifo_sts, send_sts, busy, id;
fifo_sts = readl(priv->inbox_base + SPRD_MBOX_FIFO_STS);
/* Get the inbox data delivery status */
send_sts = (fifo_sts & SPRD_INBOX_FIFO_DELIVER_MASK) >>
SPRD_INBOX_FIFO_DELIVER_SHIFT;
if (!send_sts) {
dev_warn_ratelimited(priv->dev, "spurious inbox interrupt\n");
return IRQ_NONE;
}
while (send_sts) {
id = __ffs(send_sts);
send_sts &= (send_sts - 1);
chan = &priv->chan[id];
		/*
		 * Check if the message has been fetched by the remote
		 * target; if so, the transmission is complete.
		 */
busy = fifo_sts & SPRD_INBOX_FIFO_BUSY_MASK;
if (!(busy & BIT(id)))
mbox_chan_txdone(chan, 0);
}
/* Clear FIFO delivery and overflow status */
writel(fifo_sts &
	       (SPRD_INBOX_FIFO_DELIVER_MASK | SPRD_INBOX_FIFO_OVERFLOW_MASK),
priv->inbox_base + SPRD_MBOX_FIFO_RST);
/* Clear irq status */
writel(SPRD_MBOX_IRQ_CLR, priv->inbox_base + SPRD_MBOX_IRQ_STS);
return IRQ_HANDLED;
}
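/*
 * sprd_mbox_send_data - post one 8-byte message to the inbox FIFO.
 *
 * A message is always two 32-bit words: the low and high words are written
 * to SPRD_MBOX_MSG_LOW/HIGH, the channel number (which the driver uses as
 * the target core id) is written to SPRD_MBOX_ID, and writing 1 to
 * SPRD_MBOX_TRIGGER kicks delivery to the remote core.
 */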
static int sprd_mbox_send_data(struct mbox_chan *chan, void *msg)
{
struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
unsigned long id = (unsigned long)chan->con_priv;
u32 *data = msg;
	/* Write data into the inbox FIFO; only 8 bytes are supported per message */
writel(data[0], priv->inbox_base + SPRD_MBOX_MSG_LOW);
writel(data[1], priv->inbox_base + SPRD_MBOX_MSG_HIGH);
/* Set target core id */
writel(id, priv->inbox_base + SPRD_MBOX_ID);
/* Trigger remote request */
writel(0x1, priv->inbox_base + SPRD_MBOX_TRIGGER);
return 0;
}
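/*
 * sprd_mbox_flush - busy-wait until the remote core fetches the message.
 *
 * Polls the inbox busy bit for this channel for up to @timeout milliseconds;
 * once the bit clears, the transfer is reported as done via
 * mbox_chan_txdone(). This is typically used by clients that cannot sleep
 * waiting for the inbox delivery interrupt.
 */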
static int sprd_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
unsigned long id = (unsigned long)chan->con_priv;
u32 busy;
timeout = jiffies + msecs_to_jiffies(timeout);
while (time_before(jiffies, timeout)) {
busy = readl(priv->inbox_base + SPRD_MBOX_FIFO_STS) &
SPRD_INBOX_FIFO_BUSY_MASK;
if (!(busy & BIT(id))) {
mbox_chan_txdone(chan, 0);
return 0;
}
udelay(1);
}
return -ETIME;
}
static int sprd_mbox_startup(struct mbox_chan *chan)
{
struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
u32 val;
mutex_lock(&priv->lock);
if (priv->refcnt++ == 0) {
/* Select outbox FIFO mode and reset the outbox FIFO status */
writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST);
/* Enable inbox FIFO overflow and delivery interrupt */
val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK);
val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ);
writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
/* Enable outbox FIFO not empty interrupt */
val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK);
val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
		/* Enable the supplementary outbox in the same way as the fundamental one */
if (priv->supp_base) {
writel(0x0, priv->supp_base + SPRD_MBOX_FIFO_RST);
val = readl(priv->supp_base + SPRD_MBOX_IRQ_MSK);
val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
writel(val, priv->supp_base + SPRD_MBOX_IRQ_MSK);
}
}
mutex_unlock(&priv->lock);
return 0;
}
static void sprd_mbox_shutdown(struct mbox_chan *chan)
{
struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
mutex_lock(&priv->lock);
if (--priv->refcnt == 0) {
/* Disable inbox & outbox interrupt */
writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
if (priv->supp_base)
writel(SPRD_OUTBOX_FIFO_IRQ_MASK,
priv->supp_base + SPRD_MBOX_IRQ_MSK);
}
mutex_unlock(&priv->lock);
}
static const struct mbox_chan_ops sprd_mbox_ops = {
.send_data = sprd_mbox_send_data,
.flush = sprd_mbox_flush,
.startup = sprd_mbox_startup,
.shutdown = sprd_mbox_shutdown,
};
static void sprd_mbox_disable(void *data)
{
struct sprd_mbox_priv *priv = data;
clk_disable_unprepare(priv->clk);
}
static int sprd_mbox_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sprd_mbox_priv *priv;
int ret, inbox_irq, outbox_irq, supp_irq;
unsigned long id, supp;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
mutex_init(&priv->lock);
	/*
	 * The Unisoc mailbox uses an inbox to send messages to the target
	 * core, and one or more outboxes to receive messages from other
	 * cores.
	 *
	 * Thus in general the mailbox controller supplies 2 different
	 * register addresses and IRQ numbers for inbox and outbox.
	 *
	 * If necessary, a supplementary outbox can optionally be enabled,
	 * with an independent FIFO and an extra interrupt.
	 */
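	/*
	 * For example, on SC9863A the match data below is
	 * SPRD_SUPP_INBOX_ID_SC9863A (7), so the supplementary outbox
	 * registers sit at outbox_base + 7 * SPRD_OUTBOX_BASE_SPAN, i.e.
	 * 0x7000 past the fundamental outbox.
	 */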
priv->inbox_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->inbox_base))
return PTR_ERR(priv->inbox_base);
priv->outbox_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(priv->outbox_base))
return PTR_ERR(priv->outbox_base);
priv->clk = devm_clk_get(dev, "enable");
if (IS_ERR(priv->clk)) {
dev_err(dev, "failed to get mailbox clock\n");
return PTR_ERR(priv->clk);
}
ret = clk_prepare_enable(priv->clk);
if (ret)
return ret;
ret = devm_add_action_or_reset(dev, sprd_mbox_disable, priv);
if (ret) {
dev_err(dev, "failed to add mailbox disable action\n");
return ret;
}
inbox_irq = platform_get_irq_byname(pdev, "inbox");
if (inbox_irq < 0)
return inbox_irq;
ret = devm_request_irq(dev, inbox_irq, sprd_mbox_inbox_isr,
IRQF_NO_SUSPEND, dev_name(dev), priv);
if (ret) {
dev_err(dev, "failed to request inbox IRQ: %d\n", ret);
return ret;
}
outbox_irq = platform_get_irq_byname(pdev, "outbox");
if (outbox_irq < 0)
return outbox_irq;
ret = devm_request_irq(dev, outbox_irq, sprd_mbox_outbox_isr,
IRQF_NO_SUSPEND, dev_name(dev), priv);
if (ret) {
dev_err(dev, "failed to request outbox IRQ: %d\n", ret);
return ret;
}
/* Supplementary outbox IRQ is optional */
supp_irq = platform_get_irq_byname(pdev, "supp-outbox");
if (supp_irq > 0) {
ret = devm_request_irq(dev, supp_irq, sprd_mbox_supp_isr,
IRQF_NO_SUSPEND, dev_name(dev), priv);
if (ret) {
dev_err(dev, "failed to request outbox IRQ: %d\n", ret);
return ret;
}
supp = (unsigned long) of_device_get_match_data(dev);
if (!supp) {
dev_err(dev, "no supplementary outbox specified\n");
return -ENODEV;
}
priv->supp_base = priv->outbox_base + (SPRD_OUTBOX_BASE_SPAN * supp);
}
/* Get the default outbox FIFO depth */
priv->outbox_fifo_depth =
readl(priv->outbox_base + SPRD_MBOX_FIFO_DEPTH) + 1;
priv->mbox.dev = dev;
priv->mbox.chans = &priv->chan[0];
priv->mbox.num_chans = SPRD_MBOX_CHAN_MAX;
priv->mbox.ops = &sprd_mbox_ops;
priv->mbox.txdone_irq = true;
for (id = 0; id < SPRD_MBOX_CHAN_MAX; id++)
priv->chan[id].con_priv = (void *)id;
ret = devm_mbox_controller_register(dev, &priv->mbox);
if (ret) {
dev_err(dev, "failed to register mailbox: %d\n", ret);
return ret;
}
return 0;
}
static const struct of_device_id sprd_mbox_of_match[] = {
{ .compatible = "sprd,sc9860-mailbox" },
{ .compatible = "sprd,sc9863a-mailbox",
.data = (void *)SPRD_SUPP_INBOX_ID_SC9863A },
{ },
};
MODULE_DEVICE_TABLE(of, sprd_mbox_of_match);
static struct platform_driver sprd_mbox_driver = {
.driver = {
.name = "sprd-mailbox",
.of_match_table = sprd_mbox_of_match,
},
.probe = sprd_mbox_probe,
};
module_platform_driver(sprd_mbox_driver);
MODULE_AUTHOR("Baolin Wang <[email protected]>");
MODULE_DESCRIPTION("Spreadtrum mailbox driver");
MODULE_LICENSE("GPL v2");
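/*
 * Illustrative only (not part of this driver): a minimal mailbox-client
 * sketch showing how the 8-byte message format above would be used. The
 * client names and callback are hypothetical; only the generic client API
 * (mbox_request_channel(), mbox_send_message()) is assumed.
 *
 *	struct mbox_client cl = {
 *		.dev = dev,
 *		.tx_block = false,
 *		.rx_callback = my_rx_callback,	// gets u32 msg[2] per message
 *	};
 *	struct mbox_chan *chan = mbox_request_channel(&cl, 0);
 *	u32 msg[2] = { 0x12345678, 0x9abcdef0 };
 *
 *	if (!IS_ERR(chan))
 *		mbox_send_message(chan, msg);	// lands in sprd_mbox_send_data()
 */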
|
linux-master
|
drivers/mailbox/sprd-mailbox.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx Inter Processor Interrupt(IPI) Mailbox Driver
*
* Copyright (C) 2018 Xilinx, Inc.
*/
#include <linux/arm-smccc.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/zynqmp-ipi-message.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
/* IPI agent ID any */
#define IPI_ID_ANY 0xFFUL
/* indicate if ZynqMP IPI mailbox driver uses SMC calls or HVC calls */
#define USE_SMC 0
#define USE_HVC 1
/* Default IPI SMC function IDs */
#define SMC_IPI_MAILBOX_OPEN 0x82001000U
#define SMC_IPI_MAILBOX_RELEASE 0x82001001U
#define SMC_IPI_MAILBOX_STATUS_ENQUIRY 0x82001002U
#define SMC_IPI_MAILBOX_NOTIFY 0x82001003U
#define SMC_IPI_MAILBOX_ACK 0x82001004U
#define SMC_IPI_MAILBOX_ENABLE_IRQ 0x82001005U
#define SMC_IPI_MAILBOX_DISABLE_IRQ 0x82001006U
/* IPI SMC Macros */
#define IPI_SMC_ENQUIRY_DIRQ_MASK 0x00000001UL /* Flag to indicate if
* notification interrupt
* to be disabled.
*/
#define IPI_SMC_ACK_EIRQ_MASK 0x00000001UL /* Flag to indicate if
* notification interrupt
* to be enabled.
*/
/* IPI mailbox status */
#define IPI_MB_STATUS_IDLE 0
#define IPI_MB_STATUS_SEND_PENDING 1
#define IPI_MB_STATUS_RECV_PENDING 2
#define IPI_MB_CHNL_TX 0 /* IPI mailbox TX channel */
#define IPI_MB_CHNL_RX 1 /* IPI mailbox RX channel */
/**
* struct zynqmp_ipi_mchan - Description of a Xilinx ZynqMP IPI mailbox channel
* @is_opened: indicate if the IPI channel is opened
* @req_buf: local to remote request buffer start address
* @resp_buf: local to remote response buffer start address
* @req_buf_size: request buffer size
* @resp_buf_size: response buffer size
* @rx_buf: receive buffer to pass received message to client
* @chan_type: channel type
*/
struct zynqmp_ipi_mchan {
int is_opened;
void __iomem *req_buf;
void __iomem *resp_buf;
void *rx_buf;
size_t req_buf_size;
size_t resp_buf_size;
unsigned int chan_type;
};
/**
* struct zynqmp_ipi_mbox - Description of a ZynqMP IPI mailbox
* platform data.
* @pdata: pointer to the IPI private data
* @dev: device pointer corresponding to the Xilinx ZynqMP
* IPI mailbox
* @remote_id: remote IPI agent ID
* @mbox: mailbox Controller
* @mchans: array for channels, tx channel and rx channel.
* @irq: IPI agent interrupt ID
*/
struct zynqmp_ipi_mbox {
struct zynqmp_ipi_pdata *pdata;
struct device dev;
u32 remote_id;
struct mbox_controller mbox;
struct zynqmp_ipi_mchan mchans[2];
};
/**
 * struct zynqmp_ipi_pdata - Description of a ZynqMP IPI agent's platform data.
*
* @dev: device pointer corresponding to the Xilinx ZynqMP
* IPI agent
* @irq: IPI agent interrupt ID
* @method: IPI SMC or HVC is going to be used
* @local_id: local IPI agent ID
* @num_mboxes: number of mailboxes of this IPI agent
* @ipi_mboxes: IPI mailboxes of this IPI agent
*/
struct zynqmp_ipi_pdata {
struct device *dev;
int irq;
unsigned int method;
u32 local_id;
int num_mboxes;
struct zynqmp_ipi_mbox ipi_mboxes[];
};
static struct device_driver zynqmp_ipi_mbox_driver = {
.owner = THIS_MODULE,
.name = "zynqmp-ipi-mbox",
};
static void zynqmp_ipi_fw_call(struct zynqmp_ipi_mbox *ipi_mbox,
unsigned long a0, unsigned long a3,
struct arm_smccc_res *res)
{
struct zynqmp_ipi_pdata *pdata = ipi_mbox->pdata;
unsigned long a1, a2;
a1 = pdata->local_id;
a2 = ipi_mbox->remote_id;
if (pdata->method == USE_SMC)
arm_smccc_smc(a0, a1, a2, a3, 0, 0, 0, 0, res);
else
arm_smccc_hvc(a0, a1, a2, a3, 0, 0, 0, 0, res);
}
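/*
 * Example of the resulting call for a status enquiry on the RX path (values
 * taken from the definitions above):
 *   a0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY (0x82001002)
 *   a1 = pdata->local_id, a2 = ipi_mbox->remote_id
 *   a3 = IPI_SMC_ENQUIRY_DIRQ_MASK to also disable the notification IRQ
 * The 32-bit status/return code comes back in res.a0.
 */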
/**
* zynqmp_ipi_interrupt - Interrupt handler for IPI notification
*
* @irq: Interrupt number
* @data: ZynqMP IPI mailbox platform data.
*
* Return: -EINVAL if there is no instance
* IRQ_NONE if the interrupt is not ours.
* IRQ_HANDLED if the rx interrupt was successfully handled.
*/
static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data)
{
struct zynqmp_ipi_pdata *pdata = data;
struct mbox_chan *chan;
struct zynqmp_ipi_mbox *ipi_mbox;
struct zynqmp_ipi_mchan *mchan;
struct zynqmp_ipi_message *msg;
u64 arg0, arg3;
struct arm_smccc_res res;
int ret, i, status = IRQ_NONE;
(void)irq;
arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
arg3 = IPI_SMC_ENQUIRY_DIRQ_MASK;
for (i = 0; i < pdata->num_mboxes; i++) {
ipi_mbox = &pdata->ipi_mboxes[i];
mchan = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
chan = &ipi_mbox->mbox.chans[IPI_MB_CHNL_RX];
zynqmp_ipi_fw_call(ipi_mbox, arg0, arg3, &res);
ret = (int)(res.a0 & 0xFFFFFFFF);
if (ret > 0 && ret & IPI_MB_STATUS_RECV_PENDING) {
if (mchan->is_opened) {
msg = mchan->rx_buf;
msg->len = mchan->req_buf_size;
memcpy_fromio(msg->data, mchan->req_buf,
msg->len);
mbox_chan_received_data(chan, (void *)msg);
status = IRQ_HANDLED;
}
}
}
return status;
}
/**
* zynqmp_ipi_peek_data - Peek to see if there are any rx messages.
*
* @chan: Channel Pointer
*
* Return: 'true' if there is pending rx data, 'false' if there is none.
*/
static bool zynqmp_ipi_peek_data(struct mbox_chan *chan)
{
struct device *dev = chan->mbox->dev;
struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
struct zynqmp_ipi_mchan *mchan = chan->con_priv;
int ret;
u64 arg0;
struct arm_smccc_res res;
if (WARN_ON(!ipi_mbox)) {
dev_err(dev, "no platform drv data??\n");
return false;
}
arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
ret = (int)(res.a0 & 0xFFFFFFFF);
if (mchan->chan_type == IPI_MB_CHNL_TX) {
		/* TX channel: check if the message has been acked by the
		 * remote; if so, a response is available.
		 */
if (ret < 0 || ret & IPI_MB_STATUS_SEND_PENDING)
return false;
else
return true;
} else if (ret > 0 && ret & IPI_MB_STATUS_RECV_PENDING) {
		/* RX channel, check if a message has arrived. */
return true;
}
return false;
}
/**
* zynqmp_ipi_last_tx_done - See if the last tx message is sent
*
* @chan: Channel pointer
*
 * Return: 'true' if there is no pending tx data, 'false' if there is any.
*/
static bool zynqmp_ipi_last_tx_done(struct mbox_chan *chan)
{
struct device *dev = chan->mbox->dev;
struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
struct zynqmp_ipi_mchan *mchan = chan->con_priv;
int ret;
u64 arg0;
struct arm_smccc_res res;
if (WARN_ON(!ipi_mbox)) {
dev_err(dev, "no platform drv data??\n");
return false;
}
if (mchan->chan_type == IPI_MB_CHNL_TX) {
		/* We only need to check whether the message has been taken
		 * by the remote on the TX channel
		 */
arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
/* Check the SMC call status, a0 of the result */
ret = (int)(res.a0 & 0xFFFFFFFF);
if (ret < 0 || ret & IPI_MB_STATUS_SEND_PENDING)
return false;
return true;
}
/* Always true for the response message in RX channel */
return true;
}
/**
* zynqmp_ipi_send_data - Send data
*
* @chan: Channel Pointer
* @data: Message Pointer
*
 * Return: 0 if all goes well, else an appropriate error code.
*/
static int zynqmp_ipi_send_data(struct mbox_chan *chan, void *data)
{
struct device *dev = chan->mbox->dev;
struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
struct zynqmp_ipi_mchan *mchan = chan->con_priv;
struct zynqmp_ipi_message *msg = data;
u64 arg0;
struct arm_smccc_res res;
if (WARN_ON(!ipi_mbox)) {
dev_err(dev, "no platform drv data??\n");
return -EINVAL;
}
if (mchan->chan_type == IPI_MB_CHNL_TX) {
/* Send request message */
if (msg && msg->len > mchan->req_buf_size) {
dev_err(dev, "channel %d message length %u > max %lu\n",
mchan->chan_type, (unsigned int)msg->len,
mchan->req_buf_size);
return -EINVAL;
}
if (msg && msg->len)
memcpy_toio(mchan->req_buf, msg->data, msg->len);
/* Kick IPI mailbox to send message */
arg0 = SMC_IPI_MAILBOX_NOTIFY;
zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
} else {
/* Send response message */
if (msg && msg->len > mchan->resp_buf_size) {
dev_err(dev, "channel %d message length %u > max %lu\n",
mchan->chan_type, (unsigned int)msg->len,
mchan->resp_buf_size);
return -EINVAL;
}
if (msg && msg->len)
memcpy_toio(mchan->resp_buf, msg->data, msg->len);
arg0 = SMC_IPI_MAILBOX_ACK;
zynqmp_ipi_fw_call(ipi_mbox, arg0, IPI_SMC_ACK_EIRQ_MASK,
&res);
}
return 0;
}
/**
* zynqmp_ipi_startup - Startup the IPI channel
*
* @chan: Channel pointer
*
 * Return: 0 if all goes well, else the corresponding error code
*/
static int zynqmp_ipi_startup(struct mbox_chan *chan)
{
struct device *dev = chan->mbox->dev;
struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
struct zynqmp_ipi_mchan *mchan = chan->con_priv;
u64 arg0;
struct arm_smccc_res res;
int ret = 0;
unsigned int nchan_type;
if (mchan->is_opened)
return 0;
	/* If the other channel has not been opened, open the IPI mailbox */
nchan_type = (mchan->chan_type + 1) % 2;
if (!ipi_mbox->mchans[nchan_type].is_opened) {
arg0 = SMC_IPI_MAILBOX_OPEN;
zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
/* Check the SMC call status, a0 of the result */
ret = (int)(res.a0 & 0xFFFFFFFF);
if (ret < 0) {
dev_err(dev, "SMC to open the IPI channel failed.\n");
return ret;
}
ret = 0;
}
/* If it is RX channel, enable the IPI notification interrupt */
if (mchan->chan_type == IPI_MB_CHNL_RX) {
arg0 = SMC_IPI_MAILBOX_ENABLE_IRQ;
zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
}
mchan->is_opened = 1;
return ret;
}
/**
* zynqmp_ipi_shutdown - Shutdown the IPI channel
*
* @chan: Channel pointer
*/
static void zynqmp_ipi_shutdown(struct mbox_chan *chan)
{
struct device *dev = chan->mbox->dev;
struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
struct zynqmp_ipi_mchan *mchan = chan->con_priv;
u64 arg0;
struct arm_smccc_res res;
unsigned int chan_type;
if (!mchan->is_opened)
return;
/* If it is RX channel, disable notification interrupt */
chan_type = mchan->chan_type;
if (chan_type == IPI_MB_CHNL_RX) {
arg0 = SMC_IPI_MAILBOX_DISABLE_IRQ;
zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
}
/* Release IPI mailbox if no other channel is opened */
chan_type = (chan_type + 1) % 2;
if (!ipi_mbox->mchans[chan_type].is_opened) {
arg0 = SMC_IPI_MAILBOX_RELEASE;
zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
}
mchan->is_opened = 0;
}
/* ZynqMP IPI mailbox operations */
static const struct mbox_chan_ops zynqmp_ipi_chan_ops = {
.startup = zynqmp_ipi_startup,
.shutdown = zynqmp_ipi_shutdown,
.peek_data = zynqmp_ipi_peek_data,
.last_tx_done = zynqmp_ipi_last_tx_done,
.send_data = zynqmp_ipi_send_data,
};
/**
* zynqmp_ipi_of_xlate - Translate of phandle to IPI mailbox channel
*
* @mbox: mailbox controller pointer
* @p: phandle pointer
*
* Return: Mailbox channel, else return error pointer.
*/
static struct mbox_chan *zynqmp_ipi_of_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *p)
{
struct mbox_chan *chan;
struct device *dev = mbox->dev;
unsigned int chan_type;
/* Only supports TX and RX channels */
chan_type = p->args[0];
if (chan_type != IPI_MB_CHNL_TX && chan_type != IPI_MB_CHNL_RX) {
dev_err(dev, "req chnl failure: invalid chnl type %u.\n",
chan_type);
return ERR_PTR(-EINVAL);
}
chan = &mbox->chans[chan_type];
return chan;
}
static const struct of_device_id zynqmp_ipi_of_match[] = {
{ .compatible = "xlnx,zynqmp-ipi-mailbox" },
{},
};
MODULE_DEVICE_TABLE(of, zynqmp_ipi_of_match);
/**
* zynqmp_ipi_mbox_get_buf_res - Get buffer resource from the IPI dev node
*
* @node: IPI mbox device child node
* @name: name of the IPI buffer
* @res: pointer to where the resource information will be stored.
*
* Return: 0 for success, negative value for failure
*/
static int zynqmp_ipi_mbox_get_buf_res(struct device_node *node,
const char *name,
struct resource *res)
{
int ret, index;
index = of_property_match_string(node, "reg-names", name);
if (index >= 0) {
ret = of_address_to_resource(node, index, res);
if (ret < 0)
return -EINVAL;
return 0;
}
return -ENODEV;
}
/**
* zynqmp_ipi_mbox_dev_release() - release the existence of a ipi mbox dev
*
* @dev: the ipi mailbox device
*
 * This avoids the kernel warning about a missing device release() function.
*
*/
static void zynqmp_ipi_mbox_dev_release(struct device *dev)
{
(void)dev;
}
/**
* zynqmp_ipi_mbox_probe - probe IPI mailbox resource from device node
*
* @ipi_mbox: pointer to IPI mailbox private data structure
* @node: IPI mailbox device node
*
* Return: 0 for success, negative value for failure
*/
static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
struct device_node *node)
{
struct zynqmp_ipi_mchan *mchan;
struct mbox_chan *chans;
struct mbox_controller *mbox;
struct resource res;
struct device *dev, *mdev;
const char *name;
int ret;
dev = ipi_mbox->pdata->dev;
/* Initialize dev for IPI mailbox */
ipi_mbox->dev.parent = dev;
ipi_mbox->dev.release = NULL;
ipi_mbox->dev.of_node = node;
dev_set_name(&ipi_mbox->dev, "%s", of_node_full_name(node));
dev_set_drvdata(&ipi_mbox->dev, ipi_mbox);
ipi_mbox->dev.release = zynqmp_ipi_mbox_dev_release;
ipi_mbox->dev.driver = &zynqmp_ipi_mbox_driver;
ret = device_register(&ipi_mbox->dev);
if (ret) {
dev_err(dev, "Failed to register ipi mbox dev.\n");
put_device(&ipi_mbox->dev);
return ret;
}
mdev = &ipi_mbox->dev;
mchan = &ipi_mbox->mchans[IPI_MB_CHNL_TX];
name = "local_request_region";
ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
if (!ret) {
mchan->req_buf_size = resource_size(&res);
mchan->req_buf = devm_ioremap(mdev, res.start,
mchan->req_buf_size);
if (!mchan->req_buf) {
dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
return -ENOMEM;
}
} else if (ret != -ENODEV) {
dev_err(mdev, "Unmatched resource %s, %d.\n", name, ret);
return ret;
}
name = "remote_response_region";
ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
if (!ret) {
mchan->resp_buf_size = resource_size(&res);
mchan->resp_buf = devm_ioremap(mdev, res.start,
mchan->resp_buf_size);
if (!mchan->resp_buf) {
dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
return -ENOMEM;
}
} else if (ret != -ENODEV) {
dev_err(mdev, "Unmatched resource %s.\n", name);
return ret;
}
mchan->rx_buf = devm_kzalloc(mdev,
mchan->resp_buf_size +
sizeof(struct zynqmp_ipi_message),
GFP_KERNEL);
if (!mchan->rx_buf)
return -ENOMEM;
mchan = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
name = "remote_request_region";
ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
if (!ret) {
mchan->req_buf_size = resource_size(&res);
mchan->req_buf = devm_ioremap(mdev, res.start,
mchan->req_buf_size);
if (!mchan->req_buf) {
dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
return -ENOMEM;
}
} else if (ret != -ENODEV) {
dev_err(mdev, "Unmatched resource %s.\n", name);
return ret;
}
name = "local_response_region";
ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
if (!ret) {
mchan->resp_buf_size = resource_size(&res);
mchan->resp_buf = devm_ioremap(mdev, res.start,
mchan->resp_buf_size);
if (!mchan->resp_buf) {
dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
return -ENOMEM;
}
} else if (ret != -ENODEV) {
dev_err(mdev, "Unmatched resource %s.\n", name);
return ret;
}
mchan->rx_buf = devm_kzalloc(mdev,
mchan->resp_buf_size +
sizeof(struct zynqmp_ipi_message),
GFP_KERNEL);
if (!mchan->rx_buf)
return -ENOMEM;
/* Get the IPI remote agent ID */
ret = of_property_read_u32(node, "xlnx,ipi-id", &ipi_mbox->remote_id);
if (ret < 0) {
dev_err(dev, "No IPI remote ID is specified.\n");
return ret;
}
mbox = &ipi_mbox->mbox;
mbox->dev = mdev;
mbox->ops = &zynqmp_ipi_chan_ops;
mbox->num_chans = 2;
mbox->txdone_irq = false;
mbox->txdone_poll = true;
mbox->txpoll_period = 5;
mbox->of_xlate = zynqmp_ipi_of_xlate;
chans = devm_kzalloc(mdev, 2 * sizeof(*chans), GFP_KERNEL);
if (!chans)
return -ENOMEM;
mbox->chans = chans;
chans[IPI_MB_CHNL_TX].con_priv = &ipi_mbox->mchans[IPI_MB_CHNL_TX];
chans[IPI_MB_CHNL_RX].con_priv = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
ipi_mbox->mchans[IPI_MB_CHNL_TX].chan_type = IPI_MB_CHNL_TX;
ipi_mbox->mchans[IPI_MB_CHNL_RX].chan_type = IPI_MB_CHNL_RX;
ret = devm_mbox_controller_register(mdev, mbox);
if (ret)
dev_err(mdev,
"Failed to register mbox_controller(%d)\n", ret);
else
dev_info(mdev,
"Registered ZynqMP IPI mbox with TX/RX channels.\n");
return ret;
}
/**
* zynqmp_ipi_free_mboxes - Free IPI mailboxes devices
*
* @pdata: IPI private data
*/
static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata)
{
struct zynqmp_ipi_mbox *ipi_mbox;
int i;
	i = pdata->num_mboxes - 1;
	for (; i >= 0; i--) {
ipi_mbox = &pdata->ipi_mboxes[i];
if (ipi_mbox->dev.parent) {
mbox_controller_unregister(&ipi_mbox->mbox);
if (device_is_registered(&ipi_mbox->dev))
device_unregister(&ipi_mbox->dev);
}
}
}
static int zynqmp_ipi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *nc, *np = pdev->dev.of_node;
struct zynqmp_ipi_pdata *pdata;
struct zynqmp_ipi_mbox *mbox;
int num_mboxes, ret = -EINVAL;
num_mboxes = of_get_available_child_count(np);
if (num_mboxes == 0) {
dev_err(dev, "mailbox nodes not available\n");
return -EINVAL;
}
pdata = devm_kzalloc(dev, struct_size(pdata, ipi_mboxes, num_mboxes),
GFP_KERNEL);
if (!pdata)
return -ENOMEM;
pdata->dev = dev;
/* Get the IPI local agents ID */
ret = of_property_read_u32(np, "xlnx,ipi-id", &pdata->local_id);
if (ret < 0) {
dev_err(dev, "No IPI local ID is specified.\n");
return ret;
}
pdata->num_mboxes = num_mboxes;
mbox = pdata->ipi_mboxes;
for_each_available_child_of_node(np, nc) {
mbox->pdata = pdata;
ret = zynqmp_ipi_mbox_probe(mbox, nc);
if (ret) {
of_node_put(nc);
dev_err(dev, "failed to probe subdev.\n");
ret = -EINVAL;
goto free_mbox_dev;
}
mbox++;
}
/* IPI IRQ */
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto free_mbox_dev;
pdata->irq = ret;
ret = devm_request_irq(dev, pdata->irq, zynqmp_ipi_interrupt,
IRQF_SHARED, dev_name(dev), pdata);
if (ret) {
dev_err(dev, "IRQ %d is not requested successfully.\n",
pdata->irq);
goto free_mbox_dev;
}
platform_set_drvdata(pdev, pdata);
return ret;
free_mbox_dev:
zynqmp_ipi_free_mboxes(pdata);
return ret;
}
static int zynqmp_ipi_remove(struct platform_device *pdev)
{
struct zynqmp_ipi_pdata *pdata;
pdata = platform_get_drvdata(pdev);
zynqmp_ipi_free_mboxes(pdata);
return 0;
}
static struct platform_driver zynqmp_ipi_driver = {
.probe = zynqmp_ipi_probe,
.remove = zynqmp_ipi_remove,
.driver = {
.name = "zynqmp-ipi",
.of_match_table = of_match_ptr(zynqmp_ipi_of_match),
},
};
static int __init zynqmp_ipi_init(void)
{
return platform_driver_register(&zynqmp_ipi_driver);
}
subsys_initcall(zynqmp_ipi_init);
static void __exit zynqmp_ipi_exit(void)
{
platform_driver_unregister(&zynqmp_ipi_driver);
}
module_exit(zynqmp_ipi_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Xilinx ZynqMP IPI Mailbox driver");
MODULE_AUTHOR("Xilinx Inc.");
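/*
 * Illustrative only (not part of this driver): a client sends a request by
 * wrapping its payload in a struct zynqmp_ipi_message and posting it on the
 * TX channel. The names below are hypothetical and error handling is
 * omitted; only the generic client API and the zynqmp_ipi_message layout
 * (size_t len followed by u8 data[]) are assumed.
 *
 *	struct mbox_chan *tx = mbox_request_channel_byname(&cl, "tx");
 *	u8 buf[sizeof(struct zynqmp_ipi_message) + 32];
 *	struct zynqmp_ipi_message *msg = (struct zynqmp_ipi_message *)buf;
 *
 *	msg->len = 32;
 *	memcpy(msg->data, payload, msg->len);
 *	mbox_send_message(tx, msg);	// copied to req_buf, then SMC notify
 */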
|
linux-master
|
drivers/mailbox/zynqmp-ipi-mailbox.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2016 Broadcom
*/
/*
* Broadcom PDC Mailbox Driver
* The PDC provides a ring based programming interface to one or more hardware
* offload engines. For example, the PDC driver works with both SPU-M and SPU2
* cryptographic offload hardware. In some chips the PDC is referred to as MDE,
* and in others the FA2/FA+ hardware is used with this PDC driver.
*
* The PDC driver registers with the Linux mailbox framework as a mailbox
* controller, once for each PDC instance. Ring 0 for each PDC is registered as
* a mailbox channel. The PDC driver uses interrupts to determine when data
* transfers to and from an offload engine are complete. The PDC driver uses
* threaded IRQs so that response messages are handled outside of interrupt
* context.
*
* The PDC driver allows multiple messages to be pending in the descriptor
* rings. The tx_msg_start descriptor index indicates where the last message
* starts. The txin_numd value at this index indicates how many descriptor
* indexes make up the message. Similar state is kept on the receive side. When
* an rx interrupt indicates a response is ready, the PDC driver processes numd
* descriptors from the tx and rx ring, thus processing one response at a time.
*/
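/*
 * Worked example of the bookkeeping described above: a request whose source
 * scatterlist maps to 3 tx descriptors starting at ring index 10 records
 * tx_msg_start = 10 and txin_numd[10] = 3. When the rx interrupt signals the
 * matching response, pdc_receive_one() reclaims descriptors 10..12 and
 * advances txin to 13. The rx side keeps the same kind of state in
 * rx_ctx[].rxin_numd.
 */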
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#define PDC_SUCCESS 0
#define RING_ENTRY_SIZE sizeof(struct dma64dd)
/* # entries in PDC dma ring */
#define PDC_RING_ENTRIES 512
/*
* Minimum number of ring descriptor entries that must be free to tell mailbox
* framework that it can submit another request
*/
#define PDC_RING_SPACE_MIN 15
#define PDC_RING_SIZE (PDC_RING_ENTRIES * RING_ENTRY_SIZE)
/* Rings are 8k aligned */
#define RING_ALIGN_ORDER 13
#define RING_ALIGN BIT(RING_ALIGN_ORDER)
#define RX_BUF_ALIGN_ORDER 5
#define RX_BUF_ALIGN BIT(RX_BUF_ALIGN_ORDER)
/* descriptor bumping macros */
#define XXD(x, max_mask) ((x) & (max_mask))
#define TXD(x, max_mask) XXD((x), (max_mask))
#define RXD(x, max_mask) XXD((x), (max_mask))
#define NEXTTXD(i, max_mask) TXD((i) + 1, (max_mask))
#define PREVTXD(i, max_mask) TXD((i) - 1, (max_mask))
#define NEXTRXD(i, max_mask) RXD((i) + 1, (max_mask))
#define PREVRXD(i, max_mask) RXD((i) - 1, (max_mask))
#define NTXDACTIVE(h, t, max_mask) TXD((t) - (h), (max_mask))
#define NRXDACTIVE(h, t, max_mask) RXD((t) - (h), (max_mask))
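/*
 * The bumping macros rely on the ring sizes being powers of 2 so that the
 * masks implement modular arithmetic. For example, with ntxpost = 511,
 * txin = 500 and txout = 10, NTXDACTIVE(500, 10, 511) evaluates to
 * (10 - 500) & 511 = 22 in-flight tx descriptors.
 */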
/* Length of BCM header at start of SPU msg, in bytes */
#define BCM_HDR_LEN 8
/*
* PDC driver reserves ringset 0 on each SPU for its own use. The driver does
* not currently support use of multiple ringsets on a single PDC engine.
*/
#define PDC_RINGSET 0
/*
* Interrupt mask and status definitions. Enable interrupts for tx and rx on
* ring 0
*/
#define PDC_RCVINT_0 (16 + PDC_RINGSET)
#define PDC_RCVINTEN_0 BIT(PDC_RCVINT_0)
#define PDC_INTMASK (PDC_RCVINTEN_0)
#define PDC_LAZY_FRAMECOUNT 1
#define PDC_LAZY_TIMEOUT 10000
#define PDC_LAZY_INT (PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24))
#define PDC_INTMASK_OFFSET 0x24
#define PDC_INTSTATUS_OFFSET 0x20
#define PDC_RCVLAZY0_OFFSET (0x30 + 4 * PDC_RINGSET)
#define FA_RCVLAZY0_OFFSET 0x100
/*
* For SPU2, configure MDE_CKSUM_CONTROL to write 17 bytes of metadata
* before frame
*/
#define PDC_SPU2_RESP_HDR_LEN 17
#define PDC_CKSUM_CTRL BIT(27)
#define PDC_CKSUM_CTRL_OFFSET 0x400
#define PDC_SPUM_RESP_HDR_LEN 32
/*
* Sets the following bits for write to transmit control reg:
* 11 - PtyChkDisable - parity check is disabled
* 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
*/
#define PDC_TX_CTL 0x000C0800
/* Bit in tx control reg to enable tx channel */
#define PDC_TX_ENABLE 0x1
/*
* Sets the following bits for write to receive control reg:
* 7:1 - RcvOffset - size in bytes of status region at start of rx frame buf
* 9 - SepRxHdrDescEn - place start of new frames only in descriptors
* that have StartOfFrame set
* 10 - OflowContinue - on rx FIFO overflow, clear rx fifo, discard all
* remaining bytes in current frame, report error
* in rx frame status for current frame
* 11 - PtyChkDisable - parity check is disabled
* 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
*/
#define PDC_RX_CTL 0x000C0E00
/* Bit in rx control reg to enable rx channel */
#define PDC_RX_ENABLE 0x1
#define CRYPTO_D64_RS0_CD_MASK ((PDC_RING_ENTRIES * RING_ENTRY_SIZE) - 1)
/* descriptor flags */
#define D64_CTRL1_EOT BIT(28) /* end of descriptor table */
#define D64_CTRL1_IOC BIT(29) /* interrupt on complete */
#define D64_CTRL1_EOF BIT(30) /* end of frame */
#define D64_CTRL1_SOF BIT(31) /* start of frame */
#define RX_STATUS_OVERFLOW 0x00800000
#define RX_STATUS_LEN 0x0000FFFF
#define PDC_TXREGS_OFFSET 0x200
#define PDC_RXREGS_OFFSET 0x220
/* Maximum size buffer the DMA engine can handle */
#define PDC_DMA_BUF_MAX 16384
enum pdc_hw {
FA_HW, /* FA2/FA+ hardware (i.e. Northstar Plus) */
PDC_HW /* PDC/MDE hardware (i.e. Northstar 2, Pegasus) */
};
struct pdc_dma_map {
void *ctx; /* opaque context associated with frame */
};
/* dma descriptor */
struct dma64dd {
u32 ctrl1; /* misc control bits */
u32 ctrl2; /* buffer count and address extension */
	u32 addrlow;              /* memory address of the data buffer, bits 31:0 */
	u32 addrhigh;             /* memory address of the data buffer, bits 63:32 */
};
/* dma registers per channel(xmt or rcv) */
struct dma64_regs {
u32 control; /* enable, et al */
u32 ptr; /* last descriptor posted to chip */
u32 addrlow; /* descriptor ring base address low 32-bits */
u32 addrhigh; /* descriptor ring base address bits 63:32 */
u32 status0; /* last rx descriptor written by hw */
u32 status1; /* driver does not use */
};
/* cpp contortions to concatenate w/arg prescan */
#ifndef PAD
#define _PADLINE(line) pad ## line
#define _XSTR(line) _PADLINE(line)
#define PAD _XSTR(__LINE__)
#endif /* PAD */
/* dma registers. matches hw layout. */
struct dma64 {
struct dma64_regs dmaxmt; /* dma tx */
u32 PAD[2];
struct dma64_regs dmarcv; /* dma rx */
u32 PAD[2];
};
/* PDC registers */
struct pdc_regs {
u32 devcontrol; /* 0x000 */
u32 devstatus; /* 0x004 */
u32 PAD;
u32 biststatus; /* 0x00c */
u32 PAD[4];
u32 intstatus; /* 0x020 */
u32 intmask; /* 0x024 */
u32 gptimer; /* 0x028 */
u32 PAD;
u32 intrcvlazy_0; /* 0x030 (Only in PDC, not FA2) */
u32 intrcvlazy_1; /* 0x034 (Only in PDC, not FA2) */
u32 intrcvlazy_2; /* 0x038 (Only in PDC, not FA2) */
u32 intrcvlazy_3; /* 0x03c (Only in PDC, not FA2) */
u32 PAD[48];
u32 fa_intrecvlazy; /* 0x100 (Only in FA2, not PDC) */
u32 flowctlthresh; /* 0x104 */
u32 wrrthresh; /* 0x108 */
u32 gmac_idle_cnt_thresh; /* 0x10c */
u32 PAD[4];
u32 ifioaccessaddr; /* 0x120 */
u32 ifioaccessbyte; /* 0x124 */
u32 ifioaccessdata; /* 0x128 */
u32 PAD[21];
u32 phyaccess; /* 0x180 */
u32 PAD;
u32 phycontrol; /* 0x188 */
u32 txqctl; /* 0x18c */
u32 rxqctl; /* 0x190 */
u32 gpioselect; /* 0x194 */
u32 gpio_output_en; /* 0x198 */
u32 PAD; /* 0x19c */
u32 txq_rxq_mem_ctl; /* 0x1a0 */
u32 memory_ecc_status; /* 0x1a4 */
u32 serdes_ctl; /* 0x1a8 */
u32 serdes_status0; /* 0x1ac */
u32 serdes_status1; /* 0x1b0 */
u32 PAD[11]; /* 0x1b4-1dc */
u32 clk_ctl_st; /* 0x1e0 */
u32 hw_war; /* 0x1e4 (Only in PDC, not FA2) */
u32 pwrctl; /* 0x1e8 */
u32 PAD[5];
#define PDC_NUM_DMA_RINGS 4
struct dma64 dmaregs[PDC_NUM_DMA_RINGS]; /* 0x0200 - 0x2fc */
/* more registers follow, but we don't use them */
};
/* structure for allocating/freeing DMA rings */
struct pdc_ring_alloc {
dma_addr_t dmabase; /* DMA address of start of ring */
void *vbase; /* base kernel virtual address of ring */
u32 size; /* ring allocation size in bytes */
};
/*
* context associated with a receive descriptor.
* @rxp_ctx: opaque context associated with frame that starts at each
* rx ring index.
* @dst_sg: Scatterlist used to form reply frames beginning at a given ring
* index. Retained in order to unmap each sg after reply is processed.
* @rxin_numd: Number of rx descriptors associated with the message that starts
* at a descriptor index. Not set for every index. For example,
* if descriptor index i points to a scatterlist with 4 entries,
* then the next three descriptor indexes don't have a value set.
* @resp_hdr: Virtual address of buffer used to catch DMA rx status
* @resp_hdr_daddr: physical address of DMA rx status buffer
*/
struct pdc_rx_ctx {
void *rxp_ctx;
struct scatterlist *dst_sg;
u32 rxin_numd;
void *resp_hdr;
dma_addr_t resp_hdr_daddr;
};
/* PDC state structure */
struct pdc_state {
/* Index of the PDC whose state is in this structure instance */
u8 pdc_idx;
/* Platform device for this PDC instance */
struct platform_device *pdev;
/*
* Each PDC instance has a mailbox controller. PDC receives request
* messages through mailboxes, and sends response messages through the
* mailbox framework.
*/
struct mbox_controller mbc;
unsigned int pdc_irq;
/* tasklet for deferred processing after DMA rx interrupt */
struct tasklet_struct rx_tasklet;
/* Number of bytes of receive status prior to each rx frame */
u32 rx_status_len;
/* Whether a BCM header is prepended to each frame */
bool use_bcm_hdr;
/* Sum of length of BCM header and rx status header */
u32 pdc_resp_hdr_len;
/* The base virtual address of DMA hw registers */
void __iomem *pdc_reg_vbase;
/* Pool for allocation of DMA rings */
struct dma_pool *ring_pool;
/* Pool for allocation of metadata buffers for response messages */
struct dma_pool *rx_buf_pool;
/*
* The base virtual address of DMA tx/rx descriptor rings. Corresponding
* DMA address and size of ring allocation.
*/
struct pdc_ring_alloc tx_ring_alloc;
struct pdc_ring_alloc rx_ring_alloc;
struct pdc_regs *regs; /* start of PDC registers */
struct dma64_regs *txregs_64; /* dma tx engine registers */
struct dma64_regs *rxregs_64; /* dma rx engine registers */
/*
* Arrays of PDC_RING_ENTRIES descriptors
* To use multiple ringsets, this needs to be extended
*/
struct dma64dd *txd_64; /* tx descriptor ring */
struct dma64dd *rxd_64; /* rx descriptor ring */
/* descriptor ring sizes */
u32 ntxd; /* # tx descriptors */
u32 nrxd; /* # rx descriptors */
u32 nrxpost; /* # rx buffers to keep posted */
u32 ntxpost; /* max number of tx buffers that can be posted */
/*
* Index of next tx descriptor to reclaim. That is, the descriptor
* index of the oldest tx buffer for which the host has yet to process
* the corresponding response.
*/
u32 txin;
/*
	 * Index of the first transmit descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the txin_numd count for a message. Updated to txout when the host
	 * starts a new sequence of tx buffers for a new message.
*/
u32 tx_msg_start;
/* Index of next tx descriptor to post. */
u32 txout;
/*
* Number of tx descriptors associated with the message that starts
* at this tx descriptor index.
*/
u32 txin_numd[PDC_RING_ENTRIES];
/*
* Index of next rx descriptor to reclaim. This is the index of
* the next descriptor whose data has yet to be processed by the host.
*/
u32 rxin;
/*
* Index of the first receive descriptor for the sequence of
* message fragments currently under construction. Used to build up
* the rxin_numd count for a message. Updated to rxout when the host
* starts a new sequence of rx buffers for a new message.
*/
u32 rx_msg_start;
/*
* Saved value of current hardware rx descriptor index.
* The last rx buffer written by the hw is the index previous to
* this one.
*/
u32 last_rx_curr;
/* Index of next rx descriptor to post. */
u32 rxout;
struct pdc_rx_ctx rx_ctx[PDC_RING_ENTRIES];
/*
* Scatterlists used to form request and reply frames beginning at a
* given ring index. Retained in order to unmap each sg after reply
* is processed
*/
struct scatterlist *src_sg[PDC_RING_ENTRIES];
/* counters */
u32 pdc_requests; /* number of request messages submitted */
u32 pdc_replies; /* number of reply messages received */
u32 last_tx_not_done; /* too few tx descriptors to indicate done */
u32 tx_ring_full; /* unable to accept msg because tx ring full */
u32 rx_ring_full; /* unable to accept msg because rx ring full */
u32 txnobuf; /* unable to create tx descriptor */
u32 rxnobuf; /* unable to create rx descriptor */
u32 rx_oflow; /* count of rx overflows */
/* hardware type - FA2 or PDC/MDE */
enum pdc_hw hw_type;
};
/* Global variables */
struct pdc_globals {
/* Actual number of SPUs in hardware, as reported by device tree */
u32 num_spu;
};
static struct pdc_globals pdcg;
/* top level debug FS directory for PDC driver */
static struct dentry *debugfs_dir;
static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *offp)
{
struct pdc_state *pdcs;
char *buf;
ssize_t ret, out_offset, out_count;
out_count = 512;
buf = kmalloc(out_count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
pdcs = filp->private_data;
out_offset = 0;
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"SPU %u stats:\n", pdcs->pdc_idx);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"PDC requests....................%u\n",
pdcs->pdc_requests);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"PDC responses...................%u\n",
pdcs->pdc_replies);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Tx not done.....................%u\n",
pdcs->last_tx_not_done);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Tx ring full....................%u\n",
pdcs->tx_ring_full);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Rx ring full....................%u\n",
pdcs->rx_ring_full);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Tx desc write fail. Ring full...%u\n",
pdcs->txnobuf);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Rx desc write fail. Ring full...%u\n",
pdcs->rxnobuf);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Receive overflow................%u\n",
pdcs->rx_oflow);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Num frags in rx ring............%u\n",
NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
pdcs->nrxpost));
if (out_offset > out_count)
out_offset = out_count;
ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
kfree(buf);
return ret;
}
static const struct file_operations pdc_debugfs_stats = {
.owner = THIS_MODULE,
.open = simple_open,
.read = pdc_debugfs_read,
};
/**
* pdc_setup_debugfs() - Create the debug FS directories. If the top-level
* directory has not yet been created, create it now. Create a stats file in
* this directory for a SPU.
* @pdcs: PDC state structure
*/
static void pdc_setup_debugfs(struct pdc_state *pdcs)
{
char spu_stats_name[16];
if (!debugfs_initialized())
return;
snprintf(spu_stats_name, 16, "pdc%d_stats", pdcs->pdc_idx);
if (!debugfs_dir)
debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
/* S_IRUSR == 0400 */
debugfs_create_file(spu_stats_name, 0400, debugfs_dir, pdcs,
&pdc_debugfs_stats);
}
static void pdc_free_debugfs(void)
{
debugfs_remove_recursive(debugfs_dir);
debugfs_dir = NULL;
}
/**
* pdc_build_rxd() - Build DMA descriptor to receive SPU result.
* @pdcs: PDC state for SPU that will generate result
* @dma_addr: DMA address of buffer that descriptor is being built for
* @buf_len: Length of the receive buffer, in bytes
* @flags: Flags to be stored in descriptor
*/
static inline void
pdc_build_rxd(struct pdc_state *pdcs, dma_addr_t dma_addr,
u32 buf_len, u32 flags)
{
struct device *dev = &pdcs->pdev->dev;
struct dma64dd *rxd = &pdcs->rxd_64[pdcs->rxout];
dev_dbg(dev,
"Writing rx descriptor for PDC %u at index %u with length %u. flags %#x\n",
pdcs->pdc_idx, pdcs->rxout, buf_len, flags);
rxd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
rxd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
rxd->ctrl1 = cpu_to_le32(flags);
rxd->ctrl2 = cpu_to_le32(buf_len);
/* bump ring index and return */
pdcs->rxout = NEXTRXD(pdcs->rxout, pdcs->nrxpost);
}
/**
* pdc_build_txd() - Build a DMA descriptor to transmit a SPU request to
* hardware.
* @pdcs: PDC state for the SPU that will process this request
* @dma_addr: DMA address of packet to be transmitted
* @buf_len: Length of tx buffer, in bytes
* @flags: Flags to be stored in descriptor
*/
static inline void
pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len,
u32 flags)
{
struct device *dev = &pdcs->pdev->dev;
struct dma64dd *txd = &pdcs->txd_64[pdcs->txout];
dev_dbg(dev,
"Writing tx descriptor for PDC %u at index %u with length %u, flags %#x\n",
pdcs->pdc_idx, pdcs->txout, buf_len, flags);
txd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
txd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
txd->ctrl1 = cpu_to_le32(flags);
txd->ctrl2 = cpu_to_le32(buf_len);
/* bump ring index and return */
pdcs->txout = NEXTTXD(pdcs->txout, pdcs->ntxpost);
}
/**
* pdc_receive_one() - Receive a response message from a given SPU.
* @pdcs: PDC state for the SPU to receive from
*
* When the return code indicates success, the response message is available in
* the receive buffers provided prior to submission of the request.
*
* Return: PDC_SUCCESS if one or more receive descriptors was processed
* -EAGAIN indicates that no response message is available
* -EIO an error occurred
*/
static int
pdc_receive_one(struct pdc_state *pdcs)
{
struct device *dev = &pdcs->pdev->dev;
struct mbox_controller *mbc;
struct mbox_chan *chan;
struct brcm_message mssg;
u32 len, rx_status;
u32 num_frags;
u8 *resp_hdr; /* virtual addr of start of resp message DMA header */
u32 frags_rdy; /* number of fragments ready to read */
u32 rx_idx; /* ring index of start of receive frame */
dma_addr_t resp_hdr_daddr;
struct pdc_rx_ctx *rx_ctx;
mbc = &pdcs->mbc;
chan = &mbc->chans[0];
mssg.type = BRCM_MESSAGE_SPU;
/*
* return if a complete response message is not yet ready.
	 * rx_ctx[rxin].rxin_numd is the number of fragments in the next
	 * msg to read.
*/
frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost);
if ((frags_rdy == 0) ||
(frags_rdy < pdcs->rx_ctx[pdcs->rxin].rxin_numd))
/* No response ready */
return -EAGAIN;
num_frags = pdcs->txin_numd[pdcs->txin];
WARN_ON(num_frags == 0);
dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin],
sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE);
pdcs->txin = (pdcs->txin + num_frags) & pdcs->ntxpost;
dev_dbg(dev, "PDC %u reclaimed %d tx descriptors",
pdcs->pdc_idx, num_frags);
rx_idx = pdcs->rxin;
rx_ctx = &pdcs->rx_ctx[rx_idx];
num_frags = rx_ctx->rxin_numd;
/* Return opaque context with result */
mssg.ctx = rx_ctx->rxp_ctx;
rx_ctx->rxp_ctx = NULL;
resp_hdr = rx_ctx->resp_hdr;
resp_hdr_daddr = rx_ctx->resp_hdr_daddr;
dma_unmap_sg(dev, rx_ctx->dst_sg, sg_nents(rx_ctx->dst_sg),
DMA_FROM_DEVICE);
pdcs->rxin = (pdcs->rxin + num_frags) & pdcs->nrxpost;
dev_dbg(dev, "PDC %u reclaimed %d rx descriptors",
pdcs->pdc_idx, num_frags);
dev_dbg(dev,
"PDC %u txin %u, txout %u, rxin %u, rxout %u, last_rx_curr %u\n",
pdcs->pdc_idx, pdcs->txin, pdcs->txout, pdcs->rxin,
pdcs->rxout, pdcs->last_rx_curr);
if (pdcs->pdc_resp_hdr_len == PDC_SPUM_RESP_HDR_LEN) {
/*
* For SPU-M, get length of response msg and rx overflow status.
*/
rx_status = *((u32 *)resp_hdr);
len = rx_status & RX_STATUS_LEN;
dev_dbg(dev,
"SPU response length %u bytes", len);
if (unlikely(((rx_status & RX_STATUS_OVERFLOW) || (!len)))) {
if (rx_status & RX_STATUS_OVERFLOW) {
dev_err_ratelimited(dev,
"crypto receive overflow");
pdcs->rx_oflow++;
} else {
dev_info_ratelimited(dev, "crypto rx len = 0");
}
return -EIO;
}
}
dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr);
mbox_chan_received_data(chan, &mssg);
pdcs->pdc_replies++;
return PDC_SUCCESS;
}
/**
* pdc_receive() - Process as many responses as are available in the rx ring.
* @pdcs: PDC state
*
 * Called from the rx tasklet after a DMA receive interrupt.
 * Return: 0
*/
static int
pdc_receive(struct pdc_state *pdcs)
{
int rx_status;
/* read last_rx_curr from register once */
pdcs->last_rx_curr =
(ioread32((const void __iomem *)&pdcs->rxregs_64->status0) &
CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;
do {
/* Could be many frames ready */
rx_status = pdc_receive_one(pdcs);
} while (rx_status == PDC_SUCCESS);
return 0;
}
/**
* pdc_tx_list_sg_add() - Add the buffers in a scatterlist to the transmit
* descriptors for a given SPU. The scatterlist buffers contain the data for a
* SPU request message.
* @pdcs: PDC state for the SPU that will process this request
* @sg: Scatterlist whose buffers contain part of the SPU request
*
* If a scatterlist buffer is larger than PDC_DMA_BUF_MAX, multiple descriptors
 * are written for that buffer, each <= PDC_DMA_BUF_MAX bytes in length.
*
* Return: PDC_SUCCESS if successful
* < 0 otherwise
*/
static int pdc_tx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
u32 flags = 0;
u32 eot;
u32 tx_avail;
/*
* Num descriptors needed. Conservatively assume we need a descriptor
* for every entry in sg.
*/
u32 num_desc;
u32 desc_w = 0; /* Number of tx descriptors written */
u32 bufcnt; /* Number of bytes of buffer pointed to by descriptor */
dma_addr_t databufptr; /* DMA address to put in descriptor */
num_desc = (u32)sg_nents(sg);
/* check whether enough tx descriptors are available */
tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
pdcs->ntxpost);
if (unlikely(num_desc > tx_avail)) {
pdcs->txnobuf++;
return -ENOSPC;
}
/* build tx descriptors */
if (pdcs->tx_msg_start == pdcs->txout) {
/* Start of frame */
pdcs->txin_numd[pdcs->tx_msg_start] = 0;
pdcs->src_sg[pdcs->txout] = sg;
flags = D64_CTRL1_SOF;
}
while (sg) {
if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
eot = D64_CTRL1_EOT;
else
eot = 0;
/*
* If sg buffer larger than PDC limit, split across
* multiple descriptors
*/
bufcnt = sg_dma_len(sg);
databufptr = sg_dma_address(sg);
while (bufcnt > PDC_DMA_BUF_MAX) {
pdc_build_txd(pdcs, databufptr, PDC_DMA_BUF_MAX,
flags | eot);
desc_w++;
bufcnt -= PDC_DMA_BUF_MAX;
databufptr += PDC_DMA_BUF_MAX;
if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
eot = D64_CTRL1_EOT;
else
eot = 0;
}
sg = sg_next(sg);
if (!sg)
/* Writing last descriptor for frame */
flags |= (D64_CTRL1_EOF | D64_CTRL1_IOC);
pdc_build_txd(pdcs, databufptr, bufcnt, flags | eot);
desc_w++;
/* Clear start of frame after first descriptor */
flags &= ~D64_CTRL1_SOF;
}
pdcs->txin_numd[pdcs->tx_msg_start] += desc_w;
return PDC_SUCCESS;
}
/**
* pdc_tx_list_final() - Initiate DMA transfer of last frame written to tx
* ring.
* @pdcs: PDC state for SPU to process the request
*
* Sets the index of the last descriptor written in both the rx and tx ring.
*
* Return: PDC_SUCCESS
*/
static int pdc_tx_list_final(struct pdc_state *pdcs)
{
/*
* write barrier to ensure all register writes are complete
* before chip starts to process new request
*/
wmb();
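	/*
	 * The ptr registers take byte offsets within the ring; each
	 * descriptor is RING_ENTRY_SIZE (16) bytes, hence the << 4 on the
	 * descriptor indexes below.
	 */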
iowrite32(pdcs->rxout << 4, &pdcs->rxregs_64->ptr);
iowrite32(pdcs->txout << 4, &pdcs->txregs_64->ptr);
pdcs->pdc_requests++;
return PDC_SUCCESS;
}
/**
* pdc_rx_list_init() - Start a new receive descriptor list for a given PDC.
* @pdcs: PDC state for SPU handling request
* @dst_sg: scatterlist providing rx buffers for response to be returned to
* mailbox client
* @ctx: Opaque context for this request
*
* Posts a single receive descriptor to hold the metadata that precedes a
* response. For example, with SPU-M, the metadata is a 32-byte DMA header and
* an 8-byte BCM header. Moves the msg_start descriptor indexes for both tx and
* rx to indicate the start of a new message.
*
* Return: PDC_SUCCESS if successful
* < 0 if an error (e.g., rx ring is full)
*/
static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
void *ctx)
{
u32 flags = 0;
u32 rx_avail;
u32 rx_pkt_cnt = 1; /* Adding a single rx buffer */
dma_addr_t daddr;
void *vaddr;
struct pdc_rx_ctx *rx_ctx;
rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
pdcs->nrxpost);
if (unlikely(rx_pkt_cnt > rx_avail)) {
pdcs->rxnobuf++;
return -ENOSPC;
}
/* allocate a buffer for the dma rx status */
vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
if (unlikely(!vaddr))
return -ENOMEM;
/*
* Update msg_start indexes for both tx and rx to indicate the start
* of a new sequence of descriptor indexes that contain the fragments
* of the same message.
*/
pdcs->rx_msg_start = pdcs->rxout;
pdcs->tx_msg_start = pdcs->txout;
/* This is always the first descriptor in the receive sequence */
flags = D64_CTRL1_SOF;
pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd = 1;
if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
flags |= D64_CTRL1_EOT;
rx_ctx = &pdcs->rx_ctx[pdcs->rxout];
rx_ctx->rxp_ctx = ctx;
rx_ctx->dst_sg = dst_sg;
rx_ctx->resp_hdr = vaddr;
rx_ctx->resp_hdr_daddr = daddr;
pdc_build_rxd(pdcs, daddr, pdcs->pdc_resp_hdr_len, flags);
return PDC_SUCCESS;
}
/**
* pdc_rx_list_sg_add() - Add the buffers in a scatterlist to the receive
* descriptors for a given SPU. The caller must have already DMA mapped the
* scatterlist.
* @pdcs: PDC state for the SPU that will process this request
* @sg: Scatterlist whose buffers are added to the receive ring
*
* If a receive buffer in the scatterlist is larger than PDC_DMA_BUF_MAX,
* multiple receive descriptors are written, each with a buffer <=
* PDC_DMA_BUF_MAX.
*
* Return: PDC_SUCCESS if successful
* < 0 otherwise (e.g., receive ring is full)
*/
static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
u32 flags = 0;
u32 rx_avail;
/*
* Num descriptors needed. Conservatively assume we need a descriptor
* for every entry from our starting point in the scatterlist.
*/
u32 num_desc;
u32 desc_w = 0; /* Number of tx descriptors written */
u32 bufcnt; /* Number of bytes of buffer pointed to by descriptor */
dma_addr_t databufptr; /* DMA address to put in descriptor */
num_desc = (u32)sg_nents(sg);
rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
pdcs->nrxpost);
if (unlikely(num_desc > rx_avail)) {
pdcs->rxnobuf++;
return -ENOSPC;
}
while (sg) {
if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
flags = D64_CTRL1_EOT;
else
flags = 0;
/*
* If sg buffer larger than PDC limit, split across
* multiple descriptors
*/
bufcnt = sg_dma_len(sg);
databufptr = sg_dma_address(sg);
while (bufcnt > PDC_DMA_BUF_MAX) {
pdc_build_rxd(pdcs, databufptr, PDC_DMA_BUF_MAX, flags);
desc_w++;
bufcnt -= PDC_DMA_BUF_MAX;
databufptr += PDC_DMA_BUF_MAX;
if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
flags = D64_CTRL1_EOT;
else
flags = 0;
}
pdc_build_rxd(pdcs, databufptr, bufcnt, flags);
desc_w++;
sg = sg_next(sg);
}
pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd += desc_w;
return PDC_SUCCESS;
}
/**
* pdc_irq_handler() - Interrupt handler called in interrupt context.
* @irq: Interrupt number that has fired
* @data: device struct for DMA engine that generated the interrupt
*
 * We have to clear the device interrupt status flags here, then schedule the
 * rx tasklet to do the deferred response processing outside of hard interrupt
 * context.
 *
 * Return: IRQ_HANDLED if the interrupt is ours
 *         IRQ_NONE otherwise
*/
static irqreturn_t pdc_irq_handler(int irq, void *data)
{
struct device *dev = (struct device *)data;
struct pdc_state *pdcs = dev_get_drvdata(dev);
u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
if (unlikely(intstatus == 0))
return IRQ_NONE;
/* Disable interrupts until soft handler runs */
iowrite32(0, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
/* Clear interrupt flags in device */
iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
/* Wakeup IRQ thread */
tasklet_schedule(&pdcs->rx_tasklet);
return IRQ_HANDLED;
}
/**
* pdc_tasklet_cb() - Tasklet callback that runs the deferred processing after
* a DMA receive interrupt. Reenables the receive interrupt.
 * @t: Pointer to the rx_tasklet embedded in the PDC state structure
*/
static void pdc_tasklet_cb(struct tasklet_struct *t)
{
struct pdc_state *pdcs = from_tasklet(pdcs, t, rx_tasklet);
pdc_receive(pdcs);
/* reenable interrupts */
iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
}
/**
* pdc_ring_init() - Allocate DMA rings and initialize constant fields of
* descriptors in one ringset.
* @pdcs: PDC instance state
* @ringset: index of ringset being used
*
* Return: PDC_SUCCESS if ring initialized
* < 0 otherwise
*/
static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
{
int i;
int err = PDC_SUCCESS;
struct dma64 *dma_reg;
struct device *dev = &pdcs->pdev->dev;
struct pdc_ring_alloc tx;
struct pdc_ring_alloc rx;
/* Allocate tx ring */
tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase);
if (unlikely(!tx.vbase)) {
err = -ENOMEM;
goto done;
}
/* Allocate rx ring */
rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase);
if (unlikely(!rx.vbase)) {
err = -ENOMEM;
goto fail_dealloc;
}
dev_dbg(dev, " - base DMA addr of tx ring %pad", &tx.dmabase);
dev_dbg(dev, " - base virtual addr of tx ring %p", tx.vbase);
dev_dbg(dev, " - base DMA addr of rx ring %pad", &rx.dmabase);
dev_dbg(dev, " - base virtual addr of rx ring %p", rx.vbase);
memcpy(&pdcs->tx_ring_alloc, &tx, sizeof(tx));
memcpy(&pdcs->rx_ring_alloc, &rx, sizeof(rx));
pdcs->rxin = 0;
pdcs->rx_msg_start = 0;
pdcs->last_rx_curr = 0;
pdcs->rxout = 0;
pdcs->txin = 0;
pdcs->tx_msg_start = 0;
pdcs->txout = 0;
/* Set descriptor array base addresses */
pdcs->txd_64 = (struct dma64dd *)pdcs->tx_ring_alloc.vbase;
pdcs->rxd_64 = (struct dma64dd *)pdcs->rx_ring_alloc.vbase;
/* Tell device the base DMA address of each ring */
dma_reg = &pdcs->regs->dmaregs[ringset];
/* But first disable DMA and set curptr to 0 for both TX & RX */
iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
iowrite32((PDC_RX_CTL + (pdcs->rx_status_len << 1)),
&dma_reg->dmarcv.control);
iowrite32(0, &dma_reg->dmaxmt.ptr);
iowrite32(0, &dma_reg->dmarcv.ptr);
/* Set base DMA addresses */
iowrite32(lower_32_bits(pdcs->tx_ring_alloc.dmabase),
&dma_reg->dmaxmt.addrlow);
iowrite32(upper_32_bits(pdcs->tx_ring_alloc.dmabase),
&dma_reg->dmaxmt.addrhigh);
iowrite32(lower_32_bits(pdcs->rx_ring_alloc.dmabase),
&dma_reg->dmarcv.addrlow);
iowrite32(upper_32_bits(pdcs->rx_ring_alloc.dmabase),
&dma_reg->dmarcv.addrhigh);
/* Re-enable DMA */
iowrite32(PDC_TX_CTL | PDC_TX_ENABLE, &dma_reg->dmaxmt.control);
iowrite32((PDC_RX_CTL | PDC_RX_ENABLE | (pdcs->rx_status_len << 1)),
&dma_reg->dmarcv.control);
/* Initialize descriptors */
for (i = 0; i < PDC_RING_ENTRIES; i++) {
/* Every tx descriptor can be used for start of frame. */
if (i != pdcs->ntxpost) {
iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF,
&pdcs->txd_64[i].ctrl1);
} else {
/* Last descriptor in ringset. Set End of Table. */
iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF |
D64_CTRL1_EOT, &pdcs->txd_64[i].ctrl1);
}
/* Every rx descriptor can be used for start of frame */
if (i != pdcs->nrxpost) {
iowrite32(D64_CTRL1_SOF,
&pdcs->rxd_64[i].ctrl1);
} else {
/* Last descriptor in ringset. Set End of Table. */
iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOT,
&pdcs->rxd_64[i].ctrl1);
}
}
return PDC_SUCCESS;
fail_dealloc:
dma_pool_free(pdcs->ring_pool, tx.vbase, tx.dmabase);
done:
return err;
}
static void pdc_ring_free(struct pdc_state *pdcs)
{
if (pdcs->tx_ring_alloc.vbase) {
dma_pool_free(pdcs->ring_pool, pdcs->tx_ring_alloc.vbase,
pdcs->tx_ring_alloc.dmabase);
pdcs->tx_ring_alloc.vbase = NULL;
}
if (pdcs->rx_ring_alloc.vbase) {
dma_pool_free(pdcs->ring_pool, pdcs->rx_ring_alloc.vbase,
pdcs->rx_ring_alloc.dmabase);
pdcs->rx_ring_alloc.vbase = NULL;
}
}
/**
* pdc_desc_count() - Count the number of DMA descriptors that will be required
* for a given scatterlist. Account for the max length of a DMA buffer.
* @sg: Scatterlist to be DMA'd
* Return: Number of descriptors required
*/
static u32 pdc_desc_count(struct scatterlist *sg)
{
u32 cnt = 0;
while (sg) {
cnt += ((sg->length / PDC_DMA_BUF_MAX) + 1);
sg = sg_next(sg);
}
return cnt;
}
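/*
 * Example of the estimate above: a 3-entry scatterlist whose entries are all
 * shorter than PDC_DMA_BUF_MAX counts as 3 descriptors, while a single entry
 * of exactly 2 * PDC_DMA_BUF_MAX bytes counts as 3 even though only 2 are
 * needed. The count therefore errs on the high side, which keeps the
 * ring-space check below conservative.
 */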
/**
* pdc_rings_full() - Check whether the tx ring has room for tx_cnt descriptors
* and the rx ring has room for rx_cnt descriptors.
* @pdcs: PDC state
* @tx_cnt: The number of descriptors required in the tx ring
 * @rx_cnt: The number of descriptors required in the rx ring
*
* Return: true if one of the rings does not have enough space
* false if sufficient space is available in both rings
*/
static bool pdc_rings_full(struct pdc_state *pdcs, int tx_cnt, int rx_cnt)
{
u32 rx_avail;
u32 tx_avail;
bool full = false;
/* Check if the tx and rx rings are likely to have enough space */
rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
pdcs->nrxpost);
if (unlikely(rx_cnt > rx_avail)) {
pdcs->rx_ring_full++;
full = true;
}
if (likely(!full)) {
tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
pdcs->ntxpost);
if (unlikely(tx_cnt > tx_avail)) {
pdcs->tx_ring_full++;
full = true;
}
}
return full;
}
/**
* pdc_last_tx_done() - If both the tx and rx rings have at least
* PDC_RING_SPACE_MIN descriptors available, then indicate that the mailbox
* framework can submit another message.
* @chan: mailbox channel to check
* Return: true if PDC can accept another message on this channel
*/
static bool pdc_last_tx_done(struct mbox_chan *chan)
{
struct pdc_state *pdcs = chan->con_priv;
bool ret;
if (unlikely(pdc_rings_full(pdcs, PDC_RING_SPACE_MIN,
PDC_RING_SPACE_MIN))) {
pdcs->last_tx_not_done++;
ret = false;
} else {
ret = true;
}
return ret;
}
/**
* pdc_send_data() - mailbox send_data function
* @chan: The mailbox channel on which the data is sent. The channel
* corresponds to a DMA ringset.
* @data: The mailbox message to be sent. The message must be a
* brcm_message structure.
*
* This function is registered as the send_data function for the mailbox
* controller. From the destination scatterlist in the mailbox message, it
* creates a sequence of receive descriptors in the rx ring. From the source
* scatterlist, it creates a sequence of transmit descriptors in the tx ring.
* After creating the descriptors, it writes the rx ptr and tx ptr registers to
* initiate the DMA transfer.
*
* This function does the DMA map and unmap of the src and dst scatterlists in
* the mailbox message.
*
* Return: 0 if successful
* -ENOTSUPP if the mailbox message is a type this driver does not
* support
* < 0 if an error
*/
static int pdc_send_data(struct mbox_chan *chan, void *data)
{
struct pdc_state *pdcs = chan->con_priv;
struct device *dev = &pdcs->pdev->dev;
struct brcm_message *mssg = data;
int err = PDC_SUCCESS;
int src_nent;
int dst_nent;
int nent;
u32 tx_desc_req;
u32 rx_desc_req;
if (unlikely(mssg->type != BRCM_MESSAGE_SPU))
return -ENOTSUPP;
src_nent = sg_nents(mssg->spu.src);
if (likely(src_nent)) {
nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
if (unlikely(nent == 0))
return -EIO;
}
dst_nent = sg_nents(mssg->spu.dst);
if (likely(dst_nent)) {
nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
DMA_FROM_DEVICE);
if (unlikely(nent == 0)) {
dma_unmap_sg(dev, mssg->spu.src, src_nent,
DMA_TO_DEVICE);
return -EIO;
}
}
	/*
	 * Check that the tx and rx rings have enough space before writing any
	 * descriptors. We must not write a partial set of descriptors, or
	 * write the rx descriptors when the corresponding tx descriptors
	 * don't fit. The check and the entire sequence of descriptor writes
	 * must happen without another thread getting in between; the channel
	 * spinlock in the mailbox framework ensures this.
	 */
tx_desc_req = pdc_desc_count(mssg->spu.src);
rx_desc_req = pdc_desc_count(mssg->spu.dst);
if (unlikely(pdc_rings_full(pdcs, tx_desc_req, rx_desc_req + 1)))
return -ENOSPC;
	/* Create rx descriptors to catch the SPU response */
err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx);
err |= pdc_rx_list_sg_add(pdcs, mssg->spu.dst);
/* Create tx descriptors to submit SPU request */
err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
err |= pdc_tx_list_final(pdcs); /* initiate transfer */
if (unlikely(err))
dev_err(&pdcs->pdev->dev,
"%s failed with error %d", __func__, err);
return err;
}
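/*
 * A minimal client sketch (not taken from the kernel; the helper name and
 * parameters are assumptions for illustration): it shows how a request and
 * response scatterlist pair reach pdc_send_data() through the standard
 * mailbox API. The real client is the Broadcom SPU crypto driver. The caller
 * must keep @msg and both scatterlists alive until its mbox_client
 * rx_callback runs. Assumes <linux/mailbox_client.h> for mbox_send_message().
 */
static int __maybe_unused example_pdc_submit(struct mbox_chan *chan,
					     struct brcm_message *msg,
					     struct scatterlist *src,
					     struct scatterlist *dst,
					     void *ctx)
{
	int ret;

	msg->type = BRCM_MESSAGE_SPU;	/* the only type this driver accepts */
	msg->spu.src = src;		/* SPU request, mapped DMA_TO_DEVICE */
	msg->spu.dst = dst;		/* response area, mapped DMA_FROM_DEVICE */
	msg->ctx = ctx;			/* handed back with the received data */

	/* Queues the message; pdc_send_data() posts the DMA descriptors. */
	ret = mbox_send_message(chan, msg);

	/* mbox_send_message() returns a non-negative token on success. */
	return ret < 0 ? ret : 0;
}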
static int pdc_startup(struct mbox_chan *chan)
{
return pdc_ring_init(chan->con_priv, PDC_RINGSET);
}
static void pdc_shutdown(struct mbox_chan *chan)
{
struct pdc_state *pdcs = chan->con_priv;
if (!pdcs)
return;
dev_dbg(&pdcs->pdev->dev,
"Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
pdc_ring_free(pdcs);
}
/**
* pdc_hw_init() - Use the given initialization parameters to initialize the
* state for one of the PDCs.
* @pdcs: state of the PDC
*/
static
void pdc_hw_init(struct pdc_state *pdcs)
{
struct platform_device *pdev;
struct device *dev;
struct dma64 *dma_reg;
int ringset = PDC_RINGSET;
pdev = pdcs->pdev;
dev = &pdev->dev;
dev_dbg(dev, "PDC %u initial values:", pdcs->pdc_idx);
dev_dbg(dev, "state structure: %p",
pdcs);
dev_dbg(dev, " - base virtual addr of hw regs %p",
pdcs->pdc_reg_vbase);
/* initialize data structures */
pdcs->regs = (struct pdc_regs *)pdcs->pdc_reg_vbase;
pdcs->txregs_64 = (struct dma64_regs *)
(((u8 *)pdcs->pdc_reg_vbase) +
PDC_TXREGS_OFFSET + (sizeof(struct dma64) * ringset));
pdcs->rxregs_64 = (struct dma64_regs *)
(((u8 *)pdcs->pdc_reg_vbase) +
PDC_RXREGS_OFFSET + (sizeof(struct dma64) * ringset));
pdcs->ntxd = PDC_RING_ENTRIES;
pdcs->nrxd = PDC_RING_ENTRIES;
pdcs->ntxpost = PDC_RING_ENTRIES - 1;
pdcs->nrxpost = PDC_RING_ENTRIES - 1;
iowrite32(0, &pdcs->regs->intmask);
dma_reg = &pdcs->regs->dmaregs[ringset];
	/* Configure DMA; it is not enabled until pdc_ring_init() runs */
iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
&dma_reg->dmarcv.control);
/* Reset current index pointers after making sure DMA is disabled */
iowrite32(0, &dma_reg->dmaxmt.ptr);
iowrite32(0, &dma_reg->dmarcv.ptr);
if (pdcs->pdc_resp_hdr_len == PDC_SPU2_RESP_HDR_LEN)
iowrite32(PDC_CKSUM_CTRL,
pdcs->pdc_reg_vbase + PDC_CKSUM_CTRL_OFFSET);
}
/**
* pdc_hw_disable() - Disable the tx and rx control in the hw.
* @pdcs: PDC state structure
*
*/
static void pdc_hw_disable(struct pdc_state *pdcs)
{
struct dma64 *dma_reg;
dma_reg = &pdcs->regs->dmaregs[PDC_RINGSET];
iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
&dma_reg->dmarcv.control);
}
/**
 * pdc_rx_buf_pool_create() - Create a pool of receive buffers used to catch
 * the metadata header returned with each response message.
* @pdcs: PDC state structure
*
* The metadata is not returned to the mailbox client. So the PDC driver
* manages these buffers.
*
* Return: PDC_SUCCESS
* -ENOMEM if pool creation fails
*/
static int pdc_rx_buf_pool_create(struct pdc_state *pdcs)
{
struct platform_device *pdev;
struct device *dev;
pdev = pdcs->pdev;
dev = &pdev->dev;
pdcs->pdc_resp_hdr_len = pdcs->rx_status_len;
if (pdcs->use_bcm_hdr)
pdcs->pdc_resp_hdr_len += BCM_HDR_LEN;
pdcs->rx_buf_pool = dma_pool_create("pdc rx bufs", dev,
pdcs->pdc_resp_hdr_len,
RX_BUF_ALIGN, 0);
if (!pdcs->rx_buf_pool)
return -ENOMEM;
return PDC_SUCCESS;
}
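/*
 * Example of the sizing above, assuming a device tree with
 * brcm,rx-status-len = <32> and brcm,use-bcm-hdr present (illustrative
 * values only): each pool element is then 32 + BCM_HDR_LEN bytes, aligned to
 * RX_BUF_ALIGN.
 */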
/**
 * pdc_interrupts_init() - Initialize the interrupt configuration for a PDC and
 * register its IRQ handler. Deferred handling outside of hard interrupt
 * context is done by the rx tasklet rather than a threaded IRQ.
* @pdcs: PDC state
*
* Set the interrupt mask for transmit and receive done.
* Set the lazy interrupt frame count to generate an interrupt for just one pkt.
*
* Return: PDC_SUCCESS
 *	   <0 if the IRQ request fails
*/
static int pdc_interrupts_init(struct pdc_state *pdcs)
{
struct platform_device *pdev = pdcs->pdev;
struct device *dev = &pdev->dev;
struct device_node *dn = pdev->dev.of_node;
int err;
/* interrupt configuration */
iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
if (pdcs->hw_type == FA_HW)
iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase +
FA_RCVLAZY0_OFFSET);
else
iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase +
PDC_RCVLAZY0_OFFSET);
/* read irq from device tree */
pdcs->pdc_irq = irq_of_parse_and_map(dn, 0);
dev_dbg(dev, "pdc device %s irq %u for pdcs %p",
dev_name(dev), pdcs->pdc_irq, pdcs);
err = devm_request_irq(dev, pdcs->pdc_irq, pdc_irq_handler, 0,
dev_name(dev), dev);
if (err) {
dev_err(dev, "IRQ %u request failed with err %d\n",
pdcs->pdc_irq, err);
return err;
}
return PDC_SUCCESS;
}
static const struct mbox_chan_ops pdc_mbox_chan_ops = {
.send_data = pdc_send_data,
.last_tx_done = pdc_last_tx_done,
.startup = pdc_startup,
.shutdown = pdc_shutdown
};
/**
* pdc_mb_init() - Initialize the mailbox controller.
* @pdcs: PDC state
*
* Each PDC is a mailbox controller. Each ringset is a mailbox channel. Kernel
* driver only uses one ringset and thus one mb channel. PDC uses the transmit
* complete interrupt to determine when a mailbox message has successfully been
* transmitted.
*
* Return: 0 on success
* < 0 if there is an allocation or registration failure
*/
static int pdc_mb_init(struct pdc_state *pdcs)
{
struct device *dev = &pdcs->pdev->dev;
struct mbox_controller *mbc;
int chan_index;
int err;
mbc = &pdcs->mbc;
mbc->dev = dev;
mbc->ops = &pdc_mbox_chan_ops;
mbc->num_chans = 1;
mbc->chans = devm_kcalloc(dev, mbc->num_chans, sizeof(*mbc->chans),
GFP_KERNEL);
if (!mbc->chans)
return -ENOMEM;
mbc->txdone_irq = false;
mbc->txdone_poll = true;
mbc->txpoll_period = 1;
for (chan_index = 0; chan_index < mbc->num_chans; chan_index++)
mbc->chans[chan_index].con_priv = pdcs;
/* Register mailbox controller */
err = devm_mbox_controller_register(dev, mbc);
if (err) {
dev_crit(dev,
"Failed to register PDC mailbox controller. Error %d.",
err);
return err;
}
return 0;
}
/* Device tree API */
static const int pdc_hw = PDC_HW;
static const int fa_hw = FA_HW;
static const struct of_device_id pdc_mbox_of_match[] = {
{.compatible = "brcm,iproc-pdc-mbox", .data = &pdc_hw},
{.compatible = "brcm,iproc-fa2-mbox", .data = &fa_hw},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pdc_mbox_of_match);
/**
* pdc_dt_read() - Read application-specific data from device tree.
* @pdev: Platform device
* @pdcs: PDC state
*
* Reads the number of bytes of receive status that precede each received frame.
 * Reads whether transmitted and received frames should be preceded by an 8-byte
* BCM header.
*
* Return: 0 if successful
* -ENODEV if device not available
*/
static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
{
struct device *dev = &pdev->dev;
struct device_node *dn = pdev->dev.of_node;
const struct of_device_id *match;
const int *hw_type;
int err;
err = of_property_read_u32(dn, "brcm,rx-status-len",
&pdcs->rx_status_len);
if (err < 0)
dev_err(dev,
"%s failed to get DMA receive status length from device tree",
__func__);
pdcs->use_bcm_hdr = of_property_read_bool(dn, "brcm,use-bcm-hdr");
pdcs->hw_type = PDC_HW;
match = of_match_device(of_match_ptr(pdc_mbox_of_match), dev);
if (match != NULL) {
hw_type = match->data;
pdcs->hw_type = *hw_type;
}
return 0;
}
/**
* pdc_probe() - Probe function for PDC driver.
* @pdev: PDC platform device
*
* Reserve and map register regions defined in device tree.
* Allocate and initialize tx and rx DMA rings.
* Initialize a mailbox controller for each PDC.
*
* Return: 0 if successful
* < 0 if an error
*/
static int pdc_probe(struct platform_device *pdev)
{
int err = 0;
struct device *dev = &pdev->dev;
struct resource *pdc_regs;
struct pdc_state *pdcs;
/* PDC state for one SPU */
pdcs = devm_kzalloc(dev, sizeof(*pdcs), GFP_KERNEL);
if (!pdcs) {
err = -ENOMEM;
goto cleanup;
}
pdcs->pdev = pdev;
platform_set_drvdata(pdev, pdcs);
pdcs->pdc_idx = pdcg.num_spu;
pdcg.num_spu++;
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
if (err) {
dev_warn(dev, "PDC device cannot perform DMA. Error %d.", err);
goto cleanup;
}
/* Create DMA pool for tx ring */
pdcs->ring_pool = dma_pool_create("pdc rings", dev, PDC_RING_SIZE,
RING_ALIGN, 0);
if (!pdcs->ring_pool) {
err = -ENOMEM;
goto cleanup;
}
err = pdc_dt_read(pdev, pdcs);
if (err)
goto cleanup_ring_pool;
pdcs->pdc_reg_vbase = devm_platform_get_and_ioremap_resource(pdev, 0, &pdc_regs);
if (IS_ERR(pdcs->pdc_reg_vbase)) {
err = PTR_ERR(pdcs->pdc_reg_vbase);
goto cleanup_ring_pool;
}
dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
&pdc_regs->start, &pdc_regs->end);
/* create rx buffer pool after dt read to know how big buffers are */
err = pdc_rx_buf_pool_create(pdcs);
if (err)
goto cleanup_ring_pool;
pdc_hw_init(pdcs);
/* Init tasklet for deferred DMA rx processing */
tasklet_setup(&pdcs->rx_tasklet, pdc_tasklet_cb);
err = pdc_interrupts_init(pdcs);
if (err)
goto cleanup_buf_pool;
/* Initialize mailbox controller */
err = pdc_mb_init(pdcs);
if (err)
goto cleanup_buf_pool;
pdc_setup_debugfs(pdcs);
dev_dbg(dev, "pdc_probe() successful");
return PDC_SUCCESS;
cleanup_buf_pool:
tasklet_kill(&pdcs->rx_tasklet);
dma_pool_destroy(pdcs->rx_buf_pool);
cleanup_ring_pool:
dma_pool_destroy(pdcs->ring_pool);
cleanup:
return err;
}
static int pdc_remove(struct platform_device *pdev)
{
struct pdc_state *pdcs = platform_get_drvdata(pdev);
pdc_free_debugfs();
tasklet_kill(&pdcs->rx_tasklet);
pdc_hw_disable(pdcs);
dma_pool_destroy(pdcs->rx_buf_pool);
dma_pool_destroy(pdcs->ring_pool);
return 0;
}
static struct platform_driver pdc_mbox_driver = {
.probe = pdc_probe,
.remove = pdc_remove,
.driver = {
.name = "brcm-iproc-pdc-mbox",
.of_match_table = pdc_mbox_of_match,
},
};
module_platform_driver(pdc_mbox_driver);
MODULE_AUTHOR("Rob Rice <[email protected]>");
MODULE_DESCRIPTION("Broadcom PDC mailbox driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/mailbox/bcm-pdc-mailbox.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Texas Instruments' Message Manager Driver
*
* Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/ti-msgmgr.h>
#define Q_DATA_OFFSET(proxy, queue, reg) \
((0x10000 * (proxy)) + (0x80 * (queue)) + ((reg) * 4))
#define Q_STATE_OFFSET(queue) ((queue) * 0x4)
#define Q_STATE_ENTRY_COUNT_MASK (0xFFF000)
#define SPROXY_THREAD_OFFSET(tid) (0x1000 * (tid))
#define SPROXY_THREAD_DATA_OFFSET(tid, reg) \
(SPROXY_THREAD_OFFSET(tid) + ((reg) * 0x4) + 0x4)
#define SPROXY_THREAD_STATUS_OFFSET(tid) (SPROXY_THREAD_OFFSET(tid))
#define SPROXY_THREAD_STATUS_COUNT_MASK (0xFF)
#define SPROXY_THREAD_CTRL_OFFSET(tid) (0x1000 + SPROXY_THREAD_OFFSET(tid))
#define SPROXY_THREAD_CTRL_DIR_MASK (0x1 << 31)
/**
* struct ti_msgmgr_valid_queue_desc - SoC valid queues meant for this processor
* @queue_id: Queue Number for this path
* @proxy_id: Proxy ID representing the processor in SoC
 * @is_tx: 'true' if this is a transmit path, 'false' if it is a receive path
*/
struct ti_msgmgr_valid_queue_desc {
u8 queue_id;
u8 proxy_id;
bool is_tx;
};
/**
* struct ti_msgmgr_desc - Description of message manager integration
* @queue_count: Number of Queues
 * @max_message_size: Maximum message size in bytes
 * @max_messages: Maximum number of messages
* @data_first_reg: First data register for proxy data region
* @data_last_reg: Last data register for proxy data region
* @status_cnt_mask: Mask for getting the status value
* @status_err_mask: Mask for getting the error value, if applicable
 * @tx_polled: Whether a polled mechanism is needed for tx completion
* @tx_poll_timeout_ms: Timeout in ms if polled
* @valid_queues: List of Valid queues that the processor can access
* @data_region_name: Name of the proxy data region
* @status_region_name: Name of the proxy status region
* @ctrl_region_name: Name of the proxy control region
* @num_valid_queues: Number of valid queues
 * @is_sproxy: Is this a Secure Proxy instance?
*
* This structure is used in of match data to describe how integration
* for a specific compatible SoC is done.
*/
struct ti_msgmgr_desc {
u8 queue_count;
u8 max_message_size;
u8 max_messages;
u8 data_first_reg;
u8 data_last_reg;
u32 status_cnt_mask;
u32 status_err_mask;
bool tx_polled;
int tx_poll_timeout_ms;
const struct ti_msgmgr_valid_queue_desc *valid_queues;
const char *data_region_name;
const char *status_region_name;
const char *ctrl_region_name;
int num_valid_queues;
bool is_sproxy;
};
/**
* struct ti_queue_inst - Description of a queue instance
* @name: Queue Name
* @queue_id: Queue Identifier as mapped on SoC
* @proxy_id: Proxy Identifier as mapped on SoC
* @irq: IRQ for Rx Queue
* @is_tx: 'true' if transmit queue, else, 'false'
* @queue_buff_start: First register of Data Buffer
* @queue_buff_end: Last (or confirmation) register of Data buffer
* @queue_state: Queue status register
* @queue_ctrl: Queue Control register
* @chan: Mailbox channel
 * @rx_buff: Receive buffer pointer allocated at queue startup, max_message_size bytes
* @polled_rx_mode: Use polling for rx instead of interrupts
*/
struct ti_queue_inst {
char name[30];
u8 queue_id;
u8 proxy_id;
int irq;
bool is_tx;
void __iomem *queue_buff_start;
void __iomem *queue_buff_end;
void __iomem *queue_state;
void __iomem *queue_ctrl;
struct mbox_chan *chan;
u32 *rx_buff;
bool polled_rx_mode;
};
/**
* struct ti_msgmgr_inst - Description of a Message Manager Instance
* @dev: device pointer corresponding to the Message Manager instance
* @desc: Description of the SoC integration
* @queue_proxy_region: Queue proxy region where queue buffers are located
* @queue_state_debug_region: Queue status register regions
* @queue_ctrl_region: Queue Control register regions
* @num_valid_queues: Number of valid queues defined for the processor
* Note: other queues are probably reserved for other processors
* in the SoC.
* @qinsts: Array of valid Queue Instances for the Processor
* @mbox: Mailbox Controller
* @chans: Array for channels corresponding to the Queue Instances.
*/
struct ti_msgmgr_inst {
struct device *dev;
const struct ti_msgmgr_desc *desc;
void __iomem *queue_proxy_region;
void __iomem *queue_state_debug_region;
void __iomem *queue_ctrl_region;
u8 num_valid_queues;
struct ti_queue_inst *qinsts;
struct mbox_controller mbox;
struct mbox_chan *chans;
};
/**
* ti_msgmgr_queue_get_num_messages() - Get the number of pending messages
* @d: Description of message manager
* @qinst: Queue instance for which we check the number of pending messages
*
* Return: number of messages pending in the queue (0 == no pending messages)
*/
static inline int
ti_msgmgr_queue_get_num_messages(const struct ti_msgmgr_desc *d,
struct ti_queue_inst *qinst)
{
u32 val;
u32 status_cnt_mask = d->status_cnt_mask;
/*
* We cannot use relaxed operation here - update may happen
* real-time.
*/
val = readl(qinst->queue_state) & status_cnt_mask;
val >>= __ffs(status_cnt_mask);
return val;
}
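/*
 * Example of the decode above: with the K2G count mask
 * Q_STATE_ENTRY_COUNT_MASK (0xFFF000), __ffs() returns 12, so a raw
 * queue-state value of 0x003000 masks to 0x3000 and shifts down to 3 pending
 * messages. For Secure Proxy, SPROXY_THREAD_STATUS_COUNT_MASK (0xFF) has
 * __ffs() == 0 and the low byte is used directly.
 */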
/**
* ti_msgmgr_queue_is_error() - Check to see if there is queue error
* @d: Description of message manager
 * @qinst: Queue instance which is checked for an error condition
*
* Return: true if error, else false
*/
static inline bool ti_msgmgr_queue_is_error(const struct ti_msgmgr_desc *d,
struct ti_queue_inst *qinst)
{
u32 val;
/* Msgmgr has no error detection */
if (!d->is_sproxy)
return false;
/*
* We cannot use relaxed operation here - update may happen
* real-time.
*/
val = readl(qinst->queue_state) & d->status_err_mask;
return val ? true : false;
}
static int ti_msgmgr_queue_rx_data(struct mbox_chan *chan, struct ti_queue_inst *qinst,
const struct ti_msgmgr_desc *desc)
{
int num_words;
struct ti_msgmgr_message message;
void __iomem *data_reg;
u32 *word_data;
/*
* I have no idea about the protocol being used to communicate with the
	 * remote producer - 0 could be valid data, so I won't make a judgement
* of how many bytes I should be reading. Let the client figure this
* out.. I just read the full message and pass it on..
*/
message.len = desc->max_message_size;
message.buf = (u8 *)qinst->rx_buff;
/*
* NOTE about register access involved here:
* the hardware block is implemented with 32bit access operations and no
* support for data splitting. We don't want the hardware to misbehave
* with sub 32bit access - For example: if the last register read is
* split into byte wise access, it can result in the queue getting
* stuck or indeterminate behavior. An out of order read operation may
* result in weird data results as well.
* Hence, we do not use memcpy_fromio or __ioread32_copy here, instead
* we depend on readl for the purpose.
*
* Also note that the final register read automatically marks the
* queue message as read.
*/
for (data_reg = qinst->queue_buff_start, word_data = qinst->rx_buff,
num_words = (desc->max_message_size / sizeof(u32));
num_words; num_words--, data_reg += sizeof(u32), word_data++)
*word_data = readl(data_reg);
/*
* Last register read automatically clears the IRQ if only 1 message
* is pending - so send the data up the stack..
* NOTE: Client is expected to be as optimal as possible, since
* we invoke the handler in IRQ context.
*/
mbox_chan_received_data(chan, (void *)&message);
return 0;
}
static int ti_msgmgr_queue_rx_poll_timeout(struct mbox_chan *chan, int timeout_us)
{
struct device *dev = chan->mbox->dev;
struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
struct ti_queue_inst *qinst = chan->con_priv;
const struct ti_msgmgr_desc *desc = inst->desc;
int msg_count;
int ret;
ret = readl_poll_timeout_atomic(qinst->queue_state, msg_count,
(msg_count & desc->status_cnt_mask),
10, timeout_us);
if (ret != 0)
return ret;
ti_msgmgr_queue_rx_data(chan, qinst, desc);
return 0;
}
/**
* ti_msgmgr_queue_rx_interrupt() - Interrupt handler for receive Queue
* @irq: Interrupt number
* @p: Channel Pointer
*
* Return: -EINVAL if there is no instance
* IRQ_NONE if the interrupt is not ours.
* IRQ_HANDLED if the rx interrupt was successfully handled.
*/
static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p)
{
struct mbox_chan *chan = p;
struct device *dev = chan->mbox->dev;
struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
struct ti_queue_inst *qinst = chan->con_priv;
const struct ti_msgmgr_desc *desc;
int msg_count;
if (WARN_ON(!inst)) {
dev_err(dev, "no platform drv data??\n");
return -EINVAL;
}
/* Do I have an invalid interrupt source? */
if (qinst->is_tx) {
dev_err(dev, "Cannot handle rx interrupt on tx channel %s\n",
qinst->name);
return IRQ_NONE;
}
desc = inst->desc;
if (ti_msgmgr_queue_is_error(desc, qinst)) {
dev_err(dev, "Error on Rx channel %s\n", qinst->name);
return IRQ_NONE;
}
/* Do I actually have messages to read? */
msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
if (!msg_count) {
/* Shared IRQ? */
dev_dbg(dev, "Spurious event - 0 pending data!\n");
return IRQ_NONE;
}
ti_msgmgr_queue_rx_data(chan, qinst, desc);
return IRQ_HANDLED;
}
/**
* ti_msgmgr_queue_peek_data() - Peek to see if there are any rx messages.
* @chan: Channel Pointer
*
* Return: 'true' if there is pending rx data, 'false' if there is none.
*/
static bool ti_msgmgr_queue_peek_data(struct mbox_chan *chan)
{
struct ti_queue_inst *qinst = chan->con_priv;
struct device *dev = chan->mbox->dev;
struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
const struct ti_msgmgr_desc *desc = inst->desc;
int msg_count;
if (qinst->is_tx)
return false;
if (ti_msgmgr_queue_is_error(desc, qinst)) {
dev_err(dev, "Error on channel %s\n", qinst->name);
return false;
}
msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
return msg_count ? true : false;
}
/**
* ti_msgmgr_last_tx_done() - See if all the tx messages are sent
* @chan: Channel pointer
*
 * Return: 'true' if there is no pending tx data, 'false' if there is any.
*/
static bool ti_msgmgr_last_tx_done(struct mbox_chan *chan)
{
struct ti_queue_inst *qinst = chan->con_priv;
struct device *dev = chan->mbox->dev;
struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
const struct ti_msgmgr_desc *desc = inst->desc;
int msg_count;
if (!qinst->is_tx)
return false;
if (ti_msgmgr_queue_is_error(desc, qinst)) {
dev_err(dev, "Error on channel %s\n", qinst->name);
return false;
}
msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
if (desc->is_sproxy) {
/* In secure proxy, msg_count indicates how many we can send */
return msg_count ? true : false;
}
/* if we have any messages pending.. */
return msg_count ? false : true;
}
static bool ti_msgmgr_chan_has_polled_queue_rx(struct mbox_chan *chan)
{
struct ti_queue_inst *qinst;
if (!chan)
return false;
qinst = chan->con_priv;
return qinst->polled_rx_mode;
}
/**
* ti_msgmgr_send_data() - Send data
* @chan: Channel Pointer
* @data: ti_msgmgr_message * Message Pointer
*
* Return: 0 if all goes good, else appropriate error messages.
*/
static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
{
struct device *dev = chan->mbox->dev;
struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
const struct ti_msgmgr_desc *desc;
struct ti_queue_inst *qinst = chan->con_priv;
int num_words, trail_bytes;
struct ti_msgmgr_message *message = data;
void __iomem *data_reg;
u32 *word_data;
int ret = 0;
if (WARN_ON(!inst)) {
dev_err(dev, "no platform drv data??\n");
return -EINVAL;
}
desc = inst->desc;
if (ti_msgmgr_queue_is_error(desc, qinst)) {
dev_err(dev, "Error on channel %s\n", qinst->name);
		return -EIO;
}
if (desc->max_message_size < message->len) {
dev_err(dev, "Queue %s message length %zu > max %d\n",
qinst->name, message->len, desc->max_message_size);
return -EINVAL;
}
/* NOTE: Constraints similar to rx path exists here as well */
for (data_reg = qinst->queue_buff_start,
num_words = message->len / sizeof(u32),
word_data = (u32 *)message->buf;
num_words; num_words--, data_reg += sizeof(u32), word_data++)
writel(*word_data, data_reg);
trail_bytes = message->len % sizeof(u32);
if (trail_bytes) {
u32 data_trail = *word_data;
/* Ensure all unused data is 0 */
data_trail &= 0xFFFFFFFF >> (8 * (sizeof(u32) - trail_bytes));
writel(data_trail, data_reg);
data_reg += sizeof(u32);
}
	/*
	 * 'data_reg' indicates the next register to write. If we did not
	 * already write the tx-complete register (the last register), we must
	 * do so to trigger the transmit. In addition, any intermediate data
	 * registers (if required) must be reset to 0 so that TISCI backward
	 * compatibility is maintained.
	 */
while (data_reg <= qinst->queue_buff_end) {
writel(0, data_reg);
data_reg += sizeof(u32);
}
/* If we are in polled mode, wait for a response before proceeding */
if (ti_msgmgr_chan_has_polled_queue_rx(message->chan_rx))
ret = ti_msgmgr_queue_rx_poll_timeout(message->chan_rx,
message->timeout_rx_ms * 1000);
return ret;
}
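/*
 * A minimal client sketch (not taken from the kernel; the helper name is an
 * assumption for illustration): it shows how a raw payload is wrapped in a
 * ti_msgmgr_message and queued on a tx channel. The real client is the TI SCI
 * protocol driver, which requests its channels at probe time and receives
 * replies through its mbox_client rx_callback. The caller must keep @msg and
 * @payload alive until tx completion is reported. Assumes
 * <linux/mailbox_client.h> for mbox_send_message().
 */
static int __maybe_unused example_msgmgr_submit(struct mbox_chan *tx_chan,
						struct ti_msgmgr_message *msg,
						u8 *payload, size_t len)
{
	int ret;

	msg->buf = payload;
	msg->len = len;		/* must not exceed desc->max_message_size */
	msg->chan_rx = NULL;	/* interrupt-driven rx; set only for polled rx */

	/* ti_msgmgr_send_data() writes the words and zero-pads to the end. */
	ret = mbox_send_message(tx_chan, msg);
	return ret < 0 ? ret : 0;
}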
/**
* ti_msgmgr_queue_rx_irq_req() - RX IRQ request
* @dev: device pointer
* @d: descriptor for ti_msgmgr
* @qinst: Queue instance
* @chan: Channel pointer
*/
static int ti_msgmgr_queue_rx_irq_req(struct device *dev,
const struct ti_msgmgr_desc *d,
struct ti_queue_inst *qinst,
struct mbox_chan *chan)
{
int ret = 0;
char of_rx_irq_name[7];
struct device_node *np;
snprintf(of_rx_irq_name, sizeof(of_rx_irq_name),
"rx_%03d", d->is_sproxy ? qinst->proxy_id : qinst->queue_id);
	/* Look up the IRQ if we do not have it yet */
if (qinst->irq < 0) {
np = of_node_get(dev->of_node);
if (!np)
return -ENODATA;
qinst->irq = of_irq_get_byname(np, of_rx_irq_name);
of_node_put(np);
if (qinst->irq < 0) {
dev_err(dev,
"QID %d PID %d:No IRQ[%s]: %d\n",
qinst->queue_id, qinst->proxy_id,
of_rx_irq_name, qinst->irq);
return qinst->irq;
}
}
/* With the expectation that the IRQ might be shared in SoC */
ret = request_irq(qinst->irq, ti_msgmgr_queue_rx_interrupt,
IRQF_SHARED, qinst->name, chan);
if (ret) {
dev_err(dev, "Unable to get IRQ %d on %s(res=%d)\n",
qinst->irq, qinst->name, ret);
}
return ret;
}
/**
* ti_msgmgr_queue_startup() - Startup queue
* @chan: Channel pointer
*
* Return: 0 if all goes good, else return corresponding error message
*/
static int ti_msgmgr_queue_startup(struct mbox_chan *chan)
{
struct device *dev = chan->mbox->dev;
struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
struct ti_queue_inst *qinst = chan->con_priv;
const struct ti_msgmgr_desc *d = inst->desc;
int ret;
int msg_count;
/*
* If sproxy is starting and can send messages, we are a Tx thread,
* else Rx
*/
if (d->is_sproxy) {
qinst->is_tx = (readl(qinst->queue_ctrl) &
SPROXY_THREAD_CTRL_DIR_MASK) ? false : true;
msg_count = ti_msgmgr_queue_get_num_messages(d, qinst);
if (!msg_count && qinst->is_tx) {
dev_err(dev, "%s: Cannot transmit with 0 credits!\n",
qinst->name);
return -EINVAL;
}
}
if (!qinst->is_tx) {
/* Allocate usage buffer for rx */
qinst->rx_buff = kzalloc(d->max_message_size, GFP_KERNEL);
if (!qinst->rx_buff)
return -ENOMEM;
/* Request IRQ */
ret = ti_msgmgr_queue_rx_irq_req(dev, d, qinst, chan);
if (ret) {
kfree(qinst->rx_buff);
return ret;
}
}
return 0;
}
/**
* ti_msgmgr_queue_shutdown() - Shutdown the queue
* @chan: Channel pointer
*/
static void ti_msgmgr_queue_shutdown(struct mbox_chan *chan)
{
struct ti_queue_inst *qinst = chan->con_priv;
if (!qinst->is_tx) {
free_irq(qinst->irq, chan);
kfree(qinst->rx_buff);
}
}
/**
* ti_msgmgr_of_xlate() - Translation of phandle to queue
* @mbox: Mailbox controller
* @p: phandle pointer
*
* Return: Mailbox channel corresponding to the queue, else return error
* pointer.
*/
static struct mbox_chan *ti_msgmgr_of_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *p)
{
struct ti_msgmgr_inst *inst;
int req_qid, req_pid;
struct ti_queue_inst *qinst;
const struct ti_msgmgr_desc *d;
int i, ncells;
inst = container_of(mbox, struct ti_msgmgr_inst, mbox);
if (WARN_ON(!inst))
return ERR_PTR(-EINVAL);
d = inst->desc;
if (d->is_sproxy)
ncells = 1;
else
ncells = 2;
if (p->args_count != ncells) {
dev_err(inst->dev, "Invalid arguments in dt[%d]. Must be %d\n",
p->args_count, ncells);
return ERR_PTR(-EINVAL);
}
if (ncells == 1) {
req_qid = 0;
req_pid = p->args[0];
} else {
req_qid = p->args[0];
req_pid = p->args[1];
}
if (d->is_sproxy) {
if (req_pid >= d->num_valid_queues)
goto err;
qinst = &inst->qinsts[req_pid];
return qinst->chan;
}
for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues;
i++, qinst++) {
if (req_qid == qinst->queue_id && req_pid == qinst->proxy_id)
return qinst->chan;
}
err:
dev_err(inst->dev, "Queue ID %d, Proxy ID %d is wrong on %pOFn\n",
req_qid, req_pid, p->np);
return ERR_PTR(-ENOENT);
}
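/*
 * The cell count expected above matches the mailbox specifier a client uses
 * in its device tree node: two cells (queue ID, then proxy ID) for
 * "ti,k2g-message-manager" and a single cell (thread/proxy ID) for
 * "ti,am654-secure-proxy".
 */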
/**
* ti_msgmgr_queue_setup() - Setup data structures for each queue instance
* @idx: index of the queue
* @dev: pointer to the message manager device
* @np: pointer to the of node
 * @inst: Message Manager instance pointer
* @d: Message Manager instance description data
* @qd: Queue description data
* @qinst: Queue instance pointer
* @chan: pointer to mailbox channel
*
* Return: 0 if all went well, else return corresponding error
*/
static int ti_msgmgr_queue_setup(int idx, struct device *dev,
struct device_node *np,
struct ti_msgmgr_inst *inst,
const struct ti_msgmgr_desc *d,
const struct ti_msgmgr_valid_queue_desc *qd,
struct ti_queue_inst *qinst,
struct mbox_chan *chan)
{
char *dir;
qinst->proxy_id = qd->proxy_id;
qinst->queue_id = qd->queue_id;
if (qinst->queue_id > d->queue_count) {
dev_err(dev, "Queue Data [idx=%d] queuid %d > %d\n",
idx, qinst->queue_id, d->queue_count);
return -ERANGE;
}
if (d->is_sproxy) {
qinst->queue_buff_start = inst->queue_proxy_region +
SPROXY_THREAD_DATA_OFFSET(qinst->proxy_id,
d->data_first_reg);
qinst->queue_buff_end = inst->queue_proxy_region +
SPROXY_THREAD_DATA_OFFSET(qinst->proxy_id,
d->data_last_reg);
qinst->queue_state = inst->queue_state_debug_region +
SPROXY_THREAD_STATUS_OFFSET(qinst->proxy_id);
qinst->queue_ctrl = inst->queue_ctrl_region +
SPROXY_THREAD_CTRL_OFFSET(qinst->proxy_id);
		/* XXX: DO NOT read registers here! Some may be unusable */
dir = "thr";
snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d",
dev_name(dev), dir, qinst->proxy_id);
} else {
qinst->queue_buff_start = inst->queue_proxy_region +
Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id,
d->data_first_reg);
qinst->queue_buff_end = inst->queue_proxy_region +
Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id,
d->data_last_reg);
qinst->queue_state =
inst->queue_state_debug_region +
Q_STATE_OFFSET(qinst->queue_id);
qinst->is_tx = qd->is_tx;
dir = qinst->is_tx ? "tx" : "rx";
snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d_%03d",
dev_name(dev), dir, qinst->queue_id, qinst->proxy_id);
}
qinst->chan = chan;
/* Setup an error value for IRQ - Lazy allocation */
qinst->irq = -EINVAL;
chan->con_priv = qinst;
dev_dbg(dev, "[%d] qidx=%d pidx=%d irq=%d q_s=%p q_e = %p\n",
idx, qinst->queue_id, qinst->proxy_id, qinst->irq,
qinst->queue_buff_start, qinst->queue_buff_end);
return 0;
}
static int ti_msgmgr_queue_rx_set_polled_mode(struct ti_queue_inst *qinst, bool enable)
{
if (enable) {
disable_irq(qinst->irq);
qinst->polled_rx_mode = true;
} else {
enable_irq(qinst->irq);
qinst->polled_rx_mode = false;
}
return 0;
}
static int ti_msgmgr_suspend(struct device *dev)
{
struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
struct ti_queue_inst *qinst;
int i;
/*
* We must switch operation to polled mode now as drivers and the genpd
* layer may make late TI SCI calls to change clock and device states
* from the noirq phase of suspend.
*/
for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues; qinst++, i++) {
if (!qinst->is_tx)
ti_msgmgr_queue_rx_set_polled_mode(qinst, true);
}
return 0;
}
static int ti_msgmgr_resume(struct device *dev)
{
struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
struct ti_queue_inst *qinst;
int i;
for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues; qinst++, i++) {
if (!qinst->is_tx)
ti_msgmgr_queue_rx_set_polled_mode(qinst, false);
}
return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(ti_msgmgr_pm_ops, ti_msgmgr_suspend, ti_msgmgr_resume);
/* Queue operations */
static const struct mbox_chan_ops ti_msgmgr_chan_ops = {
.startup = ti_msgmgr_queue_startup,
.shutdown = ti_msgmgr_queue_shutdown,
.peek_data = ti_msgmgr_queue_peek_data,
.last_tx_done = ti_msgmgr_last_tx_done,
.send_data = ti_msgmgr_send_data,
};
/* Keystone K2G SoC integration details */
static const struct ti_msgmgr_valid_queue_desc k2g_valid_queues[] = {
{.queue_id = 0, .proxy_id = 0, .is_tx = true,},
{.queue_id = 1, .proxy_id = 0, .is_tx = true,},
{.queue_id = 2, .proxy_id = 0, .is_tx = true,},
{.queue_id = 3, .proxy_id = 0, .is_tx = true,},
{.queue_id = 5, .proxy_id = 2, .is_tx = false,},
{.queue_id = 56, .proxy_id = 1, .is_tx = true,},
{.queue_id = 57, .proxy_id = 2, .is_tx = false,},
{.queue_id = 58, .proxy_id = 3, .is_tx = true,},
{.queue_id = 59, .proxy_id = 4, .is_tx = true,},
{.queue_id = 60, .proxy_id = 5, .is_tx = true,},
{.queue_id = 61, .proxy_id = 6, .is_tx = true,},
};
static const struct ti_msgmgr_desc k2g_desc = {
.queue_count = 64,
.max_message_size = 64,
.max_messages = 128,
.data_region_name = "queue_proxy_region",
.status_region_name = "queue_state_debug_region",
.data_first_reg = 16,
.data_last_reg = 31,
.status_cnt_mask = Q_STATE_ENTRY_COUNT_MASK,
.tx_polled = false,
.valid_queues = k2g_valid_queues,
.num_valid_queues = ARRAY_SIZE(k2g_valid_queues),
.is_sproxy = false,
};
static const struct ti_msgmgr_desc am654_desc = {
.queue_count = 190,
.num_valid_queues = 190,
.max_message_size = 60,
.data_region_name = "target_data",
.status_region_name = "rt",
.ctrl_region_name = "scfg",
.data_first_reg = 0,
.data_last_reg = 14,
.status_cnt_mask = SPROXY_THREAD_STATUS_COUNT_MASK,
.tx_polled = false,
.is_sproxy = true,
};
static const struct of_device_id ti_msgmgr_of_match[] = {
{.compatible = "ti,k2g-message-manager", .data = &k2g_desc},
{.compatible = "ti,am654-secure-proxy", .data = &am654_desc},
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, ti_msgmgr_of_match);
static int ti_msgmgr_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct of_device_id *of_id;
struct device_node *np;
const struct ti_msgmgr_desc *desc;
struct ti_msgmgr_inst *inst;
struct ti_queue_inst *qinst;
struct mbox_controller *mbox;
struct mbox_chan *chans;
int queue_count;
int i;
int ret = -EINVAL;
const struct ti_msgmgr_valid_queue_desc *queue_desc;
if (!dev->of_node) {
dev_err(dev, "no OF information\n");
return -EINVAL;
}
np = dev->of_node;
of_id = of_match_device(ti_msgmgr_of_match, dev);
if (!of_id) {
dev_err(dev, "OF data missing\n");
return -EINVAL;
}
desc = of_id->data;
inst = devm_kzalloc(dev, sizeof(*inst), GFP_KERNEL);
if (!inst)
return -ENOMEM;
inst->dev = dev;
inst->desc = desc;
inst->queue_proxy_region =
devm_platform_ioremap_resource_byname(pdev, desc->data_region_name);
if (IS_ERR(inst->queue_proxy_region))
return PTR_ERR(inst->queue_proxy_region);
inst->queue_state_debug_region =
devm_platform_ioremap_resource_byname(pdev, desc->status_region_name);
if (IS_ERR(inst->queue_state_debug_region))
return PTR_ERR(inst->queue_state_debug_region);
if (desc->is_sproxy) {
inst->queue_ctrl_region =
devm_platform_ioremap_resource_byname(pdev, desc->ctrl_region_name);
if (IS_ERR(inst->queue_ctrl_region))
return PTR_ERR(inst->queue_ctrl_region);
}
dev_dbg(dev, "proxy region=%p, queue_state=%p\n",
inst->queue_proxy_region, inst->queue_state_debug_region);
queue_count = desc->num_valid_queues;
if (!queue_count || queue_count > desc->queue_count) {
dev_crit(dev, "Invalid Number of queues %d. Max %d\n",
queue_count, desc->queue_count);
return -ERANGE;
}
inst->num_valid_queues = queue_count;
qinst = devm_kcalloc(dev, queue_count, sizeof(*qinst), GFP_KERNEL);
if (!qinst)
return -ENOMEM;
inst->qinsts = qinst;
chans = devm_kcalloc(dev, queue_count, sizeof(*chans), GFP_KERNEL);
if (!chans)
return -ENOMEM;
inst->chans = chans;
if (desc->is_sproxy) {
struct ti_msgmgr_valid_queue_desc sproxy_desc;
/* All proxies may be valid in Secure Proxy instance */
for (i = 0; i < queue_count; i++, qinst++, chans++) {
sproxy_desc.queue_id = 0;
sproxy_desc.proxy_id = i;
ret = ti_msgmgr_queue_setup(i, dev, np, inst,
desc, &sproxy_desc, qinst,
chans);
if (ret)
return ret;
}
} else {
		/* Only some proxies are valid in Message Manager */
for (i = 0, queue_desc = desc->valid_queues;
i < queue_count; i++, qinst++, chans++, queue_desc++) {
ret = ti_msgmgr_queue_setup(i, dev, np, inst,
desc, queue_desc, qinst,
chans);
if (ret)
return ret;
}
}
mbox = &inst->mbox;
mbox->dev = dev;
mbox->ops = &ti_msgmgr_chan_ops;
mbox->chans = inst->chans;
mbox->num_chans = inst->num_valid_queues;
mbox->txdone_irq = false;
mbox->txdone_poll = desc->tx_polled;
if (desc->tx_polled)
mbox->txpoll_period = desc->tx_poll_timeout_ms;
mbox->of_xlate = ti_msgmgr_of_xlate;
platform_set_drvdata(pdev, inst);
ret = devm_mbox_controller_register(dev, mbox);
if (ret)
dev_err(dev, "Failed to register mbox_controller(%d)\n", ret);
return ret;
}
static struct platform_driver ti_msgmgr_driver = {
.probe = ti_msgmgr_probe,
.driver = {
.name = "ti-msgmgr",
.of_match_table = of_match_ptr(ti_msgmgr_of_match),
.pm = &ti_msgmgr_pm_ops,
},
};
module_platform_driver(ti_msgmgr_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI message manager driver");
MODULE_AUTHOR("Nishanth Menon");
MODULE_ALIAS("platform:ti-msgmgr");
|
linux-master
|
drivers/mailbox/ti-msgmgr.c
|
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
* Apple mailbox driver
*
* Copyright (C) 2021 The Asahi Linux Contributors
*
* This driver adds support for two mailbox variants (called ASC and M3 by
* Apple) found in Apple SoCs such as the M1. It consists of two FIFOs used to
* exchange 64+32 bit messages between the main CPU and a co-processor.
* Various coprocessors implement different IPC protocols based on these simple
* messages and shared memory buffers.
*
* Both the main CPU and the co-processor see the same set of registers but
* the first FIFO (A2I) is always used to transfer messages from the application
* processor (us) to the I/O processor and the second one (I2A) for the
* other direction.
*/
#include <linux/apple-mailbox.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#define APPLE_ASC_MBOX_CONTROL_FULL BIT(16)
#define APPLE_ASC_MBOX_CONTROL_EMPTY BIT(17)
#define APPLE_ASC_MBOX_A2I_CONTROL 0x110
#define APPLE_ASC_MBOX_A2I_SEND0 0x800
#define APPLE_ASC_MBOX_A2I_SEND1 0x808
#define APPLE_ASC_MBOX_A2I_RECV0 0x810
#define APPLE_ASC_MBOX_A2I_RECV1 0x818
#define APPLE_ASC_MBOX_I2A_CONTROL 0x114
#define APPLE_ASC_MBOX_I2A_SEND0 0x820
#define APPLE_ASC_MBOX_I2A_SEND1 0x828
#define APPLE_ASC_MBOX_I2A_RECV0 0x830
#define APPLE_ASC_MBOX_I2A_RECV1 0x838
#define APPLE_M3_MBOX_CONTROL_FULL BIT(16)
#define APPLE_M3_MBOX_CONTROL_EMPTY BIT(17)
#define APPLE_M3_MBOX_A2I_CONTROL 0x50
#define APPLE_M3_MBOX_A2I_SEND0 0x60
#define APPLE_M3_MBOX_A2I_SEND1 0x68
#define APPLE_M3_MBOX_A2I_RECV0 0x70
#define APPLE_M3_MBOX_A2I_RECV1 0x78
#define APPLE_M3_MBOX_I2A_CONTROL 0x80
#define APPLE_M3_MBOX_I2A_SEND0 0x90
#define APPLE_M3_MBOX_I2A_SEND1 0x98
#define APPLE_M3_MBOX_I2A_RECV0 0xa0
#define APPLE_M3_MBOX_I2A_RECV1 0xa8
#define APPLE_M3_MBOX_IRQ_ENABLE 0x48
#define APPLE_M3_MBOX_IRQ_ACK 0x4c
#define APPLE_M3_MBOX_IRQ_A2I_EMPTY BIT(0)
#define APPLE_M3_MBOX_IRQ_A2I_NOT_EMPTY BIT(1)
#define APPLE_M3_MBOX_IRQ_I2A_EMPTY BIT(2)
#define APPLE_M3_MBOX_IRQ_I2A_NOT_EMPTY BIT(3)
#define APPLE_MBOX_MSG1_OUTCNT GENMASK(56, 52)
#define APPLE_MBOX_MSG1_INCNT GENMASK(51, 48)
#define APPLE_MBOX_MSG1_OUTPTR GENMASK(47, 44)
#define APPLE_MBOX_MSG1_INPTR GENMASK(43, 40)
#define APPLE_MBOX_MSG1_MSG GENMASK(31, 0)
struct apple_mbox_hw {
unsigned int control_full;
unsigned int control_empty;
unsigned int a2i_control;
unsigned int a2i_send0;
unsigned int a2i_send1;
unsigned int i2a_control;
unsigned int i2a_recv0;
unsigned int i2a_recv1;
bool has_irq_controls;
unsigned int irq_enable;
unsigned int irq_ack;
unsigned int irq_bit_recv_not_empty;
unsigned int irq_bit_send_empty;
};
struct apple_mbox {
void __iomem *regs;
const struct apple_mbox_hw *hw;
int irq_recv_not_empty;
int irq_send_empty;
struct mbox_chan chan;
struct device *dev;
struct mbox_controller controller;
spinlock_t rx_lock;
};
static const struct of_device_id apple_mbox_of_match[];
static bool apple_mbox_hw_can_send(struct apple_mbox *apple_mbox)
{
u32 mbox_ctrl =
readl_relaxed(apple_mbox->regs + apple_mbox->hw->a2i_control);
return !(mbox_ctrl & apple_mbox->hw->control_full);
}
static bool apple_mbox_hw_send_empty(struct apple_mbox *apple_mbox)
{
u32 mbox_ctrl =
readl_relaxed(apple_mbox->regs + apple_mbox->hw->a2i_control);
return mbox_ctrl & apple_mbox->hw->control_empty;
}
static int apple_mbox_hw_send(struct apple_mbox *apple_mbox,
struct apple_mbox_msg *msg)
{
if (!apple_mbox_hw_can_send(apple_mbox))
return -EBUSY;
dev_dbg(apple_mbox->dev, "> TX %016llx %08x\n", msg->msg0, msg->msg1);
writeq_relaxed(msg->msg0, apple_mbox->regs + apple_mbox->hw->a2i_send0);
writeq_relaxed(FIELD_PREP(APPLE_MBOX_MSG1_MSG, msg->msg1),
apple_mbox->regs + apple_mbox->hw->a2i_send1);
return 0;
}
static bool apple_mbox_hw_can_recv(struct apple_mbox *apple_mbox)
{
u32 mbox_ctrl =
readl_relaxed(apple_mbox->regs + apple_mbox->hw->i2a_control);
return !(mbox_ctrl & apple_mbox->hw->control_empty);
}
static int apple_mbox_hw_recv(struct apple_mbox *apple_mbox,
struct apple_mbox_msg *msg)
{
if (!apple_mbox_hw_can_recv(apple_mbox))
return -ENOMSG;
msg->msg0 = readq_relaxed(apple_mbox->regs + apple_mbox->hw->i2a_recv0);
msg->msg1 = FIELD_GET(
APPLE_MBOX_MSG1_MSG,
readq_relaxed(apple_mbox->regs + apple_mbox->hw->i2a_recv1));
dev_dbg(apple_mbox->dev, "< RX %016llx %08x\n", msg->msg0, msg->msg1);
return 0;
}
static int apple_mbox_chan_send_data(struct mbox_chan *chan, void *data)
{
struct apple_mbox *apple_mbox = chan->con_priv;
struct apple_mbox_msg *msg = data;
int ret;
ret = apple_mbox_hw_send(apple_mbox, msg);
if (ret)
return ret;
/*
* The interrupt is level triggered and will keep firing as long as the
* FIFO is empty. It will also keep firing if the FIFO was empty
* at any point in the past until it has been acknowledged at the
* mailbox level. By acknowledging it here we can ensure that we will
* only get the interrupt once the FIFO has been cleared again.
* If the FIFO is already empty before the ack it will fire again
* immediately after the ack.
*/
if (apple_mbox->hw->has_irq_controls) {
writel_relaxed(apple_mbox->hw->irq_bit_send_empty,
apple_mbox->regs + apple_mbox->hw->irq_ack);
}
enable_irq(apple_mbox->irq_send_empty);
return 0;
}
static irqreturn_t apple_mbox_send_empty_irq(int irq, void *data)
{
struct apple_mbox *apple_mbox = data;
/*
* We don't need to acknowledge the interrupt at the mailbox level
* here even if supported by the hardware. It will keep firing but that
* doesn't matter since it's disabled at the main interrupt controller.
* apple_mbox_chan_send_data will acknowledge it before enabling
* it at the main controller again.
*/
disable_irq_nosync(apple_mbox->irq_send_empty);
mbox_chan_txdone(&apple_mbox->chan, 0);
return IRQ_HANDLED;
}
static int apple_mbox_poll(struct apple_mbox *apple_mbox)
{
struct apple_mbox_msg msg;
int ret = 0;
while (apple_mbox_hw_recv(apple_mbox, &msg) == 0) {
mbox_chan_received_data(&apple_mbox->chan, (void *)&msg);
ret++;
}
/*
* The interrupt will keep firing even if there are no more messages
* unless we also acknowledge it at the mailbox level here.
* There's no race if a message comes in between the check in the while
	 * loop above and the ack below: if a new message arrives in between
* those two the interrupt will just fire again immediately after the
* ack since it's level triggered.
*/
if (apple_mbox->hw->has_irq_controls) {
writel_relaxed(apple_mbox->hw->irq_bit_recv_not_empty,
apple_mbox->regs + apple_mbox->hw->irq_ack);
}
return ret;
}
static irqreturn_t apple_mbox_recv_irq(int irq, void *data)
{
struct apple_mbox *apple_mbox = data;
spin_lock(&apple_mbox->rx_lock);
apple_mbox_poll(apple_mbox);
spin_unlock(&apple_mbox->rx_lock);
return IRQ_HANDLED;
}
static bool apple_mbox_chan_peek_data(struct mbox_chan *chan)
{
struct apple_mbox *apple_mbox = chan->con_priv;
unsigned long flags;
int ret;
spin_lock_irqsave(&apple_mbox->rx_lock, flags);
ret = apple_mbox_poll(apple_mbox);
spin_unlock_irqrestore(&apple_mbox->rx_lock, flags);
return ret > 0;
}
static int apple_mbox_chan_flush(struct mbox_chan *chan, unsigned long timeout)
{
struct apple_mbox *apple_mbox = chan->con_priv;
unsigned long deadline = jiffies + msecs_to_jiffies(timeout);
while (time_before(jiffies, deadline)) {
if (apple_mbox_hw_send_empty(apple_mbox)) {
mbox_chan_txdone(&apple_mbox->chan, 0);
return 0;
}
udelay(1);
}
return -ETIME;
}
static int apple_mbox_chan_startup(struct mbox_chan *chan)
{
struct apple_mbox *apple_mbox = chan->con_priv;
/*
* Only some variants of this mailbox HW provide interrupt control
* at the mailbox level. We therefore need to handle enabling/disabling
* interrupts at the main interrupt controller anyway for hardware that
* doesn't. Just always keep the interrupts we care about enabled at
* the mailbox level so that both hardware revisions behave almost
* the same.
*/
if (apple_mbox->hw->has_irq_controls) {
writel_relaxed(apple_mbox->hw->irq_bit_recv_not_empty |
apple_mbox->hw->irq_bit_send_empty,
apple_mbox->regs + apple_mbox->hw->irq_enable);
}
enable_irq(apple_mbox->irq_recv_not_empty);
return 0;
}
static void apple_mbox_chan_shutdown(struct mbox_chan *chan)
{
struct apple_mbox *apple_mbox = chan->con_priv;
disable_irq(apple_mbox->irq_recv_not_empty);
}
static const struct mbox_chan_ops apple_mbox_ops = {
.send_data = apple_mbox_chan_send_data,
.peek_data = apple_mbox_chan_peek_data,
.flush = apple_mbox_chan_flush,
.startup = apple_mbox_chan_startup,
.shutdown = apple_mbox_chan_shutdown,
};
static struct mbox_chan *apple_mbox_of_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *args)
{
if (args->args_count != 0)
return ERR_PTR(-EINVAL);
return &mbox->chans[0];
}
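/*
 * A minimal client sketch (not taken from the kernel; the helper name is an
 * assumption for illustration): it fills the caller's apple_mbox_msg and
 * queues it on the controller's single channel. Real users, such as the
 * RTKit layer, define their own protocol on top of the raw 64+32 bit pair
 * and consume replies through their mbox_client rx_callback. The caller must
 * keep @msg alive until txdone is reported. Assumes
 * <linux/mailbox_client.h> for mbox_send_message().
 */
static int __maybe_unused example_apple_mbox_submit(struct mbox_chan *chan,
						    struct apple_mbox_msg *msg,
						    u64 msg0, u32 msg1)
{
	int ret;

	msg->msg0 = msg0;
	msg->msg1 = msg1;	/* placed in the low 32 bits of the 2nd FIFO word */

	/* txdone is signaled by the send-empty IRQ; no client-side polling. */
	ret = mbox_send_message(chan, msg);
	return ret < 0 ? ret : 0;
}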
static int apple_mbox_probe(struct platform_device *pdev)
{
int ret;
const struct of_device_id *match;
char *irqname;
struct apple_mbox *mbox;
struct device *dev = &pdev->dev;
match = of_match_node(apple_mbox_of_match, pdev->dev.of_node);
if (!match)
return -EINVAL;
if (!match->data)
return -EINVAL;
mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
if (!mbox)
return -ENOMEM;
platform_set_drvdata(pdev, mbox);
mbox->dev = dev;
mbox->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->regs))
return PTR_ERR(mbox->regs);
mbox->hw = match->data;
mbox->irq_recv_not_empty =
platform_get_irq_byname(pdev, "recv-not-empty");
if (mbox->irq_recv_not_empty < 0)
return -ENODEV;
mbox->irq_send_empty = platform_get_irq_byname(pdev, "send-empty");
if (mbox->irq_send_empty < 0)
return -ENODEV;
mbox->controller.dev = mbox->dev;
mbox->controller.num_chans = 1;
mbox->controller.chans = &mbox->chan;
mbox->controller.ops = &apple_mbox_ops;
mbox->controller.txdone_irq = true;
mbox->controller.of_xlate = apple_mbox_of_xlate;
mbox->chan.con_priv = mbox;
spin_lock_init(&mbox->rx_lock);
irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-recv", dev_name(dev));
if (!irqname)
return -ENOMEM;
ret = devm_request_threaded_irq(dev, mbox->irq_recv_not_empty, NULL,
apple_mbox_recv_irq,
IRQF_NO_AUTOEN | IRQF_ONESHOT, irqname,
mbox);
if (ret)
return ret;
irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-send", dev_name(dev));
if (!irqname)
return -ENOMEM;
ret = devm_request_irq(dev, mbox->irq_send_empty,
apple_mbox_send_empty_irq, IRQF_NO_AUTOEN,
irqname, mbox);
if (ret)
return ret;
return devm_mbox_controller_register(dev, &mbox->controller);
}
static const struct apple_mbox_hw apple_mbox_asc_hw = {
.control_full = APPLE_ASC_MBOX_CONTROL_FULL,
.control_empty = APPLE_ASC_MBOX_CONTROL_EMPTY,
.a2i_control = APPLE_ASC_MBOX_A2I_CONTROL,
.a2i_send0 = APPLE_ASC_MBOX_A2I_SEND0,
.a2i_send1 = APPLE_ASC_MBOX_A2I_SEND1,
.i2a_control = APPLE_ASC_MBOX_I2A_CONTROL,
.i2a_recv0 = APPLE_ASC_MBOX_I2A_RECV0,
.i2a_recv1 = APPLE_ASC_MBOX_I2A_RECV1,
.has_irq_controls = false,
};
static const struct apple_mbox_hw apple_mbox_m3_hw = {
.control_full = APPLE_M3_MBOX_CONTROL_FULL,
.control_empty = APPLE_M3_MBOX_CONTROL_EMPTY,
.a2i_control = APPLE_M3_MBOX_A2I_CONTROL,
.a2i_send0 = APPLE_M3_MBOX_A2I_SEND0,
.a2i_send1 = APPLE_M3_MBOX_A2I_SEND1,
.i2a_control = APPLE_M3_MBOX_I2A_CONTROL,
.i2a_recv0 = APPLE_M3_MBOX_I2A_RECV0,
.i2a_recv1 = APPLE_M3_MBOX_I2A_RECV1,
.has_irq_controls = true,
.irq_enable = APPLE_M3_MBOX_IRQ_ENABLE,
.irq_ack = APPLE_M3_MBOX_IRQ_ACK,
.irq_bit_recv_not_empty = APPLE_M3_MBOX_IRQ_I2A_NOT_EMPTY,
.irq_bit_send_empty = APPLE_M3_MBOX_IRQ_A2I_EMPTY,
};
static const struct of_device_id apple_mbox_of_match[] = {
{ .compatible = "apple,asc-mailbox-v4", .data = &apple_mbox_asc_hw },
{ .compatible = "apple,m3-mailbox-v2", .data = &apple_mbox_m3_hw },
{}
};
MODULE_DEVICE_TABLE(of, apple_mbox_of_match);
static struct platform_driver apple_mbox_driver = {
.driver = {
.name = "apple-mailbox",
.of_match_table = apple_mbox_of_match,
},
.probe = apple_mbox_probe,
};
module_platform_driver(apple_mbox_driver);
MODULE_LICENSE("Dual MIT/GPL");
MODULE_AUTHOR("Sven Peter <[email protected]>");
MODULE_DESCRIPTION("Apple Mailbox driver");
|
linux-master
|
drivers/mailbox/apple-mailbox.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <soc/tegra/fuse.h>
#include <dt-bindings/mailbox/tegra186-hsp.h>
#include "mailbox.h"
#define HSP_INT_IE(x) (0x100 + ((x) * 4))
#define HSP_INT_IV 0x300
#define HSP_INT_IR 0x304
#define HSP_INT_EMPTY_SHIFT 0
#define HSP_INT_EMPTY_MASK 0xff
#define HSP_INT_FULL_SHIFT 8
#define HSP_INT_FULL_MASK 0xff
#define HSP_INT_DIMENSIONING 0x380
#define HSP_nSM_SHIFT 0
#define HSP_nSS_SHIFT 4
#define HSP_nAS_SHIFT 8
#define HSP_nDB_SHIFT 12
#define HSP_nSI_SHIFT 16
#define HSP_nINT_MASK 0xf
#define HSP_DB_TRIGGER 0x0
#define HSP_DB_ENABLE 0x4
#define HSP_DB_RAW 0x8
#define HSP_DB_PENDING 0xc
#define HSP_SM_SHRD_MBOX 0x0
#define HSP_SM_SHRD_MBOX_FULL BIT(31)
#define HSP_SM_SHRD_MBOX_FULL_INT_IE 0x04
#define HSP_SM_SHRD_MBOX_EMPTY_INT_IE 0x08
#define HSP_SHRD_MBOX_TYPE1_TAG 0x40
#define HSP_SHRD_MBOX_TYPE1_DATA0 0x48
#define HSP_SHRD_MBOX_TYPE1_DATA1 0x4c
#define HSP_SHRD_MBOX_TYPE1_DATA2 0x50
#define HSP_SHRD_MBOX_TYPE1_DATA3 0x54
#define HSP_DB_CCPLEX 1
#define HSP_DB_BPMP 3
#define HSP_DB_MAX 7
#define HSP_MBOX_TYPE_MASK 0xff
struct tegra_hsp_channel;
struct tegra_hsp;
struct tegra_hsp_channel {
struct tegra_hsp *hsp;
struct mbox_chan *chan;
void __iomem *regs;
};
struct tegra_hsp_doorbell {
struct tegra_hsp_channel channel;
struct list_head list;
const char *name;
unsigned int master;
unsigned int index;
};
struct tegra_hsp_sm_ops {
void (*send)(struct tegra_hsp_channel *channel, void *data);
void (*recv)(struct tegra_hsp_channel *channel);
};
struct tegra_hsp_mailbox {
struct tegra_hsp_channel channel;
const struct tegra_hsp_sm_ops *ops;
unsigned int index;
bool producer;
};
struct tegra_hsp_db_map {
const char *name;
unsigned int master;
unsigned int index;
};
struct tegra_hsp_soc {
const struct tegra_hsp_db_map *map;
bool has_per_mb_ie;
bool has_128_bit_mb;
unsigned int reg_stride;
};
struct tegra_hsp {
struct device *dev;
const struct tegra_hsp_soc *soc;
struct mbox_controller mbox_db;
struct mbox_controller mbox_sm;
void __iomem *regs;
unsigned int doorbell_irq;
unsigned int *shared_irqs;
unsigned int shared_irq;
unsigned int num_sm;
unsigned int num_as;
unsigned int num_ss;
unsigned int num_db;
unsigned int num_si;
spinlock_t lock;
struct lock_class_key lock_key;
struct list_head doorbells;
struct tegra_hsp_mailbox *mailboxes;
unsigned long mask;
};
static inline u32 tegra_hsp_readl(struct tegra_hsp *hsp, unsigned int offset)
{
return readl(hsp->regs + offset);
}
static inline void tegra_hsp_writel(struct tegra_hsp *hsp, u32 value,
unsigned int offset)
{
writel(value, hsp->regs + offset);
}
static inline u32 tegra_hsp_channel_readl(struct tegra_hsp_channel *channel,
unsigned int offset)
{
return readl(channel->regs + offset);
}
static inline void tegra_hsp_channel_writel(struct tegra_hsp_channel *channel,
u32 value, unsigned int offset)
{
writel(value, channel->regs + offset);
}
static bool tegra_hsp_doorbell_can_ring(struct tegra_hsp_doorbell *db)
{
u32 value;
value = tegra_hsp_channel_readl(&db->channel, HSP_DB_ENABLE);
return (value & BIT(TEGRA_HSP_DB_MASTER_CCPLEX)) != 0;
}
static struct tegra_hsp_doorbell *
__tegra_hsp_doorbell_get(struct tegra_hsp *hsp, unsigned int master)
{
struct tegra_hsp_doorbell *entry;
list_for_each_entry(entry, &hsp->doorbells, list)
if (entry->master == master)
return entry;
return NULL;
}
static struct tegra_hsp_doorbell *
tegra_hsp_doorbell_get(struct tegra_hsp *hsp, unsigned int master)
{
struct tegra_hsp_doorbell *db;
unsigned long flags;
spin_lock_irqsave(&hsp->lock, flags);
db = __tegra_hsp_doorbell_get(hsp, master);
spin_unlock_irqrestore(&hsp->lock, flags);
return db;
}
static irqreturn_t tegra_hsp_doorbell_irq(int irq, void *data)
{
struct tegra_hsp *hsp = data;
struct tegra_hsp_doorbell *db;
unsigned long master, value;
db = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
if (!db)
return IRQ_NONE;
value = tegra_hsp_channel_readl(&db->channel, HSP_DB_PENDING);
tegra_hsp_channel_writel(&db->channel, value, HSP_DB_PENDING);
spin_lock(&hsp->lock);
for_each_set_bit(master, &value, hsp->mbox_db.num_chans) {
struct tegra_hsp_doorbell *db;
db = __tegra_hsp_doorbell_get(hsp, master);
/*
		 * Depending on the bootloader chain, the CCPLEX doorbell may
		 * already have some masters enabled, which means the interrupt
		 * can fire as soon as it is requested.
*
* In that case, db->channel.chan will still be NULL here and
* cause a crash if not properly guarded.
*
* It remains to be seen if ignoring the doorbell in that case
* is the correct solution.
*/
if (db && db->channel.chan)
mbox_chan_received_data(db->channel.chan, NULL);
}
spin_unlock(&hsp->lock);
return IRQ_HANDLED;
}
static irqreturn_t tegra_hsp_shared_irq(int irq, void *data)
{
struct tegra_hsp *hsp = data;
unsigned long bit, mask;
u32 status;
status = tegra_hsp_readl(hsp, HSP_INT_IR) & hsp->mask;
/* process EMPTY interrupts first */
mask = (status >> HSP_INT_EMPTY_SHIFT) & HSP_INT_EMPTY_MASK;
for_each_set_bit(bit, &mask, hsp->num_sm) {
struct tegra_hsp_mailbox *mb = &hsp->mailboxes[bit];
if (mb->producer) {
/*
* Disable EMPTY interrupts until data is sent with
* the next message. These interrupts are level-
* triggered, so if we kept them enabled they would
* constantly trigger until we next write data into
* the message.
*/
spin_lock(&hsp->lock);
hsp->mask &= ~BIT(HSP_INT_EMPTY_SHIFT + mb->index);
tegra_hsp_writel(hsp, hsp->mask,
HSP_INT_IE(hsp->shared_irq));
spin_unlock(&hsp->lock);
mbox_chan_txdone(mb->channel.chan, 0);
}
}
/* process FULL interrupts */
mask = (status >> HSP_INT_FULL_SHIFT) & HSP_INT_FULL_MASK;
for_each_set_bit(bit, &mask, hsp->num_sm) {
struct tegra_hsp_mailbox *mb = &hsp->mailboxes[bit];
if (!mb->producer)
mb->ops->recv(&mb->channel);
}
return IRQ_HANDLED;
}
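/*
 * Illustrative sketch, not part of the upstream driver: the handler above
 * relies on the fact that the per-mailbox bit in the shared interrupt enable
 * register follows directly from the mailbox index and its direction. A
 * producer only cares about EMPTY, a consumer only about FULL. The helper
 * name below is made up for illustration.
 */
static inline u32 __maybe_unused
tegra_hsp_mailbox_ie_bit(const struct tegra_hsp_mailbox *mb)
{
	return mb->producer ? BIT(HSP_INT_EMPTY_SHIFT + mb->index)
			    : BIT(HSP_INT_FULL_SHIFT + mb->index);
}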
static struct tegra_hsp_channel *
tegra_hsp_doorbell_create(struct tegra_hsp *hsp, const char *name,
unsigned int master, unsigned int index)
{
struct tegra_hsp_doorbell *db;
unsigned int offset;
unsigned long flags;
db = devm_kzalloc(hsp->dev, sizeof(*db), GFP_KERNEL);
if (!db)
return ERR_PTR(-ENOMEM);
offset = (1 + (hsp->num_sm / 2) + hsp->num_ss + hsp->num_as) * SZ_64K;
offset += index * hsp->soc->reg_stride;
db->channel.regs = hsp->regs + offset;
db->channel.hsp = hsp;
db->name = devm_kstrdup_const(hsp->dev, name, GFP_KERNEL);
db->master = master;
db->index = index;
spin_lock_irqsave(&hsp->lock, flags);
list_add_tail(&db->list, &hsp->doorbells);
spin_unlock_irqrestore(&hsp->lock, flags);
return &db->channel;
}
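/*
 * Worked example (sketch, with made-up dimensioning values): for an HSP
 * instance with eight shared mailboxes, two shared semaphores, two
 * arbitrated semaphores and a 0x100 doorbell register stride, the doorbell
 * region starts at (1 + 8/2 + 2 + 2) * 64 KiB = 0x90000, so doorbell index 3
 * lives at offset 0x90000 + 3 * 0x100 = 0x90300 from the HSP base.
 */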
static int tegra_hsp_doorbell_send_data(struct mbox_chan *chan, void *data)
{
struct tegra_hsp_doorbell *db = chan->con_priv;
tegra_hsp_channel_writel(&db->channel, 1, HSP_DB_TRIGGER);
return 0;
}
static int tegra_hsp_doorbell_startup(struct mbox_chan *chan)
{
struct tegra_hsp_doorbell *db = chan->con_priv;
struct tegra_hsp *hsp = db->channel.hsp;
struct tegra_hsp_doorbell *ccplex;
unsigned long flags;
u32 value;
if (db->master >= chan->mbox->num_chans) {
dev_err(chan->mbox->dev,
"invalid master ID %u for HSP channel\n",
db->master);
return -EINVAL;
}
ccplex = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
if (!ccplex)
return -ENODEV;
/*
* On simulation platforms the BPMP hasn't had a chance yet to mark
* the doorbell as ringable by the CCPLEX, so we want to skip extra
* checks here.
*/
if (tegra_is_silicon() && !tegra_hsp_doorbell_can_ring(db))
return -ENODEV;
spin_lock_irqsave(&hsp->lock, flags);
value = tegra_hsp_channel_readl(&ccplex->channel, HSP_DB_ENABLE);
value |= BIT(db->master);
tegra_hsp_channel_writel(&ccplex->channel, value, HSP_DB_ENABLE);
spin_unlock_irqrestore(&hsp->lock, flags);
return 0;
}
static void tegra_hsp_doorbell_shutdown(struct mbox_chan *chan)
{
struct tegra_hsp_doorbell *db = chan->con_priv;
struct tegra_hsp *hsp = db->channel.hsp;
struct tegra_hsp_doorbell *ccplex;
unsigned long flags;
u32 value;
ccplex = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
if (!ccplex)
return;
spin_lock_irqsave(&hsp->lock, flags);
value = tegra_hsp_channel_readl(&ccplex->channel, HSP_DB_ENABLE);
value &= ~BIT(db->master);
tegra_hsp_channel_writel(&ccplex->channel, value, HSP_DB_ENABLE);
spin_unlock_irqrestore(&hsp->lock, flags);
}
static const struct mbox_chan_ops tegra_hsp_db_ops = {
.send_data = tegra_hsp_doorbell_send_data,
.startup = tegra_hsp_doorbell_startup,
.shutdown = tegra_hsp_doorbell_shutdown,
};
static void tegra_hsp_sm_send32(struct tegra_hsp_channel *channel, void *data)
{
u32 value;
/* copy data and mark mailbox full */
value = (u32)(unsigned long)data;
value |= HSP_SM_SHRD_MBOX_FULL;
tegra_hsp_channel_writel(channel, value, HSP_SM_SHRD_MBOX);
}
static void tegra_hsp_sm_recv32(struct tegra_hsp_channel *channel)
{
u32 value;
void *msg;
value = tegra_hsp_channel_readl(channel, HSP_SM_SHRD_MBOX);
value &= ~HSP_SM_SHRD_MBOX_FULL;
msg = (void *)(unsigned long)value;
mbox_chan_received_data(channel->chan, msg);
/*
* Need to clear all bits here since some producers, such as TCU, depend
* on fields in the register getting cleared by the consumer.
*
* The mailbox API doesn't give the consumers a way of doing that
* explicitly, so we have to make sure we cover all possible cases.
*/
tegra_hsp_channel_writel(channel, 0x0, HSP_SM_SHRD_MBOX);
}
static const struct tegra_hsp_sm_ops tegra_hsp_sm_32bit_ops = {
.send = tegra_hsp_sm_send32,
.recv = tegra_hsp_sm_recv32,
};
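/*
 * Illustrative sketch, not part of the upstream driver: a 32-bit shared
 * mailbox carries the payload and the FULL tag in the same register, so at
 * most 31 bits of payload survive the send/recv round trip implemented
 * above. The helper name below is made up for illustration.
 */
static inline u32 __maybe_unused tegra_hsp_sm32_pack(u32 payload)
{
	/* bit 31 is consumed by the FULL tag */
	return payload | HSP_SM_SHRD_MBOX_FULL;
}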
static void tegra_hsp_sm_send128(struct tegra_hsp_channel *channel, void *data)
{
u32 value[4];
memcpy(value, data, sizeof(value));
/* Copy data */
tegra_hsp_channel_writel(channel, value[0], HSP_SHRD_MBOX_TYPE1_DATA0);
tegra_hsp_channel_writel(channel, value[1], HSP_SHRD_MBOX_TYPE1_DATA1);
tegra_hsp_channel_writel(channel, value[2], HSP_SHRD_MBOX_TYPE1_DATA2);
tegra_hsp_channel_writel(channel, value[3], HSP_SHRD_MBOX_TYPE1_DATA3);
/* Update tag to mark mailbox full */
tegra_hsp_channel_writel(channel, HSP_SM_SHRD_MBOX_FULL,
HSP_SHRD_MBOX_TYPE1_TAG);
}
static void tegra_hsp_sm_recv128(struct tegra_hsp_channel *channel)
{
u32 value[4];
void *msg;
value[0] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA0);
value[1] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA1);
value[2] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA2);
value[3] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA3);
msg = (void *)(unsigned long)value;
mbox_chan_received_data(channel->chan, msg);
/*
* Clear data registers and tag.
*/
tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA0);
tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA1);
tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA2);
tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA3);
tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_TAG);
}
static const struct tegra_hsp_sm_ops tegra_hsp_sm_128bit_ops = {
.send = tegra_hsp_sm_send128,
.recv = tegra_hsp_sm_recv128,
};
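/*
 * Illustrative client-side sketch (not part of this driver; assumes a
 * consumer built against <linux/mailbox_client.h> that has already obtained
 * "chan" via mbox_request_channel()): a 128-bit shared mailbox expects the
 * message pointer passed to mbox_send_message() to reference four
 * consecutive u32 words, which tegra_hsp_sm_send128() copies into
 * DATA0..DATA3 before setting the tag.
 *
 *	u32 msg[4] = { 0x1, 0x2, 0x3, 0x4 };
 *
 *	err = mbox_send_message(chan, msg);
 */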
static int tegra_hsp_mailbox_send_data(struct mbox_chan *chan, void *data)
{
struct tegra_hsp_mailbox *mb = chan->con_priv;
struct tegra_hsp *hsp = mb->channel.hsp;
unsigned long flags;
if (WARN_ON(!mb->producer))
return -EPERM;
mb->ops->send(&mb->channel, data);
/* enable EMPTY interrupt for the shared mailbox */
spin_lock_irqsave(&hsp->lock, flags);
hsp->mask |= BIT(HSP_INT_EMPTY_SHIFT + mb->index);
tegra_hsp_writel(hsp, hsp->mask, HSP_INT_IE(hsp->shared_irq));
spin_unlock_irqrestore(&hsp->lock, flags);
return 0;
}
static int tegra_hsp_mailbox_flush(struct mbox_chan *chan,
unsigned long timeout)
{
struct tegra_hsp_mailbox *mb = chan->con_priv;
struct tegra_hsp_channel *ch = &mb->channel;
u32 value;
timeout = jiffies + msecs_to_jiffies(timeout);
while (time_before(jiffies, timeout)) {
value = tegra_hsp_channel_readl(ch, HSP_SM_SHRD_MBOX);
if ((value & HSP_SM_SHRD_MBOX_FULL) == 0) {
mbox_chan_txdone(chan, 0);
/* Wait until channel is empty */
if (chan->active_req != NULL)
continue;
return 0;
}
udelay(1);
}
return -ETIME;
}
static int tegra_hsp_mailbox_startup(struct mbox_chan *chan)
{
struct tegra_hsp_mailbox *mb = chan->con_priv;
struct tegra_hsp_channel *ch = &mb->channel;
struct tegra_hsp *hsp = mb->channel.hsp;
unsigned long flags;
chan->txdone_method = TXDONE_BY_IRQ;
/*
* Shared mailboxes start out as consumers by default. FULL and EMPTY
* interrupts are coalesced at the same shared interrupt.
*
* Keep EMPTY interrupts disabled at startup and only enable them when
* the mailbox is actually full. This is required because the FULL and
* EMPTY interrupts are level-triggered, so keeping EMPTY interrupts
* enabled all the time would cause an interrupt storm while mailboxes
* are idle.
*/
spin_lock_irqsave(&hsp->lock, flags);
if (mb->producer)
hsp->mask &= ~BIT(HSP_INT_EMPTY_SHIFT + mb->index);
else
hsp->mask |= BIT(HSP_INT_FULL_SHIFT + mb->index);
tegra_hsp_writel(hsp, hsp->mask, HSP_INT_IE(hsp->shared_irq));
spin_unlock_irqrestore(&hsp->lock, flags);
if (hsp->soc->has_per_mb_ie) {
if (mb->producer)
tegra_hsp_channel_writel(ch, 0x0,
HSP_SM_SHRD_MBOX_EMPTY_INT_IE);
else
tegra_hsp_channel_writel(ch, 0x1,
HSP_SM_SHRD_MBOX_FULL_INT_IE);
}
return 0;
}
static void tegra_hsp_mailbox_shutdown(struct mbox_chan *chan)
{
struct tegra_hsp_mailbox *mb = chan->con_priv;
struct tegra_hsp_channel *ch = &mb->channel;
struct tegra_hsp *hsp = mb->channel.hsp;
unsigned long flags;
if (hsp->soc->has_per_mb_ie) {
if (mb->producer)
tegra_hsp_channel_writel(ch, 0x0,
HSP_SM_SHRD_MBOX_EMPTY_INT_IE);
else
tegra_hsp_channel_writel(ch, 0x0,
HSP_SM_SHRD_MBOX_FULL_INT_IE);
}
spin_lock_irqsave(&hsp->lock, flags);
if (mb->producer)
hsp->mask &= ~BIT(HSP_INT_EMPTY_SHIFT + mb->index);
else
hsp->mask &= ~BIT(HSP_INT_FULL_SHIFT + mb->index);
tegra_hsp_writel(hsp, hsp->mask, HSP_INT_IE(hsp->shared_irq));
spin_unlock_irqrestore(&hsp->lock, flags);
}
static const struct mbox_chan_ops tegra_hsp_sm_ops = {
.send_data = tegra_hsp_mailbox_send_data,
.flush = tegra_hsp_mailbox_flush,
.startup = tegra_hsp_mailbox_startup,
.shutdown = tegra_hsp_mailbox_shutdown,
};
static struct mbox_chan *tegra_hsp_db_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *args)
{
struct tegra_hsp *hsp = container_of(mbox, struct tegra_hsp, mbox_db);
unsigned int type = args->args[0], master = args->args[1];
struct tegra_hsp_channel *channel = ERR_PTR(-ENODEV);
struct tegra_hsp_doorbell *db;
struct mbox_chan *chan;
unsigned long flags;
unsigned int i;
if (type != TEGRA_HSP_MBOX_TYPE_DB || !hsp->doorbell_irq)
return ERR_PTR(-ENODEV);
db = tegra_hsp_doorbell_get(hsp, master);
if (db)
channel = &db->channel;
if (IS_ERR(channel))
return ERR_CAST(channel);
spin_lock_irqsave(&hsp->lock, flags);
for (i = 0; i < mbox->num_chans; i++) {
chan = &mbox->chans[i];
if (!chan->con_priv) {
channel->chan = chan;
chan->con_priv = db;
break;
}
chan = NULL;
}
spin_unlock_irqrestore(&hsp->lock, flags);
return chan ?: ERR_PTR(-EBUSY);
}
static struct mbox_chan *tegra_hsp_sm_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *args)
{
struct tegra_hsp *hsp = container_of(mbox, struct tegra_hsp, mbox_sm);
unsigned int type = args->args[0], index;
struct tegra_hsp_mailbox *mb;
index = args->args[1] & TEGRA_HSP_SM_MASK;
if ((type & HSP_MBOX_TYPE_MASK) != TEGRA_HSP_MBOX_TYPE_SM ||
!hsp->shared_irqs || index >= hsp->num_sm)
return ERR_PTR(-ENODEV);
mb = &hsp->mailboxes[index];
if (type & TEGRA_HSP_MBOX_TYPE_SM_128BIT) {
if (!hsp->soc->has_128_bit_mb)
return ERR_PTR(-ENODEV);
mb->ops = &tegra_hsp_sm_128bit_ops;
} else {
mb->ops = &tegra_hsp_sm_32bit_ops;
}
if ((args->args[1] & TEGRA_HSP_SM_FLAG_TX) == 0)
mb->producer = false;
else
mb->producer = true;
return mb->channel.chan;
}
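/*
 * Worked example (sketch): a consumer specifier of the form
 * <&hsp TEGRA_HSP_MBOX_TYPE_SM (TEGRA_HSP_SM_FLAG_TX | 1)> selects shared
 * mailbox 1 as a producer: args[0] matches TEGRA_HSP_MBOX_TYPE_SM after
 * masking with HSP_MBOX_TYPE_MASK, args[1] & TEGRA_HSP_SM_MASK yields
 * index 1, and the TX flag makes mb->producer true above.
 */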
static int tegra_hsp_add_doorbells(struct tegra_hsp *hsp)
{
const struct tegra_hsp_db_map *map = hsp->soc->map;
struct tegra_hsp_channel *channel;
while (map->name) {
channel = tegra_hsp_doorbell_create(hsp, map->name,
map->master, map->index);
if (IS_ERR(channel))
return PTR_ERR(channel);
map++;
}
return 0;
}
static int tegra_hsp_add_mailboxes(struct tegra_hsp *hsp, struct device *dev)
{
int i;
hsp->mailboxes = devm_kcalloc(dev, hsp->num_sm, sizeof(*hsp->mailboxes),
GFP_KERNEL);
if (!hsp->mailboxes)
return -ENOMEM;
for (i = 0; i < hsp->num_sm; i++) {
struct tegra_hsp_mailbox *mb = &hsp->mailboxes[i];
mb->index = i;
mb->channel.hsp = hsp;
mb->channel.regs = hsp->regs + SZ_64K + i * SZ_32K;
mb->channel.chan = &hsp->mbox_sm.chans[i];
mb->channel.chan->con_priv = mb;
}
return 0;
}
static int tegra_hsp_request_shared_irq(struct tegra_hsp *hsp)
{
unsigned int i, irq = 0;
int err;
for (i = 0; i < hsp->num_si; i++) {
irq = hsp->shared_irqs[i];
if (irq <= 0)
continue;
err = devm_request_irq(hsp->dev, irq, tegra_hsp_shared_irq, 0,
dev_name(hsp->dev), hsp);
if (err < 0) {
dev_err(hsp->dev, "failed to request interrupt: %d\n",
err);
continue;
}
hsp->shared_irq = i;
/* disable all interrupts */
tegra_hsp_writel(hsp, 0, HSP_INT_IE(hsp->shared_irq));
dev_dbg(hsp->dev, "interrupt requested: %u\n", irq);
break;
}
if (i == hsp->num_si) {
dev_err(hsp->dev, "failed to find available interrupt\n");
return -ENOENT;
}
return 0;
}
static int tegra_hsp_probe(struct platform_device *pdev)
{
struct tegra_hsp *hsp;
unsigned int i;
u32 value;
int err;
hsp = devm_kzalloc(&pdev->dev, sizeof(*hsp), GFP_KERNEL);
if (!hsp)
return -ENOMEM;
hsp->dev = &pdev->dev;
hsp->soc = of_device_get_match_data(&pdev->dev);
INIT_LIST_HEAD(&hsp->doorbells);
spin_lock_init(&hsp->lock);
hsp->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hsp->regs))
return PTR_ERR(hsp->regs);
value = tegra_hsp_readl(hsp, HSP_INT_DIMENSIONING);
hsp->num_sm = (value >> HSP_nSM_SHIFT) & HSP_nINT_MASK;
hsp->num_ss = (value >> HSP_nSS_SHIFT) & HSP_nINT_MASK;
hsp->num_as = (value >> HSP_nAS_SHIFT) & HSP_nINT_MASK;
hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK;
hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK;
err = platform_get_irq_byname_optional(pdev, "doorbell");
if (err >= 0)
hsp->doorbell_irq = err;
if (hsp->num_si > 0) {
unsigned int count = 0;
hsp->shared_irqs = devm_kcalloc(&pdev->dev, hsp->num_si,
sizeof(*hsp->shared_irqs),
GFP_KERNEL);
if (!hsp->shared_irqs)
return -ENOMEM;
for (i = 0; i < hsp->num_si; i++) {
char *name;
name = kasprintf(GFP_KERNEL, "shared%u", i);
if (!name)
return -ENOMEM;
err = platform_get_irq_byname_optional(pdev, name);
if (err >= 0) {
hsp->shared_irqs[i] = err;
count++;
}
kfree(name);
}
if (count == 0) {
devm_kfree(&pdev->dev, hsp->shared_irqs);
hsp->shared_irqs = NULL;
}
}
/* setup the doorbell controller */
hsp->mbox_db.of_xlate = tegra_hsp_db_xlate;
hsp->mbox_db.num_chans = 32;
hsp->mbox_db.dev = &pdev->dev;
hsp->mbox_db.ops = &tegra_hsp_db_ops;
hsp->mbox_db.chans = devm_kcalloc(&pdev->dev, hsp->mbox_db.num_chans,
sizeof(*hsp->mbox_db.chans),
GFP_KERNEL);
if (!hsp->mbox_db.chans)
return -ENOMEM;
if (hsp->doorbell_irq) {
err = tegra_hsp_add_doorbells(hsp);
if (err < 0) {
dev_err(&pdev->dev, "failed to add doorbells: %d\n",
err);
return err;
}
}
err = devm_mbox_controller_register(&pdev->dev, &hsp->mbox_db);
if (err < 0) {
dev_err(&pdev->dev, "failed to register doorbell mailbox: %d\n",
err);
return err;
}
/* setup the shared mailbox controller */
hsp->mbox_sm.of_xlate = tegra_hsp_sm_xlate;
hsp->mbox_sm.num_chans = hsp->num_sm;
hsp->mbox_sm.dev = &pdev->dev;
hsp->mbox_sm.ops = &tegra_hsp_sm_ops;
hsp->mbox_sm.chans = devm_kcalloc(&pdev->dev, hsp->mbox_sm.num_chans,
sizeof(*hsp->mbox_sm.chans),
GFP_KERNEL);
if (!hsp->mbox_sm.chans)
return -ENOMEM;
if (hsp->shared_irqs) {
err = tegra_hsp_add_mailboxes(hsp, &pdev->dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to add mailboxes: %d\n",
err);
return err;
}
}
err = devm_mbox_controller_register(&pdev->dev, &hsp->mbox_sm);
if (err < 0) {
dev_err(&pdev->dev, "failed to register shared mailbox: %d\n",
err);
return err;
}
platform_set_drvdata(pdev, hsp);
if (hsp->doorbell_irq) {
err = devm_request_irq(&pdev->dev, hsp->doorbell_irq,
tegra_hsp_doorbell_irq, IRQF_NO_SUSPEND,
dev_name(&pdev->dev), hsp);
if (err < 0) {
dev_err(&pdev->dev,
"failed to request doorbell IRQ#%u: %d\n",
hsp->doorbell_irq, err);
return err;
}
}
if (hsp->shared_irqs) {
err = tegra_hsp_request_shared_irq(hsp);
if (err < 0)
return err;
}
lockdep_register_key(&hsp->lock_key);
lockdep_set_class(&hsp->lock, &hsp->lock_key);
return 0;
}
static int tegra_hsp_remove(struct platform_device *pdev)
{
struct tegra_hsp *hsp = platform_get_drvdata(pdev);
lockdep_unregister_key(&hsp->lock_key);
return 0;
}
static int __maybe_unused tegra_hsp_resume(struct device *dev)
{
struct tegra_hsp *hsp = dev_get_drvdata(dev);
unsigned int i;
struct tegra_hsp_doorbell *db;
list_for_each_entry(db, &hsp->doorbells, list) {
if (db->channel.chan)
tegra_hsp_doorbell_startup(db->channel.chan);
}
if (hsp->mailboxes) {
for (i = 0; i < hsp->num_sm; i++) {
struct tegra_hsp_mailbox *mb = &hsp->mailboxes[i];
if (mb->channel.chan->cl)
tegra_hsp_mailbox_startup(mb->channel.chan);
}
}
return 0;
}
static const struct dev_pm_ops tegra_hsp_pm_ops = {
.resume_noirq = tegra_hsp_resume,
};
static const struct tegra_hsp_db_map tegra186_hsp_db_map[] = {
{ "ccplex", TEGRA_HSP_DB_MASTER_CCPLEX, HSP_DB_CCPLEX, },
{ "bpmp", TEGRA_HSP_DB_MASTER_BPMP, HSP_DB_BPMP, },
{ /* sentinel */ }
};
static const struct tegra_hsp_soc tegra186_hsp_soc = {
.map = tegra186_hsp_db_map,
.has_per_mb_ie = false,
.has_128_bit_mb = false,
.reg_stride = 0x100,
};
static const struct tegra_hsp_soc tegra194_hsp_soc = {
.map = tegra186_hsp_db_map,
.has_per_mb_ie = true,
.has_128_bit_mb = false,
.reg_stride = 0x100,
};
static const struct tegra_hsp_soc tegra234_hsp_soc = {
.map = tegra186_hsp_db_map,
.has_per_mb_ie = false,
.has_128_bit_mb = true,
.reg_stride = 0x100,
};
static const struct tegra_hsp_soc tegra264_hsp_soc = {
.map = tegra186_hsp_db_map,
.has_per_mb_ie = false,
.has_128_bit_mb = true,
.reg_stride = 0x1000,
};
static const struct of_device_id tegra_hsp_match[] = {
{ .compatible = "nvidia,tegra186-hsp", .data = &tegra186_hsp_soc },
{ .compatible = "nvidia,tegra194-hsp", .data = &tegra194_hsp_soc },
{ .compatible = "nvidia,tegra234-hsp", .data = &tegra234_hsp_soc },
{ .compatible = "nvidia,tegra264-hsp", .data = &tegra264_hsp_soc },
{ }
};
static struct platform_driver tegra_hsp_driver = {
.driver = {
.name = "tegra-hsp",
.of_match_table = tegra_hsp_match,
.pm = &tegra_hsp_pm_ops,
},
.probe = tegra_hsp_probe,
.remove = tegra_hsp_remove,
};
static int __init tegra_hsp_init(void)
{
return platform_driver_register(&tegra_hsp_driver);
}
core_initcall(tegra_hsp_init);
|
linux-master
|
drivers/mailbox/tegra-hsp.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018 Pengutronix, Oleksij Rempel <[email protected]>
* Copyright 2022 NXP, Peng Fan <[email protected]>
*/
#include <linux/clk.h>
#include <linux/firmware/imx/ipc.h>
#include <linux/firmware/imx/s4.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/slab.h>
#define IMX_MU_CHANS 17
/* TX0/RX0/RXDB[0-3] */
#define IMX_MU_SCU_CHANS 6
/* TX0/RX0 */
#define IMX_MU_S4_CHANS 2
#define IMX_MU_CHAN_NAME_SIZE 20
#define IMX_MU_NUM_RR 4
#define IMX_MU_SECO_TX_TOUT (msecs_to_jiffies(3000))
#define IMX_MU_SECO_RX_TOUT (msecs_to_jiffies(3000))
/* Please do not change the order of TX and RX */
enum imx_mu_chan_type {
IMX_MU_TYPE_TX = 0, /* Tx */
IMX_MU_TYPE_RX = 1, /* Rx */
IMX_MU_TYPE_TXDB = 2, /* Tx doorbell */
IMX_MU_TYPE_RXDB = 3, /* Rx doorbell */
IMX_MU_TYPE_RST = 4, /* Reset */
};
enum imx_mu_xcr {
IMX_MU_CR,
IMX_MU_GIER,
IMX_MU_GCR,
IMX_MU_TCR,
IMX_MU_RCR,
IMX_MU_xCR_MAX,
};
enum imx_mu_xsr {
IMX_MU_SR,
IMX_MU_GSR,
IMX_MU_TSR,
IMX_MU_RSR,
IMX_MU_xSR_MAX,
};
struct imx_sc_rpc_msg_max {
struct imx_sc_rpc_msg hdr;
u32 data[30];
};
struct imx_s4_rpc_msg_max {
struct imx_s4_rpc_msg hdr;
u32 data[254];
};
struct imx_mu_con_priv {
unsigned int idx;
char irq_desc[IMX_MU_CHAN_NAME_SIZE];
enum imx_mu_chan_type type;
struct mbox_chan *chan;
struct tasklet_struct txdb_tasklet;
};
struct imx_mu_priv {
struct device *dev;
void __iomem *base;
void *msg;
spinlock_t xcr_lock; /* control register lock */
struct mbox_controller mbox;
struct mbox_chan mbox_chans[IMX_MU_CHANS];
struct imx_mu_con_priv con_priv[IMX_MU_CHANS];
const struct imx_mu_dcfg *dcfg;
struct clk *clk;
int irq[IMX_MU_CHANS];
bool suspend;
u32 xcr[IMX_MU_xCR_MAX];
bool side_b;
};
enum imx_mu_type {
IMX_MU_V1,
IMX_MU_V2 = BIT(1),
IMX_MU_V2_S4 = BIT(15),
IMX_MU_V2_IRQ = BIT(16),
};
struct imx_mu_dcfg {
int (*tx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data);
int (*rx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
int (*rxdb)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
void (*init)(struct imx_mu_priv *priv);
enum imx_mu_type type;
u32 xTR; /* Transmit Register0 */
u32 xRR; /* Receive Register0 */
u32 xSR[IMX_MU_xSR_MAX]; /* Status Registers */
u32 xCR[IMX_MU_xCR_MAX]; /* Control Registers */
};
#define IMX_MU_xSR_GIPn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
#define IMX_MU_xSR_RFn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
#define IMX_MU_xSR_TEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
/* General Purpose Interrupt Enable */
#define IMX_MU_xCR_GIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
/* Receive Interrupt Enable */
#define IMX_MU_xCR_RIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
/* Transmit Interrupt Enable */
#define IMX_MU_xCR_TIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
/* General Purpose Interrupt Request */
#define IMX_MU_xCR_GIRn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(16 + (3 - (x))))
/* MU reset */
#define IMX_MU_xCR_RST(type) (type & IMX_MU_V2 ? BIT(0) : BIT(5))
#define IMX_MU_xSR_RST(type) (type & IMX_MU_V2 ? BIT(0) : BIT(7))
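/*
 * Worked expansion (sketch): on MU V1 the per-index bits live in the upper
 * part of the register in reverse index order, while V2 uses one bit per
 * index starting at bit 0, e.g. IMX_MU_xCR_TIEn(IMX_MU_V1, 0) expands to
 * BIT(23) whereas IMX_MU_xCR_TIEn(IMX_MU_V2, 0) expands to BIT(0).
 */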
static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
{
return container_of(mbox, struct imx_mu_priv, mbox);
}
static void imx_mu_write(struct imx_mu_priv *priv, u32 val, u32 offs)
{
iowrite32(val, priv->base + offs);
}
static u32 imx_mu_read(struct imx_mu_priv *priv, u32 offs)
{
return ioread32(priv->base + offs);
}
static int imx_mu_tx_waiting_write(struct imx_mu_priv *priv, u32 val, u32 idx)
{
u64 timeout_time = get_jiffies_64() + IMX_MU_SECO_TX_TOUT;
u32 status;
u32 can_write;
dev_dbg(priv->dev, "Trying to write %.8x to idx %d\n", val, idx);
do {
status = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_TSR]);
can_write = status & IMX_MU_xSR_TEn(priv->dcfg->type, idx % 4);
} while (!can_write && time_is_after_jiffies64(timeout_time));
if (!can_write) {
dev_err(priv->dev, "timeout trying to write %.8x at %d(%.8x)\n",
val, idx, status);
return -ETIME;
}
imx_mu_write(priv, val, priv->dcfg->xTR + (idx % 4) * 4);
return 0;
}
static int imx_mu_rx_waiting_read(struct imx_mu_priv *priv, u32 *val, u32 idx)
{
u64 timeout_time = get_jiffies_64() + IMX_MU_SECO_RX_TOUT;
u32 status;
u32 can_read;
dev_dbg(priv->dev, "Trying to read from idx %d\n", idx);
do {
status = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_RSR]);
can_read = status & IMX_MU_xSR_RFn(priv->dcfg->type, idx % 4);
} while (!can_read && time_is_after_jiffies64(timeout_time));
if (!can_read) {
dev_err(priv->dev, "timeout trying to read idx %d (%.8x)\n",
idx, status);
return -ETIME;
}
*val = imx_mu_read(priv, priv->dcfg->xRR + (idx % 4) * 4);
dev_dbg(priv->dev, "Read %.8x\n", *val);
return 0;
}
static u32 imx_mu_xcr_rmw(struct imx_mu_priv *priv, enum imx_mu_xcr type, u32 set, u32 clr)
{
unsigned long flags;
u32 val;
spin_lock_irqsave(&priv->xcr_lock, flags);
val = imx_mu_read(priv, priv->dcfg->xCR[type]);
val &= ~clr;
val |= set;
imx_mu_write(priv, val, priv->dcfg->xCR[type]);
spin_unlock_irqrestore(&priv->xcr_lock, flags);
return val;
}
static int imx_mu_generic_tx(struct imx_mu_priv *priv,
struct imx_mu_con_priv *cp,
void *data)
{
u32 *arg = data;
switch (cp->type) {
case IMX_MU_TYPE_TX:
imx_mu_write(priv, *arg, priv->dcfg->xTR + cp->idx * 4);
imx_mu_xcr_rmw(priv, IMX_MU_TCR, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx), 0);
break;
case IMX_MU_TYPE_TXDB:
imx_mu_xcr_rmw(priv, IMX_MU_GCR, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);
tasklet_schedule(&cp->txdb_tasklet);
break;
default:
dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
return -EINVAL;
}
return 0;
}
static int imx_mu_generic_rx(struct imx_mu_priv *priv,
struct imx_mu_con_priv *cp)
{
u32 dat;
dat = imx_mu_read(priv, priv->dcfg->xRR + (cp->idx) * 4);
mbox_chan_received_data(cp->chan, (void *)&dat);
return 0;
}
static int imx_mu_generic_rxdb(struct imx_mu_priv *priv,
struct imx_mu_con_priv *cp)
{
imx_mu_write(priv, IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx),
priv->dcfg->xSR[IMX_MU_GSR]);
mbox_chan_received_data(cp->chan, NULL);
return 0;
}
static int imx_mu_specific_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data)
{
u32 *arg = data;
int i, ret;
u32 xsr;
u32 size, max_size, num_tr;
if (priv->dcfg->type & IMX_MU_V2_S4) {
size = ((struct imx_s4_rpc_msg_max *)data)->hdr.size;
max_size = sizeof(struct imx_s4_rpc_msg_max);
num_tr = 8;
} else {
size = ((struct imx_sc_rpc_msg_max *)data)->hdr.size;
max_size = sizeof(struct imx_sc_rpc_msg_max);
num_tr = 4;
}
switch (cp->type) {
case IMX_MU_TYPE_TX:
/*
* msg->hdr.size specifies the number of u32 words while
* sizeof yields bytes.
*/
if (size > max_size / 4) {
/*
* The real message size can differ from the size of
* struct imx_sc_rpc_msg_max/imx_s4_rpc_msg_max
*/
dev_err(priv->dev, "Maximal message size (%u bytes) exceeded on TX; got: %i bytes\n", max_size, size << 2);
return -EINVAL;
}
for (i = 0; i < num_tr && i < size; i++)
imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % num_tr) * 4);
for (; i < size; i++) {
ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_TSR],
xsr,
xsr & IMX_MU_xSR_TEn(priv->dcfg->type, i % num_tr),
0, 5 * USEC_PER_SEC);
if (ret) {
dev_err(priv->dev, "Send data index: %d timeout\n", i);
return ret;
}
imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % num_tr) * 4);
}
imx_mu_xcr_rmw(priv, IMX_MU_TCR, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx), 0);
break;
default:
dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
return -EINVAL;
}
return 0;
}
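/*
 * Worked example (sketch): struct imx_sc_rpc_msg_max is a one-word header
 * plus 30 u32 words of payload, i.e. 124 bytes, so the size check above
 * allows hdr.size values of up to 124 / 4 = 31 u32 words; the S4 variant
 * follows the same rule with its larger payload array.
 */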
static int imx_mu_specific_rx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp)
{
u32 *data;
int i, ret;
u32 xsr;
u32 size, max_size;
data = (u32 *)priv->msg;
imx_mu_xcr_rmw(priv, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(priv->dcfg->type, 0));
*data++ = imx_mu_read(priv, priv->dcfg->xRR);
if (priv->dcfg->type & IMX_MU_V2_S4) {
size = ((struct imx_s4_rpc_msg_max *)priv->msg)->hdr.size;
max_size = sizeof(struct imx_s4_rpc_msg_max);
} else {
size = ((struct imx_sc_rpc_msg_max *)priv->msg)->hdr.size;
max_size = sizeof(struct imx_sc_rpc_msg_max);
}
if (size > max_size / 4) {
dev_err(priv->dev, "Maximal message size (%u bytes) exceeded on RX; got: %i bytes\n", max_size, size << 2);
return -EINVAL;
}
for (i = 1; i < size; i++) {
ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_RSR], xsr,
xsr & IMX_MU_xSR_RFn(priv->dcfg->type, i % 4), 0,
5 * USEC_PER_SEC);
if (ret) {
dev_err(priv->dev, "timeout read idx %d\n", i);
return ret;
}
*data++ = imx_mu_read(priv, priv->dcfg->xRR + (i % 4) * 4);
}
imx_mu_xcr_rmw(priv, IMX_MU_RCR, IMX_MU_xCR_RIEn(priv->dcfg->type, 0), 0);
mbox_chan_received_data(cp->chan, (void *)priv->msg);
return 0;
}
static int imx_mu_seco_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp,
void *data)
{
struct imx_sc_rpc_msg_max *msg = data;
u32 *arg = data;
u32 byte_size;
int err;
int i;
dev_dbg(priv->dev, "Sending message\n");
switch (cp->type) {
case IMX_MU_TYPE_TXDB:
byte_size = msg->hdr.size * sizeof(u32);
if (byte_size > sizeof(*msg)) {
/*
* The real message size can differ from the size of
* struct imx_sc_rpc_msg_max
*/
dev_err(priv->dev,
"Exceed max msg size (%zu) on TX, got: %i\n",
sizeof(*msg), byte_size);
return -EINVAL;
}
print_hex_dump_debug("from client ", DUMP_PREFIX_OFFSET, 4, 4,
data, byte_size, false);
/* Send first word */
dev_dbg(priv->dev, "Sending header\n");
imx_mu_write(priv, *arg++, priv->dcfg->xTR);
/* Send signaling */
dev_dbg(priv->dev, "Sending signaling\n");
imx_mu_xcr_rmw(priv, IMX_MU_GCR,
IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);
/* Send words to fill the mailbox */
for (i = 1; i < 4 && i < msg->hdr.size; i++) {
dev_dbg(priv->dev, "Sending word %d\n", i);
imx_mu_write(priv, *arg++,
priv->dcfg->xTR + (i % 4) * 4);
}
/* Send rest of message waiting for remote read */
for (; i < msg->hdr.size; i++) {
dev_dbg(priv->dev, "Sending word %d\n", i);
err = imx_mu_tx_waiting_write(priv, *arg++, i);
if (err) {
dev_err(priv->dev, "Timeout tx %d\n", i);
return err;
}
}
/* Simulate a TX done for the mbox framework, since the doorbell has no ACK */
tasklet_schedule(&cp->txdb_tasklet);
break;
default:
dev_warn_ratelimited(priv->dev,
"Send data on wrong channel type: %d\n",
cp->type);
return -EINVAL;
}
return 0;
}
static int imx_mu_seco_rxdb(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp)
{
struct imx_sc_rpc_msg_max msg;
u32 *data = (u32 *)&msg;
u32 byte_size;
int err = 0;
int i;
dev_dbg(priv->dev, "Receiving message\n");
/* Read header */
dev_dbg(priv->dev, "Receiving header\n");
*data++ = imx_mu_read(priv, priv->dcfg->xRR);
byte_size = msg.hdr.size * sizeof(u32);
if (byte_size > sizeof(msg)) {
dev_err(priv->dev, "Exceed max msg size (%zu) on RX, got: %i\n",
sizeof(msg), byte_size);
err = -EINVAL;
goto error;
}
/* Read the rest of the message, waiting for each word to be written */
for (i = 1; i < msg.hdr.size; i++) {
dev_dbg(priv->dev, "Receiving word %d\n", i);
err = imx_mu_rx_waiting_read(priv, data++, i);
if (err) {
dev_err(priv->dev, "Timeout rx %d\n", i);
goto error;
}
}
/* Clear GIP */
imx_mu_write(priv, IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx),
priv->dcfg->xSR[IMX_MU_GSR]);
print_hex_dump_debug("to client ", DUMP_PREFIX_OFFSET, 4, 4,
&msg, byte_size, false);
/* send data to client */
dev_dbg(priv->dev, "Sending message to client\n");
mbox_chan_received_data(cp->chan, (void *)&msg);
goto exit;
error:
mbox_chan_received_data(cp->chan, ERR_PTR(err));
exit:
return err;
}
static void imx_mu_txdb_tasklet(unsigned long data)
{
struct imx_mu_con_priv *cp = (struct imx_mu_con_priv *)data;
mbox_chan_txdone(cp->chan, 0);
}
static irqreturn_t imx_mu_isr(int irq, void *p)
{
struct mbox_chan *chan = p;
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
struct imx_mu_con_priv *cp = chan->con_priv;
u32 val, ctrl;
switch (cp->type) {
case IMX_MU_TYPE_TX:
ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_TCR]);
val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_TSR]);
val &= IMX_MU_xSR_TEn(priv->dcfg->type, cp->idx) &
(ctrl & IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx));
break;
case IMX_MU_TYPE_RX:
ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_RCR]);
val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_RSR]);
val &= IMX_MU_xSR_RFn(priv->dcfg->type, cp->idx) &
(ctrl & IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx));
break;
case IMX_MU_TYPE_RXDB:
ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_GIER]);
val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_GSR]);
val &= IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx) &
(ctrl & IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx));
break;
case IMX_MU_TYPE_RST:
return IRQ_NONE;
default:
dev_warn_ratelimited(priv->dev, "Unhandled channel type %d\n",
cp->type);
return IRQ_NONE;
}
if (!val)
return IRQ_NONE;
if ((val == IMX_MU_xSR_TEn(priv->dcfg->type, cp->idx)) &&
(cp->type == IMX_MU_TYPE_TX)) {
imx_mu_xcr_rmw(priv, IMX_MU_TCR, 0, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx));
mbox_chan_txdone(chan, 0);
} else if ((val == IMX_MU_xSR_RFn(priv->dcfg->type, cp->idx)) &&
(cp->type == IMX_MU_TYPE_RX)) {
priv->dcfg->rx(priv, cp);
} else if ((val == IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx)) &&
(cp->type == IMX_MU_TYPE_RXDB)) {
priv->dcfg->rxdb(priv, cp);
} else {
dev_warn_ratelimited(priv->dev, "Not handled interrupt\n");
return IRQ_NONE;
}
if (priv->suspend)
pm_system_wakeup();
return IRQ_HANDLED;
}
static int imx_mu_send_data(struct mbox_chan *chan, void *data)
{
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
struct imx_mu_con_priv *cp = chan->con_priv;
return priv->dcfg->tx(priv, cp, data);
}
static int imx_mu_startup(struct mbox_chan *chan)
{
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
struct imx_mu_con_priv *cp = chan->con_priv;
unsigned long irq_flag = 0;
int ret;
pm_runtime_get_sync(priv->dev);
if (cp->type == IMX_MU_TYPE_TXDB) {
/* Tx doorbells don't have ACK support */
tasklet_init(&cp->txdb_tasklet, imx_mu_txdb_tasklet,
(unsigned long)cp);
return 0;
}
/* The IPC MU interrupt should be requested with IRQF_NO_SUSPEND set */
if (!priv->dev->pm_domain)
irq_flag |= IRQF_NO_SUSPEND;
if (!(priv->dcfg->type & IMX_MU_V2_IRQ))
irq_flag |= IRQF_SHARED;
ret = request_irq(priv->irq[cp->type], imx_mu_isr, irq_flag, cp->irq_desc, chan);
if (ret) {
dev_err(priv->dev, "Unable to acquire IRQ %d\n", priv->irq[cp->type]);
return ret;
}
switch (cp->type) {
case IMX_MU_TYPE_RX:
imx_mu_xcr_rmw(priv, IMX_MU_RCR, IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx), 0);
break;
case IMX_MU_TYPE_RXDB:
imx_mu_xcr_rmw(priv, IMX_MU_GIER, IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx), 0);
break;
default:
break;
}
return 0;
}
static void imx_mu_shutdown(struct mbox_chan *chan)
{
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
struct imx_mu_con_priv *cp = chan->con_priv;
int ret;
u32 sr;
if (cp->type == IMX_MU_TYPE_TXDB) {
tasklet_kill(&cp->txdb_tasklet);
pm_runtime_put_sync(priv->dev);
return;
}
switch (cp->type) {
case IMX_MU_TYPE_TX:
imx_mu_xcr_rmw(priv, IMX_MU_TCR, 0, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx));
break;
case IMX_MU_TYPE_RX:
imx_mu_xcr_rmw(priv, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx));
break;
case IMX_MU_TYPE_RXDB:
imx_mu_xcr_rmw(priv, IMX_MU_GIER, 0, IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx));
break;
case IMX_MU_TYPE_RST:
imx_mu_xcr_rmw(priv, IMX_MU_CR, IMX_MU_xCR_RST(priv->dcfg->type), 0);
ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_SR], sr,
!(sr & IMX_MU_xSR_RST(priv->dcfg->type)), 1, 5);
if (ret)
dev_warn(priv->dev, "RST channel timeout\n");
break;
default:
break;
}
free_irq(priv->irq[cp->type], chan);
pm_runtime_put_sync(priv->dev);
}
static const struct mbox_chan_ops imx_mu_ops = {
.send_data = imx_mu_send_data,
.startup = imx_mu_startup,
.shutdown = imx_mu_shutdown,
};
static struct mbox_chan *imx_mu_specific_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *sp)
{
u32 type, idx, chan;
if (sp->args_count != 2) {
dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
return ERR_PTR(-EINVAL);
}
type = sp->args[0]; /* channel type */
idx = sp->args[1]; /* index */
switch (type) {
case IMX_MU_TYPE_TX:
case IMX_MU_TYPE_RX:
if (idx != 0)
dev_err(mbox->dev, "Invalid chan idx: %d\n", idx);
chan = type;
break;
case IMX_MU_TYPE_RXDB:
chan = 2 + idx;
break;
default:
dev_err(mbox->dev, "Invalid chan type: %d\n", type);
return ERR_PTR(-EINVAL);
}
if (chan >= mbox->num_chans) {
dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
return ERR_PTR(-EINVAL);
}
return &mbox->chans[chan];
}
static struct mbox_chan * imx_mu_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *sp)
{
u32 type, idx, chan;
if (sp->args_count != 2) {
dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
return ERR_PTR(-EINVAL);
}
type = sp->args[0]; /* channel type */
idx = sp->args[1]; /* index */
chan = type * 4 + idx;
if (chan >= mbox->num_chans) {
dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
return ERR_PTR(-EINVAL);
}
return &mbox->chans[chan];
}
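/*
 * Worked example (sketch): with the generic channel layout the channel
 * number is type * 4 + idx, so a consumer specifier <&mu IMX_MU_TYPE_TXDB 1>
 * (type 2, index 1) resolves to mbox->chans[9].
 */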
static struct mbox_chan *imx_mu_seco_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *sp)
{
u32 type;
if (sp->args_count < 1) {
dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
return ERR_PTR(-EINVAL);
}
type = sp->args[0]; /* channel type */
/* Only supports TXDB and RXDB */
if (type == IMX_MU_TYPE_TX || type == IMX_MU_TYPE_RX) {
dev_err(mbox->dev, "Invalid type: %d\n", type);
return ERR_PTR(-EINVAL);
}
return imx_mu_xlate(mbox, sp);
}
static void imx_mu_init_generic(struct imx_mu_priv *priv)
{
unsigned int i;
unsigned int val;
for (i = 0; i < IMX_MU_CHANS; i++) {
struct imx_mu_con_priv *cp = &priv->con_priv[i];
cp->idx = i % 4;
cp->type = i >> 2;
cp->chan = &priv->mbox_chans[i];
priv->mbox_chans[i].con_priv = cp;
snprintf(cp->irq_desc, sizeof(cp->irq_desc),
"imx_mu_chan[%i-%i]", cp->type, cp->idx);
}
priv->mbox.num_chans = IMX_MU_CHANS;
priv->mbox.of_xlate = imx_mu_xlate;
if (priv->side_b)
return;
/* Set default MU configuration */
for (i = 0; i < IMX_MU_xCR_MAX; i++)
imx_mu_write(priv, 0, priv->dcfg->xCR[i]);
/* Clear any pending GIP */
val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_GSR]);
imx_mu_write(priv, val, priv->dcfg->xSR[IMX_MU_GSR]);
/* Clear any pending RSR */
for (i = 0; i < IMX_MU_NUM_RR; i++)
imx_mu_read(priv, priv->dcfg->xRR + (i % 4) * 4);
}
static void imx_mu_init_specific(struct imx_mu_priv *priv)
{
unsigned int i;
int num_chans = priv->dcfg->type & IMX_MU_V2_S4 ? IMX_MU_S4_CHANS : IMX_MU_SCU_CHANS;
for (i = 0; i < num_chans; i++) {
struct imx_mu_con_priv *cp = &priv->con_priv[i];
cp->idx = i < 2 ? 0 : i - 2;
cp->type = i < 2 ? i : IMX_MU_TYPE_RXDB;
cp->chan = &priv->mbox_chans[i];
priv->mbox_chans[i].con_priv = cp;
snprintf(cp->irq_desc, sizeof(cp->irq_desc),
"imx_mu_chan[%i-%i]", cp->type, cp->idx);
}
priv->mbox.num_chans = num_chans;
priv->mbox.of_xlate = imx_mu_specific_xlate;
/* Set default MU configuration */
for (i = 0; i < IMX_MU_xCR_MAX; i++)
imx_mu_write(priv, 0, priv->dcfg->xCR[i]);
}
static void imx_mu_init_seco(struct imx_mu_priv *priv)
{
imx_mu_init_generic(priv);
priv->mbox.of_xlate = imx_mu_seco_xlate;
}
static int imx_mu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct imx_mu_priv *priv;
const struct imx_mu_dcfg *dcfg;
int i, ret;
u32 size;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
dcfg = of_device_get_match_data(dev);
if (!dcfg)
return -EINVAL;
priv->dcfg = dcfg;
if (priv->dcfg->type & IMX_MU_V2_IRQ) {
priv->irq[IMX_MU_TYPE_TX] = platform_get_irq_byname(pdev, "tx");
if (priv->irq[IMX_MU_TYPE_TX] < 0)
return priv->irq[IMX_MU_TYPE_TX];
priv->irq[IMX_MU_TYPE_RX] = platform_get_irq_byname(pdev, "rx");
if (priv->irq[IMX_MU_TYPE_RX] < 0)
return priv->irq[IMX_MU_TYPE_RX];
} else {
ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
for (i = 0; i < IMX_MU_CHANS; i++)
priv->irq[i] = ret;
}
if (priv->dcfg->type & IMX_MU_V2_S4)
size = sizeof(struct imx_s4_rpc_msg_max);
else
size = sizeof(struct imx_sc_rpc_msg_max);
priv->msg = devm_kzalloc(dev, size, GFP_KERNEL);
if (!priv->msg)
return -ENOMEM;
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
if (PTR_ERR(priv->clk) != -ENOENT)
return PTR_ERR(priv->clk);
priv->clk = NULL;
}
ret = clk_prepare_enable(priv->clk);
if (ret) {
dev_err(dev, "Failed to enable clock\n");
return ret;
}
priv->side_b = of_property_read_bool(np, "fsl,mu-side-b");
priv->dcfg->init(priv);
spin_lock_init(&priv->xcr_lock);
priv->mbox.dev = dev;
priv->mbox.ops = &imx_mu_ops;
priv->mbox.chans = priv->mbox_chans;
priv->mbox.txdone_irq = true;
platform_set_drvdata(pdev, priv);
ret = devm_mbox_controller_register(dev, &priv->mbox);
if (ret) {
clk_disable_unprepare(priv->clk);
return ret;
}
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
goto disable_runtime_pm;
ret = pm_runtime_put_sync(dev);
if (ret < 0)
goto disable_runtime_pm;
clk_disable_unprepare(priv->clk);
return 0;
disable_runtime_pm:
pm_runtime_disable(dev);
clk_disable_unprepare(priv->clk);
return ret;
}
static int imx_mu_remove(struct platform_device *pdev)
{
struct imx_mu_priv *priv = platform_get_drvdata(pdev);
pm_runtime_disable(priv->dev);
return 0;
}
static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
.tx = imx_mu_generic_tx,
.rx = imx_mu_generic_rx,
.rxdb = imx_mu_generic_rxdb,
.init = imx_mu_init_generic,
.xTR = 0x0,
.xRR = 0x10,
.xSR = {0x20, 0x20, 0x20, 0x20},
.xCR = {0x24, 0x24, 0x24, 0x24, 0x24},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
.tx = imx_mu_generic_tx,
.rx = imx_mu_generic_rx,
.rxdb = imx_mu_generic_rxdb,
.init = imx_mu_init_generic,
.xTR = 0x20,
.xRR = 0x40,
.xSR = {0x60, 0x60, 0x60, 0x60},
.xCR = {0x64, 0x64, 0x64, 0x64, 0x64},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
.tx = imx_mu_generic_tx,
.rx = imx_mu_generic_rx,
.rxdb = imx_mu_generic_rxdb,
.init = imx_mu_init_generic,
.type = IMX_MU_V2,
.xTR = 0x200,
.xRR = 0x280,
.xSR = {0xC, 0x118, 0x124, 0x12C},
.xCR = {0x8, 0x110, 0x114, 0x120, 0x128},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp_s4 = {
.tx = imx_mu_specific_tx,
.rx = imx_mu_specific_rx,
.init = imx_mu_init_specific,
.type = IMX_MU_V2 | IMX_MU_V2_S4,
.xTR = 0x200,
.xRR = 0x280,
.xSR = {0xC, 0x118, 0x124, 0x12C},
.xCR = {0x8, 0x110, 0x114, 0x120, 0x128},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx93_s4 = {
.tx = imx_mu_specific_tx,
.rx = imx_mu_specific_rx,
.init = imx_mu_init_specific,
.type = IMX_MU_V2 | IMX_MU_V2_S4 | IMX_MU_V2_IRQ,
.xTR = 0x200,
.xRR = 0x280,
.xSR = {0xC, 0x118, 0x124, 0x12C},
.xCR = {0x8, 0x110, 0x114, 0x120, 0x128},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx8_scu = {
.tx = imx_mu_specific_tx,
.rx = imx_mu_specific_rx,
.init = imx_mu_init_specific,
.rxdb = imx_mu_generic_rxdb,
.xTR = 0x0,
.xRR = 0x10,
.xSR = {0x20, 0x20, 0x20, 0x20},
.xCR = {0x24, 0x24, 0x24, 0x24, 0x24},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx8_seco = {
.tx = imx_mu_seco_tx,
.rx = imx_mu_generic_rx,
.rxdb = imx_mu_seco_rxdb,
.init = imx_mu_init_seco,
.xTR = 0x0,
.xRR = 0x10,
.xSR = {0x20, 0x20, 0x20, 0x20},
.xCR = {0x24, 0x24, 0x24, 0x24, 0x24},
};
static const struct of_device_id imx_mu_dt_ids[] = {
{ .compatible = "fsl,imx7ulp-mu", .data = &imx_mu_cfg_imx7ulp },
{ .compatible = "fsl,imx6sx-mu", .data = &imx_mu_cfg_imx6sx },
{ .compatible = "fsl,imx8ulp-mu", .data = &imx_mu_cfg_imx8ulp },
{ .compatible = "fsl,imx8ulp-mu-s4", .data = &imx_mu_cfg_imx8ulp_s4 },
{ .compatible = "fsl,imx93-mu-s4", .data = &imx_mu_cfg_imx93_s4 },
{ .compatible = "fsl,imx8-mu-scu", .data = &imx_mu_cfg_imx8_scu },
{ .compatible = "fsl,imx8-mu-seco", .data = &imx_mu_cfg_imx8_seco },
{ },
};
MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);
static int __maybe_unused imx_mu_suspend_noirq(struct device *dev)
{
struct imx_mu_priv *priv = dev_get_drvdata(dev);
int i;
if (!priv->clk) {
for (i = 0; i < IMX_MU_xCR_MAX; i++)
priv->xcr[i] = imx_mu_read(priv, priv->dcfg->xCR[i]);
}
priv->suspend = true;
return 0;
}
static int __maybe_unused imx_mu_resume_noirq(struct device *dev)
{
struct imx_mu_priv *priv = dev_get_drvdata(dev);
int i;
/*
* Only restore the MU registers when context has been lost. The TIE
* bits may already be set during noirq resume because MU data
* communication is still in progress, and restoring the saved values
* would overwrite TIE and make the MU data send fail, potentially
* freezing the system. This issue was observed when testing
* freeze-mode suspend.
*/
if (!priv->clk && !imx_mu_read(priv, priv->dcfg->xCR[0])) {
for (i = 0; i < IMX_MU_xCR_MAX; i++)
imx_mu_write(priv, priv->xcr[i], priv->dcfg->xCR[i]);
}
priv->suspend = false;
return 0;
}
static int __maybe_unused imx_mu_runtime_suspend(struct device *dev)
{
struct imx_mu_priv *priv = dev_get_drvdata(dev);
clk_disable_unprepare(priv->clk);
return 0;
}
static int __maybe_unused imx_mu_runtime_resume(struct device *dev)
{
struct imx_mu_priv *priv = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(priv->clk);
if (ret)
dev_err(dev, "failed to enable clock\n");
return ret;
}
static const struct dev_pm_ops imx_mu_pm_ops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_mu_suspend_noirq,
imx_mu_resume_noirq)
SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend,
imx_mu_runtime_resume, NULL)
};
static struct platform_driver imx_mu_driver = {
.probe = imx_mu_probe,
.remove = imx_mu_remove,
.driver = {
.name = "imx_mu",
.of_match_table = imx_mu_dt_ids,
.pm = &imx_mu_pm_ops,
},
};
module_platform_driver(imx_mu_driver);
MODULE_AUTHOR("Oleksij Rempel <[email protected]>");
MODULE_DESCRIPTION("Message Unit driver for i.MX");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/mailbox/imx-mailbox.c
|
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2017 Broadcom
/*
* Broadcom FlexRM Mailbox Driver
*
* Each Broadcom FlexSparx4 offload engine is implemented as an
* extension to Broadcom FlexRM ring manager. The FlexRM ring
* manager provides a set of rings which can be used to submit
* work to a FlexSparx4 offload engine.
*
* This driver creates a mailbox controller using a set of FlexRM
* rings where each mailbox channel represents a separate FlexRM ring.
*/
#include <asm/barrier.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
/* ====== FlexRM register defines ===== */
/* FlexRM configuration */
#define RING_REGS_SIZE 0x10000
#define RING_DESC_SIZE 8
#define RING_DESC_INDEX(offset) \
((offset) / RING_DESC_SIZE)
#define RING_DESC_OFFSET(index) \
((index) * RING_DESC_SIZE)
#define RING_MAX_REQ_COUNT 1024
#define RING_BD_ALIGN_ORDER 12
#define RING_BD_ALIGN_CHECK(addr) \
(!((addr) & ((0x1 << RING_BD_ALIGN_ORDER) - 1)))
#define RING_BD_TOGGLE_INVALID(offset) \
(((offset) >> RING_BD_ALIGN_ORDER) & 0x1)
#define RING_BD_TOGGLE_VALID(offset) \
(!RING_BD_TOGGLE_INVALID(offset))
#define RING_BD_DESC_PER_REQ 32
#define RING_BD_DESC_COUNT \
(RING_MAX_REQ_COUNT * RING_BD_DESC_PER_REQ)
#define RING_BD_SIZE \
(RING_BD_DESC_COUNT * RING_DESC_SIZE)
#define RING_CMPL_ALIGN_ORDER 13
#define RING_CMPL_DESC_COUNT RING_MAX_REQ_COUNT
#define RING_CMPL_SIZE \
(RING_CMPL_DESC_COUNT * RING_DESC_SIZE)
#define RING_VER_MAGIC 0x76303031
/* Per-Ring register offsets */
#define RING_VER 0x000
#define RING_BD_START_ADDR 0x004
#define RING_BD_READ_PTR 0x008
#define RING_BD_WRITE_PTR 0x00c
#define RING_BD_READ_PTR_DDR_LS 0x010
#define RING_BD_READ_PTR_DDR_MS 0x014
#define RING_CMPL_START_ADDR 0x018
#define RING_CMPL_WRITE_PTR 0x01c
#define RING_NUM_REQ_RECV_LS 0x020
#define RING_NUM_REQ_RECV_MS 0x024
#define RING_NUM_REQ_TRANS_LS 0x028
#define RING_NUM_REQ_TRANS_MS 0x02c
#define RING_NUM_REQ_OUTSTAND 0x030
#define RING_CONTROL 0x034
#define RING_FLUSH_DONE 0x038
#define RING_MSI_ADDR_LS 0x03c
#define RING_MSI_ADDR_MS 0x040
#define RING_MSI_CONTROL 0x048
#define RING_BD_READ_PTR_DDR_CONTROL 0x04c
#define RING_MSI_DATA_VALUE 0x064
/* Register RING_BD_START_ADDR fields */
#define BD_LAST_UPDATE_HW_SHIFT 28
#define BD_LAST_UPDATE_HW_MASK 0x1
#define BD_START_ADDR_VALUE(pa) \
((u32)((((dma_addr_t)(pa)) >> RING_BD_ALIGN_ORDER) & 0x0fffffff))
#define BD_START_ADDR_DECODE(val) \
((dma_addr_t)((val) & 0x0fffffff) << RING_BD_ALIGN_ORDER)
/* Register RING_CMPL_START_ADDR fields */
#define CMPL_START_ADDR_VALUE(pa) \
((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x07ffffff))
/* Register RING_CONTROL fields */
#define CONTROL_MASK_DISABLE_CONTROL 12
#define CONTROL_FLUSH_SHIFT 5
#define CONTROL_ACTIVE_SHIFT 4
#define CONTROL_RATE_ADAPT_MASK 0xf
#define CONTROL_RATE_DYNAMIC 0x0
#define CONTROL_RATE_FAST 0x8
#define CONTROL_RATE_MEDIUM 0x9
#define CONTROL_RATE_SLOW 0xa
#define CONTROL_RATE_IDLE 0xb
/* Register RING_FLUSH_DONE fields */
#define FLUSH_DONE_MASK 0x1
/* Register RING_MSI_CONTROL fields */
#define MSI_TIMER_VAL_SHIFT 16
#define MSI_TIMER_VAL_MASK 0xffff
#define MSI_ENABLE_SHIFT 15
#define MSI_ENABLE_MASK 0x1
#define MSI_COUNT_SHIFT 0
#define MSI_COUNT_MASK 0x3ff
/* Register RING_BD_READ_PTR_DDR_CONTROL fields */
#define BD_READ_PTR_DDR_TIMER_VAL_SHIFT 16
#define BD_READ_PTR_DDR_TIMER_VAL_MASK 0xffff
#define BD_READ_PTR_DDR_ENABLE_SHIFT 15
#define BD_READ_PTR_DDR_ENABLE_MASK 0x1
/* ====== FlexRM ring descriptor defines ===== */
/* Completion descriptor format */
#define CMPL_OPAQUE_SHIFT 0
#define CMPL_OPAQUE_MASK 0xffff
#define CMPL_ENGINE_STATUS_SHIFT 16
#define CMPL_ENGINE_STATUS_MASK 0xffff
#define CMPL_DME_STATUS_SHIFT 32
#define CMPL_DME_STATUS_MASK 0xffff
#define CMPL_RM_STATUS_SHIFT 48
#define CMPL_RM_STATUS_MASK 0xffff
/* Completion DME status code */
#define DME_STATUS_MEM_COR_ERR BIT(0)
#define DME_STATUS_MEM_UCOR_ERR BIT(1)
#define DME_STATUS_FIFO_UNDERFLOW BIT(2)
#define DME_STATUS_FIFO_OVERFLOW BIT(3)
#define DME_STATUS_RRESP_ERR BIT(4)
#define DME_STATUS_BRESP_ERR BIT(5)
#define DME_STATUS_ERROR_MASK (DME_STATUS_MEM_COR_ERR | \
DME_STATUS_MEM_UCOR_ERR | \
DME_STATUS_FIFO_UNDERFLOW | \
DME_STATUS_FIFO_OVERFLOW | \
DME_STATUS_RRESP_ERR | \
DME_STATUS_BRESP_ERR)
/* Completion RM status code */
#define RM_STATUS_CODE_SHIFT 0
#define RM_STATUS_CODE_MASK 0x3ff
#define RM_STATUS_CODE_GOOD 0x0
#define RM_STATUS_CODE_AE_TIMEOUT 0x3ff
/* General descriptor format */
#define DESC_TYPE_SHIFT 60
#define DESC_TYPE_MASK 0xf
#define DESC_PAYLOAD_SHIFT 0
#define DESC_PAYLOAD_MASK 0x0fffffffffffffff
/* Null descriptor format */
#define NULL_TYPE 0
#define NULL_TOGGLE_SHIFT 58
#define NULL_TOGGLE_MASK 0x1
/* Header descriptor format */
#define HEADER_TYPE 1
#define HEADER_TOGGLE_SHIFT 58
#define HEADER_TOGGLE_MASK 0x1
#define HEADER_ENDPKT_SHIFT 57
#define HEADER_ENDPKT_MASK 0x1
#define HEADER_STARTPKT_SHIFT 56
#define HEADER_STARTPKT_MASK 0x1
#define HEADER_BDCOUNT_SHIFT 36
#define HEADER_BDCOUNT_MASK 0x1f
#define HEADER_BDCOUNT_MAX HEADER_BDCOUNT_MASK
#define HEADER_FLAGS_SHIFT 16
#define HEADER_FLAGS_MASK 0xffff
#define HEADER_OPAQUE_SHIFT 0
#define HEADER_OPAQUE_MASK 0xffff
/* Source (SRC) descriptor format */
#define SRC_TYPE 2
#define SRC_LENGTH_SHIFT 44
#define SRC_LENGTH_MASK 0xffff
#define SRC_ADDR_SHIFT 0
#define SRC_ADDR_MASK 0x00000fffffffffff
/* Destination (DST) descriptor format */
#define DST_TYPE 3
#define DST_LENGTH_SHIFT 44
#define DST_LENGTH_MASK 0xffff
#define DST_ADDR_SHIFT 0
#define DST_ADDR_MASK 0x00000fffffffffff
/* Immediate (IMM) descriptor format */
#define IMM_TYPE 4
#define IMM_DATA_SHIFT 0
#define IMM_DATA_MASK 0x0fffffffffffffff
/* Next pointer (NPTR) descriptor format */
#define NPTR_TYPE 5
#define NPTR_TOGGLE_SHIFT 58
#define NPTR_TOGGLE_MASK 0x1
#define NPTR_ADDR_SHIFT 0
#define NPTR_ADDR_MASK 0x00000fffffffffff
/* Mega source (MSRC) descriptor format */
#define MSRC_TYPE 6
#define MSRC_LENGTH_SHIFT 44
#define MSRC_LENGTH_MASK 0xffff
#define MSRC_ADDR_SHIFT 0
#define MSRC_ADDR_MASK 0x00000fffffffffff
/* Mega destination (MDST) descriptor format */
#define MDST_TYPE 7
#define MDST_LENGTH_SHIFT 44
#define MDST_LENGTH_MASK 0xffff
#define MDST_ADDR_SHIFT 0
#define MDST_ADDR_MASK 0x00000fffffffffff
/* Source with tlast (SRCT) descriptor format */
#define SRCT_TYPE 8
#define SRCT_LENGTH_SHIFT 44
#define SRCT_LENGTH_MASK 0xffff
#define SRCT_ADDR_SHIFT 0
#define SRCT_ADDR_MASK 0x00000fffffffffff
/* Destination with tlast (DSTT) descriptor format */
#define DSTT_TYPE 9
#define DSTT_LENGTH_SHIFT 44
#define DSTT_LENGTH_MASK 0xffff
#define DSTT_ADDR_SHIFT 0
#define DSTT_ADDR_MASK 0x00000fffffffffff
/* Immediate with tlast (IMMT) descriptor format */
#define IMMT_TYPE 10
#define IMMT_DATA_SHIFT 0
#define IMMT_DATA_MASK 0x0fffffffffffffff
/* Descriptor helper macros */
#define DESC_DEC(_d, _s, _m) (((_d) >> (_s)) & (_m))
#define DESC_ENC(_d, _v, _s, _m) \
do { \
(_d) &= ~((u64)(_m) << (_s)); \
(_d) |= (((u64)(_v) & (_m)) << (_s)); \
} while (0)
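/*
 * Worked example (sketch): DESC_ENC(d, HEADER_TYPE, DESC_TYPE_SHIFT,
 * DESC_TYPE_MASK) clears bits 63:60 of d and stores the 4-bit type code 1
 * there, and DESC_DEC(d, DESC_TYPE_SHIFT, DESC_TYPE_MASK) reads the same
 * field back.
 */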
/* ====== FlexRM data structures ===== */
struct flexrm_ring {
/* Unprotected members */
int num;
struct flexrm_mbox *mbox;
void __iomem *regs;
bool irq_requested;
unsigned int irq;
cpumask_t irq_aff_hint;
unsigned int msi_timer_val;
unsigned int msi_count_threshold;
struct brcm_message *requests[RING_MAX_REQ_COUNT];
void *bd_base;
dma_addr_t bd_dma_base;
u32 bd_write_offset;
void *cmpl_base;
dma_addr_t cmpl_dma_base;
/* Atomic stats */
atomic_t msg_send_count;
atomic_t msg_cmpl_count;
/* Protected members */
spinlock_t lock;
DECLARE_BITMAP(requests_bmap, RING_MAX_REQ_COUNT);
u32 cmpl_read_offset;
};
struct flexrm_mbox {
struct device *dev;
void __iomem *regs;
u32 num_rings;
struct flexrm_ring *rings;
struct dma_pool *bd_pool;
struct dma_pool *cmpl_pool;
struct dentry *root;
struct mbox_controller controller;
};
/* ====== FlexRM ring descriptor helper routines ===== */
static u64 flexrm_read_desc(void *desc_ptr)
{
return le64_to_cpu(*((u64 *)desc_ptr));
}
static void flexrm_write_desc(void *desc_ptr, u64 desc)
{
*((u64 *)desc_ptr) = cpu_to_le64(desc);
}
static u32 flexrm_cmpl_desc_to_reqid(u64 cmpl_desc)
{
return (u32)(cmpl_desc & CMPL_OPAQUE_MASK);
}
static int flexrm_cmpl_desc_to_error(u64 cmpl_desc)
{
u32 status;
status = DESC_DEC(cmpl_desc, CMPL_DME_STATUS_SHIFT,
CMPL_DME_STATUS_MASK);
if (status & DME_STATUS_ERROR_MASK)
return -EIO;
status = DESC_DEC(cmpl_desc, CMPL_RM_STATUS_SHIFT,
CMPL_RM_STATUS_MASK);
status &= RM_STATUS_CODE_MASK;
if (status == RM_STATUS_CODE_AE_TIMEOUT)
return -ETIMEDOUT;
return 0;
}
static bool flexrm_is_next_table_desc(void *desc_ptr)
{
u64 desc = flexrm_read_desc(desc_ptr);
u32 type = DESC_DEC(desc, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
return (type == NPTR_TYPE) ? true : false;
}
static u64 flexrm_next_table_desc(u32 toggle, dma_addr_t next_addr)
{
u64 desc = 0;
DESC_ENC(desc, NPTR_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
DESC_ENC(desc, toggle, NPTR_TOGGLE_SHIFT, NPTR_TOGGLE_MASK);
DESC_ENC(desc, next_addr, NPTR_ADDR_SHIFT, NPTR_ADDR_MASK);
return desc;
}
static u64 flexrm_null_desc(u32 toggle)
{
u64 desc = 0;
DESC_ENC(desc, NULL_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
DESC_ENC(desc, toggle, NULL_TOGGLE_SHIFT, NULL_TOGGLE_MASK);
return desc;
}
static u32 flexrm_estimate_header_desc_count(u32 nhcnt)
{
u32 hcnt = nhcnt / HEADER_BDCOUNT_MAX;
if (!(nhcnt % HEADER_BDCOUNT_MAX))
hcnt += 1;
return hcnt;
}
static void flexrm_flip_header_toggle(void *desc_ptr)
{
u64 desc = flexrm_read_desc(desc_ptr);
if (desc & ((u64)0x1 << HEADER_TOGGLE_SHIFT))
desc &= ~((u64)0x1 << HEADER_TOGGLE_SHIFT);
else
desc |= ((u64)0x1 << HEADER_TOGGLE_SHIFT);
flexrm_write_desc(desc_ptr, desc);
}
static u64 flexrm_header_desc(u32 toggle, u32 startpkt, u32 endpkt,
u32 bdcount, u32 flags, u32 opaque)
{
u64 desc = 0;
DESC_ENC(desc, HEADER_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
DESC_ENC(desc, toggle, HEADER_TOGGLE_SHIFT, HEADER_TOGGLE_MASK);
DESC_ENC(desc, startpkt, HEADER_STARTPKT_SHIFT, HEADER_STARTPKT_MASK);
DESC_ENC(desc, endpkt, HEADER_ENDPKT_SHIFT, HEADER_ENDPKT_MASK);
DESC_ENC(desc, bdcount, HEADER_BDCOUNT_SHIFT, HEADER_BDCOUNT_MASK);
DESC_ENC(desc, flags, HEADER_FLAGS_SHIFT, HEADER_FLAGS_MASK);
DESC_ENC(desc, opaque, HEADER_OPAQUE_SHIFT, HEADER_OPAQUE_MASK);
return desc;
}
static void flexrm_enqueue_desc(u32 nhpos, u32 nhcnt, u32 reqid,
u64 desc, void **desc_ptr, u32 *toggle,
void *start_desc, void *end_desc)
{
u64 d;
u32 nhavail, _toggle, _startpkt, _endpkt, _bdcount;
/* Sanity check */
if (nhcnt <= nhpos)
return;
/*
* Each request or packet starts with a HEADER descriptor followed
* by one or more non-HEADER descriptors (SRC, SRCT, MSRC, DST,
* DSTT, MDST, IMM, and IMMT). The number of non-HEADER descriptors
* following a HEADER descriptor is given by the BDCOUNT field of
* the HEADER descriptor. The maximum value of the BDCOUNT field is
* 31, which means at most 31 non-HEADER descriptors can follow one
* HEADER descriptor.
*
* In general use, the number of non-HEADER descriptors can easily
* go beyond 31. To handle this situation, we have packet (or
* request) extension bits (STARTPKT and ENDPKT) in the HEADER
* descriptor.
*
* To use packet extension, the first HEADER descriptor of a request
* (or packet) will have STARTPKT=1 and ENDPKT=0. The intermediate
* HEADER descriptors will have STARTPKT=0 and ENDPKT=0. The last
* HEADER descriptor will have STARTPKT=0 and ENDPKT=1. Also, the
* TOGGLE bit of the first HEADER is written in the invalid state to
* ensure that FlexRM does not start fetching descriptors until all
* descriptors are enqueued. The caller of this function flips the
* TOGGLE bit of the first HEADER after all descriptors have been
* enqueued.
*/
if ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) {
/* Prepare the header descriptor */
nhavail = (nhcnt - nhpos);
_toggle = (nhpos == 0) ? !(*toggle) : (*toggle);
_startpkt = (nhpos == 0) ? 0x1 : 0x0;
_endpkt = (nhavail <= HEADER_BDCOUNT_MAX) ? 0x1 : 0x0;
_bdcount = (nhavail <= HEADER_BDCOUNT_MAX) ?
nhavail : HEADER_BDCOUNT_MAX;
d = flexrm_header_desc(_toggle, _startpkt, _endpkt,
_bdcount, 0x0, reqid);
/* Write header descriptor */
flexrm_write_desc(*desc_ptr, d);
/* Point to next descriptor */
*desc_ptr += sizeof(desc);
if (*desc_ptr == end_desc)
*desc_ptr = start_desc;
/* Skip next pointer descriptors */
while (flexrm_is_next_table_desc(*desc_ptr)) {
*toggle = (*toggle) ? 0 : 1;
*desc_ptr += sizeof(desc);
if (*desc_ptr == end_desc)
*desc_ptr = start_desc;
}
}
/* Write desired descriptor */
flexrm_write_desc(*desc_ptr, desc);
/* Point to next descriptor */
*desc_ptr += sizeof(desc);
if (*desc_ptr == end_desc)
*desc_ptr = start_desc;
/* Skip next pointer descriptors */
while (flexrm_is_next_table_desc(*desc_ptr)) {
*toggle = (*toggle) ? 0 : 1;
*desc_ptr += sizeof(desc);
if (*desc_ptr == end_desc)
*desc_ptr = start_desc;
}
}
static u64 flexrm_src_desc(dma_addr_t addr, unsigned int length)
{
u64 desc = 0;
DESC_ENC(desc, SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
DESC_ENC(desc, length, SRC_LENGTH_SHIFT, SRC_LENGTH_MASK);
DESC_ENC(desc, addr, SRC_ADDR_SHIFT, SRC_ADDR_MASK);
return desc;
}
static u64 flexrm_msrc_desc(dma_addr_t addr, unsigned int length_div_16)
{
u64 desc = 0;
DESC_ENC(desc, MSRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
DESC_ENC(desc, length_div_16, MSRC_LENGTH_SHIFT, MSRC_LENGTH_MASK);
DESC_ENC(desc, addr, MSRC_ADDR_SHIFT, MSRC_ADDR_MASK);
return desc;
}
static u64 flexrm_dst_desc(dma_addr_t addr, unsigned int length)
{
u64 desc = 0;
DESC_ENC(desc, DST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
DESC_ENC(desc, length, DST_LENGTH_SHIFT, DST_LENGTH_MASK);
DESC_ENC(desc, addr, DST_ADDR_SHIFT, DST_ADDR_MASK);
return desc;
}
static u64 flexrm_mdst_desc(dma_addr_t addr, unsigned int length_div_16)
{
u64 desc = 0;
DESC_ENC(desc, MDST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
DESC_ENC(desc, length_div_16, MDST_LENGTH_SHIFT, MDST_LENGTH_MASK);
DESC_ENC(desc, addr, MDST_ADDR_SHIFT, MDST_ADDR_MASK);
return desc;
}
static u64 flexrm_imm_desc(u64 data)
{
u64 desc = 0;
DESC_ENC(desc, IMM_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
DESC_ENC(desc, data, IMM_DATA_SHIFT, IMM_DATA_MASK);
return desc;
}
static u64 flexrm_srct_desc(dma_addr_t addr, unsigned int length)
{
u64 desc = 0;
DESC_ENC(desc, SRCT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
DESC_ENC(desc, length, SRCT_LENGTH_SHIFT, SRCT_LENGTH_MASK);
DESC_ENC(desc, addr, SRCT_ADDR_SHIFT, SRCT_ADDR_MASK);
return desc;
}
static u64 flexrm_dstt_desc(dma_addr_t addr, unsigned int length)
{
u64 desc = 0;
DESC_ENC(desc, DSTT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
DESC_ENC(desc, length, DSTT_LENGTH_SHIFT, DSTT_LENGTH_MASK);
DESC_ENC(desc, addr, DSTT_ADDR_SHIFT, DSTT_ADDR_MASK);
return desc;
}
static u64 flexrm_immt_desc(u64 data)
{
u64 desc = 0;
DESC_ENC(desc, IMMT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
DESC_ENC(desc, data, IMMT_DATA_SHIFT, IMMT_DATA_MASK);
return desc;
}
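/*
* Each helper above packs one 64-bit descriptor. For example
* (hypothetical values), flexrm_dst_desc(0x10000000, 256) encodes
* type=DST, length=256 and address=0x10000000 into the corresponding
* bit fields; flexrm_msrc_desc()/flexrm_mdst_desc() take the length
* divided by 16, and the SRCT/DSTT/IMMT variants are the "tlast"
* forms used by the SBA path below.
*/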
static bool flexrm_spu_sanity_check(struct brcm_message *msg)
{
struct scatterlist *sg;
if (!msg->spu.src || !msg->spu.dst)
return false;
for (sg = msg->spu.src; sg; sg = sg_next(sg)) {
if (sg->length & 0xf) {
if (sg->length > SRC_LENGTH_MASK)
return false;
} else {
if (sg->length > (MSRC_LENGTH_MASK * 16))
return false;
}
}
for (sg = msg->spu.dst; sg; sg = sg_next(sg)) {
if (sg->length & 0xf) {
if (sg->length > DST_LENGTH_MASK)
return false;
} else {
if (sg->length > (MDST_LENGTH_MASK * 16))
return false;
}
}
return true;
}
static u32 flexrm_spu_estimate_nonheader_desc_count(struct brcm_message *msg)
{
u32 cnt = 0;
unsigned int dst_target = 0;
struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;
while (src_sg || dst_sg) {
if (src_sg) {
cnt++;
dst_target = src_sg->length;
src_sg = sg_next(src_sg);
} else
dst_target = UINT_MAX;
while (dst_target && dst_sg) {
cnt++;
if (dst_sg->length < dst_target)
dst_target -= dst_sg->length;
else
dst_target = 0;
dst_sg = sg_next(dst_sg);
}
}
return cnt;
}
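/*
* For example (hypothetical scatterlists), src = {64, 32} bytes and
* dst = {96} bytes walk as follows: src[0] -> cnt=1 and covers 64
* bytes of dst[0] (cnt=2, dst fully consumed); src[1] -> cnt=3 with
* no dst left, giving 3 non-HEADER descriptors in total.
*/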
static int flexrm_spu_dma_map(struct device *dev, struct brcm_message *msg)
{
int rc;
rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
DMA_TO_DEVICE);
if (!rc)
return -EIO;
rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
DMA_FROM_DEVICE);
if (!rc) {
dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
DMA_TO_DEVICE);
return -EIO;
}
return 0;
}
static void flexrm_spu_dma_unmap(struct device *dev, struct brcm_message *msg)
{
dma_unmap_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
DMA_FROM_DEVICE);
dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
DMA_TO_DEVICE);
}
static void *flexrm_spu_write_descs(struct brcm_message *msg, u32 nhcnt,
u32 reqid, void *desc_ptr, u32 toggle,
void *start_desc, void *end_desc)
{
u64 d;
u32 nhpos = 0;
void *orig_desc_ptr = desc_ptr;
unsigned int dst_target = 0;
struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;
while (src_sg || dst_sg) {
if (src_sg) {
if (sg_dma_len(src_sg) & 0xf)
d = flexrm_src_desc(sg_dma_address(src_sg),
sg_dma_len(src_sg));
else
d = flexrm_msrc_desc(sg_dma_address(src_sg),
sg_dma_len(src_sg)/16);
flexrm_enqueue_desc(nhpos, nhcnt, reqid,
d, &desc_ptr, &toggle,
start_desc, end_desc);
nhpos++;
dst_target = sg_dma_len(src_sg);
src_sg = sg_next(src_sg);
} else
dst_target = UINT_MAX;
while (dst_target && dst_sg) {
if (sg_dma_len(dst_sg) & 0xf)
d = flexrm_dst_desc(sg_dma_address(dst_sg),
sg_dma_len(dst_sg));
else
d = flexrm_mdst_desc(sg_dma_address(dst_sg),
sg_dma_len(dst_sg)/16);
flexrm_enqueue_desc(nhpos, nhcnt, reqid,
d, &desc_ptr, &toggle,
start_desc, end_desc);
nhpos++;
if (sg_dma_len(dst_sg) < dst_target)
dst_target -= sg_dma_len(dst_sg);
else
dst_target = 0;
dst_sg = sg_next(dst_sg);
}
}
/* Null descriptor with invalid toggle bit */
flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));
/* Ensure that descriptors have been written to memory */
wmb();
/* Flip toggle bit in header */
flexrm_flip_header_toggle(orig_desc_ptr);
return desc_ptr;
}
static bool flexrm_sba_sanity_check(struct brcm_message *msg)
{
u32 i;
if (!msg->sba.cmds || !msg->sba.cmds_count)
return false;
for (i = 0; i < msg->sba.cmds_count; i++) {
if (((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
(msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C)) &&
(msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT))
return false;
if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) &&
(msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
return false;
if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C) &&
(msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
return false;
if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP) &&
(msg->sba.cmds[i].resp_len > DSTT_LENGTH_MASK))
return false;
if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT) &&
(msg->sba.cmds[i].data_len > DSTT_LENGTH_MASK))
return false;
}
return true;
}
static u32 flexrm_sba_estimate_nonheader_desc_count(struct brcm_message *msg)
{
u32 i, cnt;
cnt = 0;
for (i = 0; i < msg->sba.cmds_count; i++) {
cnt++;
if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
(msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C))
cnt++;
if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP)
cnt++;
if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT)
cnt++;
}
return cnt;
}
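/*
* For example (hypothetical command), an SBA command with
* BRCM_SBA_CMD_TYPE_B and BRCM_SBA_CMD_HAS_RESP set counts as three
* non-HEADER descriptors: the command itself (IMM), its source data
* (SRCT) and the response buffer (DSTT), matching what
* flexrm_sba_write_descs() emits below.
*/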
static void *flexrm_sba_write_descs(struct brcm_message *msg, u32 nhcnt,
u32 reqid, void *desc_ptr, u32 toggle,
void *start_desc, void *end_desc)
{
u64 d;
u32 i, nhpos = 0;
struct brcm_sba_command *c;
void *orig_desc_ptr = desc_ptr;
/* Convert SBA commands into descriptors */
for (i = 0; i < msg->sba.cmds_count; i++) {
c = &msg->sba.cmds[i];
if ((c->flags & BRCM_SBA_CMD_HAS_RESP) &&
(c->flags & BRCM_SBA_CMD_HAS_OUTPUT)) {
/* Destination response descriptor */
d = flexrm_dst_desc(c->resp, c->resp_len);
flexrm_enqueue_desc(nhpos, nhcnt, reqid,
d, &desc_ptr, &toggle,
start_desc, end_desc);
nhpos++;
} else if (c->flags & BRCM_SBA_CMD_HAS_RESP) {
/* Destination response with tlast descriptor */
d = flexrm_dstt_desc(c->resp, c->resp_len);
flexrm_enqueue_desc(nhpos, nhcnt, reqid,
d, &desc_ptr, &toggle,
start_desc, end_desc);
nhpos++;
}
if (c->flags & BRCM_SBA_CMD_HAS_OUTPUT) {
/* Destination with tlast descriptor */
d = flexrm_dstt_desc(c->data, c->data_len);
flexrm_enqueue_desc(nhpos, nhcnt, reqid,
d, &desc_ptr, &toggle,
start_desc, end_desc);
nhpos++;
}
if (c->flags & BRCM_SBA_CMD_TYPE_B) {
/* Command as immediate descriptor */
d = flexrm_imm_desc(c->cmd);
flexrm_enqueue_desc(nhpos, nhcnt, reqid,
d, &desc_ptr, &toggle,
start_desc, end_desc);
nhpos++;
} else {
/* Command as immediate descriptor with tlast */
d = flexrm_immt_desc(c->cmd);
flexrm_enqueue_desc(nhpos, nhcnt, reqid,
d, &desc_ptr, &toggle,
start_desc, end_desc);
nhpos++;
}
if ((c->flags & BRCM_SBA_CMD_TYPE_B) ||
(c->flags & BRCM_SBA_CMD_TYPE_C)) {
/* Source with tlast descriptor */
d = flexrm_srct_desc(c->data, c->data_len);
flexrm_enqueue_desc(nhpos, nhcnt, reqid,
d, &desc_ptr, &toggle,
start_desc, end_desc);
nhpos++;
}
}
/* Null descriptor with invalid toggle bit */
flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));
/* Ensure that descriptors have been written to memory */
wmb();
/* Flip toggle bit in header */
flexrm_flip_header_toggle(orig_desc_ptr);
return desc_ptr;
}
static bool flexrm_sanity_check(struct brcm_message *msg)
{
if (!msg)
return false;
switch (msg->type) {
case BRCM_MESSAGE_SPU:
return flexrm_spu_sanity_check(msg);
case BRCM_MESSAGE_SBA:
return flexrm_sba_sanity_check(msg);
default:
return false;
}
}
static u32 flexrm_estimate_nonheader_desc_count(struct brcm_message *msg)
{
if (!msg)
return 0;
switch (msg->type) {
case BRCM_MESSAGE_SPU:
return flexrm_spu_estimate_nonheader_desc_count(msg);
case BRCM_MESSAGE_SBA:
return flexrm_sba_estimate_nonheader_desc_count(msg);
default:
return 0;
}
}
static int flexrm_dma_map(struct device *dev, struct brcm_message *msg)
{
if (!dev || !msg)
return -EINVAL;
switch (msg->type) {
case BRCM_MESSAGE_SPU:
return flexrm_spu_dma_map(dev, msg);
default:
break;
}
return 0;
}
static void flexrm_dma_unmap(struct device *dev, struct brcm_message *msg)
{
if (!dev || !msg)
return;
switch (msg->type) {
case BRCM_MESSAGE_SPU:
flexrm_spu_dma_unmap(dev, msg);
break;
default:
break;
}
}
static void *flexrm_write_descs(struct brcm_message *msg, u32 nhcnt,
u32 reqid, void *desc_ptr, u32 toggle,
void *start_desc, void *end_desc)
{
if (!msg || !desc_ptr || !start_desc || !end_desc)
return ERR_PTR(-ENOTSUPP);
if ((desc_ptr < start_desc) || (end_desc <= desc_ptr))
return ERR_PTR(-ERANGE);
switch (msg->type) {
case BRCM_MESSAGE_SPU:
return flexrm_spu_write_descs(msg, nhcnt, reqid,
desc_ptr, toggle,
start_desc, end_desc);
case BRCM_MESSAGE_SBA:
return flexrm_sba_write_descs(msg, nhcnt, reqid,
desc_ptr, toggle,
start_desc, end_desc);
default:
return ERR_PTR(-ENOTSUPP);
}
}
/* ====== FlexRM driver helper routines ===== */
static void flexrm_write_config_in_seqfile(struct flexrm_mbox *mbox,
struct seq_file *file)
{
int i;
const char *state;
struct flexrm_ring *ring;
seq_printf(file, "%-5s %-9s %-18s %-10s %-18s %-10s\n",
"Ring#", "State", "BD_Addr", "BD_Size",
"Cmpl_Addr", "Cmpl_Size");
for (i = 0; i < mbox->num_rings; i++) {
ring = &mbox->rings[i];
if (readl(ring->regs + RING_CONTROL) &
BIT(CONTROL_ACTIVE_SHIFT))
state = "active";
else
state = "inactive";
seq_printf(file,
"%-5d %-9s 0x%016llx 0x%08x 0x%016llx 0x%08x\n",
ring->num, state,
(unsigned long long)ring->bd_dma_base,
(u32)RING_BD_SIZE,
(unsigned long long)ring->cmpl_dma_base,
(u32)RING_CMPL_SIZE);
}
}
static void flexrm_write_stats_in_seqfile(struct flexrm_mbox *mbox,
struct seq_file *file)
{
int i;
u32 val, bd_read_offset;
struct flexrm_ring *ring;
seq_printf(file, "%-5s %-10s %-10s %-10s %-11s %-11s\n",
"Ring#", "BD_Read", "BD_Write",
"Cmpl_Read", "Submitted", "Completed");
for (i = 0; i < mbox->num_rings; i++) {
ring = &mbox->rings[i];
bd_read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
bd_read_offset *= RING_DESC_SIZE;
bd_read_offset += (u32)(BD_START_ADDR_DECODE(val) -
ring->bd_dma_base);
seq_printf(file, "%-5d 0x%08x 0x%08x 0x%08x %-11d %-11d\n",
ring->num,
(u32)bd_read_offset,
(u32)ring->bd_write_offset,
(u32)ring->cmpl_read_offset,
(u32)atomic_read(&ring->msg_send_count),
(u32)atomic_read(&ring->msg_cmpl_count));
}
}
static int flexrm_new_request(struct flexrm_ring *ring,
struct brcm_message *batch_msg,
struct brcm_message *msg)
{
void *next;
unsigned long flags;
u32 val, count, nhcnt;
u32 read_offset, write_offset;
bool exit_cleanup = false;
int ret = 0, reqid;
/* Do sanity check on message */
if (!flexrm_sanity_check(msg))
return -EIO;
msg->error = 0;
/* Allocate a request id for this message; bail out if none are free. */
spin_lock_irqsave(&ring->lock, flags);
reqid = bitmap_find_free_region(ring->requests_bmap,
RING_MAX_REQ_COUNT, 0);
spin_unlock_irqrestore(&ring->lock, flags);
if (reqid < 0)
return -ENOSPC;
ring->requests[reqid] = msg;
/* Do DMA mappings for the message */
ret = flexrm_dma_map(ring->mbox->dev, msg);
if (ret < 0) {
ring->requests[reqid] = NULL;
spin_lock_irqsave(&ring->lock, flags);
bitmap_release_region(ring->requests_bmap, reqid, 0);
spin_unlock_irqrestore(&ring->lock, flags);
return ret;
}
/* Determine current HW BD read offset */
read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
read_offset *= RING_DESC_SIZE;
read_offset += (u32)(BD_START_ADDR_DECODE(val) - ring->bd_dma_base);
/*
* Number of required descriptors = number of non-header descriptors +
* number of header descriptors +
* 1x null descriptor
*/
nhcnt = flexrm_estimate_nonheader_desc_count(msg);
count = flexrm_estimate_header_desc_count(nhcnt) + nhcnt + 1;
/* Check for available descriptor space. */
write_offset = ring->bd_write_offset;
while (count) {
if (!flexrm_is_next_table_desc(ring->bd_base + write_offset))
count--;
write_offset += RING_DESC_SIZE;
if (write_offset == RING_BD_SIZE)
write_offset = 0x0;
if (write_offset == read_offset)
break;
}
if (count) {
ret = -ENOSPC;
exit_cleanup = true;
goto exit;
}
/* Write descriptors to ring */
next = flexrm_write_descs(msg, nhcnt, reqid,
ring->bd_base + ring->bd_write_offset,
RING_BD_TOGGLE_VALID(ring->bd_write_offset),
ring->bd_base, ring->bd_base + RING_BD_SIZE);
if (IS_ERR(next)) {
ret = PTR_ERR(next);
exit_cleanup = true;
goto exit;
}
/* Save ring BD write offset */
ring->bd_write_offset = (unsigned long)(next - ring->bd_base);
/* Increment number of messages sent */
atomic_inc_return(&ring->msg_send_count);
exit:
/* Update error status in message */
msg->error = ret;
/* Cleanup if we failed */
if (exit_cleanup) {
flexrm_dma_unmap(ring->mbox->dev, msg);
ring->requests[reqid] = NULL;
spin_lock_irqsave(&ring->lock, flags);
bitmap_release_region(ring->requests_bmap, reqid, 0);
spin_unlock_irqrestore(&ring->lock, flags);
}
return ret;
}
static int flexrm_process_completions(struct flexrm_ring *ring)
{
u64 desc;
int err, count = 0;
unsigned long flags;
struct brcm_message *msg = NULL;
u32 reqid, cmpl_read_offset, cmpl_write_offset;
struct mbox_chan *chan = &ring->mbox->controller.chans[ring->num];
spin_lock_irqsave(&ring->lock, flags);
/*
* Get current completion read and write offset
*
* Note: We should read the completion write pointer at least once
* after we get an MSI interrupt because the HW maintains an internal
* MSI status which allows the next MSI interrupt only after the
* completion write pointer has been read.
*/
cmpl_write_offset = readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
cmpl_write_offset *= RING_DESC_SIZE;
cmpl_read_offset = ring->cmpl_read_offset;
ring->cmpl_read_offset = cmpl_write_offset;
spin_unlock_irqrestore(&ring->lock, flags);
/* For each completed request notify mailbox clients */
reqid = 0;
while (cmpl_read_offset != cmpl_write_offset) {
/* Dequeue next completion descriptor */
desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset));
/* Next read offset */
cmpl_read_offset += RING_DESC_SIZE;
if (cmpl_read_offset == RING_CMPL_SIZE)
cmpl_read_offset = 0;
/* Decode error from completion descriptor */
err = flexrm_cmpl_desc_to_error(desc);
if (err < 0) {
dev_warn(ring->mbox->dev,
"ring%d got completion desc=0x%lx with error %d\n",
ring->num, (unsigned long)desc, err);
}
/* Determine request id from completion descriptor */
reqid = flexrm_cmpl_desc_to_reqid(desc);
/* Determine message pointer based on reqid */
msg = ring->requests[reqid];
if (!msg) {
dev_warn(ring->mbox->dev,
"ring%d null msg pointer for completion desc=0x%lx\n",
ring->num, (unsigned long)desc);
continue;
}
/* Release reqid for recycling */
ring->requests[reqid] = NULL;
spin_lock_irqsave(&ring->lock, flags);
bitmap_release_region(ring->requests_bmap, reqid, 0);
spin_unlock_irqrestore(&ring->lock, flags);
/* Unmap DMA mappings */
flexrm_dma_unmap(ring->mbox->dev, msg);
/* Give-back message to mailbox client */
msg->error = err;
mbox_chan_received_data(chan, msg);
/* Increment number of completions processed */
atomic_inc_return(&ring->msg_cmpl_count);
count++;
}
return count;
}
/* ====== FlexRM Debugfs callbacks ====== */
static int flexrm_debugfs_conf_show(struct seq_file *file, void *offset)
{
struct flexrm_mbox *mbox = dev_get_drvdata(file->private);
/* Write config in file */
flexrm_write_config_in_seqfile(mbox, file);
return 0;
}
static int flexrm_debugfs_stats_show(struct seq_file *file, void *offset)
{
struct flexrm_mbox *mbox = dev_get_drvdata(file->private);
/* Write stats in file */
flexrm_write_stats_in_seqfile(mbox, file);
return 0;
}
/* ====== FlexRM interrupt handler ===== */
static irqreturn_t flexrm_irq_event(int irq, void *dev_id)
{
/* We only have MSI for completions so just wakeup IRQ thread */
/* Ring related errors will be informed via completion descriptors */
return IRQ_WAKE_THREAD;
}
static irqreturn_t flexrm_irq_thread(int irq, void *dev_id)
{
flexrm_process_completions(dev_id);
return IRQ_HANDLED;
}
/* ====== FlexRM mailbox callbacks ===== */
static int flexrm_send_data(struct mbox_chan *chan, void *data)
{
int i, rc;
struct flexrm_ring *ring = chan->con_priv;
struct brcm_message *msg = data;
if (msg->type == BRCM_MESSAGE_BATCH) {
for (i = msg->batch.msgs_queued;
i < msg->batch.msgs_count; i++) {
rc = flexrm_new_request(ring, msg,
&msg->batch.msgs[i]);
if (rc) {
msg->error = rc;
return rc;
}
msg->batch.msgs_queued++;
}
return 0;
}
return flexrm_new_request(ring, NULL, data);
}
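/*
* A minimal client-side sketch (assuming a client driver with a valid
* "mboxes" DT property; names such as my_client/my_msg/cmds are
* hypothetical):
*
*	struct mbox_client my_client = {
*		.dev = dev,
*		.tx_block = false,
*	};
*	struct mbox_chan *chan = mbox_request_channel(&my_client, 0);
*	struct brcm_message my_msg = {
*		.type = BRCM_MESSAGE_SBA,
*		.sba.cmds = cmds,
*		.sba.cmds_count = ncmds,
*	};
*	mbox_send_message(chan, &my_msg);
*
* flexrm_send_data() above then turns the message into ring
* descriptors via flexrm_new_request().
*/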
static bool flexrm_peek_data(struct mbox_chan *chan)
{
int cnt = flexrm_process_completions(chan->con_priv);
return (cnt > 0) ? true : false;
}
static int flexrm_startup(struct mbox_chan *chan)
{
u64 d;
u32 val, off;
int ret = 0;
dma_addr_t next_addr;
struct flexrm_ring *ring = chan->con_priv;
/* Allocate BD memory */
ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool,
GFP_KERNEL, &ring->bd_dma_base);
if (!ring->bd_base) {
dev_err(ring->mbox->dev,
"can't allocate BD memory for ring%d\n",
ring->num);
ret = -ENOMEM;
goto fail;
}
/* Configure next table pointer entries in BD memory */
for (off = 0; off < RING_BD_SIZE; off += RING_DESC_SIZE) {
next_addr = off + RING_DESC_SIZE;
if (next_addr == RING_BD_SIZE)
next_addr = 0;
next_addr += ring->bd_dma_base;
if (RING_BD_ALIGN_CHECK(next_addr))
d = flexrm_next_table_desc(RING_BD_TOGGLE_VALID(off),
next_addr);
else
d = flexrm_null_desc(RING_BD_TOGGLE_INVALID(off));
flexrm_write_desc(ring->bd_base + off, d);
}
/* Allocate completion memory */
ring->cmpl_base = dma_pool_zalloc(ring->mbox->cmpl_pool,
GFP_KERNEL, &ring->cmpl_dma_base);
if (!ring->cmpl_base) {
dev_err(ring->mbox->dev,
"can't allocate completion memory for ring%d\n",
ring->num);
ret = -ENOMEM;
goto fail_free_bd_memory;
}
/* Request IRQ */
if (ring->irq == UINT_MAX) {
dev_err(ring->mbox->dev,
"ring%d IRQ not available\n", ring->num);
ret = -ENODEV;
goto fail_free_cmpl_memory;
}
ret = request_threaded_irq(ring->irq,
flexrm_irq_event,
flexrm_irq_thread,
0, dev_name(ring->mbox->dev), ring);
if (ret) {
dev_err(ring->mbox->dev,
"failed to request ring%d IRQ\n", ring->num);
goto fail_free_cmpl_memory;
}
ring->irq_requested = true;
/* Set IRQ affinity hint */
ring->irq_aff_hint = CPU_MASK_NONE;
val = ring->mbox->num_rings;
val = (num_online_cpus() < val) ? val / num_online_cpus() : 1;
cpumask_set_cpu((ring->num / val) % num_online_cpus(),
&ring->irq_aff_hint);
ret = irq_update_affinity_hint(ring->irq, &ring->irq_aff_hint);
if (ret) {
dev_err(ring->mbox->dev,
"failed to set IRQ affinity hint for ring%d\n",
ring->num);
goto fail_free_irq;
}
/* Disable/inactivate ring */
writel_relaxed(0x0, ring->regs + RING_CONTROL);
/* Program BD start address */
val = BD_START_ADDR_VALUE(ring->bd_dma_base);
writel_relaxed(val, ring->regs + RING_BD_START_ADDR);
/* BD write pointer will be the same as the HW write pointer */
ring->bd_write_offset =
readl_relaxed(ring->regs + RING_BD_WRITE_PTR);
ring->bd_write_offset *= RING_DESC_SIZE;
/* Program completion start address */
val = CMPL_START_ADDR_VALUE(ring->cmpl_dma_base);
writel_relaxed(val, ring->regs + RING_CMPL_START_ADDR);
/* Completion read pointer will be the same as the HW write pointer */
ring->cmpl_read_offset =
readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
ring->cmpl_read_offset *= RING_DESC_SIZE;
/* Read ring Tx, Rx, and Outstanding counts to clear */
readl_relaxed(ring->regs + RING_NUM_REQ_RECV_LS);
readl_relaxed(ring->regs + RING_NUM_REQ_RECV_MS);
readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_LS);
readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_MS);
readl_relaxed(ring->regs + RING_NUM_REQ_OUTSTAND);
/* Configure RING_MSI_CONTROL */
val = 0;
val |= (ring->msi_timer_val << MSI_TIMER_VAL_SHIFT);
val |= BIT(MSI_ENABLE_SHIFT);
val |= (ring->msi_count_threshold & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;
writel_relaxed(val, ring->regs + RING_MSI_CONTROL);
/* Enable/activate ring */
val = BIT(CONTROL_ACTIVE_SHIFT);
writel_relaxed(val, ring->regs + RING_CONTROL);
/* Reset stats to zero */
atomic_set(&ring->msg_send_count, 0);
atomic_set(&ring->msg_cmpl_count, 0);
return 0;
fail_free_irq:
free_irq(ring->irq, ring);
ring->irq_requested = false;
fail_free_cmpl_memory:
dma_pool_free(ring->mbox->cmpl_pool,
ring->cmpl_base, ring->cmpl_dma_base);
ring->cmpl_base = NULL;
fail_free_bd_memory:
dma_pool_free(ring->mbox->bd_pool,
ring->bd_base, ring->bd_dma_base);
ring->bd_base = NULL;
fail:
return ret;
}
static void flexrm_shutdown(struct mbox_chan *chan)
{
u32 reqid;
unsigned int timeout;
struct brcm_message *msg;
struct flexrm_ring *ring = chan->con_priv;
/* Disable/inactivate ring */
writel_relaxed(0x0, ring->regs + RING_CONTROL);
/* Set ring flush state */
timeout = 1000; /* timeout of 1s */
writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
ring->regs + RING_CONTROL);
do {
if (readl_relaxed(ring->regs + RING_FLUSH_DONE) &
FLUSH_DONE_MASK)
break;
mdelay(1);
} while (--timeout);
if (!timeout)
dev_err(ring->mbox->dev,
"setting ring%d flush state timedout\n", ring->num);
/* Clear ring flush state */
timeout = 1000; /* timeout of 1s */
writel_relaxed(0x0, ring->regs + RING_CONTROL);
do {
if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
FLUSH_DONE_MASK))
break;
mdelay(1);
} while (--timeout);
if (!timeout)
dev_err(ring->mbox->dev,
"clearing ring%d flush state timedout\n", ring->num);
/* Abort all in-flight requests */
for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
msg = ring->requests[reqid];
if (!msg)
continue;
/* Release reqid for recycling */
ring->requests[reqid] = NULL;
/* Unmap DMA mappings */
flexrm_dma_unmap(ring->mbox->dev, msg);
/* Give-back message to mailbox client */
msg->error = -EIO;
mbox_chan_received_data(chan, msg);
}
/* Clear requests bitmap */
bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);
/* Release IRQ */
if (ring->irq_requested) {
irq_update_affinity_hint(ring->irq, NULL);
free_irq(ring->irq, ring);
ring->irq_requested = false;
}
/* Free-up completion descriptor ring */
if (ring->cmpl_base) {
dma_pool_free(ring->mbox->cmpl_pool,
ring->cmpl_base, ring->cmpl_dma_base);
ring->cmpl_base = NULL;
}
/* Free-up BD descriptor ring */
if (ring->bd_base) {
dma_pool_free(ring->mbox->bd_pool,
ring->bd_base, ring->bd_dma_base);
ring->bd_base = NULL;
}
}
static const struct mbox_chan_ops flexrm_mbox_chan_ops = {
.send_data = flexrm_send_data,
.startup = flexrm_startup,
.shutdown = flexrm_shutdown,
.peek_data = flexrm_peek_data,
};
static struct mbox_chan *flexrm_mbox_of_xlate(struct mbox_controller *cntlr,
const struct of_phandle_args *pa)
{
struct mbox_chan *chan;
struct flexrm_ring *ring;
if (pa->args_count < 3)
return ERR_PTR(-EINVAL);
if (pa->args[0] >= cntlr->num_chans)
return ERR_PTR(-ENOENT);
if (pa->args[1] > MSI_COUNT_MASK)
return ERR_PTR(-EINVAL);
if (pa->args[2] > MSI_TIMER_VAL_MASK)
return ERR_PTR(-EINVAL);
chan = &cntlr->chans[pa->args[0]];
ring = chan->con_priv;
ring->msi_count_threshold = pa->args[1];
ring->msi_timer_val = pa->args[2];
return chan;
}
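/*
* The three mbox specifier cells decoded above are, in order, the ring
* index, the MSI count threshold and the MSI timer value (bounded by
* MSI_COUNT_MASK and MSI_TIMER_VAL_MASK respectively). A client would
* therefore reference ring 0 with something like (hypothetical phandle
* and values):
*
*	mboxes = <&flexrm_mbox 0 0x1 0x1f00>;
*/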
/* ====== FlexRM platform driver ===== */
static void flexrm_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
{
struct device *dev = msi_desc_to_dev(desc);
struct flexrm_mbox *mbox = dev_get_drvdata(dev);
struct flexrm_ring *ring = &mbox->rings[desc->msi_index];
/* Configure per-Ring MSI registers */
writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS);
writel_relaxed(msg->address_hi, ring->regs + RING_MSI_ADDR_MS);
writel_relaxed(msg->data, ring->regs + RING_MSI_DATA_VALUE);
}
static int flexrm_mbox_probe(struct platform_device *pdev)
{
int index, ret = 0;
void __iomem *regs;
void __iomem *regs_end;
struct resource *iomem;
struct flexrm_ring *ring;
struct flexrm_mbox *mbox;
struct device *dev = &pdev->dev;
/* Allocate driver mailbox struct */
mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
if (!mbox) {
ret = -ENOMEM;
goto fail;
}
mbox->dev = dev;
platform_set_drvdata(pdev, mbox);
/* Get resource for registers and map registers of all rings */
mbox->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &iomem);
if (!iomem || (resource_size(iomem) < RING_REGS_SIZE)) {
ret = -ENODEV;
goto fail;
} else if (IS_ERR(mbox->regs)) {
ret = PTR_ERR(mbox->regs);
goto fail;
}
regs_end = mbox->regs + resource_size(iomem);
/* Scan and count available rings */
mbox->num_rings = 0;
for (regs = mbox->regs; regs < regs_end; regs += RING_REGS_SIZE) {
if (readl_relaxed(regs + RING_VER) == RING_VER_MAGIC)
mbox->num_rings++;
}
if (!mbox->num_rings) {
ret = -ENODEV;
goto fail;
}
/* Allocate driver ring structs */
ring = devm_kcalloc(dev, mbox->num_rings, sizeof(*ring), GFP_KERNEL);
if (!ring) {
ret = -ENOMEM;
goto fail;
}
mbox->rings = ring;
/* Initialize members of driver ring structs */
regs = mbox->regs;
for (index = 0; index < mbox->num_rings; index++) {
ring = &mbox->rings[index];
ring->num = index;
ring->mbox = mbox;
while ((regs < regs_end) &&
(readl_relaxed(regs + RING_VER) != RING_VER_MAGIC))
regs += RING_REGS_SIZE;
if (regs_end <= regs) {
ret = -ENODEV;
goto fail;
}
ring->regs = regs;
regs += RING_REGS_SIZE;
ring->irq = UINT_MAX;
ring->irq_requested = false;
ring->msi_timer_val = MSI_TIMER_VAL_MASK;
ring->msi_count_threshold = 0x1;
memset(ring->requests, 0, sizeof(ring->requests));
ring->bd_base = NULL;
ring->bd_dma_base = 0;
ring->cmpl_base = NULL;
ring->cmpl_dma_base = 0;
atomic_set(&ring->msg_send_count, 0);
atomic_set(&ring->msg_cmpl_count, 0);
spin_lock_init(&ring->lock);
bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);
ring->cmpl_read_offset = 0;
}
/* FlexRM is capable of 40-bit physical addresses only */
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
if (ret) {
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
goto fail;
}
/* Create DMA pool for ring BD memory */
mbox->bd_pool = dma_pool_create("bd", dev, RING_BD_SIZE,
1 << RING_BD_ALIGN_ORDER, 0);
if (!mbox->bd_pool) {
ret = -ENOMEM;
goto fail;
}
/* Create DMA pool for ring completion memory */
mbox->cmpl_pool = dma_pool_create("cmpl", dev, RING_CMPL_SIZE,
1 << RING_CMPL_ALIGN_ORDER, 0);
if (!mbox->cmpl_pool) {
ret = -ENOMEM;
goto fail_destroy_bd_pool;
}
/* Allocate platform MSIs for each ring */
ret = platform_msi_domain_alloc_irqs(dev, mbox->num_rings,
flexrm_mbox_msi_write);
if (ret)
goto fail_destroy_cmpl_pool;
/* Save allocated IRQ numbers for each ring */
for (index = 0; index < mbox->num_rings; index++)
mbox->rings[index].irq = msi_get_virq(dev, index);
/* Check availability of debugfs */
if (!debugfs_initialized())
goto skip_debugfs;
/* Create debugfs root entry */
mbox->root = debugfs_create_dir(dev_name(mbox->dev), NULL);
/* Create debugfs config entry */
debugfs_create_devm_seqfile(mbox->dev, "config", mbox->root,
flexrm_debugfs_conf_show);
/* Create debugfs stats entry */
debugfs_create_devm_seqfile(mbox->dev, "stats", mbox->root,
flexrm_debugfs_stats_show);
skip_debugfs:
/* Initialize mailbox controller */
mbox->controller.txdone_irq = false;
mbox->controller.txdone_poll = false;
mbox->controller.ops = &flexrm_mbox_chan_ops;
mbox->controller.dev = dev;
mbox->controller.num_chans = mbox->num_rings;
mbox->controller.of_xlate = flexrm_mbox_of_xlate;
mbox->controller.chans = devm_kcalloc(dev, mbox->num_rings,
sizeof(*mbox->controller.chans), GFP_KERNEL);
if (!mbox->controller.chans) {
ret = -ENOMEM;
goto fail_free_debugfs_root;
}
for (index = 0; index < mbox->num_rings; index++)
mbox->controller.chans[index].con_priv = &mbox->rings[index];
/* Register mailbox controller */
ret = devm_mbox_controller_register(dev, &mbox->controller);
if (ret)
goto fail_free_debugfs_root;
dev_info(dev, "registered flexrm mailbox with %d channels\n",
mbox->controller.num_chans);
return 0;
fail_free_debugfs_root:
debugfs_remove_recursive(mbox->root);
platform_msi_domain_free_irqs(dev);
fail_destroy_cmpl_pool:
dma_pool_destroy(mbox->cmpl_pool);
fail_destroy_bd_pool:
dma_pool_destroy(mbox->bd_pool);
fail:
return ret;
}
static int flexrm_mbox_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct flexrm_mbox *mbox = platform_get_drvdata(pdev);
debugfs_remove_recursive(mbox->root);
platform_msi_domain_free_irqs(dev);
dma_pool_destroy(mbox->cmpl_pool);
dma_pool_destroy(mbox->bd_pool);
return 0;
}
static const struct of_device_id flexrm_mbox_of_match[] = {
{ .compatible = "brcm,iproc-flexrm-mbox", },
{},
};
MODULE_DEVICE_TABLE(of, flexrm_mbox_of_match);
static struct platform_driver flexrm_mbox_driver = {
.driver = {
.name = "brcm-flexrm-mbox",
.of_match_table = flexrm_mbox_of_match,
},
.probe = flexrm_mbox_probe,
.remove = flexrm_mbox_remove,
};
module_platform_driver(flexrm_mbox_driver);
MODULE_AUTHOR("Anup Patel <[email protected]>");
MODULE_DESCRIPTION("Broadcom FlexRM mailbox driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/mailbox/bcm-flexrm-mailbox.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013-2015 Fujitsu Semiconductor Ltd.
* Copyright (C) 2015 Linaro Ltd.
* Based on ARM MHU driver by Jassi Brar <[email protected]>
* Copyright (C) 2020 ARM Ltd.
*/
#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#define INTR_STAT_OFS 0x0
#define INTR_SET_OFS 0x8
#define INTR_CLR_OFS 0x10
#define MHU_LP_OFFSET 0x0
#define MHU_HP_OFFSET 0x20
#define MHU_SEC_OFFSET 0x200
#define TX_REG_OFFSET 0x100
#define MHU_CHANS 3 /* Secure, Non-Secure High and Low Priority */
#define MHU_CHAN_MAX 20 /* Max channels to save on unused RAM */
#define MHU_NUM_DOORBELLS 32
struct mhu_db_link {
unsigned int irq;
void __iomem *tx_reg;
void __iomem *rx_reg;
};
struct arm_mhu {
void __iomem *base;
struct mhu_db_link mlink[MHU_CHANS];
struct mbox_controller mbox;
struct device *dev;
};
/**
* struct mhu_db_channel - ARM MHU Mailbox allocated channel information
*
* @mhu: Pointer to parent mailbox device
* @pchan: Physical channel within which this doorbell resides
* @doorbell: doorbell number pertaining to this channel
*/
struct mhu_db_channel {
struct arm_mhu *mhu;
unsigned int pchan;
unsigned int doorbell;
};
static inline struct mbox_chan *
mhu_db_mbox_to_channel(struct mbox_controller *mbox, unsigned int pchan,
unsigned int doorbell)
{
int i;
struct mhu_db_channel *chan_info;
for (i = 0; i < mbox->num_chans; i++) {
chan_info = mbox->chans[i].con_priv;
if (chan_info && chan_info->pchan == pchan &&
chan_info->doorbell == doorbell)
return &mbox->chans[i];
}
return NULL;
}
static void mhu_db_mbox_clear_irq(struct mbox_chan *chan)
{
struct mhu_db_channel *chan_info = chan->con_priv;
void __iomem *base = chan_info->mhu->mlink[chan_info->pchan].rx_reg;
writel_relaxed(BIT(chan_info->doorbell), base + INTR_CLR_OFS);
}
static unsigned int mhu_db_mbox_irq_to_pchan_num(struct arm_mhu *mhu, int irq)
{
unsigned int pchan;
for (pchan = 0; pchan < MHU_CHANS; pchan++)
if (mhu->mlink[pchan].irq == irq)
break;
return pchan;
}
static struct mbox_chan *
mhu_db_mbox_irq_to_channel(struct arm_mhu *mhu, unsigned int pchan)
{
unsigned long bits;
unsigned int doorbell;
struct mbox_chan *chan = NULL;
struct mbox_controller *mbox = &mhu->mbox;
void __iomem *base = mhu->mlink[pchan].rx_reg;
bits = readl_relaxed(base + INTR_STAT_OFS);
if (!bits)
/* No IRQs fired in specified physical channel */
return NULL;
/* An IRQ has fired, find the associated channel */
for (doorbell = 0; bits; doorbell++) {
if (!test_and_clear_bit(doorbell, &bits))
continue;
chan = mhu_db_mbox_to_channel(mbox, pchan, doorbell);
if (chan)
break;
dev_err(mbox->dev,
"Channel not registered: pchan: %d doorbell: %d\n",
pchan, doorbell);
}
return chan;
}
static irqreturn_t mhu_db_mbox_rx_handler(int irq, void *data)
{
struct mbox_chan *chan;
struct arm_mhu *mhu = data;
unsigned int pchan = mhu_db_mbox_irq_to_pchan_num(mhu, irq);
while (NULL != (chan = mhu_db_mbox_irq_to_channel(mhu, pchan))) {
mbox_chan_received_data(chan, NULL);
mhu_db_mbox_clear_irq(chan);
}
return IRQ_HANDLED;
}
static bool mhu_db_last_tx_done(struct mbox_chan *chan)
{
struct mhu_db_channel *chan_info = chan->con_priv;
void __iomem *base = chan_info->mhu->mlink[chan_info->pchan].tx_reg;
if (readl_relaxed(base + INTR_STAT_OFS) & BIT(chan_info->doorbell))
return false;
return true;
}
static int mhu_db_send_data(struct mbox_chan *chan, void *data)
{
struct mhu_db_channel *chan_info = chan->con_priv;
void __iomem *base = chan_info->mhu->mlink[chan_info->pchan].tx_reg;
/* Send event to co-processor */
writel_relaxed(BIT(chan_info->doorbell), base + INTR_SET_OFS);
return 0;
}
static int mhu_db_startup(struct mbox_chan *chan)
{
mhu_db_mbox_clear_irq(chan);
return 0;
}
static void mhu_db_shutdown(struct mbox_chan *chan)
{
struct mhu_db_channel *chan_info = chan->con_priv;
struct mbox_controller *mbox = &chan_info->mhu->mbox;
int i;
for (i = 0; i < mbox->num_chans; i++)
if (chan == &mbox->chans[i])
break;
if (mbox->num_chans == i) {
dev_warn(mbox->dev, "Request to free non-existent channel\n");
return;
}
/* Reset channel */
mhu_db_mbox_clear_irq(chan);
devm_kfree(mbox->dev, chan->con_priv);
chan->con_priv = NULL;
}
static struct mbox_chan *mhu_db_mbox_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *spec)
{
struct arm_mhu *mhu = dev_get_drvdata(mbox->dev);
struct mhu_db_channel *chan_info;
struct mbox_chan *chan;
unsigned int pchan = spec->args[0];
unsigned int doorbell = spec->args[1];
int i;
/* Bounds checking */
if (pchan >= MHU_CHANS || doorbell >= MHU_NUM_DOORBELLS) {
dev_err(mbox->dev,
"Invalid channel requested pchan: %d doorbell: %d\n",
pchan, doorbell);
return ERR_PTR(-EINVAL);
}
/* Is requested channel free? */
chan = mhu_db_mbox_to_channel(mbox, pchan, doorbell);
if (chan) {
dev_err(mbox->dev, "Channel in use: pchan: %d doorbell: %d\n",
pchan, doorbell);
return ERR_PTR(-EBUSY);
}
/* Find the first free slot */
for (i = 0; i < mbox->num_chans; i++)
if (!mbox->chans[i].con_priv)
break;
if (mbox->num_chans == i) {
dev_err(mbox->dev, "No free channels left\n");
return ERR_PTR(-EBUSY);
}
chan = &mbox->chans[i];
chan_info = devm_kzalloc(mbox->dev, sizeof(*chan_info), GFP_KERNEL);
if (!chan_info)
return ERR_PTR(-ENOMEM);
chan_info->mhu = mhu;
chan_info->pchan = pchan;
chan_info->doorbell = doorbell;
chan->con_priv = chan_info;
dev_dbg(mbox->dev, "mbox: created channel phys: %d doorbell: %d\n",
pchan, doorbell);
return chan;
}
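/*
* The two specifier cells are the physical channel (0 = low priority,
* 1 = high priority, 2 = secure, following the mhu_reg[] ordering in
* probe) and the doorbell number within it. For example (hypothetical
* phandle), mboxes = <&mhu 1 7>; requests doorbell 7 on the
* high-priority channel.
*/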
static const struct mbox_chan_ops mhu_db_ops = {
.send_data = mhu_db_send_data,
.startup = mhu_db_startup,
.shutdown = mhu_db_shutdown,
.last_tx_done = mhu_db_last_tx_done,
};
static int mhu_db_probe(struct amba_device *adev, const struct amba_id *id)
{
u32 cell_count;
int i, err, max_chans;
struct arm_mhu *mhu;
struct mbox_chan *chans;
struct device *dev = &adev->dev;
struct device_node *np = dev->of_node;
int mhu_reg[MHU_CHANS] = {
MHU_LP_OFFSET, MHU_HP_OFFSET, MHU_SEC_OFFSET,
};
if (!of_device_is_compatible(np, "arm,mhu-doorbell"))
return -ENODEV;
err = of_property_read_u32(np, "#mbox-cells", &cell_count);
if (err) {
dev_err(dev, "failed to read #mbox-cells in '%pOF'\n", np);
return err;
}
if (cell_count == 2) {
max_chans = MHU_CHAN_MAX;
} else {
dev_err(dev, "incorrect value of #mbox-cells in '%pOF'\n", np);
return -EINVAL;
}
mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
if (!mhu)
return -ENOMEM;
mhu->base = devm_ioremap_resource(dev, &adev->res);
if (IS_ERR(mhu->base))
return PTR_ERR(mhu->base);
chans = devm_kcalloc(dev, max_chans, sizeof(*chans), GFP_KERNEL);
if (!chans)
return -ENOMEM;
mhu->dev = dev;
mhu->mbox.dev = dev;
mhu->mbox.chans = chans;
mhu->mbox.num_chans = max_chans;
mhu->mbox.txdone_irq = false;
mhu->mbox.txdone_poll = true;
mhu->mbox.txpoll_period = 1;
mhu->mbox.of_xlate = mhu_db_mbox_xlate;
amba_set_drvdata(adev, mhu);
mhu->mbox.ops = &mhu_db_ops;
err = devm_mbox_controller_register(dev, &mhu->mbox);
if (err) {
dev_err(dev, "Failed to register mailboxes %d\n", err);
return err;
}
for (i = 0; i < MHU_CHANS; i++) {
int irq = mhu->mlink[i].irq = adev->irq[i];
if (irq <= 0) {
dev_dbg(dev, "No IRQ found for Channel %d\n", i);
continue;
}
mhu->mlink[i].rx_reg = mhu->base + mhu_reg[i];
mhu->mlink[i].tx_reg = mhu->mlink[i].rx_reg + TX_REG_OFFSET;
err = devm_request_threaded_irq(dev, irq, NULL,
mhu_db_mbox_rx_handler,
IRQF_ONESHOT, "mhu_db_link", mhu);
if (err) {
dev_err(dev, "Can't claim IRQ %d\n", irq);
mbox_controller_unregister(&mhu->mbox);
return err;
}
}
dev_info(dev, "ARM MHU Doorbell mailbox registered\n");
return 0;
}
static struct amba_id mhu_ids[] = {
{
.id = 0x1bb098,
.mask = 0xffffff,
},
{ 0, 0 },
};
MODULE_DEVICE_TABLE(amba, mhu_ids);
static struct amba_driver arm_mhu_db_driver = {
.drv = {
.name = "mhu-doorbell",
},
.id_table = mhu_ids,
.probe = mhu_db_probe,
};
module_amba_driver(arm_mhu_db_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ARM MHU Doorbell Driver");
MODULE_AUTHOR("Sudeep Holla <[email protected]>");
|
linux-master
|
drivers/mailbox/arm_mhu_db.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/mailbox/qcom-ipcc.h>
/* IPCC Register offsets */
#define IPCC_REG_SEND_ID 0x0c
#define IPCC_REG_RECV_ID 0x10
#define IPCC_REG_RECV_SIGNAL_ENABLE 0x14
#define IPCC_REG_RECV_SIGNAL_DISABLE 0x18
#define IPCC_REG_RECV_SIGNAL_CLEAR 0x1c
#define IPCC_REG_CLIENT_CLEAR 0x38
#define IPCC_SIGNAL_ID_MASK GENMASK(15, 0)
#define IPCC_CLIENT_ID_MASK GENMASK(31, 16)
#define IPCC_NO_PENDING_IRQ GENMASK(31, 0)
/**
* struct qcom_ipcc_chan_info - Per-mailbox-channel info
* @client_id: The client-id to which the interrupt has to be triggered
* @signal_id: The signal-id to which the interrupt has to be triggered
*/
struct qcom_ipcc_chan_info {
u16 client_id;
u16 signal_id;
};
/**
* struct qcom_ipcc - Holder for the mailbox driver
* @dev: Device associated with this instance
* @base: Base address of the IPCC frame associated to APSS
* @irq_domain: The irq_domain associated with this instance
* @chans: The mailbox channels array
* @mchan: The per-mailbox channel info array
* @mbox: The mailbox controller
* @num_chans: Number of @chans elements
* @irq: Summary irq
*/
struct qcom_ipcc {
struct device *dev;
void __iomem *base;
struct irq_domain *irq_domain;
struct mbox_chan *chans;
struct qcom_ipcc_chan_info *mchan;
struct mbox_controller mbox;
int num_chans;
int irq;
};
static inline struct qcom_ipcc *to_qcom_ipcc(struct mbox_controller *mbox)
{
return container_of(mbox, struct qcom_ipcc, mbox);
}
static inline u32 qcom_ipcc_get_hwirq(u16 client_id, u16 signal_id)
{
return FIELD_PREP(IPCC_CLIENT_ID_MASK, client_id) |
FIELD_PREP(IPCC_SIGNAL_ID_MASK, signal_id);
}
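/*
* For example, client_id 0x1 and signal_id 0x2 yield the hwirq value
* 0x00010002: the client id occupies bits [31:16] and the signal id
* bits [15:0], mirroring the layout of IPCC_REG_RECV_ID.
*/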
static irqreturn_t qcom_ipcc_irq_fn(int irq, void *data)
{
struct qcom_ipcc *ipcc = data;
u32 hwirq;
int virq;
for (;;) {
hwirq = readl(ipcc->base + IPCC_REG_RECV_ID);
if (hwirq == IPCC_NO_PENDING_IRQ)
break;
virq = irq_find_mapping(ipcc->irq_domain, hwirq);
writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_CLEAR);
generic_handle_irq(virq);
}
return IRQ_HANDLED;
}
static void qcom_ipcc_mask_irq(struct irq_data *irqd)
{
struct qcom_ipcc *ipcc = irq_data_get_irq_chip_data(irqd);
irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_DISABLE);
}
static void qcom_ipcc_unmask_irq(struct irq_data *irqd)
{
struct qcom_ipcc *ipcc = irq_data_get_irq_chip_data(irqd);
irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_ENABLE);
}
static struct irq_chip qcom_ipcc_irq_chip = {
.name = "ipcc",
.irq_mask = qcom_ipcc_mask_irq,
.irq_unmask = qcom_ipcc_unmask_irq,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
static int qcom_ipcc_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
struct qcom_ipcc *ipcc = d->host_data;
irq_set_chip_and_handler(irq, &qcom_ipcc_irq_chip, handle_level_irq);
irq_set_chip_data(irq, ipcc);
irq_set_noprobe(irq);
return 0;
}
static int qcom_ipcc_domain_xlate(struct irq_domain *d,
struct device_node *node, const u32 *intspec,
unsigned int intsize,
unsigned long *out_hwirq,
unsigned int *out_type)
{
if (intsize != 3)
return -EINVAL;
*out_hwirq = qcom_ipcc_get_hwirq(intspec[0], intspec[1]);
*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
return 0;
}
static const struct irq_domain_ops qcom_ipcc_irq_ops = {
.map = qcom_ipcc_domain_map,
.xlate = qcom_ipcc_domain_xlate,
};
static int qcom_ipcc_mbox_send_data(struct mbox_chan *chan, void *data)
{
struct qcom_ipcc *ipcc = to_qcom_ipcc(chan->mbox);
struct qcom_ipcc_chan_info *mchan = chan->con_priv;
u32 hwirq;
hwirq = qcom_ipcc_get_hwirq(mchan->client_id, mchan->signal_id);
writel(hwirq, ipcc->base + IPCC_REG_SEND_ID);
return 0;
}
static void qcom_ipcc_mbox_shutdown(struct mbox_chan *chan)
{
chan->con_priv = NULL;
}
static struct mbox_chan *qcom_ipcc_mbox_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *ph)
{
struct qcom_ipcc *ipcc = to_qcom_ipcc(mbox);
struct qcom_ipcc_chan_info *mchan;
struct mbox_chan *chan;
struct device *dev;
int chan_id;
dev = ipcc->dev;
if (ph->args_count != 2)
return ERR_PTR(-EINVAL);
for (chan_id = 0; chan_id < mbox->num_chans; chan_id++) {
chan = &ipcc->chans[chan_id];
mchan = chan->con_priv;
if (!mchan)
break;
else if (mchan->client_id == ph->args[0] &&
mchan->signal_id == ph->args[1])
return ERR_PTR(-EBUSY);
}
if (chan_id >= mbox->num_chans)
return ERR_PTR(-EBUSY);
mchan = devm_kzalloc(dev, sizeof(*mchan), GFP_KERNEL);
if (!mchan)
return ERR_PTR(-ENOMEM);
mchan->client_id = ph->args[0];
mchan->signal_id = ph->args[1];
chan->con_priv = mchan;
return chan;
}
static const struct mbox_chan_ops ipcc_mbox_chan_ops = {
.send_data = qcom_ipcc_mbox_send_data,
.shutdown = qcom_ipcc_mbox_shutdown,
};
static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc,
struct device_node *controller_dn)
{
struct of_phandle_args curr_ph;
struct device_node *client_dn;
struct mbox_controller *mbox;
struct device *dev = ipcc->dev;
int i, j, ret;
/*
* Find out the number of clients interested in this mailbox
* and create channels accordingly.
*/
ipcc->num_chans = 0;
for_each_node_with_property(client_dn, "mboxes") {
if (!of_device_is_available(client_dn))
continue;
i = of_count_phandle_with_args(client_dn,
"mboxes", "#mbox-cells");
for (j = 0; j < i; j++) {
ret = of_parse_phandle_with_args(client_dn, "mboxes",
"#mbox-cells", j, &curr_ph);
of_node_put(curr_ph.np);
if (!ret && curr_ph.np == controller_dn)
ipcc->num_chans++;
}
}
/* If no clients are found, skip registering as a mbox controller */
if (!ipcc->num_chans)
return 0;
ipcc->chans = devm_kcalloc(dev, ipcc->num_chans,
sizeof(struct mbox_chan), GFP_KERNEL);
if (!ipcc->chans)
return -ENOMEM;
mbox = &ipcc->mbox;
mbox->dev = dev;
mbox->num_chans = ipcc->num_chans;
mbox->chans = ipcc->chans;
mbox->ops = &ipcc_mbox_chan_ops;
mbox->of_xlate = qcom_ipcc_mbox_xlate;
mbox->txdone_irq = false;
mbox->txdone_poll = false;
return devm_mbox_controller_register(dev, mbox);
}
static int qcom_ipcc_pm_resume(struct device *dev)
{
struct qcom_ipcc *ipcc = dev_get_drvdata(dev);
u32 hwirq;
int virq;
hwirq = readl(ipcc->base + IPCC_REG_RECV_ID);
if (hwirq == IPCC_NO_PENDING_IRQ)
return 0;
virq = irq_find_mapping(ipcc->irq_domain, hwirq);
dev_dbg(dev, "virq: %d triggered client-id: %ld; signal-id: %ld\n", virq,
FIELD_GET(IPCC_CLIENT_ID_MASK, hwirq), FIELD_GET(IPCC_SIGNAL_ID_MASK, hwirq));
return 0;
}
static int qcom_ipcc_probe(struct platform_device *pdev)
{
struct qcom_ipcc *ipcc;
static int id;
char *name;
int ret;
ipcc = devm_kzalloc(&pdev->dev, sizeof(*ipcc), GFP_KERNEL);
if (!ipcc)
return -ENOMEM;
ipcc->dev = &pdev->dev;
ipcc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ipcc->base))
return PTR_ERR(ipcc->base);
ipcc->irq = platform_get_irq(pdev, 0);
if (ipcc->irq < 0)
return ipcc->irq;
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "ipcc_%d", id++);
if (!name)
return -ENOMEM;
ipcc->irq_domain = irq_domain_add_tree(pdev->dev.of_node,
&qcom_ipcc_irq_ops, ipcc);
if (!ipcc->irq_domain)
return -ENOMEM;
ret = qcom_ipcc_setup_mbox(ipcc, pdev->dev.of_node);
if (ret)
goto err_mbox;
ret = devm_request_irq(&pdev->dev, ipcc->irq, qcom_ipcc_irq_fn,
IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND |
IRQF_NO_THREAD, name, ipcc);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register the irq: %d\n", ret);
goto err_req_irq;
}
platform_set_drvdata(pdev, ipcc);
return 0;
err_req_irq:
if (ipcc->num_chans)
mbox_controller_unregister(&ipcc->mbox);
err_mbox:
irq_domain_remove(ipcc->irq_domain);
return ret;
}
static int qcom_ipcc_remove(struct platform_device *pdev)
{
struct qcom_ipcc *ipcc = platform_get_drvdata(pdev);
disable_irq_wake(ipcc->irq);
irq_domain_remove(ipcc->irq_domain);
return 0;
}
static const struct of_device_id qcom_ipcc_of_match[] = {
{ .compatible = "qcom,ipcc"},
{}
};
MODULE_DEVICE_TABLE(of, qcom_ipcc_of_match);
static const struct dev_pm_ops qcom_ipcc_dev_pm_ops = {
NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, qcom_ipcc_pm_resume)
};
static struct platform_driver qcom_ipcc_driver = {
.probe = qcom_ipcc_probe,
.remove = qcom_ipcc_remove,
.driver = {
.name = "qcom-ipcc",
.of_match_table = qcom_ipcc_of_match,
.suppress_bind_attrs = true,
.pm = pm_sleep_ptr(&qcom_ipcc_dev_pm_ops),
},
};
static int __init qcom_ipcc_init(void)
{
return platform_driver_register(&qcom_ipcc_driver);
}
arch_initcall(qcom_ipcc_init);
MODULE_AUTHOR("Venkata Narendra Kumar Gutta <[email protected]>");
MODULE_AUTHOR("Manivannan Sadhasivam <[email protected]>");
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPCC driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/mailbox/qcom-ipcc.c
|
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018 HiSilicon Limited.
// Copyright (c) 2017-2018 Linaro Limited.
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "mailbox.h"
#define MBOX_CHAN_MAX 32
#define MBOX_RX 0x0
#define MBOX_TX 0x1
#define MBOX_BASE(mbox, ch) ((mbox)->base + ((ch) * 0x40))
#define MBOX_SRC_REG 0x00
#define MBOX_DST_REG 0x04
#define MBOX_DCLR_REG 0x08
#define MBOX_DSTAT_REG 0x0c
#define MBOX_MODE_REG 0x10
#define MBOX_IMASK_REG 0x14
#define MBOX_ICLR_REG 0x18
#define MBOX_SEND_REG 0x1c
#define MBOX_DATA_REG 0x20
#define MBOX_IPC_LOCK_REG 0xa00
#define MBOX_IPC_UNLOCK 0x1acce551
#define MBOX_AUTOMATIC_ACK 1
#define MBOX_STATE_IDLE BIT(4)
#define MBOX_STATE_READY BIT(5)
#define MBOX_STATE_ACK BIT(7)
#define MBOX_MSG_LEN 8
/**
* struct hi3660_chan_info - Hi3660 mailbox channel information
* @dst_irq: Interrupt vector for remote processor
* @ack_irq: Interrupt vector for local processor
*
* A channel can be used for TX or RX. It can trigger a remote
* processor interrupt to notify the remote processor, and it can
* receive an interrupt when an incoming message arrives.
*/
struct hi3660_chan_info {
unsigned int dst_irq;
unsigned int ack_irq;
};
/**
* struct hi3660_mbox - Hi3660 mailbox controller data
* @dev: Device to which it is attached
* @base: Base address of the register mapping region
* @chan: Representation of channels in mailbox controller
* @mchan: Representation of channel info
* @controller: Representation of a communication channel controller
*
* The mailbox controller includes 32 channels and can allocate a
* channel for message transfer.
*/
struct hi3660_mbox {
struct device *dev;
void __iomem *base;
struct mbox_chan chan[MBOX_CHAN_MAX];
struct hi3660_chan_info mchan[MBOX_CHAN_MAX];
struct mbox_controller controller;
};
static struct hi3660_mbox *to_hi3660_mbox(struct mbox_controller *mbox)
{
return container_of(mbox, struct hi3660_mbox, controller);
}
static int hi3660_mbox_check_state(struct mbox_chan *chan)
{
unsigned long ch = (unsigned long)chan->con_priv;
struct hi3660_mbox *mbox = to_hi3660_mbox(chan->mbox);
struct hi3660_chan_info *mchan = &mbox->mchan[ch];
void __iomem *base = MBOX_BASE(mbox, ch);
unsigned long val;
unsigned int ret;
/* Mailbox is ready to use */
if (readl(base + MBOX_MODE_REG) & MBOX_STATE_READY)
return 0;
/* Wait for acknowledge from remote */
ret = readx_poll_timeout_atomic(readl, base + MBOX_MODE_REG,
val, (val & MBOX_STATE_ACK), 1000, 300000);
if (ret) {
dev_err(mbox->dev, "%s: timeout for receiving ack\n", __func__);
return ret;
}
/* clear ack state, mailbox will get back to ready state */
writel(BIT(mchan->ack_irq), base + MBOX_ICLR_REG);
return 0;
}
static int hi3660_mbox_unlock(struct mbox_chan *chan)
{
struct hi3660_mbox *mbox = to_hi3660_mbox(chan->mbox);
unsigned int val, retry = 3;
do {
writel(MBOX_IPC_UNLOCK, mbox->base + MBOX_IPC_LOCK_REG);
val = readl(mbox->base + MBOX_IPC_LOCK_REG);
if (!val)
break;
udelay(10);
} while (retry--);
if (val)
dev_err(mbox->dev, "%s: failed to unlock mailbox\n", __func__);
return (!val) ? 0 : -ETIMEDOUT;
}
static int hi3660_mbox_acquire_channel(struct mbox_chan *chan)
{
unsigned long ch = (unsigned long)chan->con_priv;
struct hi3660_mbox *mbox = to_hi3660_mbox(chan->mbox);
struct hi3660_chan_info *mchan = &mbox->mchan[ch];
void __iomem *base = MBOX_BASE(mbox, ch);
unsigned int val, retry;
for (retry = 10; retry; retry--) {
/* Check if channel is in idle state */
if (readl(base + MBOX_MODE_REG) & MBOX_STATE_IDLE) {
writel(BIT(mchan->ack_irq), base + MBOX_SRC_REG);
/* Check ack bit has been set successfully */
val = readl(base + MBOX_SRC_REG);
if (val & BIT(mchan->ack_irq))
break;
}
}
if (!retry)
dev_err(mbox->dev, "%s: failed to acquire channel\n", __func__);
return retry ? 0 : -ETIMEDOUT;
}
static int hi3660_mbox_startup(struct mbox_chan *chan)
{
int ret;
ret = hi3660_mbox_unlock(chan);
if (ret)
return ret;
ret = hi3660_mbox_acquire_channel(chan);
if (ret)
return ret;
return 0;
}
static int hi3660_mbox_send_data(struct mbox_chan *chan, void *msg)
{
unsigned long ch = (unsigned long)chan->con_priv;
struct hi3660_mbox *mbox = to_hi3660_mbox(chan->mbox);
struct hi3660_chan_info *mchan = &mbox->mchan[ch];
void __iomem *base = MBOX_BASE(mbox, ch);
u32 *buf = msg;
unsigned int i;
int ret;
ret = hi3660_mbox_check_state(chan);
if (ret)
return ret;
/* Clear mask for destination interrupt */
writel_relaxed(~BIT(mchan->dst_irq), base + MBOX_IMASK_REG);
/* Config destination for interrupt vector */
writel_relaxed(BIT(mchan->dst_irq), base + MBOX_DST_REG);
/* Automatic acknowledge mode */
writel_relaxed(MBOX_AUTOMATIC_ACK, base + MBOX_MODE_REG);
/* Fill message data */
for (i = 0; i < MBOX_MSG_LEN; i++)
writel_relaxed(buf[i], base + MBOX_DATA_REG + i * 4);
/* Trigger the data transfer */
writel(BIT(mchan->ack_irq), base + MBOX_SEND_REG);
return 0;
}
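/*
* The payload handed to mbox_send_message() for this controller is a
* fixed array of MBOX_MSG_LEN (8) u32 words, e.g. (hypothetical
* values):
*
*	u32 msg[MBOX_MSG_LEN] = { cmd, arg0, arg1, 0, 0, 0, 0, 0 };
*	mbox_send_message(chan, msg);
*/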
static const struct mbox_chan_ops hi3660_mbox_ops = {
.startup = hi3660_mbox_startup,
.send_data = hi3660_mbox_send_data,
};
static struct mbox_chan *hi3660_mbox_xlate(struct mbox_controller *controller,
const struct of_phandle_args *spec)
{
struct hi3660_mbox *mbox = to_hi3660_mbox(controller);
struct hi3660_chan_info *mchan;
unsigned int ch = spec->args[0];
if (ch >= MBOX_CHAN_MAX) {
dev_err(mbox->dev, "Invalid channel idx %d\n", ch);
return ERR_PTR(-EINVAL);
}
mchan = &mbox->mchan[ch];
mchan->dst_irq = spec->args[1];
mchan->ack_irq = spec->args[2];
return &mbox->chan[ch];
}
static const struct of_device_id hi3660_mbox_of_match[] = {
{ .compatible = "hisilicon,hi3660-mbox", },
{},
};
MODULE_DEVICE_TABLE(of, hi3660_mbox_of_match);
static int hi3660_mbox_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct hi3660_mbox *mbox;
struct mbox_chan *chan;
unsigned long ch;
int err;
mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
if (!mbox)
return -ENOMEM;
mbox->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->base))
return PTR_ERR(mbox->base);
mbox->dev = dev;
mbox->controller.dev = dev;
mbox->controller.chans = mbox->chan;
mbox->controller.num_chans = MBOX_CHAN_MAX;
mbox->controller.ops = &hi3660_mbox_ops;
mbox->controller.of_xlate = hi3660_mbox_xlate;
/* Initialize mailbox channel data */
chan = mbox->chan;
for (ch = 0; ch < MBOX_CHAN_MAX; ch++)
chan[ch].con_priv = (void *)ch;
err = devm_mbox_controller_register(dev, &mbox->controller);
if (err) {
dev_err(dev, "Failed to register mailbox %d\n", err);
return err;
}
platform_set_drvdata(pdev, mbox);
dev_info(dev, "Mailbox enabled\n");
return 0;
}
static struct platform_driver hi3660_mbox_driver = {
.probe = hi3660_mbox_probe,
.driver = {
.name = "hi3660-mbox",
.of_match_table = hi3660_mbox_of_match,
},
};
static int __init hi3660_mbox_init(void)
{
return platform_driver_register(&hi3660_mbox_driver);
}
core_initcall(hi3660_mbox_init);
static void __exit hi3660_mbox_exit(void)
{
platform_driver_unregister(&hi3660_mbox_driver);
}
module_exit(hi3660_mbox_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hisilicon Hi3660 Mailbox Controller");
MODULE_AUTHOR("Leo Yan <[email protected]>");
|
linux-master
|
drivers/mailbox/hi3660-mailbox.c
|
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>
#include <linux/of.h>
#define CMDQ_OP_CODE_MASK (0xff << CMDQ_OP_CODE_SHIFT)
#define CMDQ_NUM_CMD(t) (t->cmd_buf_size / CMDQ_INST_SIZE)
#define CMDQ_GCE_NUM_MAX (2)
#define CMDQ_CURR_IRQ_STATUS 0x10
#define CMDQ_SYNC_TOKEN_UPDATE 0x68
#define CMDQ_THR_SLOT_CYCLES 0x30
#define CMDQ_THR_BASE 0x100
#define CMDQ_THR_SIZE 0x80
#define CMDQ_THR_WARM_RESET 0x00
#define CMDQ_THR_ENABLE_TASK 0x04
#define CMDQ_THR_SUSPEND_TASK 0x08
#define CMDQ_THR_CURR_STATUS 0x0c
#define CMDQ_THR_IRQ_STATUS 0x10
#define CMDQ_THR_IRQ_ENABLE 0x14
#define CMDQ_THR_CURR_ADDR 0x20
#define CMDQ_THR_END_ADDR 0x24
#define CMDQ_THR_WAIT_TOKEN 0x30
#define CMDQ_THR_PRIORITY 0x40
#define GCE_GCTL_VALUE 0x48
#define GCE_CTRL_BY_SW GENMASK(2, 0)
#define GCE_DDR_EN GENMASK(18, 16)
#define CMDQ_THR_ACTIVE_SLOT_CYCLES 0x3200
#define CMDQ_THR_ENABLED 0x1
#define CMDQ_THR_DISABLED 0x0
#define CMDQ_THR_SUSPEND 0x1
#define CMDQ_THR_RESUME 0x0
#define CMDQ_THR_STATUS_SUSPENDED BIT(1)
#define CMDQ_THR_DO_WARM_RESET BIT(0)
#define CMDQ_THR_IRQ_DONE 0x1
#define CMDQ_THR_IRQ_ERROR 0x12
#define CMDQ_THR_IRQ_EN (CMDQ_THR_IRQ_ERROR | CMDQ_THR_IRQ_DONE)
#define CMDQ_THR_IS_WAITING BIT(31)
#define CMDQ_JUMP_BY_OFFSET 0x10000000
#define CMDQ_JUMP_BY_PA 0x10000001
struct cmdq_thread {
struct mbox_chan *chan;
void __iomem *base;
struct list_head task_busy_list;
u32 priority;
};
struct cmdq_task {
struct cmdq *cmdq;
struct list_head list_entry;
dma_addr_t pa_base;
struct cmdq_thread *thread;
struct cmdq_pkt *pkt; /* the packet sent from mailbox client */
};
struct cmdq {
struct mbox_controller mbox;
void __iomem *base;
int irq;
u32 irq_mask;
const struct gce_plat *pdata;
struct cmdq_thread *thread;
struct clk_bulk_data clocks[CMDQ_GCE_NUM_MAX];
bool suspended;
};
struct gce_plat {
u32 thread_nr;
u8 shift;
bool control_by_sw;
bool sw_ddr_en;
u32 gce_num;
};
static void cmdq_sw_ddr_enable(struct cmdq *cmdq, bool enable)
{
WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
if (enable)
writel(GCE_DDR_EN | GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
else
writel(GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
}
u8 cmdq_get_shift_pa(struct mbox_chan *chan)
{
struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);
return cmdq->pdata->shift;
}
EXPORT_SYMBOL(cmdq_get_shift_pa);
static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
{
u32 status;
writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);
/* If already disabled, treat the suspend as successful. */
if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
return 0;
if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
status, status & CMDQ_THR_STATUS_SUSPENDED, 0, 10)) {
dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
(u32)(thread->base - cmdq->base));
return -EFAULT;
}
return 0;
}
static void cmdq_thread_resume(struct cmdq_thread *thread)
{
writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
}
static void cmdq_init(struct cmdq *cmdq)
{
int i;
u32 gctl_regval = 0;
WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
if (cmdq->pdata->control_by_sw)
gctl_regval = GCE_CTRL_BY_SW;
if (cmdq->pdata->sw_ddr_en)
gctl_regval |= GCE_DDR_EN;
if (gctl_regval)
writel(gctl_regval, cmdq->base + GCE_GCTL_VALUE);
writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
for (i = 0; i <= CMDQ_MAX_EVENT; i++)
writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
}
static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
{
u32 warm_reset;
writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
warm_reset, !(warm_reset & CMDQ_THR_DO_WARM_RESET),
0, 10)) {
dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
(u32)(thread->base - cmdq->base));
return -EFAULT;
}
return 0;
}
static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
{
cmdq_thread_reset(cmdq, thread);
writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
}
/* notify GCE to re-fetch commands by setting GCE thread PC */
static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread)
{
writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
thread->base + CMDQ_THR_CURR_ADDR);
}
static void cmdq_task_insert_into_thread(struct cmdq_task *task)
{
struct device *dev = task->cmdq->mbox.dev;
struct cmdq_thread *thread = task->thread;
struct cmdq_task *prev_task = list_last_entry(
&thread->task_busy_list, typeof(*task), list_entry);
u64 *prev_task_base = prev_task->pkt->va_base;
/* let previous task jump to this task */
dma_sync_single_for_cpu(dev, prev_task->pa_base,
prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
(u64)CMDQ_JUMP_BY_PA << 32 |
(task->pa_base >> task->cmdq->pdata->shift);
dma_sync_single_for_device(dev, prev_task->pa_base,
prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
cmdq_thread_invalidate_fetched_data(thread);
}
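/*
 * A minimal sketch (not part of the upstream driver), using only the
 * constants defined above, of the 64-bit jump instruction assembled inline
 * in cmdq_task_insert_into_thread(): CMDQ_JUMP_BY_PA goes in the high word
 * and the shifted destination address in the low word.
 */
static inline u64 cmdq_build_jump_by_pa(dma_addr_t pa, u8 shift)
{
/* high 32 bits: jump-by-PA opcode; low 32 bits: physical address >> shift */
return (u64)CMDQ_JUMP_BY_PA << 32 | (pa >> shift);
}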
static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
{
return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
}
static void cmdq_task_exec_done(struct cmdq_task *task, int sta)
{
struct cmdq_cb_data data;
data.sta = sta;
data.pkt = task->pkt;
mbox_chan_received_data(task->thread->chan, &data);
list_del(&task->list_entry);
}
static void cmdq_task_handle_error(struct cmdq_task *task)
{
struct cmdq_thread *thread = task->thread;
struct cmdq_task *next_task;
struct cmdq *cmdq = task->cmdq;
dev_err(cmdq->mbox.dev, "task 0x%p error\n", task);
WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
next_task = list_first_entry_or_null(&thread->task_busy_list,
struct cmdq_task, list_entry);
if (next_task)
writel(next_task->pa_base >> cmdq->pdata->shift,
thread->base + CMDQ_THR_CURR_ADDR);
cmdq_thread_resume(thread);
}
static void cmdq_thread_irq_handler(struct cmdq *cmdq,
struct cmdq_thread *thread)
{
struct cmdq_task *task, *tmp, *curr_task = NULL;
u32 curr_pa, irq_flag, task_end_pa;
bool err;
irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);
/*
* When the ISR calls this function, another CPU core could run
* "release task" right before we acquire the spin lock, and thus
* reset / disable this GCE thread, so we need to check the enable
* bit of this GCE thread.
*/
if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
return;
if (irq_flag & CMDQ_THR_IRQ_ERROR)
err = true;
else if (irq_flag & CMDQ_THR_IRQ_DONE)
err = false;
else
return;
curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->pdata->shift;
list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
list_entry) {
task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
if (curr_pa >= task->pa_base && curr_pa < task_end_pa)
curr_task = task;
if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
cmdq_task_exec_done(task, 0);
kfree(task);
} else if (err) {
cmdq_task_exec_done(task, -ENOEXEC);
cmdq_task_handle_error(curr_task);
kfree(task);
}
if (curr_task)
break;
}
if (list_empty(&thread->task_busy_list)) {
cmdq_thread_disable(cmdq, thread);
clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
}
}
static irqreturn_t cmdq_irq_handler(int irq, void *dev)
{
struct cmdq *cmdq = dev;
unsigned long irq_status, flags = 0L;
int bit;
irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
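/*
 * The global status register is active-low: a thread with a pending
 * interrupt has its bit cleared, so an all-ones (masked) value means none
 * of our threads raised this interrupt, and the clear bits walked below
 * identify the threads that need servicing.
 */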
if (!(irq_status ^ cmdq->irq_mask))
return IRQ_NONE;
for_each_clear_bit(bit, &irq_status, cmdq->pdata->thread_nr) {
struct cmdq_thread *thread = &cmdq->thread[bit];
spin_lock_irqsave(&thread->chan->lock, flags);
cmdq_thread_irq_handler(cmdq, thread);
spin_unlock_irqrestore(&thread->chan->lock, flags);
}
return IRQ_HANDLED;
}
static int cmdq_suspend(struct device *dev)
{
struct cmdq *cmdq = dev_get_drvdata(dev);
struct cmdq_thread *thread;
int i;
bool task_running = false;
cmdq->suspended = true;
for (i = 0; i < cmdq->pdata->thread_nr; i++) {
thread = &cmdq->thread[i];
if (!list_empty(&thread->task_busy_list)) {
task_running = true;
break;
}
}
if (task_running)
dev_warn(dev, "exist running task(s) in suspend\n");
if (cmdq->pdata->sw_ddr_en)
cmdq_sw_ddr_enable(cmdq, false);
clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
return 0;
}
static int cmdq_resume(struct device *dev)
{
struct cmdq *cmdq = dev_get_drvdata(dev);
WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));
cmdq->suspended = false;
if (cmdq->pdata->sw_ddr_en)
cmdq_sw_ddr_enable(cmdq, true);
return 0;
}
static int cmdq_remove(struct platform_device *pdev)
{
struct cmdq *cmdq = platform_get_drvdata(pdev);
if (cmdq->pdata->sw_ddr_en)
cmdq_sw_ddr_enable(cmdq, false);
clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
return 0;
}
static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
{
struct cmdq_pkt *pkt = (struct cmdq_pkt *)data;
struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
struct cmdq_task *task;
unsigned long curr_pa, end_pa;
/* Client should not flush new tasks if suspended. */
WARN_ON(cmdq->suspended);
task = kzalloc(sizeof(*task), GFP_ATOMIC);
if (!task)
return -ENOMEM;
task->cmdq = cmdq;
INIT_LIST_HEAD(&task->list_entry);
task->pa_base = pkt->pa_base;
task->thread = thread;
task->pkt = pkt;
if (list_empty(&thread->task_busy_list)) {
WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
/*
* The thread reset clears the thread-related registers to 0,
* including pc, end, priority, irq, suspend and enable. Thus,
* writing CMDQ_THR_ENABLED to CMDQ_THR_ENABLE_TASK enables the
* thread and starts it running.
*/
WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);
writel(task->pa_base >> cmdq->pdata->shift,
thread->base + CMDQ_THR_CURR_ADDR);
writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
thread->base + CMDQ_THR_END_ADDR);
writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
} else {
WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) <<
cmdq->pdata->shift;
end_pa = readl(thread->base + CMDQ_THR_END_ADDR) <<
cmdq->pdata->shift;
/* check boundary */
if (curr_pa == end_pa - CMDQ_INST_SIZE ||
curr_pa == end_pa) {
/* set to this task directly */
writel(task->pa_base >> cmdq->pdata->shift,
thread->base + CMDQ_THR_CURR_ADDR);
} else {
cmdq_task_insert_into_thread(task);
smp_mb(); /* modify jump before enable thread */
}
writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
thread->base + CMDQ_THR_END_ADDR);
cmdq_thread_resume(thread);
}
list_move_tail(&task->list_entry, &thread->task_busy_list);
return 0;
}
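/*
 * Illustrative client-side sketch (not part of this driver), showing how a
 * mailbox client typically hands a prepared struct cmdq_pkt to this
 * controller through the generic mailbox API:
 *
 *	struct mbox_client cl = { .dev = dev, .tx_block = false };
 *	struct mbox_chan *chan = mbox_request_channel(&cl, 0);
 *
 *	if (!IS_ERR(chan))
 *		mbox_send_message(chan, pkt);   (pkt is a struct cmdq_pkt *)
 *
 * Completion or abort is reported back to the client through
 * mbox_chan_received_data() in cmdq_task_exec_done() above.
 */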
static int cmdq_mbox_startup(struct mbox_chan *chan)
{
return 0;
}
static void cmdq_mbox_shutdown(struct mbox_chan *chan)
{
struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
struct cmdq_task *task, *tmp;
unsigned long flags;
spin_lock_irqsave(&thread->chan->lock, flags);
if (list_empty(&thread->task_busy_list))
goto done;
WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
/* make sure executed tasks have success callback */
cmdq_thread_irq_handler(cmdq, thread);
if (list_empty(&thread->task_busy_list))
goto done;
list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
list_entry) {
cmdq_task_exec_done(task, -ECONNABORTED);
kfree(task);
}
cmdq_thread_disable(cmdq, thread);
clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
done:
/*
* An empty thread->task_busy_list means the thread is already disabled.
* cmdq_mbox_send_data() always resets the thread, which clears the disable
* and suspend state when the first packet is sent to the channel, so there
* is no need to do anything here; just unlock and leave.
*/
spin_unlock_irqrestore(&thread->chan->lock, flags);
}
static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
struct cmdq_cb_data data;
struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
struct cmdq_task *task, *tmp;
unsigned long flags;
u32 enable;
spin_lock_irqsave(&thread->chan->lock, flags);
if (list_empty(&thread->task_busy_list))
goto out;
WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
if (!cmdq_thread_is_in_wfe(thread))
goto wait;
list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
list_entry) {
data.sta = -ECONNABORTED;
data.pkt = task->pkt;
mbox_chan_received_data(task->thread->chan, &data);
list_del(&task->list_entry);
kfree(task);
}
cmdq_thread_resume(thread);
cmdq_thread_disable(cmdq, thread);
clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
out:
spin_unlock_irqrestore(&thread->chan->lock, flags);
return 0;
wait:
cmdq_thread_resume(thread);
spin_unlock_irqrestore(&thread->chan->lock, flags);
if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
enable, enable == 0, 1, timeout)) {
dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x done\n",
(u32)(thread->base - cmdq->base));
return -EFAULT;
}
return 0;
}
static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
.send_data = cmdq_mbox_send_data,
.startup = cmdq_mbox_startup,
.shutdown = cmdq_mbox_shutdown,
.flush = cmdq_mbox_flush,
};
static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *sp)
{
int ind = sp->args[0];
struct cmdq_thread *thread;
if (ind >= mbox->num_chans)
return ERR_PTR(-EINVAL);
thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
thread->priority = sp->args[1];
thread->chan = &mbox->chans[ind];
return &mbox->chans[ind];
}
static int cmdq_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct cmdq *cmdq;
int err, i;
struct device_node *phandle = dev->of_node;
struct device_node *node;
int alias_id = 0;
static const char * const clk_name = "gce";
static const char * const clk_names[] = { "gce0", "gce1" };
cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
if (!cmdq)
return -ENOMEM;
cmdq->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cmdq->base))
return PTR_ERR(cmdq->base);
cmdq->irq = platform_get_irq(pdev, 0);
if (cmdq->irq < 0)
return cmdq->irq;
cmdq->pdata = device_get_match_data(dev);
if (!cmdq->pdata) {
dev_err(dev, "failed to get match data\n");
return -EINVAL;
}
cmdq->irq_mask = GENMASK(cmdq->pdata->thread_nr - 1, 0);
dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
dev, cmdq->base, cmdq->irq);
if (cmdq->pdata->gce_num > 1) {
for_each_child_of_node(phandle->parent, node) {
alias_id = of_alias_get_id(node, clk_name);
if (alias_id >= 0 && alias_id < cmdq->pdata->gce_num) {
cmdq->clocks[alias_id].id = clk_names[alias_id];
cmdq->clocks[alias_id].clk = of_clk_get(node, 0);
if (IS_ERR(cmdq->clocks[alias_id].clk)) {
of_node_put(node);
return dev_err_probe(dev,
PTR_ERR(cmdq->clocks[alias_id].clk),
"failed to get gce clk: %d\n",
alias_id);
}
}
}
} else {
cmdq->clocks[alias_id].id = clk_name;
cmdq->clocks[alias_id].clk = devm_clk_get(&pdev->dev, clk_name);
if (IS_ERR(cmdq->clocks[alias_id].clk)) {
return dev_err_probe(dev, PTR_ERR(cmdq->clocks[alias_id].clk),
"failed to get gce clk\n");
}
}
cmdq->mbox.dev = dev;
cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr,
sizeof(*cmdq->mbox.chans), GFP_KERNEL);
if (!cmdq->mbox.chans)
return -ENOMEM;
cmdq->mbox.num_chans = cmdq->pdata->thread_nr;
cmdq->mbox.ops = &cmdq_mbox_chan_ops;
cmdq->mbox.of_xlate = cmdq_xlate;
/* make use of TXDONE_BY_ACK */
cmdq->mbox.txdone_irq = false;
cmdq->mbox.txdone_poll = false;
cmdq->thread = devm_kcalloc(dev, cmdq->pdata->thread_nr,
sizeof(*cmdq->thread), GFP_KERNEL);
if (!cmdq->thread)
return -ENOMEM;
for (i = 0; i < cmdq->pdata->thread_nr; i++) {
cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
CMDQ_THR_SIZE * i;
INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
}
err = devm_mbox_controller_register(dev, &cmdq->mbox);
if (err < 0) {
dev_err(dev, "failed to register mailbox: %d\n", err);
return err;
}
platform_set_drvdata(pdev, cmdq);
WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));
cmdq_init(cmdq);
err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
"mtk_cmdq", cmdq);
if (err < 0) {
dev_err(dev, "failed to register ISR (%d)\n", err);
return err;
}
return 0;
}
static const struct dev_pm_ops cmdq_pm_ops = {
.suspend = cmdq_suspend,
.resume = cmdq_resume,
};
static const struct gce_plat gce_plat_v2 = {
.thread_nr = 16,
.shift = 0,
.control_by_sw = false,
.gce_num = 1
};
static const struct gce_plat gce_plat_v3 = {
.thread_nr = 24,
.shift = 0,
.control_by_sw = false,
.gce_num = 1
};
static const struct gce_plat gce_plat_v4 = {
.thread_nr = 24,
.shift = 3,
.control_by_sw = false,
.gce_num = 1
};
static const struct gce_plat gce_plat_v5 = {
.thread_nr = 24,
.shift = 3,
.control_by_sw = true,
.gce_num = 1
};
static const struct gce_plat gce_plat_v6 = {
.thread_nr = 24,
.shift = 3,
.control_by_sw = true,
.gce_num = 2
};
static const struct gce_plat gce_plat_v7 = {
.thread_nr = 24,
.shift = 3,
.control_by_sw = true,
.sw_ddr_en = true,
.gce_num = 1
};
static const struct of_device_id cmdq_of_ids[] = {
{.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_v2},
{.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_v3},
{.compatible = "mediatek,mt8186-gce", .data = (void *)&gce_plat_v7},
{.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_v4},
{.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_v5},
{.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_v6},
{}
};
static struct platform_driver cmdq_drv = {
.probe = cmdq_probe,
.remove = cmdq_remove,
.driver = {
.name = "mtk_cmdq",
.pm = &cmdq_pm_ops,
.of_match_table = cmdq_of_ids,
}
};
static int __init cmdq_drv_init(void)
{
return platform_driver_register(&cmdq_drv);
}
static void __exit cmdq_drv_exit(void)
{
platform_driver_unregister(&cmdq_drv);
}
subsys_initcall(cmdq_drv_init);
module_exit(cmdq_drv_exit);
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/mailbox/mtk-cmdq-mailbox.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2012 Calxeda, Inc.
*/
#include <linux/types.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/pl320-ipc.h>
#define IPCMxSOURCE(m) ((m) * 0x40)
#define IPCMxDSET(m) (((m) * 0x40) + 0x004)
#define IPCMxDCLEAR(m) (((m) * 0x40) + 0x008)
#define IPCMxDSTATUS(m) (((m) * 0x40) + 0x00C)
#define IPCMxMODE(m) (((m) * 0x40) + 0x010)
#define IPCMxMSET(m) (((m) * 0x40) + 0x014)
#define IPCMxMCLEAR(m) (((m) * 0x40) + 0x018)
#define IPCMxMSTATUS(m) (((m) * 0x40) + 0x01C)
#define IPCMxSEND(m) (((m) * 0x40) + 0x020)
#define IPCMxDR(m, dr) (((m) * 0x40) + ((dr) * 4) + 0x024)
#define IPCMMIS(irq) (((irq) * 8) + 0x800)
#define IPCMRIS(irq) (((irq) * 8) + 0x804)
#define MBOX_MASK(n) (1 << (n))
#define IPC_TX_MBOX 1
#define IPC_RX_MBOX 2
#define CHAN_MASK(n) (1 << (n))
#define A9_SOURCE 1
#define M3_SOURCE 0
static void __iomem *ipc_base;
static int ipc_irq;
static DEFINE_MUTEX(ipc_m1_lock);
static DECLARE_COMPLETION(ipc_completion);
static ATOMIC_NOTIFIER_HEAD(ipc_notifier);
static inline void set_destination(int source, int mbox)
{
writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxDSET(mbox));
writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxMSET(mbox));
}
static inline void clear_destination(int source, int mbox)
{
writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxDCLEAR(mbox));
writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxMCLEAR(mbox));
}
static void __ipc_send(int mbox, u32 *data)
{
int i;
for (i = 0; i < 7; i++)
writel_relaxed(data[i], ipc_base + IPCMxDR(mbox, i));
writel_relaxed(0x1, ipc_base + IPCMxSEND(mbox));
}
static u32 __ipc_rcv(int mbox, u32 *data)
{
int i;
for (i = 0; i < 7; i++)
data[i] = readl_relaxed(ipc_base + IPCMxDR(mbox, i));
return data[1];
}
/* blocking implementation from the A9 side, not usable in interrupts! */
int pl320_ipc_transmit(u32 *data)
{
int ret;
mutex_lock(&ipc_m1_lock);
init_completion(&ipc_completion);
__ipc_send(IPC_TX_MBOX, data);
ret = wait_for_completion_timeout(&ipc_completion,
msecs_to_jiffies(1000));
if (ret == 0) {
ret = -ETIMEDOUT;
goto out;
}
ret = __ipc_rcv(IPC_TX_MBOX, data);
out:
mutex_unlock(&ipc_m1_lock);
return ret;
}
EXPORT_SYMBOL_GPL(pl320_ipc_transmit);
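/*
 * Illustrative usage sketch (not part of this driver); my_notify and my_nb
 * are hypothetical names. A consumer registers for inbound messages and
 * transmits a seven-word buffer, roughly:
 *
 *	static int my_notify(struct notifier_block *nb, unsigned long cmd,
 *			     void *data)
 *	{
 *		(cmd is word 0 of the message, data points at words 1..6)
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_notify };
 *
 *	pl320_ipc_register_notifier(&my_nb);
 *	u32 msg[7] = { cmd, arg0, arg1, arg2, arg3, arg4, arg5 };
 *	ret = pl320_ipc_transmit(msg);   (may sleep; not for IRQ context)
 */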
static irqreturn_t ipc_handler(int irq, void *dev)
{
u32 irq_stat;
u32 data[7];
irq_stat = readl_relaxed(ipc_base + IPCMMIS(1));
if (irq_stat & MBOX_MASK(IPC_TX_MBOX)) {
writel_relaxed(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
complete(&ipc_completion);
}
if (irq_stat & MBOX_MASK(IPC_RX_MBOX)) {
__ipc_rcv(IPC_RX_MBOX, data);
atomic_notifier_call_chain(&ipc_notifier, data[0], data + 1);
writel_relaxed(2, ipc_base + IPCMxSEND(IPC_RX_MBOX));
}
return IRQ_HANDLED;
}
int pl320_ipc_register_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&ipc_notifier, nb);
}
EXPORT_SYMBOL_GPL(pl320_ipc_register_notifier);
int pl320_ipc_unregister_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_unregister(&ipc_notifier, nb);
}
EXPORT_SYMBOL_GPL(pl320_ipc_unregister_notifier);
static int pl320_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
ipc_base = ioremap(adev->res.start, resource_size(&adev->res));
if (ipc_base == NULL)
return -ENOMEM;
writel_relaxed(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
ipc_irq = adev->irq[0];
ret = request_irq(ipc_irq, ipc_handler, 0, dev_name(&adev->dev), NULL);
if (ret < 0)
goto err;
/* Init slow mailbox */
writel_relaxed(CHAN_MASK(A9_SOURCE),
ipc_base + IPCMxSOURCE(IPC_TX_MBOX));
writel_relaxed(CHAN_MASK(M3_SOURCE),
ipc_base + IPCMxDSET(IPC_TX_MBOX));
writel_relaxed(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
ipc_base + IPCMxMSET(IPC_TX_MBOX));
/* Init receive mailbox */
writel_relaxed(CHAN_MASK(M3_SOURCE),
ipc_base + IPCMxSOURCE(IPC_RX_MBOX));
writel_relaxed(CHAN_MASK(A9_SOURCE),
ipc_base + IPCMxDSET(IPC_RX_MBOX));
writel_relaxed(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
ipc_base + IPCMxMSET(IPC_RX_MBOX));
return 0;
err:
iounmap(ipc_base);
return ret;
}
static struct amba_id pl320_ids[] = {
{
.id = 0x00041320,
.mask = 0x000fffff,
},
{ 0, 0 },
};
static struct amba_driver pl320_driver = {
.drv = {
.name = "pl320",
},
.id_table = pl320_ids,
.probe = pl320_probe,
};
static int __init ipc_init(void)
{
return amba_driver_register(&pl320_driver);
}
subsys_initcall(ipc_init);
|
linux-master
|
drivers/mailbox/pl320-ipc.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* ARM Message Handling Unit Version 2 (MHUv2) driver.
*
* Copyright (C) 2020 ARM Ltd.
* Copyright (C) 2020 Linaro Ltd.
*
* An MHUv2 mailbox controller can provide up to 124 channel windows (each 32
* bits wide) and the driver allows any combination of the two transport
* protocol modes, data-transfer and doorbell, to be used on those channel
* windows.
*
* The transport protocols should be specified in the device tree entry for the
* device. The transport protocols determine how the underlying hardware
* resources of the device are utilized when transmitting data. Refer to the
* device tree bindings of the ARM MHUv2 controller for more details.
*
* The number of registered mailbox channels depends both on the underlying
* hardware - mainly the number of channel windows implemented by the platform -
* and on the selected transport protocols.
*
* The MHUv2 controller can work both as a sender and receiver, but the driver
* and the DT bindings support unidirectional transfers for better allocation of
* the channels. That is, this driver will be probed for two separate devices
* for each mailbox controller, a sender device and a receiver device.
*/
#include <linux/amba/bus.h>
#include <linux/interrupt.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/arm_mhuv2_message.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/spinlock.h>
/* ====== MHUv2 Registers ====== */
/* Maximum number of channel windows */
#define MHUV2_CH_WN_MAX 124
/* Number of combined interrupt status registers */
#define MHUV2_CMB_INT_ST_REG_CNT 4
#define MHUV2_STAT_BYTES (sizeof(u32))
#define MHUV2_STAT_BITS (MHUV2_STAT_BYTES * __CHAR_BIT__)
#define LSB_MASK(n) ((1 << (n * __CHAR_BIT__)) - 1)
#define MHUV2_PROTOCOL_PROP "arm,mhuv2-protocols"
/* Register Message Handling Unit Configuration fields */
struct mhu_cfg_t {
u32 num_ch : 7;
u32 pad : 25;
} __packed;
/* register Interrupt Status fields */
struct int_st_t {
u32 nr2r : 1;
u32 r2nr : 1;
u32 pad : 30;
} __packed;
/* Register Interrupt Clear fields */
struct int_clr_t {
u32 nr2r : 1;
u32 r2nr : 1;
u32 pad : 30;
} __packed;
/* Register Interrupt Enable fields */
struct int_en_t {
u32 r2nr : 1;
u32 nr2r : 1;
u32 chcomb : 1;
u32 pad : 29;
} __packed;
/* Register Implementer Identification fields */
struct iidr_t {
u32 implementer : 12;
u32 revision : 4;
u32 variant : 4;
u32 product_id : 12;
} __packed;
/* Register Architecture Identification Register fields */
struct aidr_t {
u32 arch_minor_rev : 4;
u32 arch_major_rev : 4;
u32 pad : 24;
} __packed;
/* Sender Channel Window fields */
struct mhu2_send_ch_wn_reg {
u32 stat;
u8 pad1[0x0C - 0x04];
u32 stat_set;
u32 int_st;
u32 int_clr;
u32 int_en;
u8 pad2[0x20 - 0x1C];
} __packed;
/* Sender frame register fields */
struct mhu2_send_frame_reg {
struct mhu2_send_ch_wn_reg ch_wn[MHUV2_CH_WN_MAX];
struct mhu_cfg_t mhu_cfg;
u32 resp_cfg;
u32 access_request;
u32 access_ready;
struct int_st_t int_st;
struct int_clr_t int_clr;
struct int_en_t int_en;
u32 reserved0;
u32 chcomb_int_st[MHUV2_CMB_INT_ST_REG_CNT];
u8 pad[0xFC8 - 0xFB0];
struct iidr_t iidr;
struct aidr_t aidr;
} __packed;
/* Receiver Channel Window fields */
struct mhu2_recv_ch_wn_reg {
u32 stat;
u32 stat_masked;
u32 stat_clear;
u8 reserved0[0x10 - 0x0C];
u32 mask;
u32 mask_set;
u32 mask_clear;
u8 pad[0x20 - 0x1C];
} __packed;
/* Receiver frame register fields */
struct mhu2_recv_frame_reg {
struct mhu2_recv_ch_wn_reg ch_wn[MHUV2_CH_WN_MAX];
struct mhu_cfg_t mhu_cfg;
u8 reserved0[0xF90 - 0xF84];
struct int_st_t int_st;
struct int_clr_t int_clr;
struct int_en_t int_en;
u32 pad;
u32 chcomb_int_st[MHUV2_CMB_INT_ST_REG_CNT];
u8 reserved2[0xFC8 - 0xFB0];
struct iidr_t iidr;
struct aidr_t aidr;
} __packed;
/* ====== MHUv2 data structures ====== */
enum mhuv2_transport_protocol {
DOORBELL = 0,
DATA_TRANSFER = 1
};
enum mhuv2_frame {
RECEIVER_FRAME,
SENDER_FRAME
};
/**
* struct mhuv2 - MHUv2 mailbox controller data
*
* @mbox: Mailbox controller belonging to the MHU frame.
* @send: Base address of the sender frame register map.
* @recv: Base address of the receiver frame register map.
* @frame: Frame type: RECEIVER_FRAME or SENDER_FRAME.
* @irq: Interrupt.
* @windows: Channel windows implemented by the platform.
* @minor: Minor version of the controller.
* @length: Length of the protocols array in bytes.
* @protocols: Raw protocol information, derived from device tree.
* @doorbell_pending_lock: spinlock required for correct operation of Tx
* interrupt for doorbells.
*/
struct mhuv2 {
struct mbox_controller mbox;
union {
struct mhu2_send_frame_reg __iomem *send;
struct mhu2_recv_frame_reg __iomem *recv;
};
enum mhuv2_frame frame;
unsigned int irq;
unsigned int windows;
unsigned int minor;
unsigned int length;
u32 *protocols;
spinlock_t doorbell_pending_lock;
};
#define mhu_from_mbox(_mbox) container_of(_mbox, struct mhuv2, mbox)
/**
* struct mhuv2_protocol_ops - MHUv2 operations
*
* Each transport protocol must provide an implementation of the operations
* provided here.
*
* @rx_startup: Startup callback for receiver.
* @rx_shutdown: Shutdown callback for receiver.
* @read_data: Reads and clears newly available data.
* @tx_startup: Startup callback for sender.
* @tx_shutdown: Shutdown callback for sender.
* @last_tx_done: Report back if the last tx is completed or not.
* @send_data: Send data to the receiver.
*/
struct mhuv2_protocol_ops {
int (*rx_startup)(struct mhuv2 *mhu, struct mbox_chan *chan);
void (*rx_shutdown)(struct mhuv2 *mhu, struct mbox_chan *chan);
void *(*read_data)(struct mhuv2 *mhu, struct mbox_chan *chan);
void (*tx_startup)(struct mhuv2 *mhu, struct mbox_chan *chan);
void (*tx_shutdown)(struct mhuv2 *mhu, struct mbox_chan *chan);
int (*last_tx_done)(struct mhuv2 *mhu, struct mbox_chan *chan);
int (*send_data)(struct mhuv2 *mhu, struct mbox_chan *chan, void *arg);
};
/*
* MHUv2 mailbox channel's private information
*
* @ops: protocol specific ops for the channel.
* @ch_wn_idx: Channel window index allocated to the channel.
* @windows: Total number of windows consumed by the channel, only relevant
* in DATA_TRANSFER protocol.
* @doorbell: Doorbell bit number within the ch_wn_idx window, only relevant
* in DOORBELL protocol.
* @pending: Flag indicating pending doorbell interrupt, only relevant in
* DOORBELL protocol.
*/
struct mhuv2_mbox_chan_priv {
const struct mhuv2_protocol_ops *ops;
u32 ch_wn_idx;
union {
u32 windows;
struct {
u32 doorbell;
u32 pending;
};
};
};
/* Macro for reading a bitfield within a physically mapped packed struct */
#define readl_relaxed_bitfield(_regptr, _type, _field) \
({ \
u32 _regval; \
_regval = readl_relaxed((_regptr)); \
(*(_type *)(&_regval))._field; \
})
/* Macro for writing a bitfield within a physically mapped packed struct */
#define writel_relaxed_bitfield(_value, _regptr, _type, _field) \
({ \
u32 _regval; \
_regval = readl_relaxed(_regptr); \
(*(_type *)(&_regval))._field = _value; \
writel_relaxed(_regval, _regptr); \
})
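/*
 * Illustrative use of the helpers above (this is how they are applied later
 * in this file): the field access expands to a plain 32-bit read plus a
 * packed-struct overlay, for example:
 *
 *	mhu->windows = readl_relaxed_bitfield(&mhu->send->mhu_cfg,
 *					      struct mhu_cfg_t, num_ch);
 */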
/* =================== Doorbell transport protocol operations =============== */
static int mhuv2_doorbell_rx_startup(struct mhuv2 *mhu, struct mbox_chan *chan)
{
struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
writel_relaxed(BIT(priv->doorbell),
&mhu->recv->ch_wn[priv->ch_wn_idx].mask_clear);
return 0;
}
static void mhuv2_doorbell_rx_shutdown(struct mhuv2 *mhu,
struct mbox_chan *chan)
{
struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
writel_relaxed(BIT(priv->doorbell),
&mhu->recv->ch_wn[priv->ch_wn_idx].mask_set);
}
static void *mhuv2_doorbell_read_data(struct mhuv2 *mhu, struct mbox_chan *chan)
{
struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
writel_relaxed(BIT(priv->doorbell),
&mhu->recv->ch_wn[priv->ch_wn_idx].stat_clear);
return NULL;
}
static int mhuv2_doorbell_last_tx_done(struct mhuv2 *mhu,
struct mbox_chan *chan)
{
struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
return !(readl_relaxed(&mhu->send->ch_wn[priv->ch_wn_idx].stat) &
BIT(priv->doorbell));
}
static int mhuv2_doorbell_send_data(struct mhuv2 *mhu, struct mbox_chan *chan,
void *arg)
{
struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
unsigned long flags;
spin_lock_irqsave(&mhu->doorbell_pending_lock, flags);
priv->pending = 1;
writel_relaxed(BIT(priv->doorbell),
&mhu->send->ch_wn[priv->ch_wn_idx].stat_set);
spin_unlock_irqrestore(&mhu->doorbell_pending_lock, flags);
return 0;
}
static const struct mhuv2_protocol_ops mhuv2_doorbell_ops = {
.rx_startup = mhuv2_doorbell_rx_startup,
.rx_shutdown = mhuv2_doorbell_rx_shutdown,
.read_data = mhuv2_doorbell_read_data,
.last_tx_done = mhuv2_doorbell_last_tx_done,
.send_data = mhuv2_doorbell_send_data,
};
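/*
 * In doorbell mode every bit of a 32-bit channel window acts as an
 * independent mailbox channel: send_data() sets one bit in stat_set, the
 * receiver clears that bit in stat_clear, and last_tx_done() polls the same
 * single bit. A single window therefore provides up to MHUV2_STAT_BITS (32)
 * doorbell channels.
 */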
#define IS_PROTOCOL_DOORBELL(_priv) (_priv->ops == &mhuv2_doorbell_ops)
/* ============= Data transfer transport protocol operations ================ */
static int mhuv2_data_transfer_rx_startup(struct mhuv2 *mhu,
struct mbox_chan *chan)
{
struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
int i = priv->ch_wn_idx + priv->windows - 1;
/*
* The protocol mandates that all but the last status register must be
* masked.
*/
writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_clear);
return 0;
}
static void mhuv2_data_transfer_rx_shutdown(struct mhuv2 *mhu,
struct mbox_chan *chan)
{
struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
int i = priv->ch_wn_idx + priv->windows - 1;
writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_set);
}
static void *mhuv2_data_transfer_read_data(struct mhuv2 *mhu,
struct mbox_chan *chan)
{
struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
const int windows = priv->windows;
struct arm_mhuv2_mbox_msg *msg;
u32 *data;
int i, idx;
msg = kzalloc(sizeof(*msg) + windows * MHUV2_STAT_BYTES, GFP_KERNEL);
if (!msg)
return ERR_PTR(-ENOMEM);
data = msg->data = msg + 1;
msg->len = windows * MHUV2_STAT_BYTES;
/*
* Messages are expected in order of most significant word to least
* significant word. Refer mhuv2_data_transfer_send_data() for more
* details.
*
* We also need to read the stat register instead of stat_masked, as we
* masked all but the last window.
*
* Last channel window must be cleared as the final operation. Upon
* clearing the last channel window register, which is unmasked in
* data-transfer protocol, the interrupt is de-asserted.
*/
for (i = 0; i < windows; i++) {
idx = priv->ch_wn_idx + i;
data[windows - 1 - i] = readl_relaxed(&mhu->recv->ch_wn[idx].stat);
writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[idx].stat_clear);
}
return msg;
}
static void mhuv2_data_transfer_tx_startup(struct mhuv2 *mhu,
struct mbox_chan *chan)
{
struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
int i = priv->ch_wn_idx + priv->windows - 1;
/* Enable interrupts only for the last window */
if (mhu->minor) {
writel_relaxed(0x1, &mhu->send->ch_wn[i].int_clr);
writel_relaxed(0x1, &mhu->send->ch_wn[i].int_en);
}
}
static void mhuv2_data_transfer_tx_shutdown(struct mhuv2 *mhu,
struct mbox_chan *chan)
{
struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
int i = priv->ch_wn_idx + priv->windows - 1;
if (mhu->minor)
writel_relaxed(0x0, &mhu->send->ch_wn[i].int_en);
}
static int mhuv2_data_transfer_last_tx_done(struct mhuv2 *mhu,
struct mbox_chan *chan)
{
struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
int i = priv->ch_wn_idx + priv->windows - 1;
/* Just checking the last channel window should be enough */
return !readl_relaxed(&mhu->send->ch_wn[i].stat);
}
/*
* Message will be transmitted from most significant to least significant word.
* This is to allow for messages shorter than channel windows to still trigger
* the receiver interrupt which gets activated when the last stat register is
* written. As an example, a 6-word message is to be written over 4 channel
* windows: Registers marked with '*' are masked, and will not generate an
* interrupt on the receiver side once written.
*
* u32 *data = [0x00000001], [0x00000002], [0x00000003], [0x00000004],
* [0x00000005], [0x00000006]
*
* ROUND 1:
* stat reg To write Write sequence
* [ stat 3 ] <- [0x00000001] 4 <- triggers interrupt on receiver
* [ stat 2 ] <- [0x00000002] 3
* [ stat 1 ] <- [0x00000003] 2
* [ stat 0 ] <- [0x00000004] 1
*
* data += 4 // Increment data pointer by number of stat regs
*
* ROUND 2:
* stat reg To write Write sequence
* [ stat 3 ] <- [0x00000005] 2 <- triggers interrupt on receiver
* [ stat 2 ] <- [0x00000006] 1
* [ stat 1 ] <- [0x00000000]
* [ stat 0 ] <- [0x00000000]
*/
static int mhuv2_data_transfer_send_data(struct mhuv2 *mhu,
struct mbox_chan *chan, void *arg)
{
const struct arm_mhuv2_mbox_msg *msg = arg;
int bytes_left = msg->len, bytes_to_send, bytes_in_round, i;
struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
int windows = priv->windows;
u32 *data = msg->data, word;
while (bytes_left) {
if (!data[0]) {
dev_err(mhu->mbox.dev, "Data aligned at first window can't be zero to guarantee interrupt generation at receiver");
return -EINVAL;
}
while (!mhuv2_data_transfer_last_tx_done(mhu, chan))
continue;
bytes_in_round = min(bytes_left, (int)(windows * MHUV2_STAT_BYTES));
for (i = windows - 1; i >= 0; i--) {
/* No data left for this window in this round? Skip it. */
if (unlikely(bytes_in_round <= i * MHUV2_STAT_BYTES))
continue;
word = data[i];
bytes_to_send = bytes_in_round & (MHUV2_STAT_BYTES - 1);
if (unlikely(bytes_to_send))
word &= LSB_MASK(bytes_to_send);
else
bytes_to_send = MHUV2_STAT_BYTES;
writel_relaxed(word, &mhu->send->ch_wn[priv->ch_wn_idx + windows - 1 - i].stat_set);
bytes_left -= bytes_to_send;
bytes_in_round -= bytes_to_send;
}
data += windows;
}
return 0;
}
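/*
 * Worked example for the partial-word handling above (illustrative): a
 * 6-byte message on a one-window channel is sent in two rounds. Round one
 * writes a full 4-byte word; in round two bytes_in_round is 2, so
 * bytes_to_send becomes 2 and the word is masked with LSB_MASK(2) == 0xFFFF
 * before being written to stat_set.
 */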
static const struct mhuv2_protocol_ops mhuv2_data_transfer_ops = {
.rx_startup = mhuv2_data_transfer_rx_startup,
.rx_shutdown = mhuv2_data_transfer_rx_shutdown,
.read_data = mhuv2_data_transfer_read_data,
.tx_startup = mhuv2_data_transfer_tx_startup,
.tx_shutdown = mhuv2_data_transfer_tx_shutdown,
.last_tx_done = mhuv2_data_transfer_last_tx_done,
.send_data = mhuv2_data_transfer_send_data,
};
/* Interrupt handlers */
static struct mbox_chan *get_irq_chan_comb(struct mhuv2 *mhu, u32 __iomem *reg)
{
struct mbox_chan *chans = mhu->mbox.chans;
int channel = 0, i, offset = 0, windows, protocol, ch_wn;
u32 stat;
for (i = 0; i < MHUV2_CMB_INT_ST_REG_CNT; i++) {
stat = readl_relaxed(reg + i);
if (!stat)
continue;
ch_wn = i * MHUV2_STAT_BITS + __builtin_ctz(stat);
for (i = 0; i < mhu->length; i += 2) {
protocol = mhu->protocols[i];
windows = mhu->protocols[i + 1];
if (ch_wn >= offset + windows) {
if (protocol == DOORBELL)
channel += MHUV2_STAT_BITS * windows;
else
channel++;
offset += windows;
continue;
}
/* Return first chan of the window in doorbell mode */
if (protocol == DOORBELL)
channel += MHUV2_STAT_BITS * (ch_wn - offset);
return &chans[channel];
}
}
return ERR_PTR(-EIO);
}
static irqreturn_t mhuv2_sender_interrupt(int irq, void *data)
{
struct mhuv2 *mhu = data;
struct device *dev = mhu->mbox.dev;
struct mhuv2_mbox_chan_priv *priv;
struct mbox_chan *chan;
unsigned long flags;
int i, found = 0;
u32 stat;
chan = get_irq_chan_comb(mhu, mhu->send->chcomb_int_st);
if (IS_ERR(chan)) {
dev_warn(dev, "Failed to find channel for the Tx interrupt\n");
return IRQ_NONE;
}
priv = chan->con_priv;
if (!IS_PROTOCOL_DOORBELL(priv)) {
writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx + priv->windows - 1].int_clr);
if (chan->cl) {
mbox_chan_txdone(chan, 0);
return IRQ_HANDLED;
}
dev_warn(dev, "Tx interrupt Received on channel (%u) not currently attached to a mailbox client\n",
priv->ch_wn_idx);
return IRQ_NONE;
}
/* Clear the interrupt first, so we don't miss any doorbell later */
writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx].int_clr);
/*
* In Doorbell mode, make sure no new transitions happen while the
* interrupt handler is trying to find the finished doorbell tx
* operations, or else we may think some of the transfers were complete
* before they actually were.
*/
spin_lock_irqsave(&mhu->doorbell_pending_lock, flags);
/*
* In case of doorbell mode, the first channel of the window is returned
* by get_irq_chan_comb(). Find all the pending channels here.
*/
stat = readl_relaxed(&mhu->send->ch_wn[priv->ch_wn_idx].stat);
for (i = 0; i < MHUV2_STAT_BITS; i++) {
priv = chan[i].con_priv;
/* Find cases where pending was 1, but stat's bit is cleared */
if (priv->pending ^ ((stat >> i) & 0x1)) {
BUG_ON(!priv->pending);
if (!chan->cl) {
dev_warn(dev, "Tx interrupt received on doorbell (%u : %u) channel not currently attached to a mailbox client\n",
priv->ch_wn_idx, i);
continue;
}
mbox_chan_txdone(&chan[i], 0);
priv->pending = 0;
found++;
}
}
spin_unlock_irqrestore(&mhu->doorbell_pending_lock, flags);
if (!found) {
/*
* We may have already processed the doorbell in the previous
* iteration if the interrupt came right after we cleared it but
* before we read the stat register.
*/
dev_dbg(dev, "Couldn't find the doorbell (%u) for the Tx interrupt interrupt\n",
priv->ch_wn_idx);
return IRQ_NONE;
}
return IRQ_HANDLED;
}
static struct mbox_chan *get_irq_chan_comb_rx(struct mhuv2 *mhu)
{
struct mhuv2_mbox_chan_priv *priv;
struct mbox_chan *chan;
u32 stat;
chan = get_irq_chan_comb(mhu, mhu->recv->chcomb_int_st);
if (IS_ERR(chan))
return chan;
priv = chan->con_priv;
if (!IS_PROTOCOL_DOORBELL(priv))
return chan;
/*
* In case of doorbell mode, the first channel of the window is returned
* by the routine. Find the exact channel here.
*/
stat = readl_relaxed(&mhu->recv->ch_wn[priv->ch_wn_idx].stat_masked);
BUG_ON(!stat);
return chan + __builtin_ctz(stat);
}
static struct mbox_chan *get_irq_chan_stat_rx(struct mhuv2 *mhu)
{
struct mbox_chan *chans = mhu->mbox.chans;
struct mhuv2_mbox_chan_priv *priv;
u32 stat;
int i = 0;
while (i < mhu->mbox.num_chans) {
priv = chans[i].con_priv;
stat = readl_relaxed(&mhu->recv->ch_wn[priv->ch_wn_idx].stat_masked);
if (stat) {
if (IS_PROTOCOL_DOORBELL(priv))
i += __builtin_ctz(stat);
return &chans[i];
}
i += IS_PROTOCOL_DOORBELL(priv) ? MHUV2_STAT_BITS : 1;
}
return ERR_PTR(-EIO);
}
static struct mbox_chan *get_irq_chan_rx(struct mhuv2 *mhu)
{
if (!mhu->minor)
return get_irq_chan_stat_rx(mhu);
return get_irq_chan_comb_rx(mhu);
}
static irqreturn_t mhuv2_receiver_interrupt(int irq, void *arg)
{
struct mhuv2 *mhu = arg;
struct mbox_chan *chan = get_irq_chan_rx(mhu);
struct device *dev = mhu->mbox.dev;
struct mhuv2_mbox_chan_priv *priv;
int ret = IRQ_NONE;
void *data;
if (IS_ERR(chan)) {
dev_warn(dev, "Failed to find channel for the rx interrupt\n");
return IRQ_NONE;
}
priv = chan->con_priv;
/* Read and clear the data first */
data = priv->ops->read_data(mhu, chan);
if (!chan->cl) {
dev_warn(dev, "Received data on channel (%u) not currently attached to a mailbox client\n",
priv->ch_wn_idx);
} else if (IS_ERR(data)) {
dev_err(dev, "Failed to read data: %lu\n", PTR_ERR(data));
} else {
mbox_chan_received_data(chan, data);
ret = IRQ_HANDLED;
}
if (!IS_ERR(data))
kfree(data);
return ret;
}
/* Sender and receiver ops */
static bool mhuv2_sender_last_tx_done(struct mbox_chan *chan)
{
struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
return priv->ops->last_tx_done(mhu, chan);
}
static int mhuv2_sender_send_data(struct mbox_chan *chan, void *data)
{
struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
if (!priv->ops->last_tx_done(mhu, chan))
return -EBUSY;
return priv->ops->send_data(mhu, chan, data);
}
static int mhuv2_sender_startup(struct mbox_chan *chan)
{
struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
if (priv->ops->tx_startup)
priv->ops->tx_startup(mhu, chan);
return 0;
}
static void mhuv2_sender_shutdown(struct mbox_chan *chan)
{
struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
if (priv->ops->tx_shutdown)
priv->ops->tx_shutdown(mhu, chan);
}
static const struct mbox_chan_ops mhuv2_sender_ops = {
.send_data = mhuv2_sender_send_data,
.startup = mhuv2_sender_startup,
.shutdown = mhuv2_sender_shutdown,
.last_tx_done = mhuv2_sender_last_tx_done,
};
static int mhuv2_receiver_startup(struct mbox_chan *chan)
{
struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
return priv->ops->rx_startup(mhu, chan);
}
static void mhuv2_receiver_shutdown(struct mbox_chan *chan)
{
struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
priv->ops->rx_shutdown(mhu, chan);
}
static int mhuv2_receiver_send_data(struct mbox_chan *chan, void *data)
{
dev_err(chan->mbox->dev,
"Trying to transmit on a receiver MHU frame\n");
return -EIO;
}
static bool mhuv2_receiver_last_tx_done(struct mbox_chan *chan)
{
dev_err(chan->mbox->dev, "Trying to Tx poll on a receiver MHU frame\n");
return true;
}
static const struct mbox_chan_ops mhuv2_receiver_ops = {
.send_data = mhuv2_receiver_send_data,
.startup = mhuv2_receiver_startup,
.shutdown = mhuv2_receiver_shutdown,
.last_tx_done = mhuv2_receiver_last_tx_done,
};
static struct mbox_chan *mhuv2_mbox_of_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *pa)
{
struct mhuv2 *mhu = mhu_from_mbox(mbox);
struct mbox_chan *chans = mbox->chans;
int channel = 0, i, offset, doorbell, protocol, windows;
if (pa->args_count != 2)
return ERR_PTR(-EINVAL);
offset = pa->args[0];
doorbell = pa->args[1];
if (doorbell >= MHUV2_STAT_BITS)
goto out;
for (i = 0; i < mhu->length; i += 2) {
protocol = mhu->protocols[i];
windows = mhu->protocols[i + 1];
if (protocol == DOORBELL) {
if (offset < windows)
return &chans[channel + MHUV2_STAT_BITS * offset + doorbell];
channel += MHUV2_STAT_BITS * windows;
offset -= windows;
} else {
if (offset == 0) {
if (doorbell)
goto out;
return &chans[channel];
}
channel++;
offset--;
}
}
out:
dev_err(mbox->dev, "Couldn't xlate to a valid channel (%d: %d)\n",
pa->args[0], doorbell);
return ERR_PTR(-ENODEV);
}
static int mhuv2_verify_protocol(struct mhuv2 *mhu)
{
struct device *dev = mhu->mbox.dev;
int protocol, windows, channels = 0, total_windows = 0, i;
for (i = 0; i < mhu->length; i += 2) {
protocol = mhu->protocols[i];
windows = mhu->protocols[i + 1];
if (!windows) {
dev_err(dev, "Window size can't be zero (%d)\n", i);
return -EINVAL;
}
total_windows += windows;
if (protocol == DOORBELL) {
channels += MHUV2_STAT_BITS * windows;
} else if (protocol == DATA_TRANSFER) {
channels++;
} else {
dev_err(dev, "Invalid protocol (%d) present in %s property at index %d\n",
protocol, MHUV2_PROTOCOL_PROP, i);
return -EINVAL;
}
}
if (total_windows > mhu->windows) {
dev_err(dev, "Channel windows can't be more than what's implemented by the hardware ( %d: %d)\n",
total_windows, mhu->windows);
return -EINVAL;
}
mhu->mbox.num_chans = channels;
return 0;
}
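/*
 * Example of the channel accounting above (illustrative): a protocols array
 * of <DOORBELL 2 DATA_TRANSFER 4> consumes 2 + 4 = 6 channel windows and
 * registers 2 * MHUV2_STAT_BITS + 1 = 65 mailbox channels, since every
 * doorbell window contributes 32 channels while a data-transfer group of
 * windows contributes exactly one.
 */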
static int mhuv2_allocate_channels(struct mhuv2 *mhu)
{
struct mbox_controller *mbox = &mhu->mbox;
struct mhuv2_mbox_chan_priv *priv;
struct device *dev = mbox->dev;
struct mbox_chan *chans;
int protocol, windows = 0, next_window = 0, i, j, k;
chans = devm_kcalloc(dev, mbox->num_chans, sizeof(*chans), GFP_KERNEL);
if (!chans)
return -ENOMEM;
mbox->chans = chans;
for (i = 0; i < mhu->length; i += 2) {
next_window += windows;
protocol = mhu->protocols[i];
windows = mhu->protocols[i + 1];
if (protocol == DATA_TRANSFER) {
priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->ch_wn_idx = next_window;
priv->ops = &mhuv2_data_transfer_ops;
priv->windows = windows;
chans++->con_priv = priv;
continue;
}
for (j = 0; j < windows; j++) {
for (k = 0; k < MHUV2_STAT_BITS; k++) {
priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->ch_wn_idx = next_window + j;
priv->ops = &mhuv2_doorbell_ops;
priv->doorbell = k;
chans++->con_priv = priv;
}
/*
* Permanently enable interrupt as we can't
* control it per doorbell.
*/
if (mhu->frame == SENDER_FRAME && mhu->minor)
writel_relaxed(0x1, &mhu->send->ch_wn[priv->ch_wn_idx].int_en);
}
}
/* Make sure we have initialized all channels */
BUG_ON(chans - mbox->chans != mbox->num_chans);
return 0;
}
static int mhuv2_parse_channels(struct mhuv2 *mhu)
{
struct device *dev = mhu->mbox.dev;
const struct device_node *np = dev->of_node;
int ret, count;
u32 *protocols;
count = of_property_count_u32_elems(np, MHUV2_PROTOCOL_PROP);
if (count <= 0 || count % 2) {
dev_err(dev, "Invalid %s property (%d)\n", MHUV2_PROTOCOL_PROP,
count);
return -EINVAL;
}
protocols = devm_kmalloc_array(dev, count, sizeof(*protocols), GFP_KERNEL);
if (!protocols)
return -ENOMEM;
ret = of_property_read_u32_array(np, MHUV2_PROTOCOL_PROP, protocols, count);
if (ret) {
dev_err(dev, "Failed to read %s property: %d\n",
MHUV2_PROTOCOL_PROP, ret);
return ret;
}
mhu->protocols = protocols;
mhu->length = count;
ret = mhuv2_verify_protocol(mhu);
if (ret)
return ret;
return mhuv2_allocate_channels(mhu);
}
static int mhuv2_tx_init(struct amba_device *adev, struct mhuv2 *mhu,
void __iomem *reg)
{
struct device *dev = mhu->mbox.dev;
int ret, i;
mhu->frame = SENDER_FRAME;
mhu->mbox.ops = &mhuv2_sender_ops;
mhu->send = reg;
mhu->windows = readl_relaxed_bitfield(&mhu->send->mhu_cfg, struct mhu_cfg_t, num_ch);
mhu->minor = readl_relaxed_bitfield(&mhu->send->aidr, struct aidr_t, arch_minor_rev);
spin_lock_init(&mhu->doorbell_pending_lock);
/*
* For minor version 1 and forward, tx interrupt is provided by
* the controller.
*/
if (mhu->minor && adev->irq[0]) {
ret = devm_request_threaded_irq(dev, adev->irq[0], NULL,
mhuv2_sender_interrupt,
IRQF_ONESHOT, "mhuv2-tx", mhu);
if (ret) {
dev_err(dev, "Failed to request tx IRQ, fallback to polling mode: %d\n",
ret);
} else {
mhu->mbox.txdone_irq = true;
mhu->mbox.txdone_poll = false;
mhu->irq = adev->irq[0];
writel_relaxed_bitfield(1, &mhu->send->int_en, struct int_en_t, chcomb);
/* Disable all channel interrupts */
for (i = 0; i < mhu->windows; i++)
writel_relaxed(0x0, &mhu->send->ch_wn[i].int_en);
goto out;
}
}
mhu->mbox.txdone_irq = false;
mhu->mbox.txdone_poll = true;
mhu->mbox.txpoll_period = 1;
out:
/* Wait for receiver to be ready */
writel_relaxed(0x1, &mhu->send->access_request);
while (!readl_relaxed(&mhu->send->access_ready))
continue;
return 0;
}
static int mhuv2_rx_init(struct amba_device *adev, struct mhuv2 *mhu,
void __iomem *reg)
{
struct device *dev = mhu->mbox.dev;
int ret, i;
mhu->frame = RECEIVER_FRAME;
mhu->mbox.ops = &mhuv2_receiver_ops;
mhu->recv = reg;
mhu->windows = readl_relaxed_bitfield(&mhu->recv->mhu_cfg, struct mhu_cfg_t, num_ch);
mhu->minor = readl_relaxed_bitfield(&mhu->recv->aidr, struct aidr_t, arch_minor_rev);
mhu->irq = adev->irq[0];
if (!mhu->irq) {
dev_err(dev, "Missing receiver IRQ\n");
return -EINVAL;
}
ret = devm_request_threaded_irq(dev, mhu->irq, NULL,
mhuv2_receiver_interrupt, IRQF_ONESHOT,
"mhuv2-rx", mhu);
if (ret) {
dev_err(dev, "Failed to request rx IRQ\n");
return ret;
}
/* Mask all the channel windows */
for (i = 0; i < mhu->windows; i++)
writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_set);
if (mhu->minor)
writel_relaxed_bitfield(1, &mhu->recv->int_en, struct int_en_t, chcomb);
return 0;
}
static int mhuv2_probe(struct amba_device *adev, const struct amba_id *id)
{
struct device *dev = &adev->dev;
const struct device_node *np = dev->of_node;
struct mhuv2 *mhu;
void __iomem *reg;
int ret = -EINVAL;
reg = devm_of_iomap(dev, dev->of_node, 0, NULL);
if (IS_ERR(reg))
return PTR_ERR(reg);
mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
if (!mhu)
return -ENOMEM;
mhu->mbox.dev = dev;
mhu->mbox.of_xlate = mhuv2_mbox_of_xlate;
if (of_device_is_compatible(np, "arm,mhuv2-tx"))
ret = mhuv2_tx_init(adev, mhu, reg);
else if (of_device_is_compatible(np, "arm,mhuv2-rx"))
ret = mhuv2_rx_init(adev, mhu, reg);
else
dev_err(dev, "Invalid compatible property\n");
if (ret)
return ret;
/* Channel windows can't be 0 */
BUG_ON(!mhu->windows);
ret = mhuv2_parse_channels(mhu);
if (ret)
return ret;
amba_set_drvdata(adev, mhu);
ret = devm_mbox_controller_register(dev, &mhu->mbox);
if (ret)
dev_err(dev, "failed to register ARM MHUv2 driver %d\n", ret);
return ret;
}
static void mhuv2_remove(struct amba_device *adev)
{
struct mhuv2 *mhu = amba_get_drvdata(adev);
if (mhu->frame == SENDER_FRAME)
writel_relaxed(0x0, &mhu->send->access_request);
}
static struct amba_id mhuv2_ids[] = {
{
/* 2.0 */
.id = 0xbb0d1,
.mask = 0xfffff,
},
{
/* 2.1 */
.id = 0xbb076,
.mask = 0xfffff,
},
{ 0, 0 },
};
MODULE_DEVICE_TABLE(amba, mhuv2_ids);
static struct amba_driver mhuv2_driver = {
.drv = {
.name = "arm-mhuv2",
},
.id_table = mhuv2_ids,
.probe = mhuv2_probe,
.remove = mhuv2_remove,
};
module_amba_driver(mhuv2_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ARM MHUv2 Driver");
MODULE_AUTHOR("Viresh Kumar <[email protected]>");
MODULE_AUTHOR("Tushar Khandelwal <[email protected]>");
|
linux-master
|
drivers/mailbox/arm_mhuv2.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2022 MediaTek Corporation. All rights reserved.
* Author: Allen-KH Cheng <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
struct mtk_adsp_mbox_priv {
struct device *dev;
struct mbox_controller mbox;
void __iomem *va_mboxreg;
const struct mtk_adsp_mbox_cfg *cfg;
};
struct mtk_adsp_mbox_cfg {
u32 set_in;
u32 set_out;
u32 clr_in;
u32 clr_out;
};
static inline struct mtk_adsp_mbox_priv *get_mtk_adsp_mbox_priv(struct mbox_controller *mbox)
{
return container_of(mbox, struct mtk_adsp_mbox_priv, mbox);
}
static irqreturn_t mtk_adsp_mbox_irq(int irq, void *data)
{
struct mbox_chan *chan = data;
struct mtk_adsp_mbox_priv *priv = get_mtk_adsp_mbox_priv(chan->mbox);
u32 op = readl(priv->va_mboxreg + priv->cfg->set_out);
writel(op, priv->va_mboxreg + priv->cfg->clr_out);
return IRQ_WAKE_THREAD;
}
static irqreturn_t mtk_adsp_mbox_isr(int irq, void *data)
{
struct mbox_chan *chan = data;
mbox_chan_received_data(chan, NULL);
return IRQ_HANDLED;
}
static struct mbox_chan *mtk_adsp_mbox_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *sp)
{
return mbox->chans;
}
static int mtk_adsp_mbox_startup(struct mbox_chan *chan)
{
struct mtk_adsp_mbox_priv *priv = get_mtk_adsp_mbox_priv(chan->mbox);
/* Clear ADSP mbox command */
writel(0xFFFFFFFF, priv->va_mboxreg + priv->cfg->clr_in);
writel(0xFFFFFFFF, priv->va_mboxreg + priv->cfg->clr_out);
return 0;
}
static void mtk_adsp_mbox_shutdown(struct mbox_chan *chan)
{
struct mtk_adsp_mbox_priv *priv = get_mtk_adsp_mbox_priv(chan->mbox);
/* Clear ADSP mbox command */
writel(0xFFFFFFFF, priv->va_mboxreg + priv->cfg->clr_in);
writel(0xFFFFFFFF, priv->va_mboxreg + priv->cfg->clr_out);
}
static int mtk_adsp_mbox_send_data(struct mbox_chan *chan, void *data)
{
struct mtk_adsp_mbox_priv *priv = get_mtk_adsp_mbox_priv(chan->mbox);
u32 *msg = data;
writel(*msg, priv->va_mboxreg + priv->cfg->set_in);
return 0;
}
static bool mtk_adsp_mbox_last_tx_done(struct mbox_chan *chan)
{
struct mtk_adsp_mbox_priv *priv = get_mtk_adsp_mbox_priv(chan->mbox);
return readl(priv->va_mboxreg + priv->cfg->set_in) == 0;
}
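/*
 * Handshake summary (as implemented above, with one assumption noted): the
 * host posts a command by writing set_in, and last_tx_done() treats it as
 * consumed once set_in reads back as zero - presumably cleared by the DSP
 * firmware, since this driver only clears it in startup and shutdown.
 * Inbound traffic raises the IRQ; the hard handler reads set_out and acks
 * it via clr_out before the threaded handler notifies the client.
 */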
static const struct mbox_chan_ops mtk_adsp_mbox_chan_ops = {
.send_data = mtk_adsp_mbox_send_data,
.startup = mtk_adsp_mbox_startup,
.shutdown = mtk_adsp_mbox_shutdown,
.last_tx_done = mtk_adsp_mbox_last_tx_done,
};
static int mtk_adsp_mbox_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_adsp_mbox_priv *priv;
const struct mtk_adsp_mbox_cfg *cfg;
struct mbox_controller *mbox;
int ret, irq;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
mbox = &priv->mbox;
mbox->dev = dev;
mbox->ops = &mtk_adsp_mbox_chan_ops;
mbox->txdone_irq = false;
mbox->txdone_poll = true;
mbox->of_xlate = mtk_adsp_mbox_xlate;
mbox->num_chans = 1;
mbox->chans = devm_kzalloc(dev, sizeof(*mbox->chans), GFP_KERNEL);
if (!mbox->chans)
return -ENOMEM;
priv->va_mboxreg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->va_mboxreg))
return PTR_ERR(priv->va_mboxreg);
cfg = of_device_get_match_data(dev);
if (!cfg)
return -EINVAL;
priv->cfg = cfg;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ret = devm_request_threaded_irq(dev, irq, mtk_adsp_mbox_irq,
mtk_adsp_mbox_isr, IRQF_TRIGGER_NONE,
dev_name(dev), mbox->chans);
if (ret < 0)
return ret;
platform_set_drvdata(pdev, priv);
return devm_mbox_controller_register(dev, &priv->mbox);
}
static const struct mtk_adsp_mbox_cfg mt8186_adsp_mbox_cfg = {
.set_in = 0x00,
.set_out = 0x04,
.clr_in = 0x08,
.clr_out = 0x0C,
};
static const struct mtk_adsp_mbox_cfg mt8195_adsp_mbox_cfg = {
.set_in = 0x00,
.set_out = 0x1c,
.clr_in = 0x04,
.clr_out = 0x20,
};
static const struct of_device_id mtk_adsp_mbox_of_match[] = {
{ .compatible = "mediatek,mt8186-adsp-mbox", .data = &mt8186_adsp_mbox_cfg },
{ .compatible = "mediatek,mt8195-adsp-mbox", .data = &mt8195_adsp_mbox_cfg },
{},
};
MODULE_DEVICE_TABLE(of, mtk_adsp_mbox_of_match);
static struct platform_driver mtk_adsp_mbox_driver = {
.probe = mtk_adsp_mbox_probe,
.driver = {
.name = "mtk_adsp_mbox",
.of_match_table = mtk_adsp_mbox_of_match,
},
};
module_platform_driver(mtk_adsp_mbox_driver);
MODULE_AUTHOR("Allen-KH Cheng <[email protected]>");
MODULE_DESCRIPTION("MTK ADSP Mailbox Controller");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/mailbox/mtk-adsp-mailbox.c
|
// SPDX-License-Identifier: GPL-2.0+
/*
* rWTM BIU Mailbox driver for Armada 37xx
*
* Author: Marek Behún <[email protected]>
*/
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/armada-37xx-rwtm-mailbox.h>
#define DRIVER_NAME "armada-37xx-rwtm-mailbox"
/* relative to rWTM BIU Mailbox Registers */
#define RWTM_MBOX_PARAM(i) (0x0 + ((i) << 2))
#define RWTM_MBOX_COMMAND 0x40
#define RWTM_MBOX_RETURN_STATUS 0x80
#define RWTM_MBOX_STATUS(i) (0x84 + ((i) << 2))
#define RWTM_MBOX_FIFO_STATUS 0xc4
#define FIFO_STS_RDY 0x100
#define FIFO_STS_CNTR_MASK 0x7
#define FIFO_STS_CNTR_MAX 4
#define RWTM_HOST_INT_RESET 0xc8
#define RWTM_HOST_INT_MASK 0xcc
#define SP_CMD_COMPLETE BIT(0)
#define SP_CMD_QUEUE_FULL_ACCESS BIT(17)
#define SP_CMD_QUEUE_FULL BIT(18)
struct a37xx_mbox {
struct device *dev;
struct mbox_controller controller;
void __iomem *base;
int irq;
};
static void a37xx_mbox_receive(struct mbox_chan *chan)
{
struct a37xx_mbox *mbox = chan->con_priv;
struct armada_37xx_rwtm_rx_msg rx_msg;
int i;
rx_msg.retval = readl(mbox->base + RWTM_MBOX_RETURN_STATUS);
for (i = 0; i < 16; ++i)
rx_msg.status[i] = readl(mbox->base + RWTM_MBOX_STATUS(i));
mbox_chan_received_data(chan, &rx_msg);
}
static irqreturn_t a37xx_mbox_irq_handler(int irq, void *data)
{
struct mbox_chan *chan = data;
struct a37xx_mbox *mbox = chan->con_priv;
u32 reg;
reg = readl(mbox->base + RWTM_HOST_INT_RESET);
if (reg & SP_CMD_COMPLETE)
a37xx_mbox_receive(chan);
if (reg & (SP_CMD_QUEUE_FULL_ACCESS | SP_CMD_QUEUE_FULL))
dev_err(mbox->dev, "Secure processor command queue full\n");
writel(reg, mbox->base + RWTM_HOST_INT_RESET);
if (reg)
mbox_chan_txdone(chan, 0);
return reg ? IRQ_HANDLED : IRQ_NONE;
}
static int a37xx_mbox_send_data(struct mbox_chan *chan, void *data)
{
struct a37xx_mbox *mbox = chan->con_priv;
struct armada_37xx_rwtm_tx_msg *msg = data;
int i;
u32 reg;
if (!data)
return -EINVAL;
reg = readl(mbox->base + RWTM_MBOX_FIFO_STATUS);
if (!(reg & FIFO_STS_RDY))
dev_warn(mbox->dev, "Secure processor not ready\n");
if ((reg & FIFO_STS_CNTR_MASK) >= FIFO_STS_CNTR_MAX) {
dev_err(mbox->dev, "Secure processor command queue full\n");
return -EBUSY;
}
for (i = 0; i < 16; ++i)
writel(msg->args[i], mbox->base + RWTM_MBOX_PARAM(i));
writel(msg->command, mbox->base + RWTM_MBOX_COMMAND);
return 0;
}
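/*
 * Illustrative client-side sketch (not part of this driver): a client fills
 * the 16 parameter words and a command code, then hands the message to the
 * mailbox framework, roughly:
 *
 *	struct armada_37xx_rwtm_tx_msg msg = { .command = cmd };
 *
 *	memcpy(msg.args, params, sizeof(msg.args));
 *	mbox_send_message(chan, &msg);
 *
 * The reply comes back through mbox_chan_received_data() as a
 * struct armada_37xx_rwtm_rx_msg, filled in by a37xx_mbox_receive() above.
 */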
static int a37xx_mbox_startup(struct mbox_chan *chan)
{
struct a37xx_mbox *mbox = chan->con_priv;
u32 reg;
int ret;
ret = devm_request_irq(mbox->dev, mbox->irq, a37xx_mbox_irq_handler, 0,
DRIVER_NAME, chan);
if (ret < 0) {
dev_err(mbox->dev, "Cannot request irq\n");
return ret;
}
/* enable IRQ generation */
reg = readl(mbox->base + RWTM_HOST_INT_MASK);
reg &= ~(SP_CMD_COMPLETE | SP_CMD_QUEUE_FULL_ACCESS | SP_CMD_QUEUE_FULL);
writel(reg, mbox->base + RWTM_HOST_INT_MASK);
return 0;
}
static void a37xx_mbox_shutdown(struct mbox_chan *chan)
{
u32 reg;
struct a37xx_mbox *mbox = chan->con_priv;
/* disable interrupt generation */
reg = readl(mbox->base + RWTM_HOST_INT_MASK);
reg |= SP_CMD_COMPLETE | SP_CMD_QUEUE_FULL_ACCESS | SP_CMD_QUEUE_FULL;
writel(reg, mbox->base + RWTM_HOST_INT_MASK);
devm_free_irq(mbox->dev, mbox->irq, chan);
}
static const struct mbox_chan_ops a37xx_mbox_ops = {
.send_data = a37xx_mbox_send_data,
.startup = a37xx_mbox_startup,
.shutdown = a37xx_mbox_shutdown,
};
static int armada_37xx_mbox_probe(struct platform_device *pdev)
{
struct a37xx_mbox *mbox;
struct mbox_chan *chans;
int ret;
mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
if (!mbox)
return -ENOMEM;
/* Allocate one channel */
chans = devm_kzalloc(&pdev->dev, sizeof(*chans), GFP_KERNEL);
if (!chans)
return -ENOMEM;
mbox->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->base))
return PTR_ERR(mbox->base);
mbox->irq = platform_get_irq(pdev, 0);
if (mbox->irq < 0)
return mbox->irq;
mbox->dev = &pdev->dev;
/* Hardware supports only one channel. */
chans[0].con_priv = mbox;
mbox->controller.dev = mbox->dev;
mbox->controller.num_chans = 1;
mbox->controller.chans = chans;
mbox->controller.ops = &a37xx_mbox_ops;
mbox->controller.txdone_irq = true;
ret = devm_mbox_controller_register(mbox->dev, &mbox->controller);
if (ret) {
dev_err(&pdev->dev, "Could not register mailbox controller\n");
return ret;
}
platform_set_drvdata(pdev, mbox);
return ret;
}
static const struct of_device_id armada_37xx_mbox_match[] = {
{ .compatible = "marvell,armada-3700-rwtm-mailbox" },
{ },
};
MODULE_DEVICE_TABLE(of, armada_37xx_mbox_match);
static struct platform_driver armada_37xx_mbox_driver = {
.probe = armada_37xx_mbox_probe,
.driver = {
.name = DRIVER_NAME,
.of_match_table = armada_37xx_mbox_match,
},
};
module_platform_driver(armada_37xx_mbox_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("rWTM BIU Mailbox driver for Armada 37xx");
MODULE_AUTHOR("Marek Behun <[email protected]>");
|
linux-master
|
drivers/mailbox/armada-37xx-rwtm-mailbox.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015 ST Microelectronics
*
* Author: Lee Jones <[email protected]>
*/
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/sched/signal.h>
#define MBOX_MAX_SIG_LEN 8
#define MBOX_MAX_MSG_LEN 128
#define MBOX_BYTES_PER_LINE 16
#define MBOX_HEXDUMP_LINE_LEN ((MBOX_BYTES_PER_LINE * 4) + 2)
#define MBOX_HEXDUMP_MAX_LEN (MBOX_HEXDUMP_LINE_LEN * \
(MBOX_MAX_MSG_LEN / MBOX_BYTES_PER_LINE))
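/*
 * Sizing, for reference: each dumped line is budgeted (16 * 4) + 2 = 66
 * characters (two hex digits plus a separator per byte, the ASCII column,
 * and a little slack for the trailing newline), so a full 128-byte message
 * needs 66 * (128 / 16) = 528 characters; mbox_test_message_read()
 * allocates that many plus one byte for the terminating NUL.
 */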
static bool mbox_data_ready;
struct mbox_test_device {
struct device *dev;
void __iomem *tx_mmio;
void __iomem *rx_mmio;
struct mbox_chan *tx_channel;
struct mbox_chan *rx_channel;
char *rx_buffer;
char *signal;
char *message;
spinlock_t lock;
struct mutex mutex;
wait_queue_head_t waitq;
struct fasync_struct *async_queue;
struct dentry *root_debugfs_dir;
};
static ssize_t mbox_test_signal_write(struct file *filp,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct mbox_test_device *tdev = filp->private_data;
if (!tdev->tx_channel) {
dev_err(tdev->dev, "Channel cannot do Tx\n");
return -EINVAL;
}
if (count > MBOX_MAX_SIG_LEN) {
dev_err(tdev->dev,
"Signal length %zd greater than max allowed %d\n",
count, MBOX_MAX_SIG_LEN);
return -EINVAL;
}
/* Only allocate memory if we need to */
if (!tdev->signal) {
tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL);
if (!tdev->signal)
return -ENOMEM;
}
if (copy_from_user(tdev->signal, userbuf, count)) {
kfree(tdev->signal);
tdev->signal = NULL;
return -EFAULT;
}
return count;
}
static const struct file_operations mbox_test_signal_ops = {
.write = mbox_test_signal_write,
.open = simple_open,
.llseek = generic_file_llseek,
};
static int mbox_test_message_fasync(int fd, struct file *filp, int on)
{
struct mbox_test_device *tdev = filp->private_data;
return fasync_helper(fd, filp, on, &tdev->async_queue);
}
static ssize_t mbox_test_message_write(struct file *filp,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct mbox_test_device *tdev = filp->private_data;
char *message;
void *data;
int ret;
if (!tdev->tx_channel) {
dev_err(tdev->dev, "Channel cannot do Tx\n");
return -EINVAL;
}
if (count > MBOX_MAX_MSG_LEN) {
dev_err(tdev->dev,
"Message length %zd greater than max allowed %d\n",
count, MBOX_MAX_MSG_LEN);
return -EINVAL;
}
message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
if (!message)
return -ENOMEM;
mutex_lock(&tdev->mutex);
tdev->message = message;
ret = copy_from_user(tdev->message, userbuf, count);
if (ret) {
ret = -EFAULT;
goto out;
}
/*
* A separate signal is only of use if there is
* MMIO to subsequently pass the message through
*/
if (tdev->tx_mmio && tdev->signal) {
print_hex_dump_bytes("Client: Sending: Signal: ", DUMP_PREFIX_ADDRESS,
tdev->signal, MBOX_MAX_SIG_LEN);
data = tdev->signal;
} else
data = tdev->message;
print_hex_dump_bytes("Client: Sending: Message: ", DUMP_PREFIX_ADDRESS,
tdev->message, MBOX_MAX_MSG_LEN);
ret = mbox_send_message(tdev->tx_channel, data);
if (ret < 0)
dev_err(tdev->dev, "Failed to send message via mailbox\n");
out:
kfree(tdev->signal);
kfree(tdev->message);
tdev->signal = NULL;
mutex_unlock(&tdev->mutex);
return ret < 0 ? ret : count;
}
static bool mbox_test_message_data_ready(struct mbox_test_device *tdev)
{
bool data_ready;
unsigned long flags;
spin_lock_irqsave(&tdev->lock, flags);
data_ready = mbox_data_ready;
spin_unlock_irqrestore(&tdev->lock, flags);
return data_ready;
}
static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct mbox_test_device *tdev = filp->private_data;
unsigned long flags;
char *touser, *ptr;
int l = 0;
int ret;
DECLARE_WAITQUEUE(wait, current);
touser = kzalloc(MBOX_HEXDUMP_MAX_LEN + 1, GFP_KERNEL);
if (!touser)
return -ENOMEM;
if (!tdev->rx_channel) {
ret = snprintf(touser, 20, "<NO RX CAPABILITY>\n");
ret = simple_read_from_buffer(userbuf, count, ppos,
touser, ret);
goto kfree_err;
}
add_wait_queue(&tdev->waitq, &wait);
do {
__set_current_state(TASK_INTERRUPTIBLE);
if (mbox_test_message_data_ready(tdev))
break;
if (filp->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
goto waitq_err;
}
if (signal_pending(current)) {
ret = -ERESTARTSYS;
goto waitq_err;
}
schedule();
} while (1);
spin_lock_irqsave(&tdev->lock, flags);
ptr = tdev->rx_buffer;
while (l < MBOX_HEXDUMP_MAX_LEN) {
hex_dump_to_buffer(ptr,
MBOX_BYTES_PER_LINE,
MBOX_BYTES_PER_LINE, 1, touser + l,
MBOX_HEXDUMP_LINE_LEN, true);
ptr += MBOX_BYTES_PER_LINE;
l += MBOX_HEXDUMP_LINE_LEN;
*(touser + (l - 1)) = '\n';
}
*(touser + l) = '\0';
memset(tdev->rx_buffer, 0, MBOX_MAX_MSG_LEN);
mbox_data_ready = false;
spin_unlock_irqrestore(&tdev->lock, flags);
ret = simple_read_from_buffer(userbuf, count, ppos, touser, MBOX_HEXDUMP_MAX_LEN);
waitq_err:
__set_current_state(TASK_RUNNING);
remove_wait_queue(&tdev->waitq, &wait);
kfree_err:
kfree(touser);
return ret;
}
static __poll_t
mbox_test_message_poll(struct file *filp, struct poll_table_struct *wait)
{
struct mbox_test_device *tdev = filp->private_data;
poll_wait(filp, &tdev->waitq, wait);
if (mbox_test_message_data_ready(tdev))
return EPOLLIN | EPOLLRDNORM;
return 0;
}
static const struct file_operations mbox_test_message_ops = {
.write = mbox_test_message_write,
.read = mbox_test_message_read,
.fasync = mbox_test_message_fasync,
.poll = mbox_test_message_poll,
.open = simple_open,
.llseek = generic_file_llseek,
};
static int mbox_test_add_debugfs(struct platform_device *pdev,
struct mbox_test_device *tdev)
{
if (!debugfs_initialized())
return 0;
tdev->root_debugfs_dir = debugfs_create_dir(dev_name(&pdev->dev), NULL);
if (!tdev->root_debugfs_dir) {
dev_err(&pdev->dev, "Failed to create Mailbox debugfs\n");
return -EINVAL;
}
debugfs_create_file("message", 0600, tdev->root_debugfs_dir,
tdev, &mbox_test_message_ops);
debugfs_create_file("signal", 0200, tdev->root_debugfs_dir,
tdev, &mbox_test_signal_ops);
return 0;
}
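/*
 * Illustrative user-space sketch (not part of the driver): exercising the
 * two debugfs files created above. The debugfs mount point and the
 * "mailbox-test" directory name are assumptions; the real directory is
 * named after dev_name() of the probed device.
 */
#if 0	/* user-space example, excluded from the kernel build */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path; substitute the real device directory name. */
	const char *path = "/sys/kernel/debug/mailbox-test/message";
	char reply[1024];
	struct pollfd pfd;
	ssize_t n;
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Queue a message for transmission on the Tx mailbox channel. */
	if (write(fd, "hello", strlen("hello")) < 0) {
		perror("write");
		return 1;
	}

	/* Wait for the Rx path to flag data, then read the hex dump back. */
	pfd.fd = fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, 5000) > 0) {
		n = read(fd, reply, sizeof(reply) - 1);
		if (n > 0) {
			reply[n] = '\0';
			fputs(reply, stdout);
		}
	}

	close(fd);
	return 0;
}
#endif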
static void mbox_test_receive_message(struct mbox_client *client, void *message)
{
struct mbox_test_device *tdev = dev_get_drvdata(client->dev);
unsigned long flags;
spin_lock_irqsave(&tdev->lock, flags);
if (tdev->rx_mmio) {
memcpy_fromio(tdev->rx_buffer, tdev->rx_mmio, MBOX_MAX_MSG_LEN);
print_hex_dump_bytes("Client: Received [MMIO]: ", DUMP_PREFIX_ADDRESS,
tdev->rx_buffer, MBOX_MAX_MSG_LEN);
} else if (message) {
print_hex_dump_bytes("Client: Received [API]: ", DUMP_PREFIX_ADDRESS,
message, MBOX_MAX_MSG_LEN);
memcpy(tdev->rx_buffer, message, MBOX_MAX_MSG_LEN);
}
mbox_data_ready = true;
spin_unlock_irqrestore(&tdev->lock, flags);
wake_up_interruptible(&tdev->waitq);
kill_fasync(&tdev->async_queue, SIGIO, POLL_IN);
}
static void mbox_test_prepare_message(struct mbox_client *client, void *message)
{
struct mbox_test_device *tdev = dev_get_drvdata(client->dev);
if (tdev->tx_mmio) {
if (tdev->signal)
memcpy_toio(tdev->tx_mmio, tdev->message, MBOX_MAX_MSG_LEN);
else
memcpy_toio(tdev->tx_mmio, message, MBOX_MAX_MSG_LEN);
}
}
static void mbox_test_message_sent(struct mbox_client *client,
void *message, int r)
{
if (r)
dev_warn(client->dev,
"Client: Message could not be sent: %d\n", r);
else
dev_info(client->dev,
"Client: Message sent\n");
}
static struct mbox_chan *
mbox_test_request_channel(struct platform_device *pdev, const char *name)
{
struct mbox_client *client;
struct mbox_chan *channel;
client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
if (!client)
return ERR_PTR(-ENOMEM);
client->dev = &pdev->dev;
client->rx_callback = mbox_test_receive_message;
client->tx_prepare = mbox_test_prepare_message;
client->tx_done = mbox_test_message_sent;
client->tx_block = true;
client->knows_txdone = false;
client->tx_tout = 500;
channel = mbox_request_channel_byname(client, name);
if (IS_ERR(channel)) {
dev_warn(&pdev->dev, "Failed to request %s channel\n", name);
return NULL;
}
return channel;
}
static int mbox_test_probe(struct platform_device *pdev)
{
struct mbox_test_device *tdev;
struct resource *res;
resource_size_t size;
int ret;
tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
if (!tdev)
return -ENOMEM;
/* It's okay for MMIO to be NULL */
tdev->tx_mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (PTR_ERR(tdev->tx_mmio) == -EBUSY) {
/* if reserved area in SRAM, try just ioremap */
size = resource_size(res);
tdev->tx_mmio = devm_ioremap(&pdev->dev, res->start, size);
} else if (IS_ERR(tdev->tx_mmio)) {
tdev->tx_mmio = NULL;
}
/* If specified, second reg entry is Rx MMIO */
tdev->rx_mmio = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
if (PTR_ERR(tdev->rx_mmio) == -EBUSY) {
size = resource_size(res);
tdev->rx_mmio = devm_ioremap(&pdev->dev, res->start, size);
} else if (IS_ERR(tdev->rx_mmio)) {
tdev->rx_mmio = tdev->tx_mmio;
}
tdev->tx_channel = mbox_test_request_channel(pdev, "tx");
tdev->rx_channel = mbox_test_request_channel(pdev, "rx");
if (IS_ERR_OR_NULL(tdev->tx_channel) && IS_ERR_OR_NULL(tdev->rx_channel))
return -EPROBE_DEFER;
/* If Rx is not specified but has Rx MMIO, then Rx = Tx */
if (!tdev->rx_channel && (tdev->rx_mmio != tdev->tx_mmio))
tdev->rx_channel = tdev->tx_channel;
tdev->dev = &pdev->dev;
platform_set_drvdata(pdev, tdev);
spin_lock_init(&tdev->lock);
mutex_init(&tdev->mutex);
if (tdev->rx_channel) {
tdev->rx_buffer = devm_kzalloc(&pdev->dev,
MBOX_MAX_MSG_LEN, GFP_KERNEL);
if (!tdev->rx_buffer)
return -ENOMEM;
}
ret = mbox_test_add_debugfs(pdev, tdev);
if (ret)
return ret;
init_waitqueue_head(&tdev->waitq);
dev_info(&pdev->dev, "Successfully registered\n");
return 0;
}
static int mbox_test_remove(struct platform_device *pdev)
{
struct mbox_test_device *tdev = platform_get_drvdata(pdev);
debugfs_remove_recursive(tdev->root_debugfs_dir);
if (tdev->tx_channel)
mbox_free_channel(tdev->tx_channel);
if (tdev->rx_channel)
mbox_free_channel(tdev->rx_channel);
return 0;
}
static const struct of_device_id mbox_test_match[] = {
{ .compatible = "mailbox-test" },
{},
};
MODULE_DEVICE_TABLE(of, mbox_test_match);
static struct platform_driver mbox_test_driver = {
.driver = {
.name = "mailbox_test",
.of_match_table = mbox_test_match,
},
.probe = mbox_test_probe,
.remove = mbox_test_remove,
};
module_platform_driver(mbox_test_driver);
MODULE_DESCRIPTION("Generic Mailbox Testing Facility");
MODULE_AUTHOR("Lee Jones <[email protected]");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/mailbox/mailbox-test.c
|
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2017-2019 Samuel Holland <[email protected]>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
#define NUM_CHANS 8
#define CTRL_REG(n) (0x0000 + 0x4 * ((n) / 4))
#define CTRL_RX(n) BIT(0 + 8 * ((n) % 4))
#define CTRL_TX(n) BIT(4 + 8 * ((n) % 4))
#define REMOTE_IRQ_EN_REG 0x0040
#define REMOTE_IRQ_STAT_REG 0x0050
#define LOCAL_IRQ_EN_REG 0x0060
#define LOCAL_IRQ_STAT_REG 0x0070
#define RX_IRQ(n) BIT(0 + 2 * (n))
#define RX_IRQ_MASK 0x5555
#define TX_IRQ(n) BIT(1 + 2 * (n))
#define TX_IRQ_MASK 0xaaaa
#define FIFO_STAT_REG(n) (0x0100 + 0x4 * (n))
#define FIFO_STAT_MASK GENMASK(0, 0)
#define MSG_STAT_REG(n) (0x0140 + 0x4 * (n))
#define MSG_STAT_MASK GENMASK(2, 0)
#define MSG_DATA_REG(n) (0x0180 + 0x4 * (n))
#define mbox_dbg(mbox, ...) dev_dbg((mbox)->controller.dev, __VA_ARGS__)
struct sun6i_msgbox {
struct mbox_controller controller;
struct clk *clk;
spinlock_t lock;
void __iomem *regs;
};
static bool sun6i_msgbox_last_tx_done(struct mbox_chan *chan);
static bool sun6i_msgbox_peek_data(struct mbox_chan *chan);
static inline int channel_number(struct mbox_chan *chan)
{
return chan - chan->mbox->chans;
}
static inline struct sun6i_msgbox *to_sun6i_msgbox(struct mbox_chan *chan)
{
return chan->con_priv;
}
static irqreturn_t sun6i_msgbox_irq(int irq, void *dev_id)
{
struct sun6i_msgbox *mbox = dev_id;
uint32_t status;
int n;
/* Only examine channels that are currently enabled. */
status = readl(mbox->regs + LOCAL_IRQ_EN_REG) &
readl(mbox->regs + LOCAL_IRQ_STAT_REG);
if (!(status & RX_IRQ_MASK))
return IRQ_NONE;
for (n = 0; n < NUM_CHANS; ++n) {
struct mbox_chan *chan = &mbox->controller.chans[n];
if (!(status & RX_IRQ(n)))
continue;
while (sun6i_msgbox_peek_data(chan)) {
uint32_t msg = readl(mbox->regs + MSG_DATA_REG(n));
mbox_dbg(mbox, "Channel %d received 0x%08x\n", n, msg);
mbox_chan_received_data(chan, &msg);
}
/* The IRQ can be cleared only once the FIFO is empty. */
writel(RX_IRQ(n), mbox->regs + LOCAL_IRQ_STAT_REG);
}
return IRQ_HANDLED;
}
static int sun6i_msgbox_send_data(struct mbox_chan *chan, void *data)
{
struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
int n = channel_number(chan);
uint32_t msg = *(uint32_t *)data;
/* Using a channel backwards gets the hardware into a bad state. */
if (WARN_ON_ONCE(!(readl(mbox->regs + CTRL_REG(n)) & CTRL_TX(n))))
return 0;
writel(msg, mbox->regs + MSG_DATA_REG(n));
mbox_dbg(mbox, "Channel %d sent 0x%08x\n", n, msg);
return 0;
}
static int sun6i_msgbox_startup(struct mbox_chan *chan)
{
struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
int n = channel_number(chan);
/* The coprocessor is responsible for setting channel directions. */
if (readl(mbox->regs + CTRL_REG(n)) & CTRL_RX(n)) {
/* Flush the receive FIFO. */
while (sun6i_msgbox_peek_data(chan))
readl(mbox->regs + MSG_DATA_REG(n));
writel(RX_IRQ(n), mbox->regs + LOCAL_IRQ_STAT_REG);
/* Enable the receive IRQ. */
spin_lock(&mbox->lock);
writel(readl(mbox->regs + LOCAL_IRQ_EN_REG) | RX_IRQ(n),
mbox->regs + LOCAL_IRQ_EN_REG);
spin_unlock(&mbox->lock);
}
mbox_dbg(mbox, "Channel %d startup complete\n", n);
return 0;
}
static void sun6i_msgbox_shutdown(struct mbox_chan *chan)
{
struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
int n = channel_number(chan);
if (readl(mbox->regs + CTRL_REG(n)) & CTRL_RX(n)) {
/* Disable the receive IRQ. */
spin_lock(&mbox->lock);
writel(readl(mbox->regs + LOCAL_IRQ_EN_REG) & ~RX_IRQ(n),
mbox->regs + LOCAL_IRQ_EN_REG);
spin_unlock(&mbox->lock);
/* Attempt to flush the FIFO until the IRQ is cleared. */
do {
while (sun6i_msgbox_peek_data(chan))
readl(mbox->regs + MSG_DATA_REG(n));
writel(RX_IRQ(n), mbox->regs + LOCAL_IRQ_STAT_REG);
} while (readl(mbox->regs + LOCAL_IRQ_STAT_REG) & RX_IRQ(n));
}
mbox_dbg(mbox, "Channel %d shutdown complete\n", n);
}
static bool sun6i_msgbox_last_tx_done(struct mbox_chan *chan)
{
struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
int n = channel_number(chan);
/*
* The hardware allows snooping on the remote user's IRQ statuses.
* We consider a message to be acknowledged only once the receive IRQ
* for that channel is cleared. Since the receive IRQ for a channel
* cannot be cleared until the FIFO for that channel is empty, this
* ensures that the message has actually been read. It also gives the
* recipient an opportunity to perform minimal processing before
* acknowledging the message.
*/
return !(readl(mbox->regs + REMOTE_IRQ_STAT_REG) & RX_IRQ(n));
}
static bool sun6i_msgbox_peek_data(struct mbox_chan *chan)
{
struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
int n = channel_number(chan);
return readl(mbox->regs + MSG_STAT_REG(n)) & MSG_STAT_MASK;
}
static const struct mbox_chan_ops sun6i_msgbox_chan_ops = {
.send_data = sun6i_msgbox_send_data,
.startup = sun6i_msgbox_startup,
.shutdown = sun6i_msgbox_shutdown,
.last_tx_done = sun6i_msgbox_last_tx_done,
.peek_data = sun6i_msgbox_peek_data,
};
static int sun6i_msgbox_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mbox_chan *chans;
struct reset_control *reset;
struct sun6i_msgbox *mbox;
int i, ret;
mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
if (!mbox)
return -ENOMEM;
chans = devm_kcalloc(dev, NUM_CHANS, sizeof(*chans), GFP_KERNEL);
if (!chans)
return -ENOMEM;
for (i = 0; i < NUM_CHANS; ++i)
chans[i].con_priv = mbox;
mbox->clk = devm_clk_get(dev, NULL);
if (IS_ERR(mbox->clk)) {
ret = PTR_ERR(mbox->clk);
dev_err(dev, "Failed to get clock: %d\n", ret);
return ret;
}
ret = clk_prepare_enable(mbox->clk);
if (ret) {
dev_err(dev, "Failed to enable clock: %d\n", ret);
return ret;
}
reset = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(reset)) {
ret = PTR_ERR(reset);
dev_err(dev, "Failed to get reset control: %d\n", ret);
goto err_disable_unprepare;
}
/*
* NOTE: We rely on platform firmware to preconfigure the channel
* directions, and we share this hardware block with other firmware
* that runs concurrently with Linux (e.g. a trusted monitor).
*
* Therefore, we do *not* assert the reset line if probing fails or
* when removing the device.
*/
ret = reset_control_deassert(reset);
if (ret) {
dev_err(dev, "Failed to deassert reset: %d\n", ret);
goto err_disable_unprepare;
}
mbox->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->regs)) {
ret = PTR_ERR(mbox->regs);
dev_err(dev, "Failed to map MMIO resource: %d\n", ret);
goto err_disable_unprepare;
}
/* Disable all IRQs for this end of the msgbox. */
writel(0, mbox->regs + LOCAL_IRQ_EN_REG);
ret = devm_request_irq(dev, irq_of_parse_and_map(dev->of_node, 0),
sun6i_msgbox_irq, 0, dev_name(dev), mbox);
if (ret) {
dev_err(dev, "Failed to register IRQ handler: %d\n", ret);
goto err_disable_unprepare;
}
mbox->controller.dev = dev;
mbox->controller.ops = &sun6i_msgbox_chan_ops;
mbox->controller.chans = chans;
mbox->controller.num_chans = NUM_CHANS;
mbox->controller.txdone_irq = false;
mbox->controller.txdone_poll = true;
mbox->controller.txpoll_period = 5;
spin_lock_init(&mbox->lock);
platform_set_drvdata(pdev, mbox);
ret = mbox_controller_register(&mbox->controller);
if (ret) {
dev_err(dev, "Failed to register controller: %d\n", ret);
goto err_disable_unprepare;
}
return 0;
err_disable_unprepare:
clk_disable_unprepare(mbox->clk);
return ret;
}
static int sun6i_msgbox_remove(struct platform_device *pdev)
{
struct sun6i_msgbox *mbox = platform_get_drvdata(pdev);
mbox_controller_unregister(&mbox->controller);
/* See the comment in sun6i_msgbox_probe about the reset line. */
clk_disable_unprepare(mbox->clk);
return 0;
}
static const struct of_device_id sun6i_msgbox_of_match[] = {
{ .compatible = "allwinner,sun6i-a31-msgbox", },
{},
};
MODULE_DEVICE_TABLE(of, sun6i_msgbox_of_match);
static struct platform_driver sun6i_msgbox_driver = {
.driver = {
.name = "sun6i-msgbox",
.of_match_table = sun6i_msgbox_of_match,
},
.probe = sun6i_msgbox_probe,
.remove = sun6i_msgbox_remove,
};
module_platform_driver(sun6i_msgbox_driver);
MODULE_AUTHOR("Samuel Holland <[email protected]>");
MODULE_DESCRIPTION("Allwinner sun6i/sun8i/sun9i/sun50i Message Box");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/mailbox/sun6i-msgbox.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Mailbox: Common code for Mailbox controllers and users
*
* Copyright (C) 2013-2014 Linaro Ltd.
* Author: Jassi Brar <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/bitops.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>
#include "mailbox.h"
static LIST_HEAD(mbox_cons);
static DEFINE_MUTEX(con_mutex);
static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
{
int idx;
unsigned long flags;
spin_lock_irqsave(&chan->lock, flags);
/* See if there is any space left */
if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
spin_unlock_irqrestore(&chan->lock, flags);
return -ENOBUFS;
}
idx = chan->msg_free;
chan->msg_data[idx] = mssg;
chan->msg_count++;
if (idx == MBOX_TX_QUEUE_LEN - 1)
chan->msg_free = 0;
else
chan->msg_free++;
spin_unlock_irqrestore(&chan->lock, flags);
return idx;
}
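/*
 * Note on the ring layout used above and in msg_submit() below: msg_free is
 * the index of the next free slot and msg_count the number of queued
 * messages, so the oldest pending message lives at
 * (msg_free - msg_count) mod MBOX_TX_QUEUE_LEN, which is exactly the index
 * msg_submit() reconstructs before handing that message to the controller.
 */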
static void msg_submit(struct mbox_chan *chan)
{
unsigned count, idx;
unsigned long flags;
void *data;
int err = -EBUSY;
spin_lock_irqsave(&chan->lock, flags);
if (!chan->msg_count || chan->active_req)
goto exit;
count = chan->msg_count;
idx = chan->msg_free;
if (idx >= count)
idx -= count;
else
idx += MBOX_TX_QUEUE_LEN - count;
data = chan->msg_data[idx];
if (chan->cl->tx_prepare)
chan->cl->tx_prepare(chan->cl, data);
/* Try to submit a message to the MBOX controller */
err = chan->mbox->ops->send_data(chan, data);
if (!err) {
chan->active_req = data;
chan->msg_count--;
}
exit:
spin_unlock_irqrestore(&chan->lock, flags);
if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
/* kick start the timer immediately to avoid delays */
spin_lock_irqsave(&chan->mbox->poll_hrt_lock, flags);
hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
spin_unlock_irqrestore(&chan->mbox->poll_hrt_lock, flags);
}
}
static void tx_tick(struct mbox_chan *chan, int r)
{
unsigned long flags;
void *mssg;
spin_lock_irqsave(&chan->lock, flags);
mssg = chan->active_req;
chan->active_req = NULL;
spin_unlock_irqrestore(&chan->lock, flags);
/* Submit next message */
msg_submit(chan);
if (!mssg)
return;
/* Notify the client */
if (chan->cl->tx_done)
chan->cl->tx_done(chan->cl, mssg, r);
if (r != -ETIME && chan->cl->tx_block)
complete(&chan->tx_complete);
}
static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
{
struct mbox_controller *mbox =
container_of(hrtimer, struct mbox_controller, poll_hrt);
bool txdone, resched = false;
int i;
unsigned long flags;
for (i = 0; i < mbox->num_chans; i++) {
struct mbox_chan *chan = &mbox->chans[i];
if (chan->active_req && chan->cl) {
txdone = chan->mbox->ops->last_tx_done(chan);
if (txdone)
tx_tick(chan, 0);
else
resched = true;
}
}
if (resched) {
spin_lock_irqsave(&mbox->poll_hrt_lock, flags);
if (!hrtimer_is_queued(hrtimer))
hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
spin_unlock_irqrestore(&mbox->poll_hrt_lock, flags);
return HRTIMER_RESTART;
}
return HRTIMER_NORESTART;
}
/**
* mbox_chan_received_data - A way for controller driver to push data
* received from remote to the upper layer.
* @chan: Pointer to the mailbox channel on which RX happened.
* @mssg: Client specific message typecasted as void *
*
* After startup and before shutdown any data received on the chan
* is passed on to the API via atomic mbox_chan_received_data().
* The controller should ACK the RX only after this call returns.
*/
void mbox_chan_received_data(struct mbox_chan *chan, void *mssg)
{
	/* No buffering of the received data */
if (chan->cl->rx_callback)
chan->cl->rx_callback(chan->cl, mssg);
}
EXPORT_SYMBOL_GPL(mbox_chan_received_data);
/**
* mbox_chan_txdone - A way for controller driver to notify the
* framework that the last TX has completed.
* @chan: Pointer to the mailbox chan on which TX happened.
* @r: Status of last TX - OK or ERROR
*
* The controller that has IRQ for TX ACK calls this atomic API
* to tick the TX state machine. It works only if txdone_irq
* is set by the controller.
*/
void mbox_chan_txdone(struct mbox_chan *chan, int r)
{
if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
dev_err(chan->mbox->dev,
"Controller can't run the TX ticker\n");
return;
}
tx_tick(chan, r);
}
EXPORT_SYMBOL_GPL(mbox_chan_txdone);
/**
* mbox_client_txdone - The way for a client to run the TX state machine.
* @chan: Mailbox channel assigned to this client.
* @r: Success status of last transmission.
*
 * The client/protocol has received an 'ACK' packet and notifies the API
 * that the last packet was sent successfully. This only works
* if the controller can't sense TX-Done.
*/
void mbox_client_txdone(struct mbox_chan *chan, int r)
{
if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
dev_err(chan->mbox->dev, "Client can't run the TX ticker\n");
return;
}
tx_tick(chan, r);
}
EXPORT_SYMBOL_GPL(mbox_client_txdone);
/**
* mbox_client_peek_data - A way for client driver to pull data
* received from remote by the controller.
* @chan: Mailbox channel assigned to this client.
*
 * A poke to the controller driver to check for any received data.
 * The data is actually passed on to the client via
 * mbox_chan_received_data().
* The call can be made from atomic context, so the controller's
* implementation of peek_data() must not sleep.
*
* Return: True, if controller has, and is going to push after this,
* some data.
* False, if controller doesn't have any data to be read.
*/
bool mbox_client_peek_data(struct mbox_chan *chan)
{
if (chan->mbox->ops->peek_data)
return chan->mbox->ops->peek_data(chan);
return false;
}
EXPORT_SYMBOL_GPL(mbox_client_peek_data);
/**
* mbox_send_message - For client to submit a message to be
* sent to the remote.
* @chan: Mailbox channel assigned to this client.
 * @mssg: Client specific message typecasted as void *.
*
* For client to submit data to the controller destined for a remote
* processor. If the client had set 'tx_block', the call will return
* either when the remote receives the data or when 'tx_tout' millisecs
* run out.
* In non-blocking mode, the requests are buffered by the API and a
* non-negative token is returned for each queued request. If the request
* is not queued, a negative token is returned. Upon failure or successful
* TX, the API calls 'tx_done' from atomic context, from which the client
* could submit yet another request.
* The pointer to message should be preserved until it is sent
* over the chan, i.e, tx_done() is made.
* This function could be called from atomic context as it simply
* queues the data and returns a token against the request.
*
* Return: Non-negative integer for successful submission (non-blocking mode)
* or transmission over chan (blocking mode).
* Negative value denotes failure.
*/
int mbox_send_message(struct mbox_chan *chan, void *mssg)
{
int t;
if (!chan || !chan->cl)
return -EINVAL;
t = add_to_rbuf(chan, mssg);
if (t < 0) {
dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n");
return t;
}
msg_submit(chan);
if (chan->cl->tx_block) {
unsigned long wait;
int ret;
if (!chan->cl->tx_tout) /* wait forever */
wait = msecs_to_jiffies(3600000);
else
wait = msecs_to_jiffies(chan->cl->tx_tout);
ret = wait_for_completion_timeout(&chan->tx_complete, wait);
if (ret == 0) {
t = -ETIME;
tx_tick(chan, t);
}
}
return t;
}
EXPORT_SYMBOL_GPL(mbox_send_message);
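/*
 * Minimal client-side sketch of the TX path documented above. The device
 * pointer, the channel name "tx" and the payload layout are hypothetical;
 * a real client supplies its own callbacks and message format.
 */
static void example_tx_done(struct mbox_client *cl, void *mssg, int r)
{
	/* Called from atomic context once the controller reports TX done. */
	if (r)
		dev_warn(cl->dev, "example message failed: %d\n", r);
}

static int __maybe_unused example_send(struct device *dev, u32 *payload)
{
	struct mbox_client cl = {
		.dev		= dev,
		.tx_done	= example_tx_done,
		.tx_block	= true,		/* sleep until TX completes... */
		.tx_tout	= 500,		/* ...or 500 ms elapse */
	};
	struct mbox_chan *chan;
	int ret;

	chan = mbox_request_channel_byname(&cl, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* The payload must stay valid until tx_done() has been called. */
	ret = mbox_send_message(chan, payload);
	mbox_free_channel(chan);
	return ret < 0 ? ret : 0;
}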
/**
* mbox_flush - flush a mailbox channel
* @chan: mailbox channel to flush
* @timeout: time, in milliseconds, to allow the flush operation to succeed
*
* Mailbox controllers that need to work in atomic context can implement the
* ->flush() callback to busy loop until a transmission has been completed.
* The implementation must call mbox_chan_txdone() upon success. Clients can
* call the mbox_flush() function at any time after mbox_send_message() to
* flush the transmission. After the function returns success, the mailbox
* transmission is guaranteed to have completed.
*
* Returns: 0 on success or a negative error code on failure.
*/
int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
int ret;
if (!chan->mbox->ops->flush)
return -ENOTSUPP;
ret = chan->mbox->ops->flush(chan, timeout);
if (ret < 0)
tx_tick(chan, ret);
return ret;
}
EXPORT_SYMBOL_GPL(mbox_flush);
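/*
 * Sketch of the flush usage described above, for a caller that cannot
 * sleep (the client must have tx_block == false here). The channel is
 * assumed to have been bound earlier; the 100 ms timeout is an arbitrary
 * illustrative value.
 */
static int __maybe_unused example_send_atomic(struct mbox_chan *chan,
					      u32 *payload)
{
	int ret;

	ret = mbox_send_message(chan, payload);
	if (ret < 0)
		return ret;

	/* Busy-loop in the controller's ->flush() until the TX completes. */
	return mbox_flush(chan, 100);
}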
static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
struct device *dev = cl->dev;
unsigned long flags;
int ret;
if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) {
dev_dbg(dev, "%s: mailbox not free\n", __func__);
return -EBUSY;
}
spin_lock_irqsave(&chan->lock, flags);
chan->msg_free = 0;
chan->msg_count = 0;
chan->active_req = NULL;
chan->cl = cl;
init_completion(&chan->tx_complete);
if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
chan->txdone_method = TXDONE_BY_ACK;
spin_unlock_irqrestore(&chan->lock, flags);
if (chan->mbox->ops->startup) {
ret = chan->mbox->ops->startup(chan);
if (ret) {
dev_err(dev, "Unable to startup the chan (%d)\n", ret);
mbox_free_channel(chan);
return ret;
}
}
return 0;
}
/**
* mbox_bind_client - Request a mailbox channel.
* @chan: The mailbox channel to bind the client to.
* @cl: Identity of the client requesting the channel.
*
* The Client specifies its requirements and capabilities while asking for
* a mailbox channel. It can't be called from atomic context.
* The channel is exclusively allocated and can't be used by another
* client before the owner calls mbox_free_channel.
* After assignment, any packet received on this channel will be
* handed over to the client via the 'rx_callback'.
 * The framework holds a reference to the client, so the mbox_client
 * structure shouldn't be modified until mbox_free_channel() returns.
*
* Return: 0 if the channel was assigned to the client successfully.
* <0 for request failure.
*/
int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
int ret;
mutex_lock(&con_mutex);
ret = __mbox_bind_client(chan, cl);
mutex_unlock(&con_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(mbox_bind_client);
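/*
 * Sketch of the direct binding path above, for code that already holds a
 * controller handle and knows which channel it wants (no "mboxes" device
 * tree lookup). The controller pointer, channel index and client are all
 * hypothetical.
 */
static int __maybe_unused example_bind(struct mbox_controller *ctrl,
				       struct mbox_client *cl)
{
	/* Claim channel 0 of the controller exclusively for this client. */
	return mbox_bind_client(&ctrl->chans[0], cl);
}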
/**
* mbox_request_channel - Request a mailbox channel.
* @cl: Identity of the client requesting the channel.
* @index: Index of mailbox specifier in 'mboxes' property.
*
* The Client specifies its requirements and capabilities while asking for
* a mailbox channel. It can't be called from atomic context.
* The channel is exclusively allocated and can't be used by another
* client before the owner calls mbox_free_channel.
* After assignment, any packet received on this channel will be
* handed over to the client via the 'rx_callback'.
 * The framework holds a reference to the client, so the mbox_client
 * structure shouldn't be modified until mbox_free_channel() returns.
*
* Return: Pointer to the channel assigned to the client if successful.
* ERR_PTR for request failure.
*/
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
{
struct device *dev = cl->dev;
struct mbox_controller *mbox;
struct of_phandle_args spec;
struct mbox_chan *chan;
int ret;
if (!dev || !dev->of_node) {
pr_debug("%s: No owner device node\n", __func__);
return ERR_PTR(-ENODEV);
}
mutex_lock(&con_mutex);
if (of_parse_phandle_with_args(dev->of_node, "mboxes",
"#mbox-cells", index, &spec)) {
dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
mutex_unlock(&con_mutex);
return ERR_PTR(-ENODEV);
}
chan = ERR_PTR(-EPROBE_DEFER);
list_for_each_entry(mbox, &mbox_cons, node)
if (mbox->dev->of_node == spec.np) {
chan = mbox->of_xlate(mbox, &spec);
if (!IS_ERR(chan))
break;
}
of_node_put(spec.np);
if (IS_ERR(chan)) {
mutex_unlock(&con_mutex);
return chan;
}
ret = __mbox_bind_client(chan, cl);
if (ret)
chan = ERR_PTR(ret);
mutex_unlock(&con_mutex);
return chan;
}
EXPORT_SYMBOL_GPL(mbox_request_channel);
struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
const char *name)
{
struct device_node *np = cl->dev->of_node;
struct property *prop;
const char *mbox_name;
int index = 0;
if (!np) {
dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
return ERR_PTR(-EINVAL);
}
if (!of_get_property(np, "mbox-names", NULL)) {
dev_err(cl->dev,
"%s() requires an \"mbox-names\" property\n", __func__);
return ERR_PTR(-EINVAL);
}
of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
if (!strncmp(name, mbox_name, strlen(name)))
return mbox_request_channel(cl, index);
index++;
}
dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
__func__, name);
return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
/**
* mbox_free_channel - The client relinquishes control of a mailbox
* channel by this call.
* @chan: The mailbox channel to be freed.
*/
void mbox_free_channel(struct mbox_chan *chan)
{
unsigned long flags;
if (!chan || !chan->cl)
return;
if (chan->mbox->ops->shutdown)
chan->mbox->ops->shutdown(chan);
/* The queued TX requests are simply aborted, no callbacks are made */
spin_lock_irqsave(&chan->lock, flags);
chan->cl = NULL;
chan->active_req = NULL;
if (chan->txdone_method == TXDONE_BY_ACK)
chan->txdone_method = TXDONE_BY_POLL;
module_put(chan->mbox->dev->driver->owner);
spin_unlock_irqrestore(&chan->lock, flags);
}
EXPORT_SYMBOL_GPL(mbox_free_channel);
static struct mbox_chan *
of_mbox_index_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *sp)
{
int ind = sp->args[0];
if (ind >= mbox->num_chans)
return ERR_PTR(-EINVAL);
return &mbox->chans[ind];
}
/**
* mbox_controller_register - Register the mailbox controller
* @mbox: Pointer to the mailbox controller.
*
 * The controller driver registers its communication channels.
*/
int mbox_controller_register(struct mbox_controller *mbox)
{
int i, txdone;
/* Sanity check */
if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans)
return -EINVAL;
if (mbox->txdone_irq)
txdone = TXDONE_BY_IRQ;
else if (mbox->txdone_poll)
txdone = TXDONE_BY_POLL;
else /* It has to be ACK then */
txdone = TXDONE_BY_ACK;
if (txdone == TXDONE_BY_POLL) {
if (!mbox->ops->last_tx_done) {
dev_err(mbox->dev, "last_tx_done method is absent\n");
return -EINVAL;
}
hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
mbox->poll_hrt.function = txdone_hrtimer;
spin_lock_init(&mbox->poll_hrt_lock);
}
for (i = 0; i < mbox->num_chans; i++) {
struct mbox_chan *chan = &mbox->chans[i];
chan->cl = NULL;
chan->mbox = mbox;
chan->txdone_method = txdone;
spin_lock_init(&chan->lock);
}
if (!mbox->of_xlate)
mbox->of_xlate = of_mbox_index_xlate;
mutex_lock(&con_mutex);
list_add_tail(&mbox->node, &mbox_cons);
mutex_unlock(&con_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(mbox_controller_register);
/**
* mbox_controller_unregister - Unregister the mailbox controller
* @mbox: Pointer to the mailbox controller.
*/
void mbox_controller_unregister(struct mbox_controller *mbox)
{
int i;
if (!mbox)
return;
mutex_lock(&con_mutex);
list_del(&mbox->node);
for (i = 0; i < mbox->num_chans; i++)
mbox_free_channel(&mbox->chans[i]);
if (mbox->txdone_poll)
hrtimer_cancel(&mbox->poll_hrt);
mutex_unlock(&con_mutex);
}
EXPORT_SYMBOL_GPL(mbox_controller_unregister);
static void __devm_mbox_controller_unregister(struct device *dev, void *res)
{
struct mbox_controller **mbox = res;
mbox_controller_unregister(*mbox);
}
static int devm_mbox_controller_match(struct device *dev, void *res, void *data)
{
struct mbox_controller **mbox = res;
if (WARN_ON(!mbox || !*mbox))
return 0;
return *mbox == data;
}
/**
* devm_mbox_controller_register() - managed mbox_controller_register()
* @dev: device owning the mailbox controller being registered
* @mbox: mailbox controller being registered
*
* This function adds a device-managed resource that will make sure that the
* mailbox controller, which is registered using mbox_controller_register()
* as part of this function, will be unregistered along with the rest of
* device-managed resources upon driver probe failure or driver removal.
*
* Returns 0 on success or a negative error code on failure.
*/
int devm_mbox_controller_register(struct device *dev,
struct mbox_controller *mbox)
{
struct mbox_controller **ptr;
int err;
ptr = devres_alloc(__devm_mbox_controller_unregister, sizeof(*ptr),
GFP_KERNEL);
if (!ptr)
return -ENOMEM;
err = mbox_controller_register(mbox);
if (err < 0) {
devres_free(ptr);
return err;
}
devres_add(dev, ptr);
*ptr = mbox;
return 0;
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_register);
/**
* devm_mbox_controller_unregister() - managed mbox_controller_unregister()
* @dev: device owning the mailbox controller being unregistered
* @mbox: mailbox controller being unregistered
*
* This function unregisters the mailbox controller and removes the device-
* managed resource that was set up to automatically unregister the mailbox
* controller on driver probe failure or driver removal. It's typically not
* necessary to call this function.
*/
void devm_mbox_controller_unregister(struct device *dev, struct mbox_controller *mbox)
{
WARN_ON(devres_release(dev, __devm_mbox_controller_unregister,
devm_mbox_controller_match, mbox));
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_unregister);
|
linux-master
|
drivers/mailbox/mailbox.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* PS3 AV backend support.
*
* Copyright (C) 2007 Sony Computer Entertainment Inc.
* Copyright 2007 Sony Corp.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <asm/firmware.h>
#include <asm/ps3av.h>
#include <asm/ps3.h>
#include <video/cmdline.h>
#include "vuart.h"
#define BUFSIZE 4096 /* vuart buf size */
#define PS3AV_BUF_SIZE 512 /* max packet size */
static int safe_mode;
static int timeout = 5000; /* in msec ( 5 sec ) */
module_param(timeout, int, 0644);
static struct ps3av {
struct mutex mutex;
struct work_struct work;
struct completion done;
int open_count;
struct ps3_system_bus_device *dev;
int region;
struct ps3av_pkt_av_get_hw_conf av_hw_conf;
u32 av_port[PS3AV_AV_PORT_MAX + PS3AV_OPT_PORT_MAX];
u32 opt_port[PS3AV_OPT_PORT_MAX];
u32 head[PS3AV_HEAD_MAX];
u32 audio_port;
int ps3av_mode;
int ps3av_mode_old;
union {
struct ps3av_reply_hdr reply_hdr;
u8 raw[PS3AV_BUF_SIZE];
} recv_buf;
} *ps3av;
/* color space */
#define YUV444 PS3AV_CMD_VIDEO_CS_YUV444_8
#define RGB8 PS3AV_CMD_VIDEO_CS_RGB_8
/* format */
#define XRGB PS3AV_CMD_VIDEO_FMT_X8R8G8B8
/* aspect */
#define A_N PS3AV_CMD_AV_ASPECT_4_3
#define A_W PS3AV_CMD_AV_ASPECT_16_9
static const struct avset_video_mode {
u32 cs;
u32 fmt;
u32 vid;
u32 aspect;
u32 x;
u32 y;
} video_mode_table[] = {
{ 0, }, /* auto */
{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_480I, A_N, 720, 480},
{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_480P, A_N, 720, 480},
{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_720P_60HZ, A_W, 1280, 720},
{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080I_60HZ, A_W, 1920, 1080},
{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080P_60HZ, A_W, 1920, 1080},
{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_576I, A_N, 720, 576},
{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_576P, A_N, 720, 576},
{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_720P_50HZ, A_W, 1280, 720},
{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080I_50HZ, A_W, 1920, 1080},
{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080P_50HZ, A_W, 1920, 1080},
{ RGB8, XRGB, PS3AV_CMD_VIDEO_VID_WXGA, A_W, 1280, 768},
{ RGB8, XRGB, PS3AV_CMD_VIDEO_VID_SXGA, A_N, 1280, 1024},
{ RGB8, XRGB, PS3AV_CMD_VIDEO_VID_WUXGA, A_W, 1920, 1200},
};
/* supported CIDs */
static u32 cmd_table[] = {
/* init */
PS3AV_CID_AV_INIT,
PS3AV_CID_AV_FIN,
PS3AV_CID_VIDEO_INIT,
PS3AV_CID_AUDIO_INIT,
/* set */
PS3AV_CID_AV_ENABLE_EVENT,
PS3AV_CID_AV_DISABLE_EVENT,
PS3AV_CID_AV_VIDEO_CS,
PS3AV_CID_AV_VIDEO_MUTE,
PS3AV_CID_AV_VIDEO_DISABLE_SIG,
PS3AV_CID_AV_AUDIO_PARAM,
PS3AV_CID_AV_AUDIO_MUTE,
PS3AV_CID_AV_HDMI_MODE,
PS3AV_CID_AV_TV_MUTE,
PS3AV_CID_VIDEO_MODE,
PS3AV_CID_VIDEO_FORMAT,
PS3AV_CID_VIDEO_PITCH,
PS3AV_CID_AUDIO_MODE,
PS3AV_CID_AUDIO_MUTE,
PS3AV_CID_AUDIO_ACTIVE,
PS3AV_CID_AUDIO_INACTIVE,
PS3AV_CID_AVB_PARAM,
/* get */
PS3AV_CID_AV_GET_HW_CONF,
PS3AV_CID_AV_GET_MONITOR_INFO,
/* event */
PS3AV_CID_EVENT_UNPLUGGED,
PS3AV_CID_EVENT_PLUGGED,
PS3AV_CID_EVENT_HDCP_DONE,
PS3AV_CID_EVENT_HDCP_FAIL,
PS3AV_CID_EVENT_HDCP_AUTH,
PS3AV_CID_EVENT_HDCP_ERROR,
0
};
#define PS3AV_EVENT_CMD_MASK 0x10000000
#define PS3AV_EVENT_ID_MASK 0x0000ffff
#define PS3AV_CID_MASK 0xffffffff
#define PS3AV_REPLY_BIT 0x80000000
#define ps3av_event_get_port_id(cid) (((cid) >> 16) & 0xff)
static u32 *ps3av_search_cmd_table(u32 cid, u32 mask)
{
u32 *table;
int i;
table = cmd_table;
for (i = 0;; table++, i++) {
if ((*table & mask) == (cid & mask))
break;
if (*table == 0)
return NULL;
}
return table;
}
static int ps3av_parse_event_packet(const struct ps3av_reply_hdr *hdr)
{
u32 *table;
if (hdr->cid & PS3AV_EVENT_CMD_MASK) {
table = ps3av_search_cmd_table(hdr->cid, PS3AV_EVENT_CMD_MASK);
if (table)
dev_dbg(&ps3av->dev->core,
"recv event packet cid:%08x port:0x%x size:%d\n",
hdr->cid, ps3av_event_get_port_id(hdr->cid),
hdr->size);
else
printk(KERN_ERR
"%s: failed event packet, cid:%08x size:%d\n",
__func__, hdr->cid, hdr->size);
return 1; /* receive event packet */
}
return 0;
}
#define POLLING_INTERVAL 25 /* in msec */
static int ps3av_vuart_write(struct ps3_system_bus_device *dev,
const void *buf, unsigned long size)
{
int error;
dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__);
error = ps3_vuart_write(dev, buf, size);
dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
return error ? error : size;
}
static int ps3av_vuart_read(struct ps3_system_bus_device *dev, void *buf,
unsigned long size, int timeout)
{
int error;
int loopcnt = 0;
dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__);
timeout = (timeout + POLLING_INTERVAL - 1) / POLLING_INTERVAL;
while (loopcnt++ <= timeout) {
error = ps3_vuart_read(dev, buf, size);
if (!error)
return size;
if (error != -EAGAIN) {
printk(KERN_ERR "%s: ps3_vuart_read failed %d\n",
__func__, error);
return error;
}
msleep(POLLING_INTERVAL);
}
return -EWOULDBLOCK;
}
static int ps3av_send_cmd_pkt(const struct ps3av_send_hdr *send_buf,
struct ps3av_reply_hdr *recv_buf, int write_len,
int read_len)
{
int res;
u32 cmd;
int event;
if (!ps3av)
return -ENODEV;
/* send pkt */
res = ps3av_vuart_write(ps3av->dev, send_buf, write_len);
if (res < 0) {
dev_warn(&ps3av->dev->core,
"%s:%d: ps3av_vuart_write() failed: %s\n", __func__,
__LINE__, ps3_result(res));
return res;
}
/* recv pkt */
cmd = send_buf->cid;
do {
/* read header */
res = ps3av_vuart_read(ps3av->dev, recv_buf, PS3AV_HDR_SIZE,
timeout);
if (res != PS3AV_HDR_SIZE) {
dev_warn(&ps3av->dev->core,
"%s:%d: ps3av_vuart_read() failed: %s\n", __func__,
__LINE__, ps3_result(res));
return res;
}
/* read body */
res = ps3av_vuart_read(ps3av->dev, &recv_buf->cid,
recv_buf->size, timeout);
if (res < 0) {
dev_warn(&ps3av->dev->core,
"%s:%d: ps3av_vuart_read() failed: %s\n", __func__,
__LINE__, ps3_result(res));
return res;
}
res += PS3AV_HDR_SIZE; /* total len */
event = ps3av_parse_event_packet(recv_buf);
/* ret > 0 event packet */
} while (event);
if ((cmd | PS3AV_REPLY_BIT) != recv_buf->cid) {
dev_warn(&ps3av->dev->core, "%s:%d: reply err: %x\n", __func__,
__LINE__, recv_buf->cid);
return -EINVAL;
}
return 0;
}
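/*
 * Reply matching, for reference: the AV backend echoes the request CID with
 * PS3AV_REPLY_BIT set, so the loop above first drains any asynchronous
 * event packets (ps3av_parse_event_packet() returns nonzero for those) and
 * then checks that the header it kept really is the reply to the command
 * that was just sent.
 */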
static int ps3av_process_reply_packet(struct ps3av_send_hdr *cmd_buf,
const struct ps3av_reply_hdr *recv_buf,
int user_buf_size)
{
int return_len;
if (recv_buf->version != PS3AV_VERSION) {
dev_dbg(&ps3av->dev->core, "reply_packet invalid version:%x\n",
recv_buf->version);
return -EFAULT;
}
return_len = recv_buf->size + PS3AV_HDR_SIZE;
if (return_len > user_buf_size)
return_len = user_buf_size;
memcpy(cmd_buf, recv_buf, return_len);
return 0; /* success */
}
void ps3av_set_hdr(u32 cid, u16 size, struct ps3av_send_hdr *hdr)
{
hdr->version = PS3AV_VERSION;
hdr->size = size - PS3AV_HDR_SIZE;
hdr->cid = cid;
}
int ps3av_do_pkt(u32 cid, u16 send_len, size_t usr_buf_size,
struct ps3av_send_hdr *buf)
{
int res = 0;
u32 *table;
BUG_ON(!ps3av);
mutex_lock(&ps3av->mutex);
table = ps3av_search_cmd_table(cid, PS3AV_CID_MASK);
BUG_ON(!table);
BUG_ON(send_len < PS3AV_HDR_SIZE);
BUG_ON(usr_buf_size < send_len);
BUG_ON(usr_buf_size > PS3AV_BUF_SIZE);
/* create header */
ps3av_set_hdr(cid, send_len, buf);
/* send packet via vuart */
res = ps3av_send_cmd_pkt(buf, &ps3av->recv_buf.reply_hdr, send_len,
usr_buf_size);
if (res < 0) {
printk(KERN_ERR
"%s: ps3av_send_cmd_pkt() failed (result=%d)\n",
__func__, res);
goto err;
}
/* process reply packet */
res = ps3av_process_reply_packet(buf, &ps3av->recv_buf.reply_hdr,
usr_buf_size);
if (res < 0) {
printk(KERN_ERR "%s: put_return_status() failed (result=%d)\n",
__func__, res);
goto err;
}
mutex_unlock(&ps3av->mutex);
return 0;
err:
mutex_unlock(&ps3av->mutex);
printk(KERN_ERR "%s: failed cid:%x res:%d\n", __func__, cid, res);
return res;
}
static int ps3av_set_av_video_mute(u32 mute)
{
int i, num_of_av_port, res;
num_of_av_port = ps3av->av_hw_conf.num_of_hdmi +
ps3av->av_hw_conf.num_of_avmulti;
/* video mute on */
for (i = 0; i < num_of_av_port; i++) {
res = ps3av_cmd_av_video_mute(1, &ps3av->av_port[i], mute);
if (res < 0)
return -1;
}
return 0;
}
static int ps3av_set_video_disable_sig(void)
{
int i, num_of_hdmi_port, num_of_av_port, res;
num_of_hdmi_port = ps3av->av_hw_conf.num_of_hdmi;
num_of_av_port = ps3av->av_hw_conf.num_of_hdmi +
ps3av->av_hw_conf.num_of_avmulti;
/* tv mute */
for (i = 0; i < num_of_hdmi_port; i++) {
res = ps3av_cmd_av_tv_mute(ps3av->av_port[i],
PS3AV_CMD_MUTE_ON);
if (res < 0)
return -1;
}
msleep(100);
	/* disable video signal */
for (i = 0; i < num_of_av_port; i++) {
res = ps3av_cmd_av_video_disable_sig(ps3av->av_port[i]);
if (res < 0)
return -1;
if (i < num_of_hdmi_port) {
res = ps3av_cmd_av_tv_mute(ps3av->av_port[i],
PS3AV_CMD_MUTE_OFF);
if (res < 0)
return -1;
}
}
msleep(300);
return 0;
}
static int ps3av_set_audio_mute(u32 mute)
{
int i, num_of_av_port, num_of_opt_port, res;
num_of_av_port = ps3av->av_hw_conf.num_of_hdmi +
ps3av->av_hw_conf.num_of_avmulti;
num_of_opt_port = ps3av->av_hw_conf.num_of_spdif;
for (i = 0; i < num_of_av_port; i++) {
res = ps3av_cmd_av_audio_mute(1, &ps3av->av_port[i], mute);
if (res < 0)
return -1;
}
for (i = 0; i < num_of_opt_port; i++) {
res = ps3av_cmd_audio_mute(1, &ps3av->opt_port[i], mute);
if (res < 0)
return -1;
}
return 0;
}
int ps3av_set_audio_mode(u32 ch, u32 fs, u32 word_bits, u32 format, u32 source)
{
struct ps3av_pkt_avb_param avb_param;
int i, num_of_audio, vid, res;
struct ps3av_pkt_audio_mode audio_mode;
u32 len = 0;
num_of_audio = ps3av->av_hw_conf.num_of_hdmi +
ps3av->av_hw_conf.num_of_avmulti +
ps3av->av_hw_conf.num_of_spdif;
avb_param.num_of_video_pkt = 0;
avb_param.num_of_audio_pkt = PS3AV_AVB_NUM_AUDIO; /* always 0 */
avb_param.num_of_av_video_pkt = 0;
avb_param.num_of_av_audio_pkt = ps3av->av_hw_conf.num_of_hdmi;
vid = video_mode_table[ps3av->ps3av_mode].vid;
/* audio mute */
ps3av_set_audio_mute(PS3AV_CMD_MUTE_ON);
/* audio inactive */
res = ps3av_cmd_audio_active(0, ps3av->audio_port);
if (res < 0)
dev_dbg(&ps3av->dev->core,
"ps3av_cmd_audio_active OFF failed\n");
/* audio_pkt */
for (i = 0; i < num_of_audio; i++) {
ps3av_cmd_set_audio_mode(&audio_mode, ps3av->av_port[i], ch,
fs, word_bits, format, source);
if (i < ps3av->av_hw_conf.num_of_hdmi) {
/* hdmi only */
len += ps3av_cmd_set_av_audio_param(&avb_param.buf[len],
ps3av->av_port[i],
&audio_mode, vid);
}
/* audio_mode pkt should be sent separately */
res = ps3av_cmd_audio_mode(&audio_mode);
if (res < 0)
dev_dbg(&ps3av->dev->core,
"ps3av_cmd_audio_mode failed, port:%x\n", i);
}
/* send command using avb pkt */
len += offsetof(struct ps3av_pkt_avb_param, buf);
res = ps3av_cmd_avb_param(&avb_param, len);
if (res < 0)
dev_dbg(&ps3av->dev->core, "ps3av_cmd_avb_param failed\n");
/* audio mute */
ps3av_set_audio_mute(PS3AV_CMD_MUTE_OFF);
/* audio active */
res = ps3av_cmd_audio_active(1, ps3av->audio_port);
if (res < 0)
dev_dbg(&ps3av->dev->core,
"ps3av_cmd_audio_active ON failed\n");
return 0;
}
EXPORT_SYMBOL_GPL(ps3av_set_audio_mode);
static int ps3av_set_videomode(void)
{
/* av video mute */
ps3av_set_av_video_mute(PS3AV_CMD_MUTE_ON);
/* wake up ps3avd to do the actual video mode setting */
schedule_work(&ps3av->work);
return 0;
}
static void ps3av_set_videomode_packet(u32 id)
{
struct ps3av_pkt_avb_param avb_param;
unsigned int i;
u32 len = 0, av_video_cs;
const struct avset_video_mode *video_mode;
int res;
video_mode = &video_mode_table[id & PS3AV_MODE_MASK];
avb_param.num_of_video_pkt = PS3AV_AVB_NUM_VIDEO; /* num of head */
avb_param.num_of_audio_pkt = 0;
avb_param.num_of_av_video_pkt = ps3av->av_hw_conf.num_of_hdmi +
ps3av->av_hw_conf.num_of_avmulti;
avb_param.num_of_av_audio_pkt = 0;
/* video_pkt */
for (i = 0; i < avb_param.num_of_video_pkt; i++)
len += ps3av_cmd_set_video_mode(&avb_param.buf[len],
ps3av->head[i], video_mode->vid,
video_mode->fmt, id);
/* av_video_pkt */
for (i = 0; i < avb_param.num_of_av_video_pkt; i++) {
if (id & PS3AV_MODE_DVI || id & PS3AV_MODE_RGB)
av_video_cs = RGB8;
else
av_video_cs = video_mode->cs;
#ifndef PS3AV_HDMI_YUV
if (ps3av->av_port[i] == PS3AV_CMD_AVPORT_HDMI_0 ||
ps3av->av_port[i] == PS3AV_CMD_AVPORT_HDMI_1)
av_video_cs = RGB8; /* use RGB for HDMI */
#endif
len += ps3av_cmd_set_av_video_cs(&avb_param.buf[len],
ps3av->av_port[i],
video_mode->vid, av_video_cs,
video_mode->aspect, id);
}
/* send command using avb pkt */
len += offsetof(struct ps3av_pkt_avb_param, buf);
res = ps3av_cmd_avb_param(&avb_param, len);
if (res == PS3AV_STATUS_NO_SYNC_HEAD)
printk(KERN_WARNING
"%s: Command failed. Please try your request again.\n",
__func__);
else if (res)
dev_dbg(&ps3av->dev->core, "ps3av_cmd_avb_param failed\n");
}
static void ps3av_set_videomode_cont(u32 id, u32 old_id)
{
static int vesa;
int res;
/* video signal off */
ps3av_set_video_disable_sig();
/*
* AV backend needs non-VESA mode setting at least one time
* when VESA mode is used.
*/
if (vesa == 0 && (id & PS3AV_MODE_MASK) >= PS3AV_MODE_WXGA) {
/* vesa mode */
ps3av_set_videomode_packet(PS3AV_MODE_480P);
}
vesa = 1;
/* Retail PS3 product doesn't support this */
if (id & PS3AV_MODE_HDCP_OFF) {
res = ps3av_cmd_av_hdmi_mode(PS3AV_CMD_AV_HDMI_HDCP_OFF);
if (res == PS3AV_STATUS_UNSUPPORTED_HDMI_MODE)
dev_dbg(&ps3av->dev->core, "Not supported\n");
else if (res)
dev_dbg(&ps3av->dev->core,
"ps3av_cmd_av_hdmi_mode failed\n");
} else if (old_id & PS3AV_MODE_HDCP_OFF) {
res = ps3av_cmd_av_hdmi_mode(PS3AV_CMD_AV_HDMI_MODE_NORMAL);
if (res < 0 && res != PS3AV_STATUS_UNSUPPORTED_HDMI_MODE)
dev_dbg(&ps3av->dev->core,
"ps3av_cmd_av_hdmi_mode failed\n");
}
ps3av_set_videomode_packet(id);
msleep(1500);
/* av video mute */
ps3av_set_av_video_mute(PS3AV_CMD_MUTE_OFF);
}
static void ps3avd(struct work_struct *work)
{
ps3av_set_videomode_cont(ps3av->ps3av_mode, ps3av->ps3av_mode_old);
complete(&ps3av->done);
}
#define SHIFT_50 0
#define SHIFT_60 4
#define SHIFT_VESA 8
static const struct {
unsigned mask:19;
unsigned id:4;
} ps3av_preferred_modes[] = {
{ PS3AV_RESBIT_WUXGA << SHIFT_VESA, PS3AV_MODE_WUXGA },
{ PS3AV_RESBIT_1920x1080P << SHIFT_60, PS3AV_MODE_1080P60 },
{ PS3AV_RESBIT_1920x1080P << SHIFT_50, PS3AV_MODE_1080P50 },
{ PS3AV_RESBIT_1920x1080I << SHIFT_60, PS3AV_MODE_1080I60 },
{ PS3AV_RESBIT_1920x1080I << SHIFT_50, PS3AV_MODE_1080I50 },
{ PS3AV_RESBIT_SXGA << SHIFT_VESA, PS3AV_MODE_SXGA },
{ PS3AV_RESBIT_WXGA << SHIFT_VESA, PS3AV_MODE_WXGA },
{ PS3AV_RESBIT_1280x720P << SHIFT_60, PS3AV_MODE_720P60 },
{ PS3AV_RESBIT_1280x720P << SHIFT_50, PS3AV_MODE_720P50 },
{ PS3AV_RESBIT_720x480P << SHIFT_60, PS3AV_MODE_480P },
{ PS3AV_RESBIT_720x576P << SHIFT_50, PS3AV_MODE_576P },
};
static enum ps3av_mode_num ps3av_resbit2id(u32 res_50, u32 res_60,
u32 res_vesa)
{
unsigned int i;
u32 res_all;
/*
* We mask off the resolution bits we care about and combine the
* results in one bitfield, so make sure there's no overlap
*/
BUILD_BUG_ON(PS3AV_RES_MASK_50 << SHIFT_50 &
PS3AV_RES_MASK_60 << SHIFT_60);
BUILD_BUG_ON(PS3AV_RES_MASK_50 << SHIFT_50 &
PS3AV_RES_MASK_VESA << SHIFT_VESA);
BUILD_BUG_ON(PS3AV_RES_MASK_60 << SHIFT_60 &
PS3AV_RES_MASK_VESA << SHIFT_VESA);
res_all = (res_50 & PS3AV_RES_MASK_50) << SHIFT_50 |
(res_60 & PS3AV_RES_MASK_60) << SHIFT_60 |
(res_vesa & PS3AV_RES_MASK_VESA) << SHIFT_VESA;
if (!res_all)
return 0;
for (i = 0; i < ARRAY_SIZE(ps3av_preferred_modes); i++)
if (res_all & ps3av_preferred_modes[i].mask)
return ps3av_preferred_modes[i].id;
return 0;
}
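/*
 * Worked example of the lookup above (values are illustrative): if the
 * monitor reports only PS3AV_RESBIT_1280x720P in its 60 Hz field, the
 * combined bitfield contains just that bit shifted by SHIFT_60, the table
 * walk skips the higher-resolution entries and matches
 * { PS3AV_RESBIT_1280x720P << SHIFT_60, PS3AV_MODE_720P60 }, so the
 * function returns PS3AV_MODE_720P60.
 */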
static enum ps3av_mode_num ps3av_hdmi_get_id(struct ps3av_info_monitor *info)
{
enum ps3av_mode_num id;
if (safe_mode)
return PS3AV_DEFAULT_HDMI_MODE_ID_REG_60;
/* check native resolution */
id = ps3av_resbit2id(info->res_50.native, info->res_60.native,
info->res_vesa.native);
if (id) {
pr_debug("%s: Using native mode %d\n", __func__, id);
return id;
}
/* check supported resolutions */
id = ps3av_resbit2id(info->res_50.res_bits, info->res_60.res_bits,
info->res_vesa.res_bits);
if (id) {
pr_debug("%s: Using supported mode %d\n", __func__, id);
return id;
}
if (ps3av->region & PS3AV_REGION_60)
id = PS3AV_DEFAULT_HDMI_MODE_ID_REG_60;
else
id = PS3AV_DEFAULT_HDMI_MODE_ID_REG_50;
pr_debug("%s: Using default mode %d\n", __func__, id);
return id;
}
static void ps3av_monitor_info_dump(
const struct ps3av_pkt_av_get_monitor_info *monitor_info)
{
const struct ps3av_info_monitor *info = &monitor_info->info;
const struct ps3av_info_audio *audio = info->audio;
char id[sizeof(info->monitor_id)*3+1];
int i;
pr_debug("Monitor Info: size %u\n", monitor_info->send_hdr.size);
pr_debug("avport: %02x\n", info->avport);
for (i = 0; i < sizeof(info->monitor_id); i++)
sprintf(&id[i*3], " %02x", info->monitor_id[i]);
pr_debug("monitor_id: %s\n", id);
pr_debug("monitor_type: %02x\n", info->monitor_type);
pr_debug("monitor_name: %.*s\n", (int)sizeof(info->monitor_name),
info->monitor_name);
/* resolution */
pr_debug("resolution_60: bits: %08x native: %08x\n",
info->res_60.res_bits, info->res_60.native);
pr_debug("resolution_50: bits: %08x native: %08x\n",
info->res_50.res_bits, info->res_50.native);
pr_debug("resolution_other: bits: %08x native: %08x\n",
info->res_other.res_bits, info->res_other.native);
pr_debug("resolution_vesa: bits: %08x native: %08x\n",
info->res_vesa.res_bits, info->res_vesa.native);
/* color space */
pr_debug("color space rgb: %02x\n", info->cs.rgb);
pr_debug("color space yuv444: %02x\n", info->cs.yuv444);
pr_debug("color space yuv422: %02x\n", info->cs.yuv422);
/* color info */
pr_debug("color info red: X %04x Y %04x\n", info->color.red_x,
info->color.red_y);
pr_debug("color info green: X %04x Y %04x\n", info->color.green_x,
info->color.green_y);
pr_debug("color info blue: X %04x Y %04x\n", info->color.blue_x,
info->color.blue_y);
pr_debug("color info white: X %04x Y %04x\n", info->color.white_x,
info->color.white_y);
pr_debug("color info gamma: %08x\n", info->color.gamma);
/* other info */
pr_debug("supported_AI: %02x\n", info->supported_ai);
pr_debug("speaker_info: %02x\n", info->speaker_info);
pr_debug("num of audio: %02x\n", info->num_of_audio_block);
/* audio block */
for (i = 0; i < info->num_of_audio_block; i++) {
pr_debug(
"audio[%d] type: %02x max_ch: %02x fs: %02x sbit: %02x\n",
i, audio->type, audio->max_num_of_ch, audio->fs,
audio->sbit);
audio++;
}
}
static const struct ps3av_monitor_quirk {
const char *monitor_name;
u32 clear_60;
} ps3av_monitor_quirks[] = {
{
.monitor_name = "DELL 2007WFP",
.clear_60 = PS3AV_RESBIT_1920x1080I
}, {
.monitor_name = "L226WTQ",
.clear_60 = PS3AV_RESBIT_1920x1080I |
PS3AV_RESBIT_1920x1080P
}, {
.monitor_name = "SyncMaster",
.clear_60 = PS3AV_RESBIT_1920x1080I
}
};
static void ps3av_fixup_monitor_info(struct ps3av_info_monitor *info)
{
unsigned int i;
const struct ps3av_monitor_quirk *quirk;
for (i = 0; i < ARRAY_SIZE(ps3av_monitor_quirks); i++) {
quirk = &ps3av_monitor_quirks[i];
if (!strncmp(info->monitor_name, quirk->monitor_name,
sizeof(info->monitor_name))) {
pr_info("%s: Applying quirk for %s\n", __func__,
quirk->monitor_name);
info->res_60.res_bits &= ~quirk->clear_60;
info->res_60.native &= ~quirk->clear_60;
break;
}
}
}
static int ps3av_auto_videomode(struct ps3av_pkt_av_get_hw_conf *av_hw_conf)
{
int i, res, id = 0, dvi = 0, rgb = 0;
struct ps3av_pkt_av_get_monitor_info monitor_info;
struct ps3av_info_monitor *info;
/* get mode id for hdmi */
for (i = 0; i < av_hw_conf->num_of_hdmi && !id; i++) {
res = ps3av_cmd_video_get_monitor_info(&monitor_info,
PS3AV_CMD_AVPORT_HDMI_0 +
i);
if (res < 0)
return -1;
ps3av_monitor_info_dump(&monitor_info);
info = &monitor_info.info;
ps3av_fixup_monitor_info(info);
switch (info->monitor_type) {
case PS3AV_MONITOR_TYPE_DVI:
dvi = PS3AV_MODE_DVI;
fallthrough;
case PS3AV_MONITOR_TYPE_HDMI:
id = ps3av_hdmi_get_id(info);
break;
}
}
if (!id) {
/* no HDMI interface or HDMI is off */
if (ps3av->region & PS3AV_REGION_60)
id = PS3AV_DEFAULT_AVMULTI_MODE_ID_REG_60;
else
id = PS3AV_DEFAULT_AVMULTI_MODE_ID_REG_50;
if (ps3av->region & PS3AV_REGION_RGB)
rgb = PS3AV_MODE_RGB;
pr_debug("%s: Using avmulti mode %d\n", __func__, id);
}
return id | dvi | rgb;
}
static int ps3av_get_hw_conf(struct ps3av *ps3av)
{
int i, j, k, res;
const struct ps3av_pkt_av_get_hw_conf *hw_conf;
/* get av_hw_conf */
res = ps3av_cmd_av_get_hw_conf(&ps3av->av_hw_conf);
if (res < 0)
return -1;
hw_conf = &ps3av->av_hw_conf;
pr_debug("av_h_conf: num of hdmi: %u\n", hw_conf->num_of_hdmi);
pr_debug("av_h_conf: num of avmulti: %u\n", hw_conf->num_of_avmulti);
pr_debug("av_h_conf: num of spdif: %u\n", hw_conf->num_of_spdif);
for (i = 0; i < PS3AV_HEAD_MAX; i++)
ps3av->head[i] = PS3AV_CMD_VIDEO_HEAD_A + i;
for (i = 0; i < PS3AV_OPT_PORT_MAX; i++)
ps3av->opt_port[i] = PS3AV_CMD_AVPORT_SPDIF_0 + i;
for (i = 0; i < hw_conf->num_of_hdmi; i++)
ps3av->av_port[i] = PS3AV_CMD_AVPORT_HDMI_0 + i;
for (j = 0; j < hw_conf->num_of_avmulti; j++)
ps3av->av_port[i + j] = PS3AV_CMD_AVPORT_AVMULTI_0 + j;
for (k = 0; k < hw_conf->num_of_spdif; k++)
ps3av->av_port[i + j + k] = PS3AV_CMD_AVPORT_SPDIF_0 + k;
/* set all audio port */
ps3av->audio_port = PS3AV_CMD_AUDIO_PORT_HDMI_0
| PS3AV_CMD_AUDIO_PORT_HDMI_1
| PS3AV_CMD_AUDIO_PORT_AVMULTI_0
| PS3AV_CMD_AUDIO_PORT_SPDIF_0 | PS3AV_CMD_AUDIO_PORT_SPDIF_1;
return 0;
}
/* set mode using id */
int ps3av_set_video_mode(int id)
{
int size;
u32 option;
size = ARRAY_SIZE(video_mode_table);
if ((id & PS3AV_MODE_MASK) > size - 1 || id < 0) {
dev_dbg(&ps3av->dev->core, "%s: error id :%d\n", __func__, id);
return -EINVAL;
}
/* auto mode */
option = id & ~PS3AV_MODE_MASK;
if ((id & PS3AV_MODE_MASK) == PS3AV_MODE_AUTO) {
id = ps3av_auto_videomode(&ps3av->av_hw_conf);
if (id < 1) {
printk(KERN_ERR "%s: invalid id :%d\n", __func__, id);
return -EINVAL;
}
id |= option;
}
/* set videomode */
wait_for_completion(&ps3av->done);
ps3av->ps3av_mode_old = ps3av->ps3av_mode;
ps3av->ps3av_mode = id;
if (ps3av_set_videomode())
ps3av->ps3av_mode = ps3av->ps3av_mode_old;
return 0;
}
EXPORT_SYMBOL_GPL(ps3av_set_video_mode);
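/*
 * Illustrative sketch (not part of the driver): requesting automatic mode
 * selection from a caller such as a framebuffer driver.  The id passed to
 * ps3av_set_video_mode() may carry the optional PS3AV_MODE_DVI and
 * PS3AV_MODE_RGB bits above PS3AV_MODE_MASK; PS3AV_MODE_AUTO asks
 * ps3av_auto_videomode() to pick a mode id from the monitor information.
 */
static int __maybe_unused example_request_auto_mode(void)
{
	/* Let the AV driver probe the monitor and choose the mode id. */
	return ps3av_set_video_mode(PS3AV_MODE_AUTO);
}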
int ps3av_get_auto_mode(void)
{
return ps3av_auto_videomode(&ps3av->av_hw_conf);
}
EXPORT_SYMBOL_GPL(ps3av_get_auto_mode);
int ps3av_get_mode(void)
{
return ps3av ? ps3av->ps3av_mode : 0;
}
EXPORT_SYMBOL_GPL(ps3av_get_mode);
/* get resolution by video_mode */
int ps3av_video_mode2res(u32 id, u32 *xres, u32 *yres)
{
int size;
id = id & PS3AV_MODE_MASK;
size = ARRAY_SIZE(video_mode_table);
if (id > size - 1 || id < 0) {
printk(KERN_ERR "%s: invalid mode %d\n", __func__, id);
return -EINVAL;
}
*xres = video_mode_table[id].x;
*yres = video_mode_table[id].y;
return 0;
}
EXPORT_SYMBOL_GPL(ps3av_video_mode2res);
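/*
 * Illustrative sketch (not part of the driver): querying the resolution of
 * the currently active mode.  ps3av_get_mode() returns 0 while no ps3av
 * device has been probed, so the sketch treats that as -ENODEV.  Option
 * bits above PS3AV_MODE_MASK are masked off inside ps3av_video_mode2res().
 */
static int __maybe_unused example_current_resolution(u32 *xres, u32 *yres)
{
	int id = ps3av_get_mode();

	if (!id)
		return -ENODEV;
	return ps3av_video_mode2res(id, xres, yres);
}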
/* mute */
int ps3av_video_mute(int mute)
{
return ps3av_set_av_video_mute(mute ? PS3AV_CMD_MUTE_ON
: PS3AV_CMD_MUTE_OFF);
}
EXPORT_SYMBOL_GPL(ps3av_video_mute);
/* mute analog output only */
int ps3av_audio_mute_analog(int mute)
{
int i, res;
for (i = 0; i < ps3av->av_hw_conf.num_of_avmulti; i++) {
res = ps3av_cmd_av_audio_mute(1,
&ps3av->av_port[i + ps3av->av_hw_conf.num_of_hdmi],
mute);
if (res < 0)
return -1;
}
return 0;
}
EXPORT_SYMBOL_GPL(ps3av_audio_mute_analog);
int ps3av_audio_mute(int mute)
{
return ps3av_set_audio_mute(mute ? PS3AV_CMD_MUTE_ON
: PS3AV_CMD_MUTE_OFF);
}
EXPORT_SYMBOL_GPL(ps3av_audio_mute);
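/*
 * Illustrative sketch (not part of the driver): muting audio and video
 * around a mode change, the way a display driver might avoid visible and
 * audible glitches.  Error handling is omitted for brevity.
 */
static void __maybe_unused example_quiet_mode_change(int new_id)
{
	ps3av_video_mute(1);
	ps3av_audio_mute(1);
	ps3av_set_video_mode(new_id);
	ps3av_audio_mute(0);
	ps3av_video_mute(0);
}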
static int ps3av_probe(struct ps3_system_bus_device *dev)
{
const char *mode_option;
int res;
int id;
dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__);
dev_dbg(&dev->core, " timeout=%d\n", timeout);
if (ps3av) {
dev_err(&dev->core, "Only one ps3av device is supported\n");
return -EBUSY;
}
ps3av = kzalloc(sizeof(*ps3av), GFP_KERNEL);
if (!ps3av)
return -ENOMEM;
mutex_init(&ps3av->mutex);
ps3av->ps3av_mode = PS3AV_MODE_AUTO;
ps3av->dev = dev;
INIT_WORK(&ps3av->work, ps3avd);
init_completion(&ps3av->done);
complete(&ps3av->done);
switch (ps3_os_area_get_av_multi_out()) {
case PS3_PARAM_AV_MULTI_OUT_NTSC:
ps3av->region = PS3AV_REGION_60;
break;
case PS3_PARAM_AV_MULTI_OUT_PAL_YCBCR:
case PS3_PARAM_AV_MULTI_OUT_SECAM:
ps3av->region = PS3AV_REGION_50;
break;
case PS3_PARAM_AV_MULTI_OUT_PAL_RGB:
ps3av->region = PS3AV_REGION_50 | PS3AV_REGION_RGB;
break;
default:
ps3av->region = PS3AV_REGION_60;
break;
}
/* init avsetting modules */
res = ps3av_cmd_init();
if (res < 0)
printk(KERN_ERR "%s: ps3av_cmd_init failed %d\n", __func__,
res);
ps3av_get_hw_conf(ps3av);
mode_option = video_get_options(NULL);
if (mode_option && !strcmp(mode_option, "safe"))
safe_mode = 1;
id = ps3av_auto_videomode(&ps3av->av_hw_conf);
if (id < 0) {
printk(KERN_ERR "%s: invalid id :%d\n", __func__, id);
res = -EINVAL;
goto fail;
}
safe_mode = 0;
mutex_lock(&ps3av->mutex);
ps3av->ps3av_mode = id;
mutex_unlock(&ps3av->mutex);
dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
return 0;
fail:
kfree(ps3av);
ps3av = NULL;
return res;
}
static int ps3av_remove(struct ps3_system_bus_device *dev)
{
dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__);
if (ps3av) {
ps3av_cmd_fin();
flush_work(&ps3av->work);
kfree(ps3av);
ps3av = NULL;
}
dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
return 0;
}
static void ps3av_shutdown(struct ps3_system_bus_device *dev)
{
dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__);
ps3av_remove(dev);
dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
}
static struct ps3_vuart_port_driver ps3av_driver = {
.core.match_id = PS3_MATCH_ID_AV_SETTINGS,
.core.core.name = "ps3_av",
.probe = ps3av_probe,
.remove = ps3av_remove,
.shutdown = ps3av_shutdown,
};
static int __init ps3av_module_init(void)
{
int error;
if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
return -ENODEV;
pr_debug(" -> %s:%d\n", __func__, __LINE__);
error = ps3_vuart_port_driver_register(&ps3av_driver);
if (error) {
printk(KERN_ERR
"%s: ps3_vuart_port_driver_register failed %d\n",
__func__, error);
return error;
}
pr_debug(" <- %s:%d\n", __func__, __LINE__);
return error;
}
static void __exit ps3av_module_exit(void)
{
pr_debug(" -> %s:%d\n", __func__, __LINE__);
ps3_vuart_port_driver_unregister(&ps3av_driver);
pr_debug(" <- %s:%d\n", __func__, __LINE__);
}
subsys_initcall(ps3av_module_init);
module_exit(ps3av_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("PS3 AV Settings Driver");
MODULE_AUTHOR("Sony Computer Entertainment Inc.");
MODULE_ALIAS(PS3_MODULE_ALIAS_AV_SETTINGS);
|
linux-master
|
drivers/ps3/ps3av.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* PS3 virtual uart
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <asm/ps3.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#include "vuart.h"
MODULE_AUTHOR("Sony Corporation");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("PS3 vuart");
/**
* vuart - An inter-partition data link service.
* port 0: PS3 AV Settings.
* port 2: PS3 System Manager.
*
* The vuart provides a bi-directional byte stream data link between logical
* partitions. Its primary role is as a communications link between the guest
* OS and the system policy module. The current HV does not support any
* connections other than those listed.
*/
enum {PORT_COUNT = 3,};
enum vuart_param {
PARAM_TX_TRIGGER = 0,
PARAM_RX_TRIGGER = 1,
PARAM_INTERRUPT_MASK = 2,
PARAM_RX_BUF_SIZE = 3, /* read only */
PARAM_RX_BYTES = 4, /* read only */
PARAM_TX_BUF_SIZE = 5, /* read only */
PARAM_TX_BYTES = 6, /* read only */
PARAM_INTERRUPT_STATUS = 7, /* read only */
};
enum vuart_interrupt_bit {
INTERRUPT_BIT_TX = 0,
INTERRUPT_BIT_RX = 1,
INTERRUPT_BIT_DISCONNECT = 2,
};
enum vuart_interrupt_mask {
INTERRUPT_MASK_TX = 1,
INTERRUPT_MASK_RX = 2,
INTERRUPT_MASK_DISCONNECT = 4,
};
/**
* struct ps3_vuart_port_priv - private vuart device data.
*/
struct ps3_vuart_port_priv {
u64 interrupt_mask;
struct {
spinlock_t lock;
struct list_head head;
} tx_list;
struct {
struct ps3_vuart_work work;
unsigned long bytes_held;
spinlock_t lock;
struct list_head head;
} rx_list;
struct ps3_vuart_stats stats;
};
static struct ps3_vuart_port_priv *to_port_priv(
struct ps3_system_bus_device *dev)
{
BUG_ON(!dev);
BUG_ON(!dev->driver_priv);
return (struct ps3_vuart_port_priv *)dev->driver_priv;
}
/**
* struct ports_bmp - bitmap indicating ports needing service.
*
* A 256 bit read only bitmap indicating ports needing service. Do not write
* to these bits. Must not cross a page boundary.
*/
struct ports_bmp {
u64 status;
u64 unused[3];
} __attribute__((aligned(32)));
#define dump_ports_bmp(_b) _dump_ports_bmp(_b, __func__, __LINE__)
static void __maybe_unused _dump_ports_bmp(
const struct ports_bmp *bmp, const char *func, int line)
{
pr_debug("%s:%d: ports_bmp: %016llxh\n", func, line, bmp->status);
}
#define dump_port_params(_b) _dump_port_params(_b, __func__, __LINE__)
static void __maybe_unused _dump_port_params(unsigned int port_number,
const char *func, int line)
{
#if defined(DEBUG)
static const char *strings[] = {
"tx_trigger ",
"rx_trigger ",
"interrupt_mask ",
"rx_buf_size ",
"rx_bytes ",
"tx_buf_size ",
"tx_bytes ",
"interrupt_status",
};
int result;
unsigned int i;
u64 value;
for (i = 0; i < ARRAY_SIZE(strings); i++) {
result = lv1_get_virtual_uart_param(port_number, i, &value);
if (result) {
pr_debug("%s:%d: port_%u: %s failed: %s\n", func, line,
port_number, strings[i], ps3_result(result));
continue;
}
pr_debug("%s:%d: port_%u: %s = %lxh\n",
func, line, port_number, strings[i], value);
}
#endif
}
int ps3_vuart_get_triggers(struct ps3_system_bus_device *dev,
struct vuart_triggers *trig)
{
int result;
u64 size;
u64 val;
u64 tx;
result = lv1_get_virtual_uart_param(dev->port_number,
PARAM_TX_TRIGGER, &tx);
trig->tx = tx;
if (result) {
dev_dbg(&dev->core, "%s:%d: tx_trigger failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = lv1_get_virtual_uart_param(dev->port_number,
PARAM_RX_BUF_SIZE, &size);
if (result) {
dev_dbg(&dev->core, "%s:%d: tx_buf_size failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = lv1_get_virtual_uart_param(dev->port_number,
PARAM_RX_TRIGGER, &val);
if (result) {
dev_dbg(&dev->core, "%s:%d: rx_trigger failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
trig->rx = size - val;
dev_dbg(&dev->core, "%s:%d: tx %lxh, rx %lxh\n", __func__, __LINE__,
trig->tx, trig->rx);
return result;
}
int ps3_vuart_set_triggers(struct ps3_system_bus_device *dev, unsigned int tx,
unsigned int rx)
{
int result;
u64 size;
result = lv1_set_virtual_uart_param(dev->port_number,
PARAM_TX_TRIGGER, tx);
if (result) {
dev_dbg(&dev->core, "%s:%d: tx_trigger failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = lv1_get_virtual_uart_param(dev->port_number,
PARAM_RX_BUF_SIZE, &size);
if (result) {
dev_dbg(&dev->core, "%s:%d: tx_buf_size failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = lv1_set_virtual_uart_param(dev->port_number,
PARAM_RX_TRIGGER, size - rx);
if (result) {
dev_dbg(&dev->core, "%s:%d: rx_trigger failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
dev_dbg(&dev->core, "%s:%d: tx %xh, rx %xh\n", __func__, __LINE__,
tx, rx);
return result;
}
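/*
 * Illustrative sketch (not part of the driver): how a port driver might use
 * the trigger helpers.  Note that the rx trigger is programmed into lv1 as
 * (rx_buf_size - rx), so ps3_vuart_get_triggers() converts it back before
 * reporting it in struct vuart_triggers.  The values 1 and 16 are example
 * choices only.
 */
static int __maybe_unused example_tune_triggers(struct ps3_system_bus_device *dev)
{
	struct vuart_triggers trig;
	int result;

	/* Interrupt when 1 byte can be sent and when 16 bytes have arrived. */
	result = ps3_vuart_set_triggers(dev, 1, 16);
	if (result)
		return result;
	/* Read the effective values back for verification. */
	return ps3_vuart_get_triggers(dev, &trig);
}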
static int ps3_vuart_get_rx_bytes_waiting(struct ps3_system_bus_device *dev,
u64 *bytes_waiting)
{
int result;
result = lv1_get_virtual_uart_param(dev->port_number,
PARAM_RX_BYTES, bytes_waiting);
if (result)
dev_dbg(&dev->core, "%s:%d: rx_bytes failed: %s\n",
__func__, __LINE__, ps3_result(result));
dev_dbg(&dev->core, "%s:%d: %llxh\n", __func__, __LINE__,
*bytes_waiting);
return result;
}
/**
* ps3_vuart_set_interrupt_mask - Enable/disable the port interrupt sources.
* @dev: The struct ps3_system_bus_device instance.
* @mask: Logical OR of enum vuart_interrupt_mask values. A zero bit disables.
*/
static int ps3_vuart_set_interrupt_mask(struct ps3_system_bus_device *dev,
unsigned long mask)
{
int result;
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
dev_dbg(&dev->core, "%s:%d: %lxh\n", __func__, __LINE__, mask);
priv->interrupt_mask = mask;
result = lv1_set_virtual_uart_param(dev->port_number,
PARAM_INTERRUPT_MASK, priv->interrupt_mask);
if (result)
dev_dbg(&dev->core, "%s:%d: interrupt_mask failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
static int ps3_vuart_get_interrupt_status(struct ps3_system_bus_device *dev,
unsigned long *status)
{
int result;
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
u64 tmp;
result = lv1_get_virtual_uart_param(dev->port_number,
PARAM_INTERRUPT_STATUS, &tmp);
if (result)
dev_dbg(&dev->core, "%s:%d: interrupt_status failed: %s\n",
__func__, __LINE__, ps3_result(result));
*status = tmp & priv->interrupt_mask;
dev_dbg(&dev->core, "%s:%d: m %llxh, s %llxh, m&s %lxh\n",
__func__, __LINE__, priv->interrupt_mask, tmp, *status);
return result;
}
int ps3_vuart_enable_interrupt_tx(struct ps3_system_bus_device *dev)
{
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
return (priv->interrupt_mask & INTERRUPT_MASK_TX) ? 0
: ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
| INTERRUPT_MASK_TX);
}
int ps3_vuart_enable_interrupt_rx(struct ps3_system_bus_device *dev)
{
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
return (priv->interrupt_mask & INTERRUPT_MASK_RX) ? 0
: ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
| INTERRUPT_MASK_RX);
}
int ps3_vuart_enable_interrupt_disconnect(struct ps3_system_bus_device *dev)
{
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
return (priv->interrupt_mask & INTERRUPT_MASK_DISCONNECT) ? 0
: ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
| INTERRUPT_MASK_DISCONNECT);
}
int ps3_vuart_disable_interrupt_tx(struct ps3_system_bus_device *dev)
{
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
return (priv->interrupt_mask & INTERRUPT_MASK_TX)
? ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
& ~INTERRUPT_MASK_TX) : 0;
}
int ps3_vuart_disable_interrupt_rx(struct ps3_system_bus_device *dev)
{
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
return (priv->interrupt_mask & INTERRUPT_MASK_RX)
? ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
& ~INTERRUPT_MASK_RX) : 0;
}
int ps3_vuart_disable_interrupt_disconnect(struct ps3_system_bus_device *dev)
{
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
return (priv->interrupt_mask & INTERRUPT_MASK_DISCONNECT)
? ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
& ~INTERRUPT_MASK_DISCONNECT) : 0;
}
/**
* ps3_vuart_raw_write - Low level write helper.
* @dev: The struct ps3_system_bus_device instance.
*
* Do not call ps3_vuart_raw_write directly, use ps3_vuart_write.
*/
static int ps3_vuart_raw_write(struct ps3_system_bus_device *dev,
const void *buf, unsigned int bytes, u64 *bytes_written)
{
int result;
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
result = lv1_write_virtual_uart(dev->port_number,
ps3_mm_phys_to_lpar(__pa(buf)), bytes, bytes_written);
if (result) {
dev_warn(&dev->core, "%s:%d: lv1_write_virtual_uart failed: "
"%s\n", __func__, __LINE__, ps3_result(result));
return result;
}
priv->stats.bytes_written += *bytes_written;
dev_dbg(&dev->core, "%s:%d: wrote %llxh/%xh=>%lxh\n", __func__, __LINE__,
*bytes_written, bytes, priv->stats.bytes_written);
return result;
}
/**
* ps3_vuart_raw_read - Low level read helper.
* @dev: The struct ps3_system_bus_device instance.
*
* Do not call ps3_vuart_raw_read directly, use ps3_vuart_read.
*/
static int ps3_vuart_raw_read(struct ps3_system_bus_device *dev, void *buf,
unsigned int bytes, u64 *bytes_read)
{
int result;
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
dev_dbg(&dev->core, "%s:%d: %xh\n", __func__, __LINE__, bytes);
result = lv1_read_virtual_uart(dev->port_number,
ps3_mm_phys_to_lpar(__pa(buf)), bytes, bytes_read);
if (result) {
dev_dbg(&dev->core, "%s:%d: lv1_read_virtual_uart failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
priv->stats.bytes_read += *bytes_read;
dev_dbg(&dev->core, "%s:%d: read %llxh/%xh=>%lxh\n", __func__, __LINE__,
*bytes_read, bytes, priv->stats.bytes_read);
return result;
}
/**
* ps3_vuart_clear_rx_bytes - Discard bytes received.
* @dev: The struct ps3_system_bus_device instance.
* @bytes: Max byte count to discard, zero = all pending.
*
* Used to clear pending rx interrupt source. Will not block.
*/
void ps3_vuart_clear_rx_bytes(struct ps3_system_bus_device *dev,
unsigned int bytes)
{
int result;
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
u64 bytes_waiting;
void *tmp;
result = ps3_vuart_get_rx_bytes_waiting(dev, &bytes_waiting);
BUG_ON(result);
bytes = bytes ? min(bytes, (unsigned int)bytes_waiting) : bytes_waiting;
dev_dbg(&dev->core, "%s:%d: %u\n", __func__, __LINE__, bytes);
if (!bytes)
return;
/* Add some extra space for recently arrived data. */
bytes += 128;
tmp = kmalloc(bytes, GFP_KERNEL);
if (!tmp)
return;
ps3_vuart_raw_read(dev, tmp, bytes, &bytes_waiting);
kfree(tmp);
/* Don't include these bytes in the stats. */
priv->stats.bytes_read -= bytes_waiting;
}
EXPORT_SYMBOL_GPL(ps3_vuart_clear_rx_bytes);
/**
* struct list_buffer - An element for a port device fifo buffer list.
*/
struct list_buffer {
struct list_head link;
const unsigned char *head;
const unsigned char *tail;
unsigned long dbg_number;
unsigned char data[];
};
/**
* ps3_vuart_write - the entry point for writing data to a port
* @dev: The struct ps3_system_bus_device instance.
*
* If the port is idle on entry, as much of the incoming data as the port
* will accept is written directly to the port. Otherwise a list buffer is
* created and any remaining incoming data is copied to that buffer. The
* buffer is then enqueued for transmission via the transmit interrupt.
*/
int ps3_vuart_write(struct ps3_system_bus_device *dev, const void *buf,
unsigned int bytes)
{
static unsigned long dbg_number;
int result;
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
unsigned long flags;
struct list_buffer *lb;
dev_dbg(&dev->core, "%s:%d: %u(%xh) bytes\n", __func__, __LINE__,
bytes, bytes);
spin_lock_irqsave(&priv->tx_list.lock, flags);
if (list_empty(&priv->tx_list.head)) {
u64 bytes_written;
result = ps3_vuart_raw_write(dev, buf, bytes, &bytes_written);
spin_unlock_irqrestore(&priv->tx_list.lock, flags);
if (result) {
dev_dbg(&dev->core,
"%s:%d: ps3_vuart_raw_write failed\n",
__func__, __LINE__);
return result;
}
if (bytes_written == bytes) {
dev_dbg(&dev->core, "%s:%d: wrote %xh bytes\n",
__func__, __LINE__, bytes);
return 0;
}
bytes -= bytes_written;
buf += bytes_written;
} else
spin_unlock_irqrestore(&priv->tx_list.lock, flags);
lb = kmalloc(sizeof(struct list_buffer) + bytes, GFP_KERNEL);
if (!lb)
return -ENOMEM;
memcpy(lb->data, buf, bytes);
lb->head = lb->data;
lb->tail = lb->data + bytes;
lb->dbg_number = ++dbg_number;
spin_lock_irqsave(&priv->tx_list.lock, flags);
list_add_tail(&lb->link, &priv->tx_list.head);
ps3_vuart_enable_interrupt_tx(dev);
spin_unlock_irqrestore(&priv->tx_list.lock, flags);
dev_dbg(&dev->core, "%s:%d: queued buf_%lu, %xh bytes\n",
__func__, __LINE__, lb->dbg_number, bytes);
return 0;
}
EXPORT_SYMBOL_GPL(ps3_vuart_write);
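/*
 * Illustrative sketch (not part of the driver): sending a small message from
 * a vuart port driver.  ps3_vuart_write() either writes directly to the port
 * or queues the remainder for the tx interrupt, so the caller only needs to
 * check for errors.
 */
static int __maybe_unused example_send(struct ps3_system_bus_device *dev,
	const void *msg, unsigned int bytes)
{
	int result = ps3_vuart_write(dev, msg, bytes);

	if (result)
		dev_dbg(&dev->core, "%s: write failed (%d)\n", __func__,
			result);
	return result;
}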
/**
* ps3_vuart_queue_rx_bytes - Queue waiting bytes into the buffer list.
* @dev: The struct ps3_system_bus_device instance.
* @bytes_queued: Number of bytes queued to the buffer list.
*
* Must be called with priv->rx_list.lock held.
*/
static int ps3_vuart_queue_rx_bytes(struct ps3_system_bus_device *dev,
u64 *bytes_queued)
{
static unsigned long dbg_number;
int result;
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
struct list_buffer *lb;
u64 bytes;
*bytes_queued = 0;
result = ps3_vuart_get_rx_bytes_waiting(dev, &bytes);
BUG_ON(result);
if (result)
return -EIO;
if (!bytes)
return 0;
/* Add some extra space for recently arrived data. */
bytes += 128;
lb = kmalloc(sizeof(struct list_buffer) + bytes, GFP_ATOMIC);
if (!lb)
return -ENOMEM;
ps3_vuart_raw_read(dev, lb->data, bytes, &bytes);
lb->head = lb->data;
lb->tail = lb->data + bytes;
lb->dbg_number = ++dbg_number;
list_add_tail(&lb->link, &priv->rx_list.head);
priv->rx_list.bytes_held += bytes;
dev_dbg(&dev->core, "%s:%d: buf_%lu: queued %llxh bytes\n",
__func__, __LINE__, lb->dbg_number, bytes);
*bytes_queued = bytes;
return 0;
}
/**
* ps3_vuart_read - The entry point for reading data from a port.
*
* Queue data waiting at the port, and if enough bytes to satisfy the request
* are held in the buffer list, those bytes are dequeued and copied to the
* caller's buffer. Emptied list buffers are retired. If the request cannot
* be satisfied by the bytes held in the list buffers, -EAGAIN is returned.
*/
int ps3_vuart_read(struct ps3_system_bus_device *dev, void *buf,
unsigned int bytes)
{
int result;
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
unsigned long flags;
struct list_buffer *lb, *n;
unsigned long bytes_read;
dev_dbg(&dev->core, "%s:%d: %u(%xh) bytes\n", __func__, __LINE__,
bytes, bytes);
spin_lock_irqsave(&priv->rx_list.lock, flags);
/* Queue rx bytes here for polled reads. */
while (priv->rx_list.bytes_held < bytes) {
u64 tmp;
result = ps3_vuart_queue_rx_bytes(dev, &tmp);
if (result || !tmp) {
dev_dbg(&dev->core, "%s:%d: starved for %lxh bytes\n",
__func__, __LINE__,
bytes - priv->rx_list.bytes_held);
spin_unlock_irqrestore(&priv->rx_list.lock, flags);
return -EAGAIN;
}
}
list_for_each_entry_safe(lb, n, &priv->rx_list.head, link) {
bytes_read = min((unsigned int)(lb->tail - lb->head), bytes);
memcpy(buf, lb->head, bytes_read);
buf += bytes_read;
bytes -= bytes_read;
priv->rx_list.bytes_held -= bytes_read;
if (bytes_read < lb->tail - lb->head) {
lb->head += bytes_read;
dev_dbg(&dev->core, "%s:%d: buf_%lu: dequeued %lxh "
"bytes\n", __func__, __LINE__, lb->dbg_number,
bytes_read);
spin_unlock_irqrestore(&priv->rx_list.lock, flags);
return 0;
}
dev_dbg(&dev->core, "%s:%d: buf_%lu: free, dequeued %lxh "
"bytes\n", __func__, __LINE__, lb->dbg_number,
bytes_read);
list_del(&lb->link);
kfree(lb);
}
spin_unlock_irqrestore(&priv->rx_list.lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ps3_vuart_read);
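/*
 * Illustrative sketch (not part of the driver): a polled read that retries
 * while ps3_vuart_read() reports -EAGAIN (not enough bytes buffered yet).
 * The retry count and 1 ms sleep are arbitrary example values, and msleep()
 * is assumed to be available via <linux/delay.h>.
 */
static int __maybe_unused example_read_polled(struct ps3_system_bus_device *dev,
	void *buf, unsigned int bytes)
{
	unsigned int retries = 100;	/* arbitrary example bound */
	int result;

	do {
		result = ps3_vuart_read(dev, buf, bytes);
		if (result != -EAGAIN)
			return result;
		msleep(1);
	} while (--retries);
	return -ETIMEDOUT;
}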
/**
* ps3_vuart_work - Asynchronous read handler.
*/
static void ps3_vuart_work(struct work_struct *work)
{
struct ps3_system_bus_device *dev =
ps3_vuart_work_to_system_bus_dev(work);
struct ps3_vuart_port_driver *drv =
ps3_system_bus_dev_to_vuart_drv(dev);
BUG_ON(!drv);
drv->work(dev);
}
int ps3_vuart_read_async(struct ps3_system_bus_device *dev, unsigned int bytes)
{
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
unsigned long flags;
if (priv->rx_list.work.trigger) {
dev_dbg(&dev->core, "%s:%d: warning, multiple calls\n",
__func__, __LINE__);
return -EAGAIN;
}
BUG_ON(!bytes);
spin_lock_irqsave(&priv->rx_list.lock, flags);
if (priv->rx_list.bytes_held >= bytes) {
dev_dbg(&dev->core, "%s:%d: schedule_work %xh bytes\n",
__func__, __LINE__, bytes);
schedule_work(&priv->rx_list.work.work);
spin_unlock_irqrestore(&priv->rx_list.lock, flags);
return 0;
}
priv->rx_list.work.trigger = bytes;
spin_unlock_irqrestore(&priv->rx_list.lock, flags);
dev_dbg(&dev->core, "%s:%d: waiting for %u(%xh) bytes\n", __func__,
__LINE__, bytes, bytes);
return 0;
}
EXPORT_SYMBOL_GPL(ps3_vuart_read_async);
void ps3_vuart_cancel_async(struct ps3_system_bus_device *dev)
{
to_port_priv(dev)->rx_list.work.trigger = 0;
}
EXPORT_SYMBOL_GPL(ps3_vuart_cancel_async);
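/*
 * Illustrative sketch (not part of the driver): the asynchronous read
 * pattern.  A port driver arms ps3_vuart_read_async() for the number of
 * bytes it expects; once that many bytes are buffered, the driver's .work
 * callback runs (via ps3_vuart_work) and can fetch them with
 * ps3_vuart_read() and re-arm for the next message.  The fixed 16-byte
 * message size is only an example.
 */
static int __maybe_unused example_work(struct ps3_system_bus_device *dev)
{
	unsigned char msg[16];	/* example message size */
	int result;

	result = ps3_vuart_read(dev, msg, sizeof(msg));
	if (result)
		return result;
	/* ... process msg ... */
	return ps3_vuart_read_async(dev, sizeof(msg));
}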
/**
* ps3_vuart_handle_interrupt_tx - third stage transmit interrupt handler
*
* Services the transmit interrupt for the port. Writes as much data from the
* buffer list as the port will accept. Retires any emptied list buffers and
* adjusts the final list buffer state for a partial write.
*/
static int ps3_vuart_handle_interrupt_tx(struct ps3_system_bus_device *dev)
{
int result = 0;
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
unsigned long flags;
struct list_buffer *lb, *n;
unsigned long bytes_total = 0;
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
spin_lock_irqsave(&priv->tx_list.lock, flags);
list_for_each_entry_safe(lb, n, &priv->tx_list.head, link) {
u64 bytes_written;
result = ps3_vuart_raw_write(dev, lb->head, lb->tail - lb->head,
&bytes_written);
if (result) {
dev_dbg(&dev->core,
"%s:%d: ps3_vuart_raw_write failed\n",
__func__, __LINE__);
break;
}
bytes_total += bytes_written;
if (bytes_written < lb->tail - lb->head) {
lb->head += bytes_written;
dev_dbg(&dev->core,
"%s:%d cleared buf_%lu, %llxh bytes\n",
__func__, __LINE__, lb->dbg_number,
bytes_written);
goto port_full;
}
dev_dbg(&dev->core, "%s:%d free buf_%lu\n", __func__, __LINE__,
lb->dbg_number);
list_del(&lb->link);
kfree(lb);
}
ps3_vuart_disable_interrupt_tx(dev);
port_full:
spin_unlock_irqrestore(&priv->tx_list.lock, flags);
dev_dbg(&dev->core, "%s:%d wrote %lxh bytes total\n",
__func__, __LINE__, bytes_total);
return result;
}
/**
* ps3_vuart_handle_interrupt_rx - third stage receive interrupt handler
*
* Services the receive interrupt for the port. Creates a list buffer and
* copies all waiting port data to that buffer and enqueues the buffer in the
* buffer list. Buffer list data is dequeued via ps3_vuart_read.
*/
static int ps3_vuart_handle_interrupt_rx(struct ps3_system_bus_device *dev)
{
int result;
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
unsigned long flags;
u64 bytes;
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
spin_lock_irqsave(&priv->rx_list.lock, flags);
result = ps3_vuart_queue_rx_bytes(dev, &bytes);
if (result) {
spin_unlock_irqrestore(&priv->rx_list.lock, flags);
return result;
}
if (priv->rx_list.work.trigger && priv->rx_list.bytes_held
>= priv->rx_list.work.trigger) {
dev_dbg(&dev->core, "%s:%d: schedule_work %lxh bytes\n",
__func__, __LINE__, priv->rx_list.work.trigger);
priv->rx_list.work.trigger = 0;
schedule_work(&priv->rx_list.work.work);
}
spin_unlock_irqrestore(&priv->rx_list.lock, flags);
return result;
}
static int ps3_vuart_handle_interrupt_disconnect(
struct ps3_system_bus_device *dev)
{
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
BUG_ON("no support");
return -1;
}
/**
* ps3_vuart_handle_port_interrupt - second stage interrupt handler
*
* Services any pending interrupt types for the port. Passes control to the
* third stage type specific interrupt handler. Returns control to the first
* stage handler after one iteration.
*/
static int ps3_vuart_handle_port_interrupt(struct ps3_system_bus_device *dev)
{
int result;
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
unsigned long status;
result = ps3_vuart_get_interrupt_status(dev, &status);
if (result)
return result;
dev_dbg(&dev->core, "%s:%d: status: %lxh\n", __func__, __LINE__,
status);
if (status & INTERRUPT_MASK_DISCONNECT) {
priv->stats.disconnect_interrupts++;
result = ps3_vuart_handle_interrupt_disconnect(dev);
if (result)
ps3_vuart_disable_interrupt_disconnect(dev);
}
if (status & INTERRUPT_MASK_TX) {
priv->stats.tx_interrupts++;
result = ps3_vuart_handle_interrupt_tx(dev);
if (result)
ps3_vuart_disable_interrupt_tx(dev);
}
if (status & INTERRUPT_MASK_RX) {
priv->stats.rx_interrupts++;
result = ps3_vuart_handle_interrupt_rx(dev);
if (result)
ps3_vuart_disable_interrupt_rx(dev);
}
return 0;
}
static struct vuart_bus_priv {
struct ports_bmp *bmp;
unsigned int virq;
struct mutex probe_mutex;
int use_count;
struct ps3_system_bus_device *devices[PORT_COUNT];
} vuart_bus_priv;
/**
* ps3_vuart_irq_handler - first stage interrupt handler
*
* Loops finding any interrupting port and its associated instance data.
* Passes control to the second stage port specific interrupt handler. Loops
* until all outstanding interrupts are serviced.
*/
static irqreturn_t ps3_vuart_irq_handler(int irq, void *_private)
{
struct vuart_bus_priv *bus_priv = _private;
BUG_ON(!bus_priv);
while (1) {
unsigned int port;
dump_ports_bmp(bus_priv->bmp);
port = (BITS_PER_LONG - 1) - __ilog2(bus_priv->bmp->status);
if (port == BITS_PER_LONG)
break;
BUG_ON(port >= PORT_COUNT);
BUG_ON(!bus_priv->devices[port]);
ps3_vuart_handle_port_interrupt(bus_priv->devices[port]);
}
return IRQ_HANDLED;
}
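/*
 * Worked example (illustrative only): the HV numbers vuart ports from the
 * most significant bit of ports_bmp.status.  With only port 0 pending,
 * status is 0x8000000000000000, __ilog2() returns 63, and the handler
 * above computes port = (BITS_PER_LONG - 1) - 63 = 0.  When status is
 * zero, port evaluates to BITS_PER_LONG, which is the loop's termination
 * condition.
 */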
static int ps3_vuart_bus_interrupt_get(void)
{
int result;
pr_debug(" -> %s:%d\n", __func__, __LINE__);
vuart_bus_priv.use_count++;
BUG_ON(vuart_bus_priv.use_count > 2);
if (vuart_bus_priv.use_count != 1)
return 0;
BUG_ON(vuart_bus_priv.bmp);
vuart_bus_priv.bmp = kzalloc(sizeof(struct ports_bmp), GFP_KERNEL);
if (!vuart_bus_priv.bmp) {
result = -ENOMEM;
goto fail_bmp_malloc;
}
result = ps3_vuart_irq_setup(PS3_BINDING_CPU_ANY, vuart_bus_priv.bmp,
&vuart_bus_priv.virq);
if (result) {
pr_debug("%s:%d: ps3_vuart_irq_setup failed (%d)\n",
__func__, __LINE__, result);
result = -EPERM;
goto fail_alloc_irq;
}
result = request_irq(vuart_bus_priv.virq, ps3_vuart_irq_handler,
0, "vuart", &vuart_bus_priv);
if (result) {
pr_debug("%s:%d: request_irq failed (%d)\n",
__func__, __LINE__, result);
goto fail_request_irq;
}
pr_debug(" <- %s:%d: ok\n", __func__, __LINE__);
return result;
fail_request_irq:
ps3_vuart_irq_destroy(vuart_bus_priv.virq);
vuart_bus_priv.virq = 0;
fail_alloc_irq:
kfree(vuart_bus_priv.bmp);
vuart_bus_priv.bmp = NULL;
fail_bmp_malloc:
vuart_bus_priv.use_count--;
pr_debug(" <- %s:%d: failed\n", __func__, __LINE__);
return result;
}
static int ps3_vuart_bus_interrupt_put(void)
{
pr_debug(" -> %s:%d\n", __func__, __LINE__);
vuart_bus_priv.use_count--;
BUG_ON(vuart_bus_priv.use_count < 0);
if (vuart_bus_priv.use_count != 0)
return 0;
free_irq(vuart_bus_priv.virq, &vuart_bus_priv);
ps3_vuart_irq_destroy(vuart_bus_priv.virq);
vuart_bus_priv.virq = 0;
kfree(vuart_bus_priv.bmp);
vuart_bus_priv.bmp = NULL;
pr_debug(" <- %s:%d\n", __func__, __LINE__);
return 0;
}
static int ps3_vuart_probe(struct ps3_system_bus_device *dev)
{
int result;
struct ps3_vuart_port_driver *drv;
struct ps3_vuart_port_priv *priv = NULL;
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
drv = ps3_system_bus_dev_to_vuart_drv(dev);
BUG_ON(!drv);
dev_dbg(&dev->core, "%s:%d: (%s)\n", __func__, __LINE__,
drv->core.core.name);
if (dev->port_number >= PORT_COUNT) {
BUG();
return -EINVAL;
}
mutex_lock(&vuart_bus_priv.probe_mutex);
result = ps3_vuart_bus_interrupt_get();
if (result)
goto fail_setup_interrupt;
if (vuart_bus_priv.devices[dev->port_number]) {
dev_dbg(&dev->core, "%s:%d: port busy (%d)\n", __func__,
__LINE__, dev->port_number);
result = -EBUSY;
goto fail_busy;
}
vuart_bus_priv.devices[dev->port_number] = dev;
/* Setup dev->driver_priv. */
dev->driver_priv = kzalloc(sizeof(struct ps3_vuart_port_priv),
GFP_KERNEL);
if (!dev->driver_priv) {
result = -ENOMEM;
goto fail_dev_malloc;
}
priv = to_port_priv(dev);
INIT_LIST_HEAD(&priv->tx_list.head);
spin_lock_init(&priv->tx_list.lock);
INIT_LIST_HEAD(&priv->rx_list.head);
spin_lock_init(&priv->rx_list.lock);
INIT_WORK(&priv->rx_list.work.work, ps3_vuart_work);
priv->rx_list.work.trigger = 0;
priv->rx_list.work.dev = dev;
/* clear stale pending interrupts */
ps3_vuart_clear_rx_bytes(dev, 0);
ps3_vuart_set_interrupt_mask(dev, INTERRUPT_MASK_RX);
ps3_vuart_set_triggers(dev, 1, 1);
if (drv->probe)
result = drv->probe(dev);
else {
result = 0;
dev_info(&dev->core, "%s:%d: no probe method\n", __func__,
__LINE__);
}
if (result) {
dev_dbg(&dev->core, "%s:%d: drv->probe failed\n",
__func__, __LINE__);
goto fail_probe;
}
mutex_unlock(&vuart_bus_priv.probe_mutex);
return result;
fail_probe:
ps3_vuart_set_interrupt_mask(dev, 0);
kfree(dev->driver_priv);
dev->driver_priv = NULL;
fail_dev_malloc:
vuart_bus_priv.devices[dev->port_number] = NULL;
fail_busy:
ps3_vuart_bus_interrupt_put();
fail_setup_interrupt:
mutex_unlock(&vuart_bus_priv.probe_mutex);
dev_dbg(&dev->core, "%s:%d: failed\n", __func__, __LINE__);
return result;
}
/**
* ps3_vuart_cleanup - common cleanup helper.
* @dev: The struct ps3_system_bus_device instance.
*
* Cleans interrupts and HV resources. Must be called with
* vuart_bus_priv.probe_mutex held. Used by ps3_vuart_remove and
* ps3_vuart_shutdown. After this call, polled reading will still work.
*/
static int ps3_vuart_cleanup(struct ps3_system_bus_device *dev)
{
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
ps3_vuart_cancel_async(dev);
ps3_vuart_set_interrupt_mask(dev, 0);
ps3_vuart_bus_interrupt_put();
return 0;
}
/**
* ps3_vuart_remove - Completely clean the device instance.
* @dev: The struct ps3_system_bus_device instance.
*
* Cleans all memory, interrupts and HV resources. After this call the
* device can no longer be used.
*/
static void ps3_vuart_remove(struct ps3_system_bus_device *dev)
{
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
struct ps3_vuart_port_driver *drv;
BUG_ON(!dev);
mutex_lock(&vuart_bus_priv.probe_mutex);
dev_dbg(&dev->core, " -> %s:%d: match_id %d\n", __func__, __LINE__,
dev->match_id);
if (!dev->core.driver) {
dev_dbg(&dev->core, "%s:%d: no driver bound\n", __func__,
__LINE__);
mutex_unlock(&vuart_bus_priv.probe_mutex);
return;
}
drv = ps3_system_bus_dev_to_vuart_drv(dev);
BUG_ON(!drv);
if (drv->remove) {
drv->remove(dev);
} else {
dev_dbg(&dev->core, "%s:%d: no remove method\n", __func__,
__LINE__);
BUG();
}
ps3_vuart_cleanup(dev);
vuart_bus_priv.devices[dev->port_number] = NULL;
kfree(priv);
priv = NULL;
dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
mutex_unlock(&vuart_bus_priv.probe_mutex);
}
/**
* ps3_vuart_shutdown - Cleans interrupts and HV resources.
* @dev: The struct ps3_system_bus_device instance.
*
* Cleans interrupts and HV resources. After this call the device can still
* be used in polling mode. This behavior is required by the sys-manager so
* that it can complete the device power operation sequence.
*/
static void ps3_vuart_shutdown(struct ps3_system_bus_device *dev)
{
struct ps3_vuart_port_driver *drv;
BUG_ON(!dev);
mutex_lock(&vuart_bus_priv.probe_mutex);
dev_dbg(&dev->core, " -> %s:%d: match_id %d\n", __func__, __LINE__,
dev->match_id);
if (!dev->core.driver) {
dev_dbg(&dev->core, "%s:%d: no driver bound\n", __func__,
__LINE__);
mutex_unlock(&vuart_bus_priv.probe_mutex);
return;
}
drv = ps3_system_bus_dev_to_vuart_drv(dev);
BUG_ON(!drv);
if (drv->shutdown)
drv->shutdown(dev);
else if (drv->remove) {
dev_dbg(&dev->core, "%s:%d: no shutdown, calling remove\n",
__func__, __LINE__);
drv->remove(dev);
} else {
dev_dbg(&dev->core, "%s:%d: no shutdown method\n", __func__,
__LINE__);
BUG();
}
ps3_vuart_cleanup(dev);
dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
mutex_unlock(&vuart_bus_priv.probe_mutex);
}
static int __init ps3_vuart_bus_init(void)
{
pr_debug("%s:%d:\n", __func__, __LINE__);
if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
return -ENODEV;
mutex_init(&vuart_bus_priv.probe_mutex);
return 0;
}
static void __exit ps3_vuart_bus_exit(void)
{
pr_debug("%s:%d:\n", __func__, __LINE__);
}
core_initcall(ps3_vuart_bus_init);
module_exit(ps3_vuart_bus_exit);
/**
* ps3_vuart_port_driver_register - Add a vuart port device driver.
*/
int ps3_vuart_port_driver_register(struct ps3_vuart_port_driver *drv)
{
int result;
pr_debug("%s:%d: (%s)\n", __func__, __LINE__, drv->core.core.name);
BUG_ON(!drv->core.match_id);
BUG_ON(!drv->core.core.name);
drv->core.probe = ps3_vuart_probe;
drv->core.remove = ps3_vuart_remove;
drv->core.shutdown = ps3_vuart_shutdown;
result = ps3_system_bus_driver_register(&drv->core);
return result;
}
EXPORT_SYMBOL_GPL(ps3_vuart_port_driver_register);
/**
* ps3_vuart_port_driver_unregister - Remove a vuart port device driver.
*/
void ps3_vuart_port_driver_unregister(struct ps3_vuart_port_driver *drv)
{
pr_debug("%s:%d: (%s)\n", __func__, __LINE__, drv->core.core.name);
ps3_system_bus_driver_unregister(&drv->core);
}
EXPORT_SYMBOL_GPL(ps3_vuart_port_driver_unregister);
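/*
 * Illustrative sketch (not part of the driver): the minimal shape of a vuart
 * port driver.  All of the "example" names and the match id below are
 * hypothetical placeholders; a real driver supplies its own match id (as
 * ps3av does with PS3_MATCH_ID_AV_SETTINGS) together with probe, remove and
 * shutdown methods and, if it uses ps3_vuart_read_async(), a .work callback.
 */
#if 0	/* example only - placeholder identifiers */
static struct ps3_vuart_port_driver example_driver = {
	.core.match_id = PS3_MATCH_ID_EXAMPLE,	/* hypothetical match id */
	.core.core.name = "example",
	.probe = example_probe,
	.remove = example_remove,
	.shutdown = example_shutdown,
	.work = example_work,
};

static int __init example_init(void)
{
	return ps3_vuart_port_driver_register(&example_driver);
}
#endif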
|
linux-master
|
drivers/ps3/ps3-vuart.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* PS3 Logical Performance Monitor.
*
* Copyright (C) 2007 Sony Computer Entertainment Inc.
* Copyright 2007 Sony Corp.
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/ps3.h>
#include <asm/lv1call.h>
#include <asm/cell-pmu.h>
/* BOOKMARK tag macros */
#define PS3_PM_BOOKMARK_START 0x8000000000000000ULL
#define PS3_PM_BOOKMARK_STOP 0x4000000000000000ULL
#define PS3_PM_BOOKMARK_TAG_KERNEL 0x1000000000000000ULL
#define PS3_PM_BOOKMARK_TAG_USER 0x3000000000000000ULL
#define PS3_PM_BOOKMARK_TAG_MASK_HI 0xF000000000000000ULL
#define PS3_PM_BOOKMARK_TAG_MASK_LO 0x0F00000000000000ULL
/* CBE PM CONTROL register macros */
#define PS3_PM_CONTROL_PPU_TH0_BOOKMARK 0x00001000
#define PS3_PM_CONTROL_PPU_TH1_BOOKMARK 0x00000800
#define PS3_PM_CONTROL_PPU_COUNT_MODE_MASK 0x000C0000
#define PS3_PM_CONTROL_PPU_COUNT_MODE_PROBLEM 0x00080000
#define PS3_WRITE_PM_MASK 0xFFFFFFFFFFFFFFFFULL
/* CBE PM START STOP register macros */
#define PS3_PM_START_STOP_PPU_TH0_BOOKMARK_START 0x02000000
#define PS3_PM_START_STOP_PPU_TH1_BOOKMARK_START 0x01000000
#define PS3_PM_START_STOP_PPU_TH0_BOOKMARK_STOP 0x00020000
#define PS3_PM_START_STOP_PPU_TH1_BOOKMARK_STOP 0x00010000
#define PS3_PM_START_STOP_START_MASK 0xFF000000
#define PS3_PM_START_STOP_STOP_MASK 0x00FF0000
/* CBE PM COUNTER register macros */
#define PS3_PM_COUNTER_MASK_HI 0xFFFFFFFF00000000ULL
#define PS3_PM_COUNTER_MASK_LO 0x00000000FFFFFFFFULL
/* BASE SIGNAL GROUP NUMBER macros */
#define PM_ISLAND2_BASE_SIGNAL_GROUP_NUMBER 0
#define PM_ISLAND2_SIGNAL_GROUP_NUMBER1 6
#define PM_ISLAND2_SIGNAL_GROUP_NUMBER2 7
#define PM_ISLAND3_BASE_SIGNAL_GROUP_NUMBER 7
#define PM_ISLAND4_BASE_SIGNAL_GROUP_NUMBER 15
#define PM_SPU_TRIGGER_SIGNAL_GROUP_NUMBER 17
#define PM_SPU_EVENT_SIGNAL_GROUP_NUMBER 18
#define PM_ISLAND5_BASE_SIGNAL_GROUP_NUMBER 18
#define PM_ISLAND6_BASE_SIGNAL_GROUP_NUMBER 24
#define PM_ISLAND7_BASE_SIGNAL_GROUP_NUMBER 49
#define PM_ISLAND8_BASE_SIGNAL_GROUP_NUMBER 52
#define PM_SIG_GROUP_SPU 41
#define PM_SIG_GROUP_SPU_TRIGGER 42
#define PM_SIG_GROUP_SPU_EVENT 43
#define PM_SIG_GROUP_MFC_MAX 60
/**
* struct ps3_lpm_shadow_regs - Performance monitor shadow registers.
*
* @pm_control: Shadow of the processor's pm_control register.
* @pm_start_stop: Shadow of the processor's pm_start_stop register.
* @group_control: Shadow of the processor's group_control register.
* @debug_bus_control: Shadow of the processor's debug_bus_control register.
*
* The logical performance monitor provides a write-only interface to
* these processor registers. These shadow variables cache the processor
* register values for reading.
*
* The initial value of the shadow registers at lpm creation is
* PS3_LPM_SHADOW_REG_INIT.
*/
struct ps3_lpm_shadow_regs {
u64 pm_control;
u64 pm_start_stop;
u64 group_control;
u64 debug_bus_control;
};
#define PS3_LPM_SHADOW_REG_INIT 0xFFFFFFFF00000000ULL
/**
* struct ps3_lpm_priv - Private lpm device data.
*
* @open: An atomic variable indicating the lpm driver has been opened.
* @rights: The lpm rights granted by the system policy module. A logical
* OR of enum ps3_lpm_rights.
* @node_id: The node id of a BE processor whose performance monitor this
* lpar has the right to use.
* @pu_id: The lv1 id of the logical PU.
* @lpm_id: The lv1 id of this lpm instance.
* @outlet_id: The outlet created by lv1 for this lpm instance.
* @tb_count: The number of bytes of data held in the lv1 trace buffer.
* @tb_cache: Kernel buffer to receive the data from the lv1 trace buffer.
* Must be 128 byte aligned.
* @tb_cache_size: Size of the kernel @tb_cache buffer. Must be 128 byte
* aligned.
* @tb_cache_internal: An unaligned buffer allocated by this driver to be
* used for the trace buffer cache when ps3_lpm_open() is called with a
* NULL tb_cache argument. Otherwise unused.
* @shadow: Processor register shadow of type struct ps3_lpm_shadow_regs.
* @sbd: The struct ps3_system_bus_device attached to this driver.
*
* The trace buffer is a buffer allocated and used internally to the lv1
* hypervisor to collect trace data. The trace buffer cache is a guest
* buffer that accepts the trace data from the trace buffer.
*/
struct ps3_lpm_priv {
atomic_t open;
u64 rights;
u64 node_id;
u64 pu_id;
u64 lpm_id;
u64 outlet_id;
u64 tb_count;
void *tb_cache;
u64 tb_cache_size;
void *tb_cache_internal;
struct ps3_lpm_shadow_regs shadow;
struct ps3_system_bus_device *sbd;
};
enum {
PS3_LPM_DEFAULT_TB_CACHE_SIZE = 0x4000,
};
/**
* lpm_priv - Static instance of the lpm data.
*
* Since the exported routines don't support the notion of a device
* instance, we hold the instance in this static variable and allow at
* most one instance at a time to be created.
*/
static struct ps3_lpm_priv *lpm_priv;
static struct device *sbd_core(void)
{
BUG_ON(!lpm_priv || !lpm_priv->sbd);
return &lpm_priv->sbd->core;
}
/**
* use_start_stop_bookmark - Enable the PPU bookmark trace.
*
* Enables the PPU bookmark triggers, but ONLY if no other triggers are set.
* The start/stop bookmarks are inserted by ps3_enable_pm() and ps3_disable_pm()
* to start and stop the LPM.
*
* Used to improve the quality of the performance counter data.
*/
enum {use_start_stop_bookmark = 1,};
void ps3_set_bookmark(u64 bookmark)
{
/*
* As per the PPE book IV, to avoid bookmark loss there must
* not be a traced branch within 10 cycles of setting the
* SPRN_BKMK register. The actual text is unclear as to whether 'within'
* includes cycles before the call.
*/
asm volatile("nop;nop;nop;nop;nop;nop;nop;nop;nop;");
mtspr(SPRN_BKMK, bookmark);
asm volatile("nop;nop;nop;nop;nop;nop;nop;nop;nop;");
}
EXPORT_SYMBOL_GPL(ps3_set_bookmark);
void ps3_set_pm_bookmark(u64 tag, u64 incident, u64 th_id)
{
u64 bookmark;
bookmark = (get_tb() & 0x00000000FFFFFFFFULL) |
PS3_PM_BOOKMARK_TAG_KERNEL;
bookmark = ((tag << 56) & PS3_PM_BOOKMARK_TAG_MASK_LO) |
(incident << 48) | (th_id << 32) | bookmark;
ps3_set_bookmark(bookmark);
}
EXPORT_SYMBOL_GPL(ps3_set_pm_bookmark);
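/*
 * Illustrative sketch (not part of the driver): inserting start/stop
 * bookmarks around a region of interest, mirroring what ps3_enable_pm() and
 * ps3_disable_pm() do when use_start_stop_bookmark is set.  OR-ing the
 * timebase into the bookmark word lets the trace be correlated with time.
 */
static void __maybe_unused example_bookmarked_region(void)
{
	ps3_set_bookmark(get_tb() | PS3_PM_BOOKMARK_START);
	/* ... code being measured ... */
	ps3_set_bookmark(get_tb() | PS3_PM_BOOKMARK_STOP);
}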
/**
* ps3_read_phys_ctr - Read physical counter registers.
*
* Each physical counter can act as one 32 bit counter or as two 16 bit
* counters.
*/
u32 ps3_read_phys_ctr(u32 cpu, u32 phys_ctr)
{
int result;
u64 counter0415;
u64 counter2637;
if (phys_ctr >= NR_PHYS_CTRS) {
dev_dbg(sbd_core(), "%s:%u: phys_ctr too big: %u\n", __func__,
__LINE__, phys_ctr);
return 0;
}
result = lv1_set_lpm_counter(lpm_priv->lpm_id, 0, 0, 0, 0, &counter0415,
&counter2637);
if (result) {
dev_err(sbd_core(), "%s:%u: lv1_set_lpm_counter failed: "
"phys_ctr %u, %s\n", __func__, __LINE__, phys_ctr,
ps3_result(result));
return 0;
}
switch (phys_ctr) {
case 0:
return counter0415 >> 32;
case 1:
return counter0415 & PS3_PM_COUNTER_MASK_LO;
case 2:
return counter2637 >> 32;
case 3:
return counter2637 & PS3_PM_COUNTER_MASK_LO;
default:
BUG();
}
return 0;
}
EXPORT_SYMBOL_GPL(ps3_read_phys_ctr);
/**
* ps3_write_phys_ctr - Write physical counter registers.
*
* Each physical counter can act as one 32 bit counter or as two 16 bit
* counters.
*/
void ps3_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val)
{
u64 counter0415;
u64 counter0415_mask;
u64 counter2637;
u64 counter2637_mask;
int result;
if (phys_ctr >= NR_PHYS_CTRS) {
dev_dbg(sbd_core(), "%s:%u: phys_ctr too big: %u\n", __func__,
__LINE__, phys_ctr);
return;
}
switch (phys_ctr) {
case 0:
counter0415 = (u64)val << 32;
counter0415_mask = PS3_PM_COUNTER_MASK_HI;
counter2637 = 0x0;
counter2637_mask = 0x0;
break;
case 1:
counter0415 = (u64)val;
counter0415_mask = PS3_PM_COUNTER_MASK_LO;
counter2637 = 0x0;
counter2637_mask = 0x0;
break;
case 2:
counter0415 = 0x0;
counter0415_mask = 0x0;
counter2637 = (u64)val << 32;
counter2637_mask = PS3_PM_COUNTER_MASK_HI;
break;
case 3:
counter0415 = 0x0;
counter0415_mask = 0x0;
counter2637 = (u64)val;
counter2637_mask = PS3_PM_COUNTER_MASK_LO;
break;
default:
BUG();
}
result = lv1_set_lpm_counter(lpm_priv->lpm_id,
counter0415, counter0415_mask,
counter2637, counter2637_mask,
&counter0415, &counter2637);
if (result)
dev_err(sbd_core(), "%s:%u: lv1_set_lpm_counter failed: "
"phys_ctr %u, val %u, %s\n", __func__, __LINE__,
phys_ctr, val, ps3_result(result));
}
EXPORT_SYMBOL_GPL(ps3_write_phys_ctr);
/**
* ps3_read_ctr - Read counter.
*
* Read 16 or 32 bits depending on the current size of the counter.
* Counters 4, 5, 6 & 7 are always 16 bit.
*/
u32 ps3_read_ctr(u32 cpu, u32 ctr)
{
u32 val;
u32 phys_ctr = ctr & (NR_PHYS_CTRS - 1);
val = ps3_read_phys_ctr(cpu, phys_ctr);
if (ps3_get_ctr_size(cpu, phys_ctr) == 16)
val = (ctr < NR_PHYS_CTRS) ? (val >> 16) : (val & 0xffff);
return val;
}
EXPORT_SYMBOL_GPL(ps3_read_ctr);
/**
* ps3_write_ctr - Write counter.
*
* Write 16 or 32 bits depending on the current size of the counter.
* Counters 4, 5, 6 & 7 are always 16 bit.
*/
void ps3_write_ctr(u32 cpu, u32 ctr, u32 val)
{
u32 phys_ctr;
u32 phys_val;
phys_ctr = ctr & (NR_PHYS_CTRS - 1);
if (ps3_get_ctr_size(cpu, phys_ctr) == 16) {
phys_val = ps3_read_phys_ctr(cpu, phys_ctr);
if (ctr < NR_PHYS_CTRS)
val = (val << 16) | (phys_val & 0xffff);
else
val = (val & 0xffff) | (phys_val & 0xffff0000);
}
ps3_write_phys_ctr(cpu, phys_ctr, val);
}
EXPORT_SYMBOL_GPL(ps3_write_ctr);
/**
* ps3_read_pm07_control - Read counter control registers.
*
* Each logical counter has a corresponding control register.
*/
u32 ps3_read_pm07_control(u32 cpu, u32 ctr)
{
return 0;
}
EXPORT_SYMBOL_GPL(ps3_read_pm07_control);
/**
* ps3_write_pm07_control - Write counter control registers.
*
* Each logical counter has a corresponding control register.
*/
void ps3_write_pm07_control(u32 cpu, u32 ctr, u32 val)
{
int result;
static const u64 mask = 0xFFFFFFFFFFFFFFFFULL;
u64 old_value;
if (ctr >= NR_CTRS) {
dev_dbg(sbd_core(), "%s:%u: ctr too big: %u\n", __func__,
__LINE__, ctr);
return;
}
result = lv1_set_lpm_counter_control(lpm_priv->lpm_id, ctr, val, mask,
&old_value);
if (result)
dev_err(sbd_core(), "%s:%u: lv1_set_lpm_counter_control "
"failed: ctr %u, %s\n", __func__, __LINE__, ctr,
ps3_result(result));
}
EXPORT_SYMBOL_GPL(ps3_write_pm07_control);
/**
* ps3_read_pm - Read Other LPM control registers.
*/
u32 ps3_read_pm(u32 cpu, enum pm_reg_name reg)
{
int result = 0;
u64 val = 0;
switch (reg) {
case pm_control:
return lpm_priv->shadow.pm_control;
case trace_address:
return CBE_PM_TRACE_BUF_EMPTY;
case pm_start_stop:
return lpm_priv->shadow.pm_start_stop;
case pm_interval:
result = lv1_set_lpm_interval(lpm_priv->lpm_id, 0, 0, &val);
if (result) {
val = 0;
dev_dbg(sbd_core(), "%s:%u: lv1 set_interval failed: "
"reg %u, %s\n", __func__, __LINE__, reg,
ps3_result(result));
}
return (u32)val;
case group_control:
return lpm_priv->shadow.group_control;
case debug_bus_control:
return lpm_priv->shadow.debug_bus_control;
case pm_status:
result = lv1_get_lpm_interrupt_status(lpm_priv->lpm_id,
&val);
if (result) {
val = 0;
dev_dbg(sbd_core(), "%s:%u: lv1 get_lpm_status failed: "
"reg %u, %s\n", __func__, __LINE__, reg,
ps3_result(result));
}
return (u32)val;
case ext_tr_timer:
return 0;
default:
dev_dbg(sbd_core(), "%s:%u: unknown reg: %d\n", __func__,
__LINE__, reg);
BUG();
break;
}
return 0;
}
EXPORT_SYMBOL_GPL(ps3_read_pm);
/**
* ps3_write_pm - Write Other LPM control registers.
*/
void ps3_write_pm(u32 cpu, enum pm_reg_name reg, u32 val)
{
int result = 0;
u64 dummy;
switch (reg) {
case group_control:
if (val != lpm_priv->shadow.group_control)
result = lv1_set_lpm_group_control(lpm_priv->lpm_id,
val,
PS3_WRITE_PM_MASK,
&dummy);
lpm_priv->shadow.group_control = val;
break;
case debug_bus_control:
if (val != lpm_priv->shadow.debug_bus_control)
result = lv1_set_lpm_debug_bus_control(lpm_priv->lpm_id,
val,
PS3_WRITE_PM_MASK,
&dummy);
lpm_priv->shadow.debug_bus_control = val;
break;
case pm_control:
if (use_start_stop_bookmark)
val |= (PS3_PM_CONTROL_PPU_TH0_BOOKMARK |
PS3_PM_CONTROL_PPU_TH1_BOOKMARK);
if (val != lpm_priv->shadow.pm_control)
result = lv1_set_lpm_general_control(lpm_priv->lpm_id,
val,
PS3_WRITE_PM_MASK,
0, 0, &dummy,
&dummy);
lpm_priv->shadow.pm_control = val;
break;
case pm_interval:
result = lv1_set_lpm_interval(lpm_priv->lpm_id, val,
PS3_WRITE_PM_MASK, &dummy);
break;
case pm_start_stop:
if (val != lpm_priv->shadow.pm_start_stop)
result = lv1_set_lpm_trigger_control(lpm_priv->lpm_id,
val,
PS3_WRITE_PM_MASK,
&dummy);
lpm_priv->shadow.pm_start_stop = val;
break;
case trace_address:
case ext_tr_timer:
case pm_status:
break;
default:
dev_dbg(sbd_core(), "%s:%u: unknown reg: %d\n", __func__,
__LINE__, reg);
BUG();
break;
}
if (result)
dev_err(sbd_core(), "%s:%u: lv1 set_control failed: "
"reg %u, %s\n", __func__, __LINE__, reg,
ps3_result(result));
}
EXPORT_SYMBOL_GPL(ps3_write_pm);
/**
* ps3_get_ctr_size - Get the size of a physical counter.
*
* Returns either 16 or 32.
*/
u32 ps3_get_ctr_size(u32 cpu, u32 phys_ctr)
{
u32 pm_ctrl;
if (phys_ctr >= NR_PHYS_CTRS) {
dev_dbg(sbd_core(), "%s:%u: phys_ctr too big: %u\n", __func__,
__LINE__, phys_ctr);
return 0;
}
pm_ctrl = ps3_read_pm(cpu, pm_control);
return (pm_ctrl & CBE_PM_16BIT_CTR(phys_ctr)) ? 16 : 32;
}
EXPORT_SYMBOL_GPL(ps3_get_ctr_size);
/**
* ps3_set_ctr_size - Set the size of a physical counter to 16 or 32 bits.
*/
void ps3_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size)
{
u32 pm_ctrl;
if (phys_ctr >= NR_PHYS_CTRS) {
dev_dbg(sbd_core(), "%s:%u: phys_ctr too big: %u\n", __func__,
__LINE__, phys_ctr);
return;
}
pm_ctrl = ps3_read_pm(cpu, pm_control);
switch (ctr_size) {
case 16:
pm_ctrl |= CBE_PM_16BIT_CTR(phys_ctr);
ps3_write_pm(cpu, pm_control, pm_ctrl);
break;
case 32:
pm_ctrl &= ~CBE_PM_16BIT_CTR(phys_ctr);
ps3_write_pm(cpu, pm_control, pm_ctrl);
break;
default:
BUG();
}
}
EXPORT_SYMBOL_GPL(ps3_set_ctr_size);
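/*
 * Illustrative sketch (not part of the driver): splitting physical counter 0
 * into two 16 bit counters and reading both halves.  With a 16 bit counter
 * size, logical counters 0..NR_PHYS_CTRS-1 map to the upper halves and
 * logical counters NR_PHYS_CTRS..2*NR_PHYS_CTRS-1 map to the lower halves.
 */
static void __maybe_unused example_split_counter(u32 cpu)
{
	u32 upper, lower;

	ps3_set_ctr_size(cpu, 0, 16);
	upper = ps3_read_ctr(cpu, 0);			/* upper 16 bits */
	lower = ps3_read_ctr(cpu, 0 + NR_PHYS_CTRS);	/* lower 16 bits */
	pr_debug("%s: upper %u, lower %u\n", __func__, upper, lower);
}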
static u64 pm_translate_signal_group_number_on_island2(u64 subgroup)
{
if (subgroup == 2)
subgroup = 3;
if (subgroup <= 6)
return PM_ISLAND2_BASE_SIGNAL_GROUP_NUMBER + subgroup;
else if (subgroup == 7)
return PM_ISLAND2_SIGNAL_GROUP_NUMBER1;
else
return PM_ISLAND2_SIGNAL_GROUP_NUMBER2;
}
static u64 pm_translate_signal_group_number_on_island3(u64 subgroup)
{
switch (subgroup) {
case 2:
case 3:
case 4:
subgroup += 2;
break;
case 5:
subgroup = 8;
break;
default:
break;
}
return PM_ISLAND3_BASE_SIGNAL_GROUP_NUMBER + subgroup;
}
static u64 pm_translate_signal_group_number_on_island4(u64 subgroup)
{
return PM_ISLAND4_BASE_SIGNAL_GROUP_NUMBER + subgroup;
}
static u64 pm_translate_signal_group_number_on_island5(u64 subgroup)
{
switch (subgroup) {
case 3:
subgroup = 4;
break;
case 4:
subgroup = 6;
break;
default:
break;
}
return PM_ISLAND5_BASE_SIGNAL_GROUP_NUMBER + subgroup;
}
static u64 pm_translate_signal_group_number_on_island6(u64 subgroup,
u64 subsubgroup)
{
switch (subgroup) {
case 3:
case 4:
case 5:
subgroup += 1;
break;
default:
break;
}
switch (subsubgroup) {
case 4:
case 5:
case 6:
subsubgroup += 2;
break;
case 7:
case 8:
case 9:
case 10:
subsubgroup += 4;
break;
case 11:
case 12:
case 13:
subsubgroup += 5;
break;
default:
break;
}
if (subgroup <= 5)
return (PM_ISLAND6_BASE_SIGNAL_GROUP_NUMBER + subgroup);
else
return (PM_ISLAND6_BASE_SIGNAL_GROUP_NUMBER + subgroup
+ subsubgroup - 1);
}
static u64 pm_translate_signal_group_number_on_island7(u64 subgroup)
{
return PM_ISLAND7_BASE_SIGNAL_GROUP_NUMBER + subgroup;
}
static u64 pm_translate_signal_group_number_on_island8(u64 subgroup)
{
return PM_ISLAND8_BASE_SIGNAL_GROUP_NUMBER + subgroup;
}
static u64 pm_signal_group_to_ps3_lv1_signal_group(u64 group)
{
u64 island;
u64 subgroup;
u64 subsubgroup;
subgroup = 0;
subsubgroup = 0;
island = 0;
if (group < 1000) {
if (group < 100) {
if (20 <= group && group < 30) {
island = 2;
subgroup = group - 20;
} else if (30 <= group && group < 40) {
island = 3;
subgroup = group - 30;
} else if (40 <= group && group < 50) {
island = 4;
subgroup = group - 40;
} else if (50 <= group && group < 60) {
island = 5;
subgroup = group - 50;
} else if (60 <= group && group < 70) {
island = 6;
subgroup = group - 60;
} else if (70 <= group && group < 80) {
island = 7;
subgroup = group - 70;
} else if (80 <= group && group < 90) {
island = 8;
subgroup = group - 80;
}
} else if (200 <= group && group < 300) {
island = 2;
subgroup = group - 200;
} else if (600 <= group && group < 700) {
island = 6;
subgroup = 5;
subsubgroup = group - 650;
}
} else if (6000 <= group && group < 7000) {
island = 6;
subgroup = 5;
subsubgroup = group - 6500;
}
switch (island) {
case 2:
return pm_translate_signal_group_number_on_island2(subgroup);
case 3:
return pm_translate_signal_group_number_on_island3(subgroup);
case 4:
return pm_translate_signal_group_number_on_island4(subgroup);
case 5:
return pm_translate_signal_group_number_on_island5(subgroup);
case 6:
return pm_translate_signal_group_number_on_island6(subgroup,
subsubgroup);
case 7:
return pm_translate_signal_group_number_on_island7(subgroup);
case 8:
return pm_translate_signal_group_number_on_island8(subgroup);
default:
dev_dbg(sbd_core(), "%s:%u: island not found: %llu\n", __func__,
__LINE__, group);
BUG();
break;
}
return 0;
}
static u64 pm_bus_word_to_ps3_lv1_bus_word(u8 word)
{
switch (word) {
case 1:
return 0xF000;
case 2:
return 0x0F00;
case 4:
return 0x00F0;
case 8:
default:
return 0x000F;
}
}
static int __ps3_set_signal(u64 lv1_signal_group, u64 bus_select,
u64 signal_select, u64 attr1, u64 attr2, u64 attr3)
{
int ret;
ret = lv1_set_lpm_signal(lpm_priv->lpm_id, lv1_signal_group, bus_select,
signal_select, attr1, attr2, attr3);
if (ret)
dev_err(sbd_core(),
"%s:%u: error:%d 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n",
__func__, __LINE__, ret, lv1_signal_group, bus_select,
signal_select, attr1, attr2, attr3);
return ret;
}
int ps3_set_signal(u64 signal_group, u8 signal_bit, u16 sub_unit,
u8 bus_word)
{
int ret;
u64 lv1_signal_group;
u64 bus_select;
u64 signal_select;
u64 attr1, attr2, attr3;
if (signal_group == 0)
return __ps3_set_signal(0, 0, 0, 0, 0, 0);
lv1_signal_group =
pm_signal_group_to_ps3_lv1_signal_group(signal_group);
bus_select = pm_bus_word_to_ps3_lv1_bus_word(bus_word);
switch (signal_group) {
case PM_SIG_GROUP_SPU_TRIGGER:
signal_select = 1;
signal_select = signal_select << (63 - signal_bit);
break;
case PM_SIG_GROUP_SPU_EVENT:
signal_select = 1;
signal_select = (signal_select << (63 - signal_bit)) | 0x3;
break;
default:
signal_select = 0;
break;
}
/*
* 0: physical object.
* 1: logical object.
* This parameter is only used for the PPE and SPE signals.
*/
attr1 = 1;
/*
* This parameter is used to specify the target physical/logical
* PPE/SPE object.
*/
if (PM_SIG_GROUP_SPU <= signal_group &&
signal_group < PM_SIG_GROUP_MFC_MAX)
attr2 = sub_unit;
else
attr2 = lpm_priv->pu_id;
/*
* This parameter is only used for setting the SPE signal.
*/
attr3 = 0;
ret = __ps3_set_signal(lv1_signal_group, bus_select, signal_select,
attr1, attr2, attr3);
if (ret)
dev_err(sbd_core(), "%s:%u: __ps3_set_signal failed: %d\n",
__func__, __LINE__, ret);
return ret;
}
EXPORT_SYMBOL_GPL(ps3_set_signal);
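/*
 * Worked example (illustrative only): the signal group numbers passed to
 * ps3_set_signal() are remapped to lv1 group numbers by the helpers above.
 * For instance, group 42 (PM_SIG_GROUP_SPU_TRIGGER) falls in the 40..49
 * range, so island = 4 and subgroup = 2, giving
 * PM_ISLAND4_BASE_SIGNAL_GROUP_NUMBER + 2 = 17, which is
 * PM_SPU_TRIGGER_SIGNAL_GROUP_NUMBER.  Passing signal_group == 0, as in
 *
 *	ps3_set_signal(0, 0, 0, 0);
 *
 * short-circuits to __ps3_set_signal(0, 0, 0, 0, 0, 0).
 */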
u32 ps3_get_hw_thread_id(int cpu)
{
return get_hard_smp_processor_id(cpu);
}
EXPORT_SYMBOL_GPL(ps3_get_hw_thread_id);
/**
* ps3_enable_pm - Enable the entire performance monitoring unit.
*
* When we enable the LPM, all pending writes to counters get committed.
*/
void ps3_enable_pm(u32 cpu)
{
int result;
u64 tmp;
int insert_bookmark = 0;
lpm_priv->tb_count = 0;
if (use_start_stop_bookmark) {
if (!(lpm_priv->shadow.pm_start_stop &
(PS3_PM_START_STOP_START_MASK
| PS3_PM_START_STOP_STOP_MASK))) {
result = lv1_set_lpm_trigger_control(lpm_priv->lpm_id,
(PS3_PM_START_STOP_PPU_TH0_BOOKMARK_START |
PS3_PM_START_STOP_PPU_TH1_BOOKMARK_START |
PS3_PM_START_STOP_PPU_TH0_BOOKMARK_STOP |
PS3_PM_START_STOP_PPU_TH1_BOOKMARK_STOP),
0xFFFFFFFFFFFFFFFFULL, &tmp);
if (result)
dev_err(sbd_core(), "%s:%u: "
"lv1_set_lpm_trigger_control failed: "
"%s\n", __func__, __LINE__,
ps3_result(result));
insert_bookmark = !result;
}
}
result = lv1_start_lpm(lpm_priv->lpm_id);
if (result)
dev_err(sbd_core(), "%s:%u: lv1_start_lpm failed: %s\n",
__func__, __LINE__, ps3_result(result));
if (use_start_stop_bookmark && !result && insert_bookmark)
ps3_set_bookmark(get_tb() | PS3_PM_BOOKMARK_START);
}
EXPORT_SYMBOL_GPL(ps3_enable_pm);
/**
* ps3_disable_pm - Disable the entire performance monitoring unit.
*/
void ps3_disable_pm(u32 cpu)
{
int result;
u64 tmp;
ps3_set_bookmark(get_tb() | PS3_PM_BOOKMARK_STOP);
result = lv1_stop_lpm(lpm_priv->lpm_id, &tmp);
if (result) {
if (result != LV1_WRONG_STATE)
dev_err(sbd_core(), "%s:%u: lv1_stop_lpm failed: %s\n",
__func__, __LINE__, ps3_result(result));
return;
}
lpm_priv->tb_count = tmp;
dev_dbg(sbd_core(), "%s:%u: tb_count %llu (%llxh)\n", __func__, __LINE__,
lpm_priv->tb_count, lpm_priv->tb_count);
}
EXPORT_SYMBOL_GPL(ps3_disable_pm);
/**
* ps3_lpm_copy_tb - Copy data from the trace buffer to a kernel buffer.
* @offset: Offset in bytes from the start of the trace buffer.
* @buf: Copy destination.
* @count: Maximum count of bytes to copy.
* @bytes_copied: Pointer to a variable that will receive the number of
* bytes copied to @buf.
*
* On error @buf will contain any successfully copied trace buffer data
* and bytes_copied will be set to the number of bytes successfully copied.
*/
int ps3_lpm_copy_tb(unsigned long offset, void *buf, unsigned long count,
unsigned long *bytes_copied)
{
int result;
*bytes_copied = 0;
if (!lpm_priv->tb_cache)
return -EPERM;
if (offset >= lpm_priv->tb_count)
return 0;
count = min_t(u64, count, lpm_priv->tb_count - offset);
while (*bytes_copied < count) {
const unsigned long request = count - *bytes_copied;
u64 tmp;
result = lv1_copy_lpm_trace_buffer(lpm_priv->lpm_id, offset,
request, &tmp);
if (result) {
dev_dbg(sbd_core(), "%s:%u: 0x%lx bytes at 0x%lx\n",
__func__, __LINE__, request, offset);
dev_err(sbd_core(), "%s:%u: lv1_copy_lpm_trace_buffer "
"failed: %s\n", __func__, __LINE__,
ps3_result(result));
return result == LV1_WRONG_STATE ? -EBUSY : -EINVAL;
}
memcpy(buf, lpm_priv->tb_cache, tmp);
buf += tmp;
*bytes_copied += tmp;
offset += tmp;
}
dev_dbg(sbd_core(), "%s:%u: copied %lxh bytes\n", __func__, __LINE__,
*bytes_copied);
return 0;
}
EXPORT_SYMBOL_GPL(ps3_lpm_copy_tb);
/**
* ps3_lpm_copy_tb_to_user - Copy data from the trace buffer to a user buffer.
* @offset: Offset in bytes from the start of the trace buffer.
* @buf: A __user copy destination.
* @count: Maximum count of bytes to copy.
* @bytes_copied: Pointer to a variable that will receive the number of
* bytes copied to @buf.
*
* On error @buf will contain any successfully copied trace buffer data
* and bytes_copied will be set to the number of bytes successfully copied.
*/
int ps3_lpm_copy_tb_to_user(unsigned long offset, void __user *buf,
unsigned long count, unsigned long *bytes_copied)
{
int result;
*bytes_copied = 0;
if (!lpm_priv->tb_cache)
return -EPERM;
if (offset >= lpm_priv->tb_count)
return 0;
count = min_t(u64, count, lpm_priv->tb_count - offset);
while (*bytes_copied < count) {
const unsigned long request = count - *bytes_copied;
u64 tmp;
result = lv1_copy_lpm_trace_buffer(lpm_priv->lpm_id, offset,
request, &tmp);
if (result) {
dev_dbg(sbd_core(), "%s:%u: 0x%lx bytes at 0x%lx\n",
__func__, __LINE__, request, offset);
dev_err(sbd_core(), "%s:%u: lv1_copy_lpm_trace_buffer "
"failed: %s\n", __func__, __LINE__,
ps3_result(result));
return result == LV1_WRONG_STATE ? -EBUSY : -EINVAL;
}
result = copy_to_user(buf, lpm_priv->tb_cache, tmp);
if (result) {
dev_dbg(sbd_core(), "%s:%u: 0x%llx bytes at 0x%p\n",
__func__, __LINE__, tmp, buf);
dev_err(sbd_core(), "%s:%u: copy_to_user failed: %d\n",
__func__, __LINE__, result);
return -EFAULT;
}
buf += tmp;
*bytes_copied += tmp;
offset += tmp;
}
dev_dbg(sbd_core(), "%s:%u: copied %lxh bytes\n", __func__, __LINE__,
*bytes_copied);
return 0;
}
EXPORT_SYMBOL_GPL(ps3_lpm_copy_tb_to_user);
/**
 * ps3_get_and_clear_pm_interrupts - Clear interrupts for the entire performance monitoring unit.
 *
 * Reading pm_status clears the interrupt bits.
 */
u32 ps3_get_and_clear_pm_interrupts(u32 cpu)
{
return ps3_read_pm(cpu, pm_status);
}
EXPORT_SYMBOL_GPL(ps3_get_and_clear_pm_interrupts);
/**
 * ps3_enable_pm_interrupts - Enable interrupts for the entire performance monitoring unit.
 *
 * Enables the interrupt bits in the pm_status register.
 */
void ps3_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask)
{
if (mask)
ps3_write_pm(cpu, pm_status, mask);
}
EXPORT_SYMBOL_GPL(ps3_enable_pm_interrupts);
/**
 * ps3_disable_pm_interrupts - Disable interrupts for the entire performance monitoring unit.
 */
void ps3_disable_pm_interrupts(u32 cpu)
{
ps3_get_and_clear_pm_interrupts(cpu);
ps3_write_pm(cpu, pm_status, 0);
}
EXPORT_SYMBOL_GPL(ps3_disable_pm_interrupts);
/**
* ps3_lpm_open - Open the logical performance monitor device.
 * @tb_type: The type of trace buffer lv1 should use for this lpm instance,
 *  one of enum ps3_lpm_tb_type.
* @tb_cache: Optional user supplied buffer to use as the trace buffer cache.
* If NULL, the driver will allocate and manage an internal buffer.
* Unused when @tb_type is PS3_LPM_TB_TYPE_NONE.
* @tb_cache_size: The size in bytes of the user supplied @tb_cache buffer.
* Unused when @tb_cache is NULL or @tb_type is PS3_LPM_TB_TYPE_NONE.
*/
int ps3_lpm_open(enum ps3_lpm_tb_type tb_type, void *tb_cache,
u64 tb_cache_size)
{
int result;
u64 tb_size;
BUG_ON(!lpm_priv);
BUG_ON(tb_type != PS3_LPM_TB_TYPE_NONE
&& tb_type != PS3_LPM_TB_TYPE_INTERNAL);
if (tb_type == PS3_LPM_TB_TYPE_NONE && tb_cache)
dev_dbg(sbd_core(), "%s:%u: bad in vals\n", __func__, __LINE__);
if (!atomic_add_unless(&lpm_priv->open, 1, 1)) {
dev_dbg(sbd_core(), "%s:%u: busy\n", __func__, __LINE__);
return -EBUSY;
}
/* Note tb_cache needs 128 byte alignment. */
if (tb_type == PS3_LPM_TB_TYPE_NONE) {
lpm_priv->tb_cache_size = 0;
lpm_priv->tb_cache_internal = NULL;
lpm_priv->tb_cache = NULL;
} else if (tb_cache) {
if (tb_cache != (void *)ALIGN((unsigned long)tb_cache, 128)
|| tb_cache_size != ALIGN(tb_cache_size, 128)) {
dev_err(sbd_core(), "%s:%u: unaligned tb_cache\n",
__func__, __LINE__);
result = -EINVAL;
goto fail_align;
}
lpm_priv->tb_cache_size = tb_cache_size;
lpm_priv->tb_cache_internal = NULL;
lpm_priv->tb_cache = tb_cache;
} else {
lpm_priv->tb_cache_size = PS3_LPM_DEFAULT_TB_CACHE_SIZE;
lpm_priv->tb_cache_internal = kzalloc(
lpm_priv->tb_cache_size + 127, GFP_KERNEL);
if (!lpm_priv->tb_cache_internal) {
result = -ENOMEM;
goto fail_malloc;
}
lpm_priv->tb_cache = (void *)ALIGN(
(unsigned long)lpm_priv->tb_cache_internal, 128);
}
result = lv1_construct_lpm(lpm_priv->node_id, tb_type, 0, 0,
ps3_mm_phys_to_lpar(__pa(lpm_priv->tb_cache)),
lpm_priv->tb_cache_size, &lpm_priv->lpm_id,
&lpm_priv->outlet_id, &tb_size);
if (result) {
dev_err(sbd_core(), "%s:%u: lv1_construct_lpm failed: %s\n",
__func__, __LINE__, ps3_result(result));
result = -EINVAL;
goto fail_construct;
}
lpm_priv->shadow.pm_control = PS3_LPM_SHADOW_REG_INIT;
lpm_priv->shadow.pm_start_stop = PS3_LPM_SHADOW_REG_INIT;
lpm_priv->shadow.group_control = PS3_LPM_SHADOW_REG_INIT;
lpm_priv->shadow.debug_bus_control = PS3_LPM_SHADOW_REG_INIT;
dev_dbg(sbd_core(), "%s:%u: lpm_id 0x%llx, outlet_id 0x%llx, "
"tb_size 0x%llx\n", __func__, __LINE__, lpm_priv->lpm_id,
lpm_priv->outlet_id, tb_size);
return 0;
fail_construct:
kfree(lpm_priv->tb_cache_internal);
lpm_priv->tb_cache_internal = NULL;
fail_malloc:
fail_align:
atomic_dec(&lpm_priv->open);
return result;
}
EXPORT_SYMBOL_GPL(ps3_lpm_open);
/**
* ps3_lpm_close - Close the lpm device.
*
*/
int ps3_lpm_close(void)
{
dev_dbg(sbd_core(), "%s:%u\n", __func__, __LINE__);
lv1_destruct_lpm(lpm_priv->lpm_id);
lpm_priv->lpm_id = 0;
kfree(lpm_priv->tb_cache_internal);
lpm_priv->tb_cache_internal = NULL;
atomic_dec(&lpm_priv->open);
return 0;
}
EXPORT_SYMBOL_GPL(ps3_lpm_close);
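/*
 * Illustrative usage sketch (not part of the original driver): how a client
 * of the exported LPM API above might open the device with an internally
 * managed trace buffer cache, run the counters, then read the trace buffer
 * back and close. Assumes the LPM device has already been probed; the buffer
 * size and cpu number are arbitrary example values.
 */
static int __maybe_unused ps3_lpm_usage_sketch(u32 cpu)
{
	unsigned long copied;
	void *buf;
	int result;

	/* NULL tb_cache: the driver allocates its own 128-byte aligned cache. */
	result = ps3_lpm_open(PS3_LPM_TB_TYPE_INTERNAL, NULL, 0);
	if (result)
		return result;

	ps3_enable_pm(cpu);
	/* ... let the performance monitor run for a while ... */
	ps3_disable_pm(cpu);

	buf = kzalloc(PS3_LPM_DEFAULT_TB_CACHE_SIZE, GFP_KERNEL);
	if (!buf) {
		result = -ENOMEM;
		goto out_close;
	}

	/* Copy from the start of the trace buffer into a local buffer. */
	result = ps3_lpm_copy_tb(0, buf, PS3_LPM_DEFAULT_TB_CACHE_SIZE,
				 &copied);
	if (!result)
		pr_debug("%s: copied %lu bytes of trace data\n", __func__,
			 copied);

	kfree(buf);
out_close:
	ps3_lpm_close();
	return result;
}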
static int ps3_lpm_probe(struct ps3_system_bus_device *dev)
{
dev_dbg(&dev->core, " -> %s:%u\n", __func__, __LINE__);
if (lpm_priv) {
dev_info(&dev->core, "%s:%u: called twice\n",
__func__, __LINE__);
return -EBUSY;
}
lpm_priv = kzalloc(sizeof(*lpm_priv), GFP_KERNEL);
if (!lpm_priv)
return -ENOMEM;
lpm_priv->sbd = dev;
lpm_priv->node_id = dev->lpm.node_id;
lpm_priv->pu_id = dev->lpm.pu_id;
lpm_priv->rights = dev->lpm.rights;
dev_info(&dev->core, " <- %s:%u:\n", __func__, __LINE__);
return 0;
}
static void ps3_lpm_remove(struct ps3_system_bus_device *dev)
{
dev_dbg(&dev->core, " -> %s:%u:\n", __func__, __LINE__);
ps3_lpm_close();
kfree(lpm_priv);
lpm_priv = NULL;
dev_info(&dev->core, " <- %s:%u:\n", __func__, __LINE__);
}
static struct ps3_system_bus_driver ps3_lpm_driver = {
.match_id = PS3_MATCH_ID_LPM,
.core.name = "ps3-lpm",
.core.owner = THIS_MODULE,
.probe = ps3_lpm_probe,
.remove = ps3_lpm_remove,
.shutdown = ps3_lpm_remove,
};
static int __init ps3_lpm_init(void)
{
pr_debug("%s:%d:\n", __func__, __LINE__);
return ps3_system_bus_driver_register(&ps3_lpm_driver);
}
static void __exit ps3_lpm_exit(void)
{
pr_debug("%s:%d:\n", __func__, __LINE__);
ps3_system_bus_driver_unregister(&ps3_lpm_driver);
}
module_init(ps3_lpm_init);
module_exit(ps3_lpm_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("PS3 Logical Performance Monitor Driver");
MODULE_AUTHOR("Sony Corporation");
MODULE_ALIAS(PS3_MODULE_ALIAS_LPM);
|
linux-master
|
drivers/ps3/ps3-lpm.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* PS3 System Manager.
*
* Copyright (C) 2007 Sony Computer Entertainment Inc.
* Copyright 2007 Sony Corp.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/reboot.h>
#include <linux/sched/signal.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#include <asm/ps3.h>
#include "vuart.h"
/**
* ps3_sys_manager - PS3 system manager driver.
*
* The system manager provides an asynchronous system event notification
 * mechanism for reporting events such as thermal alerts and button presses to
 * guests. It also provides support to control system shutdown and startup.
*
* The actual system manager is implemented as an application running in the
* system policy module in lpar_1. Guests communicate with the system manager
* through port 2 of the vuart using a simple packet message protocol.
 * Messages consist of a fixed-format header followed by a message-specific
 * payload.
*/
/**
* struct ps3_sys_manager_header - System manager message header.
* @version: Header version, currently 1.
* @size: Header size in bytes, currently 16.
* @payload_size: Message payload size in bytes.
* @service_id: Message type, one of enum ps3_sys_manager_service_id.
* @request_tag: Unique number to identify reply.
*/
struct ps3_sys_manager_header {
/* version 1 */
u8 version;
u8 size;
u16 reserved_1;
u32 payload_size;
u16 service_id;
u16 reserved_2;
u32 request_tag;
};
#define dump_sm_header(_h) _dump_sm_header(_h, __func__, __LINE__)
static void __maybe_unused _dump_sm_header(
const struct ps3_sys_manager_header *h, const char *func, int line)
{
pr_debug("%s:%d: version: %xh\n", func, line, h->version);
pr_debug("%s:%d: size: %xh\n", func, line, h->size);
pr_debug("%s:%d: payload_size: %xh\n", func, line, h->payload_size);
pr_debug("%s:%d: service_id: %xh\n", func, line, h->service_id);
pr_debug("%s:%d: request_tag: %xh\n", func, line, h->request_tag);
}
/**
* @PS3_SM_RX_MSG_LEN_MIN - Shortest received message length.
* @PS3_SM_RX_MSG_LEN_MAX - Longest received message length.
*
* Currently all messages received from the system manager are either
* (16 bytes header + 8 bytes payload = 24 bytes) or (16 bytes header
* + 16 bytes payload = 32 bytes). This knowledge is used to simplify
* the logic.
*/
enum {
PS3_SM_RX_MSG_LEN_MIN = 24,
PS3_SM_RX_MSG_LEN_MAX = 32,
};
/**
* enum ps3_sys_manager_service_id - Message header service_id.
* @PS3_SM_SERVICE_ID_REQUEST: guest --> sys_manager.
* @PS3_SM_SERVICE_ID_REQUEST_ERROR: guest <-- sys_manager.
* @PS3_SM_SERVICE_ID_COMMAND: guest <-- sys_manager.
* @PS3_SM_SERVICE_ID_RESPONSE: guest --> sys_manager.
* @PS3_SM_SERVICE_ID_SET_ATTR: guest --> sys_manager.
* @PS3_SM_SERVICE_ID_EXTERN_EVENT: guest <-- sys_manager.
* @PS3_SM_SERVICE_ID_SET_NEXT_OP: guest --> sys_manager.
*
 * PS3_SM_SERVICE_ID_REQUEST_ERROR is returned for invalid data values in
 * a PS3_SM_SERVICE_ID_REQUEST message. It also seems to be returned when
* a REQUEST message is sent at the wrong time.
*/
enum ps3_sys_manager_service_id {
/* version 1 */
PS3_SM_SERVICE_ID_REQUEST = 1,
PS3_SM_SERVICE_ID_RESPONSE = 2,
PS3_SM_SERVICE_ID_COMMAND = 3,
PS3_SM_SERVICE_ID_EXTERN_EVENT = 4,
PS3_SM_SERVICE_ID_SET_NEXT_OP = 5,
PS3_SM_SERVICE_ID_REQUEST_ERROR = 6,
PS3_SM_SERVICE_ID_SET_ATTR = 8,
};
/**
* enum ps3_sys_manager_attr - Notification attribute (bit position mask).
* @PS3_SM_ATTR_POWER: Power button.
* @PS3_SM_ATTR_RESET: Reset button, not available on retail console.
* @PS3_SM_ATTR_THERMAL: System thermal alert.
* @PS3_SM_ATTR_CONTROLLER: Remote controller event.
* @PS3_SM_ATTR_ALL: Logical OR of all.
*
* The guest tells the system manager which events it is interested in receiving
* notice of by sending the system manager a logical OR of notification
* attributes via the ps3_sys_manager_send_attr() routine.
*/
enum ps3_sys_manager_attr {
/* version 1 */
PS3_SM_ATTR_POWER = 1,
PS3_SM_ATTR_RESET = 2,
PS3_SM_ATTR_THERMAL = 4,
PS3_SM_ATTR_CONTROLLER = 8, /* bogus? */
PS3_SM_ATTR_ALL = 0x0f,
};
/**
* enum ps3_sys_manager_event - External event type, reported by system manager.
* @PS3_SM_EVENT_POWER_PRESSED: payload.value =
* enum ps3_sys_manager_button_event.
* @PS3_SM_EVENT_POWER_RELEASED: payload.value = time pressed in millisec.
* @PS3_SM_EVENT_RESET_PRESSED: payload.value =
* enum ps3_sys_manager_button_event.
* @PS3_SM_EVENT_RESET_RELEASED: payload.value = time pressed in millisec.
* @PS3_SM_EVENT_THERMAL_ALERT: payload.value = thermal zone id.
* @PS3_SM_EVENT_THERMAL_CLEARED: payload.value = thermal zone id.
*/
enum ps3_sys_manager_event {
/* version 1 */
PS3_SM_EVENT_POWER_PRESSED = 3,
PS3_SM_EVENT_POWER_RELEASED = 4,
PS3_SM_EVENT_RESET_PRESSED = 5,
PS3_SM_EVENT_RESET_RELEASED = 6,
PS3_SM_EVENT_THERMAL_ALERT = 7,
PS3_SM_EVENT_THERMAL_CLEARED = 8,
/* no info on controller events */
};
/**
* enum ps3_sys_manager_button_event - Button event payload values.
* @PS3_SM_BUTTON_EVENT_HARD: Hardware generated event.
* @PS3_SM_BUTTON_EVENT_SOFT: Software generated event.
*/
enum ps3_sys_manager_button_event {
PS3_SM_BUTTON_EVENT_HARD = 0,
PS3_SM_BUTTON_EVENT_SOFT = 1,
};
/**
* enum ps3_sys_manager_next_op - Operation to perform after lpar is destroyed.
*/
enum ps3_sys_manager_next_op {
/* version 3 */
PS3_SM_NEXT_OP_SYS_SHUTDOWN = 1,
PS3_SM_NEXT_OP_SYS_REBOOT = 2,
PS3_SM_NEXT_OP_LPAR_REBOOT = 0x82,
};
/**
* enum ps3_sys_manager_wake_source - Next-op wakeup source (bit position mask).
* @PS3_SM_WAKE_DEFAULT: Disk insert, power button, eject button.
* @PS3_SM_WAKE_W_O_L: Ether or wireless LAN.
* @PS3_SM_WAKE_P_O_R: Power on reset.
*
* Additional wakeup sources when specifying PS3_SM_NEXT_OP_SYS_SHUTDOWN.
* The system will always wake from the PS3_SM_WAKE_DEFAULT sources.
* Sources listed here are the only ones available to guests in the
* other-os lpar.
*/
enum ps3_sys_manager_wake_source {
/* version 3 */
PS3_SM_WAKE_DEFAULT = 0,
PS3_SM_WAKE_W_O_L = 0x00000400,
PS3_SM_WAKE_P_O_R = 0x80000000,
};
/**
* user_wake_sources - User specified wakeup sources.
*
* Logical OR of enum ps3_sys_manager_wake_source types.
*/
static u32 user_wake_sources = PS3_SM_WAKE_DEFAULT;
/**
* enum ps3_sys_manager_cmd - Command from system manager to guest.
*
* The guest completes the actions needed, then acks or naks the command via
* ps3_sys_manager_send_response(). In the case of @PS3_SM_CMD_SHUTDOWN,
* the guest must be fully prepared for a system poweroff prior to acking the
* command.
*/
enum ps3_sys_manager_cmd {
/* version 1 */
PS3_SM_CMD_SHUTDOWN = 1, /* shutdown guest OS */
};
/**
* ps3_sm_force_power_off - Poweroff helper.
*
* A global variable used to force a poweroff when the power button has
* been pressed irrespective of how init handles the ctrl_alt_del signal.
*
*/
static unsigned int ps3_sm_force_power_off;
/**
* ps3_sys_manager_write - Helper to write a two part message to the vuart.
*
*/
static int ps3_sys_manager_write(struct ps3_system_bus_device *dev,
const struct ps3_sys_manager_header *header, const void *payload)
{
int result;
BUG_ON(header->version != 1);
BUG_ON(header->size != 16);
BUG_ON(header->payload_size != 8 && header->payload_size != 16);
BUG_ON(header->service_id > 8);
result = ps3_vuart_write(dev, header,
sizeof(struct ps3_sys_manager_header));
if (!result)
result = ps3_vuart_write(dev, payload, header->payload_size);
return result;
}
/**
* ps3_sys_manager_send_attr - Send a 'set attribute' to the system manager.
*
*/
static int ps3_sys_manager_send_attr(struct ps3_system_bus_device *dev,
enum ps3_sys_manager_attr attr)
{
struct ps3_sys_manager_header header;
struct {
u8 version;
u8 reserved_1[3];
u32 attribute;
} payload;
BUILD_BUG_ON(sizeof(payload) != 8);
dev_dbg(&dev->core, "%s:%d: %xh\n", __func__, __LINE__, attr);
memset(&header, 0, sizeof(header));
header.version = 1;
header.size = 16;
header.payload_size = 16;
header.service_id = PS3_SM_SERVICE_ID_SET_ATTR;
memset(&payload, 0, sizeof(payload));
payload.version = 1;
payload.attribute = attr;
return ps3_sys_manager_write(dev, &header, &payload);
}
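/*
 * Illustrative sketch (not part of the original driver): a caller that only
 * cares about power button and thermal events could subscribe with a logical
 * OR of notification attributes, as described for enum ps3_sys_manager_attr
 * above. The probe routine later in this file subscribes to PS3_SM_ATTR_ALL
 * instead.
 */
static int __maybe_unused ps3_sys_manager_send_attr_sketch(
	struct ps3_system_bus_device *dev)
{
	return ps3_sys_manager_send_attr(dev,
		PS3_SM_ATTR_POWER | PS3_SM_ATTR_THERMAL);
}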
/**
* ps3_sys_manager_send_next_op - Send a 'set next op' to the system manager.
*
* Tell the system manager what to do after this lpar is destroyed.
*/
static int ps3_sys_manager_send_next_op(struct ps3_system_bus_device *dev,
enum ps3_sys_manager_next_op op,
enum ps3_sys_manager_wake_source wake_source)
{
struct ps3_sys_manager_header header;
struct {
u8 version;
u8 type;
u8 gos_id;
u8 reserved_1;
u32 wake_source;
u8 reserved_2[8];
} payload;
BUILD_BUG_ON(sizeof(payload) != 16);
dev_dbg(&dev->core, "%s:%d: (%xh)\n", __func__, __LINE__, op);
memset(&header, 0, sizeof(header));
header.version = 1;
header.size = 16;
header.payload_size = 16;
header.service_id = PS3_SM_SERVICE_ID_SET_NEXT_OP;
memset(&payload, 0, sizeof(payload));
payload.version = 3;
payload.type = op;
payload.gos_id = 3; /* other os */
payload.wake_source = wake_source;
return ps3_sys_manager_write(dev, &header, &payload);
}
/**
* ps3_sys_manager_send_request_shutdown - Send 'request' to the system manager.
*
* The guest sends this message to request an operation or action of the system
* manager. The reply is a command message from the system manager. In the
* command handler the guest performs the requested operation. The result of
* the command is then communicated back to the system manager with a response
* message.
*
* Currently, the only supported request is the 'shutdown self' request.
*/
static int ps3_sys_manager_send_request_shutdown(
struct ps3_system_bus_device *dev)
{
struct ps3_sys_manager_header header;
struct {
u8 version;
u8 type;
u8 gos_id;
u8 reserved_1[13];
} payload;
BUILD_BUG_ON(sizeof(payload) != 16);
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
memset(&header, 0, sizeof(header));
header.version = 1;
header.size = 16;
header.payload_size = 16;
header.service_id = PS3_SM_SERVICE_ID_REQUEST;
memset(&payload, 0, sizeof(payload));
payload.version = 1;
payload.type = 1; /* shutdown */
payload.gos_id = 0; /* self */
return ps3_sys_manager_write(dev, &header, &payload);
}
/**
* ps3_sys_manager_send_response - Send a 'response' to the system manager.
* @status: zero = success, others fail.
*
 * The guest sends this message to the system manager to acknowledge success or
* failure of a command sent by the system manager.
*/
static int ps3_sys_manager_send_response(struct ps3_system_bus_device *dev,
u64 status)
{
struct ps3_sys_manager_header header;
struct {
u8 version;
u8 reserved_1[3];
u8 status;
u8 reserved_2[11];
} payload;
BUILD_BUG_ON(sizeof(payload) != 16);
dev_dbg(&dev->core, "%s:%d: (%s)\n", __func__, __LINE__,
(status ? "nak" : "ack"));
memset(&header, 0, sizeof(header));
header.version = 1;
header.size = 16;
header.payload_size = 16;
header.service_id = PS3_SM_SERVICE_ID_RESPONSE;
memset(&payload, 0, sizeof(payload));
payload.version = 1;
payload.status = status;
return ps3_sys_manager_write(dev, &header, &payload);
}
/**
* ps3_sys_manager_handle_event - Second stage event msg handler.
*
*/
static int ps3_sys_manager_handle_event(struct ps3_system_bus_device *dev)
{
int result;
struct {
u8 version;
u8 type;
u8 reserved_1[2];
u32 value;
u8 reserved_2[8];
} event;
BUILD_BUG_ON(sizeof(event) != 16);
result = ps3_vuart_read(dev, &event, sizeof(event));
BUG_ON(result && "need to retry here");
if (event.version != 1) {
dev_dbg(&dev->core, "%s:%d: unsupported event version (%u)\n",
__func__, __LINE__, event.version);
return -EIO;
}
switch (event.type) {
case PS3_SM_EVENT_POWER_PRESSED:
dev_dbg(&dev->core, "%s:%d: POWER_PRESSED (%s)\n",
__func__, __LINE__,
(event.value == PS3_SM_BUTTON_EVENT_SOFT ? "soft"
: "hard"));
ps3_sm_force_power_off = 1;
/*
 * A memory barrier is used here to sync memory since
* ps3_sys_manager_final_restart() could be called on
* another cpu.
*/
wmb();
kill_cad_pid(SIGINT, 1); /* ctrl_alt_del */
break;
case PS3_SM_EVENT_POWER_RELEASED:
dev_dbg(&dev->core, "%s:%d: POWER_RELEASED (%u ms)\n",
__func__, __LINE__, event.value);
break;
case PS3_SM_EVENT_RESET_PRESSED:
dev_dbg(&dev->core, "%s:%d: RESET_PRESSED (%s)\n",
__func__, __LINE__,
(event.value == PS3_SM_BUTTON_EVENT_SOFT ? "soft"
: "hard"));
ps3_sm_force_power_off = 0;
/*
 * A memory barrier is used here to sync memory since
* ps3_sys_manager_final_restart() could be called on
* another cpu.
*/
wmb();
kill_cad_pid(SIGINT, 1); /* ctrl_alt_del */
break;
case PS3_SM_EVENT_RESET_RELEASED:
dev_dbg(&dev->core, "%s:%d: RESET_RELEASED (%u ms)\n",
__func__, __LINE__, event.value);
break;
case PS3_SM_EVENT_THERMAL_ALERT:
dev_dbg(&dev->core, "%s:%d: THERMAL_ALERT (zone %u)\n",
__func__, __LINE__, event.value);
pr_info("PS3 Thermal Alert Zone %u\n", event.value);
break;
case PS3_SM_EVENT_THERMAL_CLEARED:
dev_dbg(&dev->core, "%s:%d: THERMAL_CLEARED (zone %u)\n",
__func__, __LINE__, event.value);
break;
default:
dev_dbg(&dev->core, "%s:%d: unknown event (%u)\n",
__func__, __LINE__, event.type);
return -EIO;
}
return 0;
}
/**
* ps3_sys_manager_handle_cmd - Second stage command msg handler.
*
* The system manager sends this in reply to a 'request' message from the guest.
*/
static int ps3_sys_manager_handle_cmd(struct ps3_system_bus_device *dev)
{
int result;
struct {
u8 version;
u8 type;
u8 reserved_1[14];
} cmd;
BUILD_BUG_ON(sizeof(cmd) != 16);
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
result = ps3_vuart_read(dev, &cmd, sizeof(cmd));
BUG_ON(result && "need to retry here");
if (result)
return result;
if (cmd.version != 1) {
dev_dbg(&dev->core, "%s:%d: unsupported cmd version (%u)\n",
__func__, __LINE__, cmd.version);
return -EIO;
}
if (cmd.type != PS3_SM_CMD_SHUTDOWN) {
dev_dbg(&dev->core, "%s:%d: unknown cmd (%u)\n",
__func__, __LINE__, cmd.type);
return -EIO;
}
ps3_sys_manager_send_response(dev, 0);
return 0;
}
/**
* ps3_sys_manager_handle_msg - First stage msg handler.
*
* Can be called directly to manually poll vuart and pump message handler.
*/
static int ps3_sys_manager_handle_msg(struct ps3_system_bus_device *dev)
{
int result;
struct ps3_sys_manager_header header;
result = ps3_vuart_read(dev, &header,
sizeof(struct ps3_sys_manager_header));
if (result)
return result;
if (header.version != 1) {
dev_dbg(&dev->core, "%s:%d: unsupported header version (%u)\n",
__func__, __LINE__, header.version);
dump_sm_header(&header);
goto fail_header;
}
BUILD_BUG_ON(sizeof(header) != 16);
if (header.size != 16 || (header.payload_size != 8
&& header.payload_size != 16)) {
dump_sm_header(&header);
BUG();
}
switch (header.service_id) {
case PS3_SM_SERVICE_ID_EXTERN_EVENT:
dev_dbg(&dev->core, "%s:%d: EVENT\n", __func__, __LINE__);
return ps3_sys_manager_handle_event(dev);
case PS3_SM_SERVICE_ID_COMMAND:
dev_dbg(&dev->core, "%s:%d: COMMAND\n", __func__, __LINE__);
return ps3_sys_manager_handle_cmd(dev);
case PS3_SM_SERVICE_ID_REQUEST_ERROR:
dev_dbg(&dev->core, "%s:%d: REQUEST_ERROR\n", __func__,
__LINE__);
dump_sm_header(&header);
break;
default:
dev_dbg(&dev->core, "%s:%d: unknown service_id (%u)\n",
__func__, __LINE__, header.service_id);
break;
}
goto fail_id;
fail_header:
ps3_vuart_clear_rx_bytes(dev, 0);
return -EIO;
fail_id:
ps3_vuart_clear_rx_bytes(dev, header.payload_size);
return -EIO;
}
static void ps3_sys_manager_fin(struct ps3_system_bus_device *dev)
{
ps3_sys_manager_send_request_shutdown(dev);
pr_emerg("System Halted, OK to turn off power\n");
while (ps3_sys_manager_handle_msg(dev)) {
/* pause until next DEC interrupt */
lv1_pause(0);
}
while (1) {
/* pause, ignoring DEC interrupt */
lv1_pause(1);
}
}
/**
* ps3_sys_manager_final_power_off - The final platform machine_power_off routine.
*
* This routine never returns. The routine disables asynchronous vuart reads
* then spins calling ps3_sys_manager_handle_msg() to receive and acknowledge
* the shutdown command sent from the system manager. Soon after the
* acknowledgement is sent the lpar is destroyed by the HV. This routine
* should only be called from ps3_power_off() through
* ps3_sys_manager_ops.power_off.
*/
static void ps3_sys_manager_final_power_off(struct ps3_system_bus_device *dev)
{
BUG_ON(!dev);
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
ps3_vuart_cancel_async(dev);
ps3_sys_manager_send_next_op(dev, PS3_SM_NEXT_OP_SYS_SHUTDOWN,
user_wake_sources);
ps3_sys_manager_fin(dev);
}
/**
* ps3_sys_manager_final_restart - The final platform machine_restart routine.
*
* This routine never returns. The routine disables asynchronous vuart reads
* then spins calling ps3_sys_manager_handle_msg() to receive and acknowledge
* the shutdown command sent from the system manager. Soon after the
* acknowledgement is sent the lpar is destroyed by the HV. This routine
* should only be called from ps3_restart() through ps3_sys_manager_ops.restart.
*/
static void ps3_sys_manager_final_restart(struct ps3_system_bus_device *dev)
{
BUG_ON(!dev);
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
/* Check if we got here via a power button event. */
if (ps3_sm_force_power_off) {
dev_dbg(&dev->core, "%s:%d: forcing poweroff\n",
__func__, __LINE__);
ps3_sys_manager_final_power_off(dev);
}
ps3_vuart_cancel_async(dev);
ps3_sys_manager_send_attr(dev, 0);
ps3_sys_manager_send_next_op(dev, PS3_SM_NEXT_OP_SYS_REBOOT,
user_wake_sources);
ps3_sys_manager_fin(dev);
}
/**
* ps3_sys_manager_get_wol - Get wake-on-lan setting.
*/
int ps3_sys_manager_get_wol(void)
{
pr_debug("%s:%d\n", __func__, __LINE__);
return (user_wake_sources & PS3_SM_WAKE_W_O_L) != 0;
}
EXPORT_SYMBOL_GPL(ps3_sys_manager_get_wol);
/**
* ps3_sys_manager_set_wol - Set wake-on-lan setting.
*/
void ps3_sys_manager_set_wol(int state)
{
static DEFINE_MUTEX(mutex);
mutex_lock(&mutex);
pr_debug("%s:%d: %d\n", __func__, __LINE__, state);
if (state)
user_wake_sources |= PS3_SM_WAKE_W_O_L;
else
user_wake_sources &= ~PS3_SM_WAKE_W_O_L;
mutex_unlock(&mutex);
}
EXPORT_SYMBOL_GPL(ps3_sys_manager_set_wol);
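/*
 * Illustrative sketch (not part of the original driver): how an ethtool-style
 * wake-on-lan handler might use the two helpers above to report and change
 * the wakeup source used for the next shutdown.
 */
static void __maybe_unused ps3_sys_manager_wol_sketch(void)
{
	int wol_enabled = ps3_sys_manager_get_wol();

	if (!wol_enabled)
		ps3_sys_manager_set_wol(1);	/* adds PS3_SM_WAKE_W_O_L */
}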
/**
* ps3_sys_manager_work - Asynchronous read handler.
*
* Signaled when PS3_SM_RX_MSG_LEN_MIN bytes arrive at the vuart port.
*/
static void ps3_sys_manager_work(struct ps3_system_bus_device *dev)
{
ps3_sys_manager_handle_msg(dev);
ps3_vuart_read_async(dev, PS3_SM_RX_MSG_LEN_MIN);
}
static int ps3_sys_manager_probe(struct ps3_system_bus_device *dev)
{
int result;
struct ps3_sys_manager_ops ops;
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
ops.power_off = ps3_sys_manager_final_power_off;
ops.restart = ps3_sys_manager_final_restart;
ops.dev = dev;
/* ps3_sys_manager_register_ops copies ops. */
ps3_sys_manager_register_ops(&ops);
result = ps3_sys_manager_send_attr(dev, PS3_SM_ATTR_ALL);
BUG_ON(result);
result = ps3_vuart_read_async(dev, PS3_SM_RX_MSG_LEN_MIN);
BUG_ON(result);
return result;
}
static int ps3_sys_manager_remove(struct ps3_system_bus_device *dev)
{
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
return 0;
}
static void ps3_sys_manager_shutdown(struct ps3_system_bus_device *dev)
{
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
}
static struct ps3_vuart_port_driver ps3_sys_manager = {
.core.match_id = PS3_MATCH_ID_SYSTEM_MANAGER,
.core.core.name = "ps3_sys_manager",
.probe = ps3_sys_manager_probe,
.remove = ps3_sys_manager_remove,
.shutdown = ps3_sys_manager_shutdown,
.work = ps3_sys_manager_work,
};
static int __init ps3_sys_manager_init(void)
{
if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
return -ENODEV;
return ps3_vuart_port_driver_register(&ps3_sys_manager);
}
module_init(ps3_sys_manager_init);
/* Module remove not supported. */
MODULE_AUTHOR("Sony Corporation");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("PS3 System Manager");
MODULE_ALIAS(PS3_MODULE_ALIAS_SYSTEM_MANAGER);
|
linux-master
|
drivers/ps3/ps3-sys-manager.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* PS3 Storage Library
*
* Copyright (C) 2007 Sony Computer Entertainment Inc.
* Copyright 2007 Sony Corp.
*/
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <asm/lv1call.h>
#include <asm/ps3stor.h>
/*
* A workaround for flash memory I/O errors when the internal hard disk
* has not been formatted for OtherOS use. Delay disk close until flash
* memory is closed.
*/
static struct ps3_flash_workaround {
int flash_open;
int disk_open;
struct ps3_system_bus_device *disk_sbd;
} ps3_flash_workaround;
static int ps3stor_open_hv_device(struct ps3_system_bus_device *sbd)
{
int error = ps3_open_hv_device(sbd);
if (error)
return error;
if (sbd->match_id == PS3_MATCH_ID_STOR_FLASH)
ps3_flash_workaround.flash_open = 1;
if (sbd->match_id == PS3_MATCH_ID_STOR_DISK)
ps3_flash_workaround.disk_open = 1;
return 0;
}
static int ps3stor_close_hv_device(struct ps3_system_bus_device *sbd)
{
int error;
if (sbd->match_id == PS3_MATCH_ID_STOR_DISK
&& ps3_flash_workaround.disk_open
&& ps3_flash_workaround.flash_open) {
ps3_flash_workaround.disk_sbd = sbd;
return 0;
}
error = ps3_close_hv_device(sbd);
if (error)
return error;
if (sbd->match_id == PS3_MATCH_ID_STOR_DISK)
ps3_flash_workaround.disk_open = 0;
if (sbd->match_id == PS3_MATCH_ID_STOR_FLASH) {
ps3_flash_workaround.flash_open = 0;
if (ps3_flash_workaround.disk_sbd) {
ps3_close_hv_device(ps3_flash_workaround.disk_sbd);
ps3_flash_workaround.disk_open = 0;
ps3_flash_workaround.disk_sbd = NULL;
}
}
return 0;
}
static int ps3stor_probe_access(struct ps3_storage_device *dev)
{
int res, error;
unsigned int i;
unsigned long n;
if (dev->sbd.match_id == PS3_MATCH_ID_STOR_ROM) {
/* special case: CD-ROM is assumed always accessible */
dev->accessible_regions = 1;
return 0;
}
error = -EPERM;
for (i = 0; i < dev->num_regions; i++) {
dev_dbg(&dev->sbd.core,
"%s:%u: checking accessibility of region %u\n",
__func__, __LINE__, i);
dev->region_idx = i;
res = ps3stor_read_write_sectors(dev, dev->bounce_lpar, 0, 1,
0);
if (res) {
dev_dbg(&dev->sbd.core, "%s:%u: read failed, "
"region %u is not accessible\n", __func__,
__LINE__, i);
continue;
}
dev_dbg(&dev->sbd.core, "%s:%u: region %u is accessible\n",
__func__, __LINE__, i);
set_bit(i, &dev->accessible_regions);
/* We can access at least one region */
error = 0;
}
if (error)
return error;
n = hweight_long(dev->accessible_regions);
if (n > 1)
dev_info(&dev->sbd.core,
"%s:%u: %lu accessible regions found. Only the first "
"one will be used\n",
__func__, __LINE__, n);
dev->region_idx = __ffs(dev->accessible_regions);
dev_info(&dev->sbd.core,
"First accessible region has index %u start %llu size %llu\n",
dev->region_idx, dev->regions[dev->region_idx].start,
dev->regions[dev->region_idx].size);
return 0;
}
/**
* ps3stor_setup - Setup a storage device before use
* @dev: Pointer to a struct ps3_storage_device
* @handler: Pointer to an interrupt handler
*
* Returns 0 for success, or an error code
*/
int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler)
{
int error, res, alignment;
enum ps3_dma_page_size page_size;
error = ps3stor_open_hv_device(&dev->sbd);
if (error) {
dev_err(&dev->sbd.core,
"%s:%u: ps3_open_hv_device failed %d\n", __func__,
__LINE__, error);
goto fail;
}
error = ps3_sb_event_receive_port_setup(&dev->sbd, PS3_BINDING_CPU_ANY,
&dev->irq);
if (error) {
dev_err(&dev->sbd.core,
"%s:%u: ps3_sb_event_receive_port_setup failed %d\n",
__func__, __LINE__, error);
goto fail_close_device;
}
error = request_irq(dev->irq, handler, 0,
dev->sbd.core.driver->name, dev);
if (error) {
dev_err(&dev->sbd.core, "%s:%u: request_irq failed %d\n",
__func__, __LINE__, error);
goto fail_sb_event_receive_port_destroy;
}
alignment = min(__ffs(dev->bounce_size),
__ffs((unsigned long)dev->bounce_buf));
if (alignment < 12) {
dev_err(&dev->sbd.core,
"%s:%u: bounce buffer not aligned (%lx at 0x%p)\n",
__func__, __LINE__, dev->bounce_size, dev->bounce_buf);
error = -EINVAL;
goto fail_free_irq;
} else if (alignment < 16)
page_size = PS3_DMA_4K;
else
page_size = PS3_DMA_64K;
dev->sbd.d_region = &dev->dma_region;
ps3_dma_region_init(&dev->sbd, &dev->dma_region, page_size,
PS3_DMA_OTHER, dev->bounce_buf, dev->bounce_size);
res = ps3_dma_region_create(&dev->dma_region);
if (res) {
dev_err(&dev->sbd.core, "%s:%u: cannot create DMA region\n",
__func__, __LINE__);
error = -ENOMEM;
goto fail_free_irq;
}
dev->bounce_lpar = ps3_mm_phys_to_lpar(__pa(dev->bounce_buf));
dev->bounce_dma = dma_map_single(&dev->sbd.core, dev->bounce_buf,
dev->bounce_size, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&dev->sbd.core, dev->bounce_dma)) {
dev_err(&dev->sbd.core, "%s:%u: map DMA region failed\n",
__func__, __LINE__);
error = -ENODEV;
goto fail_free_dma;
}
error = ps3stor_probe_access(dev);
if (error) {
dev_err(&dev->sbd.core, "%s:%u: No accessible regions found\n",
__func__, __LINE__);
goto fail_unmap_dma;
}
return 0;
fail_unmap_dma:
dma_unmap_single(&dev->sbd.core, dev->bounce_dma, dev->bounce_size,
DMA_BIDIRECTIONAL);
fail_free_dma:
ps3_dma_region_free(&dev->dma_region);
fail_free_irq:
free_irq(dev->irq, dev);
fail_sb_event_receive_port_destroy:
ps3_sb_event_receive_port_destroy(&dev->sbd, dev->irq);
fail_close_device:
ps3stor_close_hv_device(&dev->sbd);
fail:
return error;
}
EXPORT_SYMBOL_GPL(ps3stor_setup);
/**
* ps3stor_teardown - Tear down a storage device after use
* @dev: Pointer to a struct ps3_storage_device
*/
void ps3stor_teardown(struct ps3_storage_device *dev)
{
int error;
dma_unmap_single(&dev->sbd.core, dev->bounce_dma, dev->bounce_size,
DMA_BIDIRECTIONAL);
ps3_dma_region_free(&dev->dma_region);
free_irq(dev->irq, dev);
error = ps3_sb_event_receive_port_destroy(&dev->sbd, dev->irq);
if (error)
dev_err(&dev->sbd.core,
"%s:%u: destroy event receive port failed %d\n",
__func__, __LINE__, error);
error = ps3stor_close_hv_device(&dev->sbd);
if (error)
dev_err(&dev->sbd.core,
"%s:%u: ps3_close_hv_device failed %d\n", __func__,
__LINE__, error);
}
EXPORT_SYMBOL_GPL(ps3stor_teardown);
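/*
 * Illustrative sketch (not part of the original library): the setup/teardown
 * sequence a storage driver's probe and remove paths might follow, assuming
 * the interrupt definitions pulled in via asm/ps3stor.h are visible. The
 * interrupt handler is a hypothetical stub; a real handler typically fetches
 * the asynchronous status from the hypervisor and completes dev->done.
 */
static irqreturn_t __maybe_unused ps3stor_sketch_interrupt(int irq, void *data)
{
	struct ps3_storage_device *dev = data;

	/* Stub only: pretend the last request completed successfully. */
	dev->lv1_status = 0;
	complete(&dev->done);
	return IRQ_HANDLED;
}

static int __maybe_unused ps3stor_sketch_probe(struct ps3_storage_device *dev)
{
	int error;

	error = ps3stor_setup(dev, ps3stor_sketch_interrupt);
	if (error)
		return error;

	/* ... register a block device, submit I/O, etc. ... */

	ps3stor_teardown(dev);
	return 0;
}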
/**
* ps3stor_read_write_sectors - read/write from/to a storage device
* @dev: Pointer to a struct ps3_storage_device
* @lpar: HV logical partition address
* @start_sector: First sector to read/write
* @sectors: Number of sectors to read/write
* @write: Flag indicating write (non-zero) or read (zero)
*
* Returns 0 for success, -1 in case of failure to submit the command, or
* an LV1 status value in case of other errors
*/
u64 ps3stor_read_write_sectors(struct ps3_storage_device *dev, u64 lpar,
u64 start_sector, u64 sectors, int write)
{
unsigned int region_id = dev->regions[dev->region_idx].id;
const char *op = write ? "write" : "read";
int res;
dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n",
__func__, __LINE__, op, sectors, start_sector);
init_completion(&dev->done);
res = write ? lv1_storage_write(dev->sbd.dev_id, region_id,
start_sector, sectors, 0, lpar,
&dev->tag)
: lv1_storage_read(dev->sbd.dev_id, region_id,
start_sector, sectors, 0, lpar,
&dev->tag);
if (res) {
dev_dbg(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
__LINE__, op, res);
return -1;
}
wait_for_completion(&dev->done);
if (dev->lv1_status) {
dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__,
__LINE__, op, dev->lv1_status);
return dev->lv1_status;
}
dev_dbg(&dev->sbd.core, "%s:%u: %s completed\n", __func__, __LINE__,
op);
return 0;
}
EXPORT_SYMBOL_GPL(ps3stor_read_write_sectors);
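/*
 * Illustrative sketch (not part of the original library): reading the first
 * sector of the currently selected region into the device's bounce buffer,
 * mirroring what ps3stor_probe_access() does above. Assumes the device has
 * already been through ps3stor_setup().
 */
static u64 __maybe_unused ps3stor_sketch_read_first_sector(
	struct ps3_storage_device *dev)
{
	/* lpar address of the bounce buffer, 1 sector, read (write == 0) */
	return ps3stor_read_write_sectors(dev, dev->bounce_lpar, 0, 1, 0);
}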
/**
* ps3stor_send_command - send a device command to a storage device
* @dev: Pointer to a struct ps3_storage_device
* @cmd: Command number
* @arg1: First command argument
* @arg2: Second command argument
* @arg3: Third command argument
* @arg4: Fourth command argument
*
* Returns 0 for success, -1 in case of failure to submit the command, or
* an LV1 status value in case of other errors
*/
u64 ps3stor_send_command(struct ps3_storage_device *dev, u64 cmd, u64 arg1,
u64 arg2, u64 arg3, u64 arg4)
{
int res;
dev_dbg(&dev->sbd.core, "%s:%u: send device command 0x%llx\n", __func__,
__LINE__, cmd);
init_completion(&dev->done);
res = lv1_storage_send_device_command(dev->sbd.dev_id, cmd, arg1,
arg2, arg3, arg4, &dev->tag);
if (res) {
dev_err(&dev->sbd.core,
"%s:%u: send_device_command 0x%llx failed %d\n",
__func__, __LINE__, cmd, res);
return -1;
}
wait_for_completion(&dev->done);
if (dev->lv1_status) {
dev_dbg(&dev->sbd.core, "%s:%u: command 0x%llx failed 0x%llx\n",
__func__, __LINE__, cmd, dev->lv1_status);
return dev->lv1_status;
}
dev_dbg(&dev->sbd.core, "%s:%u: command 0x%llx completed\n", __func__,
__LINE__, cmd);
return 0;
}
EXPORT_SYMBOL_GPL(ps3stor_send_command);
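/*
 * Illustrative sketch (not part of the original library): issuing a device
 * command and folding the result into an errno. Command numbers and
 * arguments are device specific (ATA/ATAPI pass-through, cache flush, etc.),
 * so they are left as parameters here rather than invented.
 */
static int __maybe_unused ps3stor_sketch_do_command(
	struct ps3_storage_device *dev, u64 cmd, u64 arg1, u64 arg2)
{
	u64 status = ps3stor_send_command(dev, cmd, arg1, arg2, 0, 0);

	if (status == (u64)-1)
		return -EIO;	/* failed to submit the command */
	if (status)
		return -EIO;	/* lv1 reported a failure status */
	return 0;
}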
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PS3 Storage Bus Library");
MODULE_AUTHOR("Sony Corporation");
|
linux-master
|
drivers/ps3/ps3stor_lib.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006, 2007 Sony Corporation
*
* AV backend support for PS3
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <asm/ps3av.h>
#include <asm/ps3.h>
#include <asm/ps3gpu.h>
#include "vuart.h"
static const struct video_fmt {
u32 format;
u32 order;
} ps3av_video_fmt_table[] = {
{ PS3AV_CMD_VIDEO_FORMAT_ARGB_8BIT, PS3AV_CMD_VIDEO_ORDER_RGB },
{ PS3AV_CMD_VIDEO_FORMAT_ARGB_8BIT, PS3AV_CMD_VIDEO_ORDER_BGR },
};
static const struct {
int cs;
u32 av;
u32 bl;
} ps3av_cs_video2av_table[] = {
{
.cs = PS3AV_CMD_VIDEO_CS_RGB_8,
.av = PS3AV_CMD_AV_CS_RGB_8,
.bl = PS3AV_CMD_AV_CS_8
}, {
.cs = PS3AV_CMD_VIDEO_CS_RGB_10,
.av = PS3AV_CMD_AV_CS_RGB_8,
.bl = PS3AV_CMD_AV_CS_8
}, {
.cs = PS3AV_CMD_VIDEO_CS_RGB_12,
.av = PS3AV_CMD_AV_CS_RGB_8,
.bl = PS3AV_CMD_AV_CS_8
}, {
.cs = PS3AV_CMD_VIDEO_CS_YUV444_8,
.av = PS3AV_CMD_AV_CS_YUV444_8,
.bl = PS3AV_CMD_AV_CS_8
}, {
.cs = PS3AV_CMD_VIDEO_CS_YUV444_10,
.av = PS3AV_CMD_AV_CS_YUV444_8,
.bl = PS3AV_CMD_AV_CS_10
}, {
.cs = PS3AV_CMD_VIDEO_CS_YUV444_12,
.av = PS3AV_CMD_AV_CS_YUV444_8,
.bl = PS3AV_CMD_AV_CS_10
}, {
.cs = PS3AV_CMD_VIDEO_CS_YUV422_8,
.av = PS3AV_CMD_AV_CS_YUV422_8,
.bl = PS3AV_CMD_AV_CS_10
}, {
.cs = PS3AV_CMD_VIDEO_CS_YUV422_10,
.av = PS3AV_CMD_AV_CS_YUV422_8,
.bl = PS3AV_CMD_AV_CS_10
}, {
.cs = PS3AV_CMD_VIDEO_CS_YUV422_12,
.av = PS3AV_CMD_AV_CS_YUV422_8,
.bl = PS3AV_CMD_AV_CS_12
}, {
.cs = PS3AV_CMD_VIDEO_CS_XVYCC_8,
.av = PS3AV_CMD_AV_CS_XVYCC_8,
.bl = PS3AV_CMD_AV_CS_12
}, {
.cs = PS3AV_CMD_VIDEO_CS_XVYCC_10,
.av = PS3AV_CMD_AV_CS_XVYCC_8,
.bl = PS3AV_CMD_AV_CS_12
}, {
.cs = PS3AV_CMD_VIDEO_CS_XVYCC_12,
.av = PS3AV_CMD_AV_CS_XVYCC_8,
.bl = PS3AV_CMD_AV_CS_12
}
};
static u32 ps3av_cs_video2av(int cs)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ps3av_cs_video2av_table); i++)
if (ps3av_cs_video2av_table[i].cs == cs)
return ps3av_cs_video2av_table[i].av;
return PS3AV_CMD_AV_CS_RGB_8;
}
static u32 ps3av_cs_video2av_bitlen(int cs)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ps3av_cs_video2av_table); i++)
if (ps3av_cs_video2av_table[i].cs == cs)
return ps3av_cs_video2av_table[i].bl;
return PS3AV_CMD_AV_CS_8;
}
static const struct {
int vid;
u32 av;
} ps3av_vid_video2av_table[] = {
{ PS3AV_CMD_VIDEO_VID_480I, PS3AV_CMD_AV_VID_480I },
{ PS3AV_CMD_VIDEO_VID_480P, PS3AV_CMD_AV_VID_480P },
{ PS3AV_CMD_VIDEO_VID_576I, PS3AV_CMD_AV_VID_576I },
{ PS3AV_CMD_VIDEO_VID_576P, PS3AV_CMD_AV_VID_576P },
{ PS3AV_CMD_VIDEO_VID_1080I_60HZ, PS3AV_CMD_AV_VID_1080I_60HZ },
{ PS3AV_CMD_VIDEO_VID_720P_60HZ, PS3AV_CMD_AV_VID_720P_60HZ },
{ PS3AV_CMD_VIDEO_VID_1080P_60HZ, PS3AV_CMD_AV_VID_1080P_60HZ },
{ PS3AV_CMD_VIDEO_VID_1080I_50HZ, PS3AV_CMD_AV_VID_1080I_50HZ },
{ PS3AV_CMD_VIDEO_VID_720P_50HZ, PS3AV_CMD_AV_VID_720P_50HZ },
{ PS3AV_CMD_VIDEO_VID_1080P_50HZ, PS3AV_CMD_AV_VID_1080P_50HZ },
{ PS3AV_CMD_VIDEO_VID_WXGA, PS3AV_CMD_AV_VID_WXGA },
{ PS3AV_CMD_VIDEO_VID_SXGA, PS3AV_CMD_AV_VID_SXGA },
{ PS3AV_CMD_VIDEO_VID_WUXGA, PS3AV_CMD_AV_VID_WUXGA }
};
static u32 ps3av_vid_video2av(int vid)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ps3av_vid_video2av_table); i++)
if (ps3av_vid_video2av_table[i].vid == vid)
return ps3av_vid_video2av_table[i].av;
return PS3AV_CMD_AV_VID_480P;
}
static int ps3av_hdmi_range(void)
{
if (ps3_compare_firmware_version(1, 8, 0) < 0)
return 0;
else
return 1; /* supported */
}
int ps3av_cmd_init(void)
{
int res;
struct ps3av_pkt_av_init av_init;
struct ps3av_pkt_video_init video_init;
struct ps3av_pkt_audio_init audio_init;
/* video init */
memset(&video_init, 0, sizeof(video_init));
res = ps3av_do_pkt(PS3AV_CID_VIDEO_INIT, sizeof(video_init.send_hdr),
sizeof(video_init), &video_init.send_hdr);
if (res < 0)
return res;
res = get_status(&video_init);
if (res) {
printk(KERN_ERR "PS3AV_CID_VIDEO_INIT: failed %x\n", res);
return res;
}
/* audio init */
memset(&audio_init, 0, sizeof(audio_init));
res = ps3av_do_pkt(PS3AV_CID_AUDIO_INIT, sizeof(audio_init.send_hdr),
sizeof(audio_init), &audio_init.send_hdr);
if (res < 0)
return res;
res = get_status(&audio_init);
if (res) {
printk(KERN_ERR "PS3AV_CID_AUDIO_INIT: failed %x\n", res);
return res;
}
/* av init */
memset(&av_init, 0, sizeof(av_init));
av_init.event_bit = 0;
res = ps3av_do_pkt(PS3AV_CID_AV_INIT, sizeof(av_init), sizeof(av_init),
&av_init.send_hdr);
if (res < 0)
return res;
res = get_status(&av_init);
if (res)
printk(KERN_ERR "PS3AV_CID_AV_INIT: failed %x\n", res);
return res;
}
int ps3av_cmd_fin(void)
{
int res;
struct ps3av_pkt_av_fin av_fin;
memset(&av_fin, 0, sizeof(av_fin));
res = ps3av_do_pkt(PS3AV_CID_AV_FIN, sizeof(av_fin.send_hdr),
sizeof(av_fin), &av_fin.send_hdr);
if (res < 0)
return res;
res = get_status(&av_fin);
if (res)
printk(KERN_ERR "PS3AV_CID_AV_FIN: failed %x\n", res);
return res;
}
int ps3av_cmd_av_video_mute(int num_of_port, u32 *port, u32 mute)
{
int i, send_len, res;
struct ps3av_pkt_av_video_mute av_video_mute;
if (num_of_port > PS3AV_MUTE_PORT_MAX)
return -EINVAL;
memset(&av_video_mute, 0, sizeof(av_video_mute));
for (i = 0; i < num_of_port; i++) {
av_video_mute.mute[i].avport = port[i];
av_video_mute.mute[i].mute = mute;
}
send_len = sizeof(av_video_mute.send_hdr) +
sizeof(struct ps3av_av_mute) * num_of_port;
res = ps3av_do_pkt(PS3AV_CID_AV_VIDEO_MUTE, send_len,
sizeof(av_video_mute), &av_video_mute.send_hdr);
if (res < 0)
return res;
res = get_status(&av_video_mute);
if (res)
printk(KERN_ERR "PS3AV_CID_AV_VIDEO_MUTE: failed %x\n", res);
return res;
}
int ps3av_cmd_av_video_disable_sig(u32 port)
{
int res;
struct ps3av_pkt_av_video_disable_sig av_video_sig;
memset(&av_video_sig, 0, sizeof(av_video_sig));
av_video_sig.avport = port;
res = ps3av_do_pkt(PS3AV_CID_AV_VIDEO_DISABLE_SIG,
sizeof(av_video_sig), sizeof(av_video_sig),
&av_video_sig.send_hdr);
if (res < 0)
return res;
res = get_status(&av_video_sig);
if (res)
printk(KERN_ERR
"PS3AV_CID_AV_VIDEO_DISABLE_SIG: failed %x port:%x\n",
res, port);
return res;
}
int ps3av_cmd_av_tv_mute(u32 avport, u32 mute)
{
int res;
struct ps3av_pkt_av_tv_mute tv_mute;
memset(&tv_mute, 0, sizeof(tv_mute));
tv_mute.avport = avport;
tv_mute.mute = mute;
res = ps3av_do_pkt(PS3AV_CID_AV_TV_MUTE, sizeof(tv_mute),
sizeof(tv_mute), &tv_mute.send_hdr);
if (res < 0)
return res;
res = get_status(&tv_mute);
if (res)
printk(KERN_ERR "PS3AV_CID_AV_TV_MUTE: failed %x port:%x\n",
res, avport);
return res;
}
int ps3av_cmd_enable_event(void)
{
int res;
struct ps3av_pkt_av_event av_event;
memset(&av_event, 0, sizeof(av_event));
av_event.event_bit = PS3AV_CMD_EVENT_BIT_UNPLUGGED |
PS3AV_CMD_EVENT_BIT_PLUGGED | PS3AV_CMD_EVENT_BIT_HDCP_DONE;
res = ps3av_do_pkt(PS3AV_CID_AV_ENABLE_EVENT, sizeof(av_event),
sizeof(av_event), &av_event.send_hdr);
if (res < 0)
return res;
res = get_status(&av_event);
if (res)
printk(KERN_ERR "PS3AV_CID_AV_ENABLE_EVENT: failed %x\n", res);
return res;
}
int ps3av_cmd_av_hdmi_mode(u8 mode)
{
int res;
struct ps3av_pkt_av_hdmi_mode hdmi_mode;
memset(&hdmi_mode, 0, sizeof(hdmi_mode));
hdmi_mode.mode = mode;
res = ps3av_do_pkt(PS3AV_CID_AV_HDMI_MODE, sizeof(hdmi_mode),
sizeof(hdmi_mode), &hdmi_mode.send_hdr);
if (res < 0)
return res;
res = get_status(&hdmi_mode);
if (res && res != PS3AV_STATUS_UNSUPPORTED_HDMI_MODE)
printk(KERN_ERR "PS3AV_CID_AV_HDMI_MODE: failed %x\n", res);
return res;
}
u32 ps3av_cmd_set_av_video_cs(void *p, u32 avport, int video_vid, int cs_out,
int aspect, u32 id)
{
struct ps3av_pkt_av_video_cs *av_video_cs;
av_video_cs = (struct ps3av_pkt_av_video_cs *)p;
if (video_vid == -1)
video_vid = PS3AV_CMD_VIDEO_VID_720P_60HZ;
if (cs_out == -1)
cs_out = PS3AV_CMD_VIDEO_CS_YUV444_8;
if (aspect == -1)
aspect = 0;
memset(av_video_cs, 0, sizeof(*av_video_cs));
ps3av_set_hdr(PS3AV_CID_AV_VIDEO_CS, sizeof(*av_video_cs),
&av_video_cs->send_hdr);
av_video_cs->avport = avport;
/* should be same as video_mode.resolution */
av_video_cs->av_vid = ps3av_vid_video2av(video_vid);
av_video_cs->av_cs_out = ps3av_cs_video2av(cs_out);
/* should be same as video_mode.video_cs_out */
av_video_cs->av_cs_in = ps3av_cs_video2av(PS3AV_CMD_VIDEO_CS_RGB_8);
av_video_cs->bitlen_out = ps3av_cs_video2av_bitlen(cs_out);
if ((id & PS3AV_MODE_WHITE) && ps3av_hdmi_range())
av_video_cs->super_white = PS3AV_CMD_AV_SUPER_WHITE_ON;
else /* default off */
av_video_cs->super_white = PS3AV_CMD_AV_SUPER_WHITE_OFF;
av_video_cs->aspect = aspect;
if (id & PS3AV_MODE_DITHER) {
av_video_cs->dither = PS3AV_CMD_AV_DITHER_ON
| PS3AV_CMD_AV_DITHER_8BIT;
} else {
/* default off */
av_video_cs->dither = PS3AV_CMD_AV_DITHER_OFF;
}
return sizeof(*av_video_cs);
}
u32 ps3av_cmd_set_video_mode(void *p, u32 head, int video_vid, int video_fmt,
u32 id)
{
struct ps3av_pkt_video_mode *video_mode;
u32 x, y;
video_mode = (struct ps3av_pkt_video_mode *)p;
if (video_vid == -1)
video_vid = PS3AV_CMD_VIDEO_VID_720P_60HZ;
if (video_fmt == -1)
video_fmt = PS3AV_CMD_VIDEO_FMT_X8R8G8B8;
if (ps3av_video_mode2res(id, &x, &y))
return 0;
/* video mode */
memset(video_mode, 0, sizeof(*video_mode));
ps3av_set_hdr(PS3AV_CID_VIDEO_MODE, sizeof(*video_mode),
&video_mode->send_hdr);
video_mode->video_head = head;
if (video_vid == PS3AV_CMD_VIDEO_VID_480I
&& head == PS3AV_CMD_VIDEO_HEAD_B)
video_mode->video_vid = PS3AV_CMD_VIDEO_VID_480I_A;
else
video_mode->video_vid = video_vid;
video_mode->width = (u16) x;
video_mode->height = (u16) y;
video_mode->pitch = video_mode->width * 4; /* line_length */
video_mode->video_out_format = PS3AV_CMD_VIDEO_OUT_FORMAT_RGB_12BIT;
video_mode->video_format = ps3av_video_fmt_table[video_fmt].format;
if ((id & PS3AV_MODE_COLOR) && ps3av_hdmi_range())
video_mode->video_cl_cnv = PS3AV_CMD_VIDEO_CL_CNV_DISABLE_LUT;
else /* default enable */
video_mode->video_cl_cnv = PS3AV_CMD_VIDEO_CL_CNV_ENABLE_LUT;
video_mode->video_order = ps3av_video_fmt_table[video_fmt].order;
pr_debug("%s: video_mode:vid:%x width:%d height:%d pitch:%d out_format:%d format:%x order:%x\n",
__func__, video_vid, video_mode->width, video_mode->height,
video_mode->pitch, video_mode->video_out_format,
video_mode->video_format, video_mode->video_order);
return sizeof(*video_mode);
}
int ps3av_cmd_video_format_black(u32 head, u32 video_fmt, u32 mute)
{
int res;
struct ps3av_pkt_video_format video_format;
memset(&video_format, 0, sizeof(video_format));
video_format.video_head = head;
if (mute != PS3AV_CMD_MUTE_OFF)
video_format.video_format = PS3AV_CMD_VIDEO_FORMAT_BLACK;
else
video_format.video_format =
ps3av_video_fmt_table[video_fmt].format;
video_format.video_order = ps3av_video_fmt_table[video_fmt].order;
res = ps3av_do_pkt(PS3AV_CID_VIDEO_FORMAT, sizeof(video_format),
sizeof(video_format), &video_format.send_hdr);
if (res < 0)
return res;
res = get_status(&video_format);
if (res)
printk(KERN_ERR "PS3AV_CID_VIDEO_FORMAT: failed %x\n", res);
return res;
}
int ps3av_cmd_av_audio_mute(int num_of_port, u32 *port, u32 mute)
{
int i, res;
struct ps3av_pkt_av_audio_mute av_audio_mute;
if (num_of_port > PS3AV_MUTE_PORT_MAX)
return -EINVAL;
/* audio mute */
memset(&av_audio_mute, 0, sizeof(av_audio_mute));
for (i = 0; i < num_of_port; i++) {
av_audio_mute.mute[i].avport = port[i];
av_audio_mute.mute[i].mute = mute;
}
res = ps3av_do_pkt(PS3AV_CID_AV_AUDIO_MUTE,
sizeof(av_audio_mute.send_hdr) +
sizeof(struct ps3av_av_mute) * num_of_port,
sizeof(av_audio_mute), &av_audio_mute.send_hdr);
if (res < 0)
return res;
res = get_status(&av_audio_mute);
if (res)
printk(KERN_ERR "PS3AV_CID_AV_AUDIO_MUTE: failed %x\n", res);
return res;
}
static const struct {
u32 fs;
u8 mclk;
} ps3av_cnv_mclk_table[] = {
{ PS3AV_CMD_AUDIO_FS_44K, PS3AV_CMD_AV_MCLK_512 },
{ PS3AV_CMD_AUDIO_FS_48K, PS3AV_CMD_AV_MCLK_512 },
{ PS3AV_CMD_AUDIO_FS_88K, PS3AV_CMD_AV_MCLK_256 },
{ PS3AV_CMD_AUDIO_FS_96K, PS3AV_CMD_AV_MCLK_256 },
{ PS3AV_CMD_AUDIO_FS_176K, PS3AV_CMD_AV_MCLK_128 },
{ PS3AV_CMD_AUDIO_FS_192K, PS3AV_CMD_AV_MCLK_128 }
};
static u8 ps3av_cnv_mclk(u32 fs)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ps3av_cnv_mclk_table); i++)
if (ps3av_cnv_mclk_table[i].fs == fs)
return ps3av_cnv_mclk_table[i].mclk;
printk(KERN_ERR "%s failed, fs:%x\n", __func__, fs);
return 0;
}
#define BASE PS3AV_CMD_AUDIO_FS_44K
static const u32 ps3av_ns_table[][5] = {
/* D1, D2, D3, D4, D5 */
[PS3AV_CMD_AUDIO_FS_44K-BASE] = { 6272, 6272, 17836, 17836, 8918 },
[PS3AV_CMD_AUDIO_FS_48K-BASE] = { 6144, 6144, 11648, 11648, 5824 },
[PS3AV_CMD_AUDIO_FS_88K-BASE] = { 12544, 12544, 35672, 35672, 17836 },
[PS3AV_CMD_AUDIO_FS_96K-BASE] = { 12288, 12288, 23296, 23296, 11648 },
[PS3AV_CMD_AUDIO_FS_176K-BASE] = { 25088, 25088, 71344, 71344, 35672 },
[PS3AV_CMD_AUDIO_FS_192K-BASE] = { 24576, 24576, 46592, 46592, 23296 }
};
static void ps3av_cnv_ns(u8 *ns, u32 fs, u32 video_vid)
{
u32 av_vid, ns_val;
int d;
d = ns_val = 0;
av_vid = ps3av_vid_video2av(video_vid);
switch (av_vid) {
case PS3AV_CMD_AV_VID_480I:
case PS3AV_CMD_AV_VID_576I:
d = 0;
break;
case PS3AV_CMD_AV_VID_480P:
case PS3AV_CMD_AV_VID_576P:
d = 1;
break;
case PS3AV_CMD_AV_VID_1080I_60HZ:
case PS3AV_CMD_AV_VID_1080I_50HZ:
d = 2;
break;
case PS3AV_CMD_AV_VID_720P_60HZ:
case PS3AV_CMD_AV_VID_720P_50HZ:
d = 3;
break;
case PS3AV_CMD_AV_VID_1080P_60HZ:
case PS3AV_CMD_AV_VID_1080P_50HZ:
case PS3AV_CMD_AV_VID_WXGA:
case PS3AV_CMD_AV_VID_SXGA:
case PS3AV_CMD_AV_VID_WUXGA:
d = 4;
break;
default:
printk(KERN_ERR "%s failed, vid:%x\n", __func__, video_vid);
break;
}
if (fs < PS3AV_CMD_AUDIO_FS_44K || fs > PS3AV_CMD_AUDIO_FS_192K)
printk(KERN_ERR "%s failed, fs:%x\n", __func__, fs);
else
		/* Index the row by sampling frequency, matching the table's designated initializers. */
		ns_val = ps3av_ns_table[fs - BASE][d];
*ns++ = ns_val & 0x000000FF;
*ns++ = (ns_val & 0x0000FF00) >> 8;
*ns = (ns_val & 0x00FF0000) >> 16;
}
#undef BASE
static u8 ps3av_cnv_enable(u32 source, const u8 *enable)
{
u8 ret = 0;
if (source == PS3AV_CMD_AUDIO_SOURCE_SPDIF) {
ret = 0x03;
} else if (source == PS3AV_CMD_AUDIO_SOURCE_SERIAL) {
ret = ((enable[0] << 4) + (enable[1] << 5) + (enable[2] << 6) +
(enable[3] << 7)) | 0x01;
} else
printk(KERN_ERR "%s failed, source:%x\n", __func__, source);
return ret;
}
static u8 ps3av_cnv_fifomap(const u8 *map)
{
u8 ret = 0;
ret = map[0] + (map[1] << 2) + (map[2] << 4) + (map[3] << 6);
return ret;
}
static u8 ps3av_cnv_inputlen(u32 word_bits)
{
u8 ret = 0;
switch (word_bits) {
case PS3AV_CMD_AUDIO_WORD_BITS_16:
ret = PS3AV_CMD_AV_INPUTLEN_16;
break;
case PS3AV_CMD_AUDIO_WORD_BITS_20:
ret = PS3AV_CMD_AV_INPUTLEN_20;
break;
case PS3AV_CMD_AUDIO_WORD_BITS_24:
ret = PS3AV_CMD_AV_INPUTLEN_24;
break;
default:
printk(KERN_ERR "%s failed, word_bits:%x\n", __func__,
word_bits);
break;
}
return ret;
}
static u8 ps3av_cnv_layout(u32 num_of_ch)
{
if (num_of_ch > PS3AV_CMD_AUDIO_NUM_OF_CH_8) {
printk(KERN_ERR "%s failed, num_of_ch:%x\n", __func__,
num_of_ch);
return 0;
}
return num_of_ch == PS3AV_CMD_AUDIO_NUM_OF_CH_2 ? 0x0 : 0x1;
}
static void ps3av_cnv_info(struct ps3av_audio_info_frame *info,
const struct ps3av_pkt_audio_mode *mode)
{
info->pb1.cc = mode->audio_num_of_ch + 1; /* CH2:0x01 --- CH8:0x07 */
info->pb1.ct = 0;
info->pb2.sf = 0;
info->pb2.ss = 0;
info->pb3 = 0; /* check mode->audio_format ?? */
info->pb4 = mode->audio_layout;
info->pb5.dm = mode->audio_downmix;
info->pb5.lsv = mode->audio_downmix_level;
}
static void ps3av_cnv_chstat(u8 *chstat, const u8 *cs_info)
{
memcpy(chstat, cs_info, 5);
}
u32 ps3av_cmd_set_av_audio_param(void *p, u32 port,
const struct ps3av_pkt_audio_mode *audio_mode,
u32 video_vid)
{
struct ps3av_pkt_av_audio_param *param;
param = (struct ps3av_pkt_av_audio_param *)p;
memset(param, 0, sizeof(*param));
ps3av_set_hdr(PS3AV_CID_AV_AUDIO_PARAM, sizeof(*param),
¶m->send_hdr);
param->avport = port;
param->mclk = ps3av_cnv_mclk(audio_mode->audio_fs) | 0x80;
ps3av_cnv_ns(param->ns, audio_mode->audio_fs, video_vid);
param->enable = ps3av_cnv_enable(audio_mode->audio_source,
audio_mode->audio_enable);
param->swaplr = 0x09;
param->fifomap = ps3av_cnv_fifomap(audio_mode->audio_map);
param->inputctrl = 0x49;
param->inputlen = ps3av_cnv_inputlen(audio_mode->audio_word_bits);
param->layout = ps3av_cnv_layout(audio_mode->audio_num_of_ch);
ps3av_cnv_info(¶m->info, audio_mode);
ps3av_cnv_chstat(param->chstat, audio_mode->audio_cs_info);
return sizeof(*param);
}
/* default cs val */
u8 ps3av_mode_cs_info[] = {
0x00, 0x09, 0x00, 0x02, 0x01, 0x00, 0x00, 0x00
};
EXPORT_SYMBOL_GPL(ps3av_mode_cs_info);
#define CS_44 0x00
#define CS_48 0x02
#define CS_88 0x08
#define CS_96 0x0a
#define CS_176 0x0c
#define CS_192 0x0e
#define CS_MASK 0x0f
#define CS_BIT 0x40
void ps3av_cmd_set_audio_mode(struct ps3av_pkt_audio_mode *audio, u32 avport,
u32 ch, u32 fs, u32 word_bits, u32 format,
u32 source)
{
int spdif_through;
int i;
if (!(ch | fs | format | word_bits | source)) {
ch = PS3AV_CMD_AUDIO_NUM_OF_CH_2;
fs = PS3AV_CMD_AUDIO_FS_48K;
word_bits = PS3AV_CMD_AUDIO_WORD_BITS_16;
format = PS3AV_CMD_AUDIO_FORMAT_PCM;
source = PS3AV_CMD_AUDIO_SOURCE_SERIAL;
}
/* audio mode */
memset(audio, 0, sizeof(*audio));
ps3av_set_hdr(PS3AV_CID_AUDIO_MODE, sizeof(*audio), &audio->send_hdr);
audio->avport = (u8) avport;
audio->mask = 0x0FFF; /* XXX set all */
audio->audio_num_of_ch = ch;
audio->audio_fs = fs;
audio->audio_word_bits = word_bits;
audio->audio_format = format;
audio->audio_source = source;
switch (ch) {
case PS3AV_CMD_AUDIO_NUM_OF_CH_8:
audio->audio_enable[3] = 1;
fallthrough;
case PS3AV_CMD_AUDIO_NUM_OF_CH_6:
audio->audio_enable[2] = 1;
audio->audio_enable[1] = 1;
fallthrough;
case PS3AV_CMD_AUDIO_NUM_OF_CH_2:
default:
audio->audio_enable[0] = 1;
}
/* audio swap L/R */
for (i = 0; i < 4; i++)
audio->audio_swap[i] = PS3AV_CMD_AUDIO_SWAP_0; /* no swap */
/* audio serial input mapping */
audio->audio_map[0] = PS3AV_CMD_AUDIO_MAP_OUTPUT_0;
audio->audio_map[1] = PS3AV_CMD_AUDIO_MAP_OUTPUT_1;
audio->audio_map[2] = PS3AV_CMD_AUDIO_MAP_OUTPUT_2;
audio->audio_map[3] = PS3AV_CMD_AUDIO_MAP_OUTPUT_3;
/* audio speaker layout */
if (avport == PS3AV_CMD_AVPORT_HDMI_0 ||
avport == PS3AV_CMD_AVPORT_HDMI_1) {
switch (ch) {
case PS3AV_CMD_AUDIO_NUM_OF_CH_8:
audio->audio_layout = PS3AV_CMD_AUDIO_LAYOUT_8CH;
break;
case PS3AV_CMD_AUDIO_NUM_OF_CH_6:
audio->audio_layout = PS3AV_CMD_AUDIO_LAYOUT_6CH;
break;
case PS3AV_CMD_AUDIO_NUM_OF_CH_2:
default:
audio->audio_layout = PS3AV_CMD_AUDIO_LAYOUT_2CH;
break;
}
} else {
audio->audio_layout = PS3AV_CMD_AUDIO_LAYOUT_2CH;
}
/* audio downmix permission */
audio->audio_downmix = PS3AV_CMD_AUDIO_DOWNMIX_PERMITTED;
/* audio downmix level shift (0:0dB to 15:15dB) */
audio->audio_downmix_level = 0; /* 0dB */
/* set ch status */
for (i = 0; i < 8; i++)
audio->audio_cs_info[i] = ps3av_mode_cs_info[i];
switch (fs) {
case PS3AV_CMD_AUDIO_FS_44K:
audio->audio_cs_info[3] &= ~CS_MASK;
audio->audio_cs_info[3] |= CS_44;
break;
case PS3AV_CMD_AUDIO_FS_88K:
audio->audio_cs_info[3] &= ~CS_MASK;
audio->audio_cs_info[3] |= CS_88;
break;
case PS3AV_CMD_AUDIO_FS_96K:
audio->audio_cs_info[3] &= ~CS_MASK;
audio->audio_cs_info[3] |= CS_96;
break;
case PS3AV_CMD_AUDIO_FS_176K:
audio->audio_cs_info[3] &= ~CS_MASK;
audio->audio_cs_info[3] |= CS_176;
break;
case PS3AV_CMD_AUDIO_FS_192K:
audio->audio_cs_info[3] &= ~CS_MASK;
audio->audio_cs_info[3] |= CS_192;
break;
default:
break;
}
/* non-audio bit */
spdif_through = audio->audio_cs_info[0] & 0x02;
/* pass through setting */
if (spdif_through &&
(avport == PS3AV_CMD_AVPORT_SPDIF_0 ||
avport == PS3AV_CMD_AVPORT_SPDIF_1 ||
avport == PS3AV_CMD_AVPORT_HDMI_0 ||
avport == PS3AV_CMD_AVPORT_HDMI_1)) {
audio->audio_word_bits = PS3AV_CMD_AUDIO_WORD_BITS_16;
audio->audio_format = PS3AV_CMD_AUDIO_FORMAT_BITSTREAM;
}
}
int ps3av_cmd_audio_mode(struct ps3av_pkt_audio_mode *audio_mode)
{
int res;
res = ps3av_do_pkt(PS3AV_CID_AUDIO_MODE, sizeof(*audio_mode),
sizeof(*audio_mode), &audio_mode->send_hdr);
if (res < 0)
return res;
res = get_status(audio_mode);
if (res)
printk(KERN_ERR "PS3AV_CID_AUDIO_MODE: failed %x\n", res);
return res;
}
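/*
 * Illustrative sketch (not part of the original driver): composing the two
 * helpers above to switch HDMI 0 to 2ch/48kHz/16-bit linear PCM. The in-tree
 * ps3av core builds the audio mode packet as part of a larger parameter
 * sequence; this only shows the standalone packet-level flow.
 */
static int __maybe_unused ps3av_sketch_set_hdmi0_pcm(void)
{
	struct ps3av_pkt_audio_mode audio;

	ps3av_cmd_set_audio_mode(&audio, PS3AV_CMD_AVPORT_HDMI_0,
				 PS3AV_CMD_AUDIO_NUM_OF_CH_2,
				 PS3AV_CMD_AUDIO_FS_48K,
				 PS3AV_CMD_AUDIO_WORD_BITS_16,
				 PS3AV_CMD_AUDIO_FORMAT_PCM,
				 PS3AV_CMD_AUDIO_SOURCE_SERIAL);

	return ps3av_cmd_audio_mode(&audio);
}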
int ps3av_cmd_audio_mute(int num_of_port, u32 *port, u32 mute)
{
int i, res;
struct ps3av_pkt_audio_mute audio_mute;
if (num_of_port > PS3AV_OPT_PORT_MAX)
return -EINVAL;
/* audio mute */
memset(&audio_mute, 0, sizeof(audio_mute));
for (i = 0; i < num_of_port; i++) {
audio_mute.mute[i].avport = port[i];
audio_mute.mute[i].mute = mute;
}
res = ps3av_do_pkt(PS3AV_CID_AUDIO_MUTE,
sizeof(audio_mute.send_hdr) +
sizeof(struct ps3av_audio_mute) * num_of_port,
sizeof(audio_mute), &audio_mute.send_hdr);
if (res < 0)
return res;
res = get_status(&audio_mute);
if (res)
printk(KERN_ERR "PS3AV_CID_AUDIO_MUTE: failed %x\n", res);
return res;
}
int ps3av_cmd_audio_active(int active, u32 port)
{
int res;
struct ps3av_pkt_audio_active audio_active;
u32 cid;
/* audio active */
memset(&audio_active, 0, sizeof(audio_active));
audio_active.audio_port = port;
cid = active ? PS3AV_CID_AUDIO_ACTIVE : PS3AV_CID_AUDIO_INACTIVE;
res = ps3av_do_pkt(cid, sizeof(audio_active), sizeof(audio_active),
&audio_active.send_hdr);
if (res < 0)
return res;
res = get_status(&audio_active);
if (res)
printk(KERN_ERR "PS3AV_CID_AUDIO_ACTIVE:%x failed %x\n", cid,
res);
return res;
}
int ps3av_cmd_avb_param(struct ps3av_pkt_avb_param *avb, u32 send_len)
{
int res;
mutex_lock(&ps3_gpu_mutex);
/* avb packet */
res = ps3av_do_pkt(PS3AV_CID_AVB_PARAM, send_len, sizeof(*avb),
&avb->send_hdr);
if (res < 0)
goto out;
res = get_status(avb);
if (res)
pr_debug("%s: PS3AV_CID_AVB_PARAM: failed %x\n", __func__,
res);
out:
mutex_unlock(&ps3_gpu_mutex);
return res;
}
int ps3av_cmd_av_get_hw_conf(struct ps3av_pkt_av_get_hw_conf *hw_conf)
{
int res;
memset(hw_conf, 0, sizeof(*hw_conf));
res = ps3av_do_pkt(PS3AV_CID_AV_GET_HW_CONF, sizeof(hw_conf->send_hdr),
sizeof(*hw_conf), &hw_conf->send_hdr);
if (res < 0)
return res;
res = get_status(hw_conf);
if (res)
printk(KERN_ERR "PS3AV_CID_AV_GET_HW_CONF: failed %x\n", res);
return res;
}
int ps3av_cmd_video_get_monitor_info(struct ps3av_pkt_av_get_monitor_info *info,
u32 avport)
{
int res;
memset(info, 0, sizeof(*info));
info->avport = avport;
res = ps3av_do_pkt(PS3AV_CID_AV_GET_MONITOR_INFO,
sizeof(info->send_hdr) + sizeof(info->avport) +
sizeof(info->reserved),
sizeof(*info), &info->send_hdr);
if (res < 0)
return res;
res = get_status(info);
if (res)
printk(KERN_ERR "PS3AV_CID_AV_GET_MONITOR_INFO: failed %x\n",
res);
return res;
}
#define PS3AV_AV_LAYOUT_0 (PS3AV_CMD_AV_LAYOUT_32 \
| PS3AV_CMD_AV_LAYOUT_44 \
| PS3AV_CMD_AV_LAYOUT_48)
#define PS3AV_AV_LAYOUT_1 (PS3AV_AV_LAYOUT_0 \
| PS3AV_CMD_AV_LAYOUT_88 \
| PS3AV_CMD_AV_LAYOUT_96 \
| PS3AV_CMD_AV_LAYOUT_176 \
| PS3AV_CMD_AV_LAYOUT_192)
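/*
 * Minimal usage sketch (not part of the upstream file): muting a set of
 * ports with ps3av_cmd_audio_mute() above.  The two-port array and the
 * helper name are made up for illustration; real callers pass whichever
 * ports they manage and a mute value from the ps3av headers.
 */
static int __maybe_unused ps3av_example_mute_spdif(u32 mute)
{
	u32 ports[] = {
		PS3AV_CMD_AVPORT_SPDIF_0,
		PS3AV_CMD_AVPORT_SPDIF_1,
	};

	return ps3av_cmd_audio_mute(ARRAY_SIZE(ports), ports, mute);
}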
| linux-master | drivers/ps3/ps3av_cmd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PS3 System Manager core.
*
* Copyright (C) 2007 Sony Computer Entertainment Inc.
* Copyright 2007 Sony Corp.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <asm/lv1call.h>
#include <asm/ps3.h>
/**
* Statically linked routines that allow late binding of a loaded sys-manager
* module.
*/
static struct ps3_sys_manager_ops ps3_sys_manager_ops;
/**
* ps3_sys_manager_register_ops - Bind ps3_sys_manager_ops to a module.
* @ops: struct ps3_sys_manager_ops.
*
* To be called from ps3_sys_manager_probe() and ps3_sys_manager_remove() to
* register callback ops for power control. Copies data to the static
* variable ps3_sys_manager_ops.
*/
void ps3_sys_manager_register_ops(const struct ps3_sys_manager_ops *ops)
{
BUG_ON(!ops);
BUG_ON(!ops->dev);
ps3_sys_manager_ops = *ops;
}
EXPORT_SYMBOL_GPL(ps3_sys_manager_register_ops);
void __noreturn ps3_sys_manager_power_off(void)
{
if (ps3_sys_manager_ops.power_off)
ps3_sys_manager_ops.power_off(ps3_sys_manager_ops.dev);
ps3_sys_manager_halt();
}
void __noreturn ps3_sys_manager_restart(void)
{
if (ps3_sys_manager_ops.restart)
ps3_sys_manager_ops.restart(ps3_sys_manager_ops.dev);
ps3_sys_manager_halt();
}
void __noreturn ps3_sys_manager_halt(void)
{
pr_emerg("System Halted, OK to turn off power\n");
local_irq_disable();
while (1)
lv1_pause(1);
}
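/*
 * Minimal usage sketch (not part of the upstream file): how a loaded
 * sys-manager module could bind its power-off callback.  The callback
 * body, the helper names, and the assumption that the dev member is a
 * struct ps3_system_bus_device pointer (per <asm/ps3.h>) are made up
 * for illustration.
 */
static void __maybe_unused example_power_off(struct ps3_system_bus_device *dev)
{
	/* tell the system-manager firmware to power the machine down */
}

static void __maybe_unused example_register(struct ps3_system_bus_device *dev)
{
	static struct ps3_sys_manager_ops ops;

	ops.dev = dev;
	ops.power_off = example_power_off;
	ps3_sys_manager_register_ops(&ops);
}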
| linux-master | drivers/ps3/sys-manager-core.c |
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
* Apple RTKit IPC library
* Copyright (C) The Asahi Linux Contributors
*/
#include "rtkit-internal.h"
enum {
APPLE_RTKIT_PWR_STATE_OFF = 0x00, /* power off, cannot be restarted */
APPLE_RTKIT_PWR_STATE_SLEEP = 0x01, /* sleeping, can be restarted */
APPLE_RTKIT_PWR_STATE_IDLE = 0x201, /* sleeping, retain state */
APPLE_RTKIT_PWR_STATE_QUIESCED = 0x10, /* running but no communication */
APPLE_RTKIT_PWR_STATE_ON = 0x20, /* normal operating state */
};
enum {
APPLE_RTKIT_EP_MGMT = 0,
APPLE_RTKIT_EP_CRASHLOG = 1,
APPLE_RTKIT_EP_SYSLOG = 2,
APPLE_RTKIT_EP_DEBUG = 3,
APPLE_RTKIT_EP_IOREPORT = 4,
APPLE_RTKIT_EP_OSLOG = 8,
};
#define APPLE_RTKIT_MGMT_TYPE GENMASK_ULL(59, 52)
enum {
APPLE_RTKIT_MGMT_HELLO = 1,
APPLE_RTKIT_MGMT_HELLO_REPLY = 2,
APPLE_RTKIT_MGMT_STARTEP = 5,
APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE = 6,
APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE_ACK = 7,
APPLE_RTKIT_MGMT_EPMAP = 8,
APPLE_RTKIT_MGMT_EPMAP_REPLY = 8,
APPLE_RTKIT_MGMT_SET_AP_PWR_STATE = 0xb,
APPLE_RTKIT_MGMT_SET_AP_PWR_STATE_ACK = 0xb,
};
#define APPLE_RTKIT_MGMT_HELLO_MINVER GENMASK_ULL(15, 0)
#define APPLE_RTKIT_MGMT_HELLO_MAXVER GENMASK_ULL(31, 16)
#define APPLE_RTKIT_MGMT_EPMAP_LAST BIT_ULL(51)
#define APPLE_RTKIT_MGMT_EPMAP_BASE GENMASK_ULL(34, 32)
#define APPLE_RTKIT_MGMT_EPMAP_BITMAP GENMASK_ULL(31, 0)
#define APPLE_RTKIT_MGMT_EPMAP_REPLY_MORE BIT_ULL(0)
#define APPLE_RTKIT_MGMT_STARTEP_EP GENMASK_ULL(39, 32)
#define APPLE_RTKIT_MGMT_STARTEP_FLAG BIT_ULL(1)
#define APPLE_RTKIT_MGMT_PWR_STATE GENMASK_ULL(15, 0)
#define APPLE_RTKIT_CRASHLOG_CRASH 1
#define APPLE_RTKIT_BUFFER_REQUEST 1
#define APPLE_RTKIT_BUFFER_REQUEST_SIZE GENMASK_ULL(51, 44)
#define APPLE_RTKIT_BUFFER_REQUEST_IOVA GENMASK_ULL(43, 0)
#define APPLE_RTKIT_SYSLOG_TYPE GENMASK_ULL(59, 52)
#define APPLE_RTKIT_SYSLOG_LOG 5
#define APPLE_RTKIT_SYSLOG_INIT 8
#define APPLE_RTKIT_SYSLOG_N_ENTRIES GENMASK_ULL(7, 0)
#define APPLE_RTKIT_SYSLOG_MSG_SIZE GENMASK_ULL(31, 24)
#define APPLE_RTKIT_OSLOG_TYPE GENMASK_ULL(63, 56)
#define APPLE_RTKIT_OSLOG_INIT 1
#define APPLE_RTKIT_OSLOG_ACK 3
#define APPLE_RTKIT_MIN_SUPPORTED_VERSION 11
#define APPLE_RTKIT_MAX_SUPPORTED_VERSION 12
struct apple_rtkit_msg {
struct completion *completion;
struct apple_mbox_msg mbox_msg;
};
struct apple_rtkit_rx_work {
struct apple_rtkit *rtk;
u8 ep;
u64 msg;
struct work_struct work;
};
bool apple_rtkit_is_running(struct apple_rtkit *rtk)
{
if (rtk->crashed)
return false;
if ((rtk->iop_power_state & 0xff) != APPLE_RTKIT_PWR_STATE_ON)
return false;
if ((rtk->ap_power_state & 0xff) != APPLE_RTKIT_PWR_STATE_ON)
return false;
return true;
}
EXPORT_SYMBOL_GPL(apple_rtkit_is_running);
bool apple_rtkit_is_crashed(struct apple_rtkit *rtk)
{
return rtk->crashed;
}
EXPORT_SYMBOL_GPL(apple_rtkit_is_crashed);
static void apple_rtkit_management_send(struct apple_rtkit *rtk, u8 type,
u64 msg)
{
msg &= ~APPLE_RTKIT_MGMT_TYPE;
msg |= FIELD_PREP(APPLE_RTKIT_MGMT_TYPE, type);
apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_MGMT, msg, NULL, false);
}
static void apple_rtkit_management_rx_hello(struct apple_rtkit *rtk, u64 msg)
{
u64 reply;
int min_ver = FIELD_GET(APPLE_RTKIT_MGMT_HELLO_MINVER, msg);
int max_ver = FIELD_GET(APPLE_RTKIT_MGMT_HELLO_MAXVER, msg);
int want_ver = min(APPLE_RTKIT_MAX_SUPPORTED_VERSION, max_ver);
dev_dbg(rtk->dev, "RTKit: Min ver %d, max ver %d\n", min_ver, max_ver);
if (min_ver > APPLE_RTKIT_MAX_SUPPORTED_VERSION) {
dev_err(rtk->dev, "RTKit: Firmware min version %d is too new\n",
min_ver);
goto abort_boot;
}
if (max_ver < APPLE_RTKIT_MIN_SUPPORTED_VERSION) {
dev_err(rtk->dev, "RTKit: Firmware max version %d is too old\n",
max_ver);
goto abort_boot;
}
dev_info(rtk->dev, "RTKit: Initializing (protocol version %d)\n",
want_ver);
rtk->version = want_ver;
reply = FIELD_PREP(APPLE_RTKIT_MGMT_HELLO_MINVER, want_ver);
reply |= FIELD_PREP(APPLE_RTKIT_MGMT_HELLO_MAXVER, want_ver);
apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_HELLO_REPLY, reply);
return;
abort_boot:
rtk->boot_result = -EINVAL;
complete_all(&rtk->epmap_completion);
}
static void apple_rtkit_management_rx_epmap(struct apple_rtkit *rtk, u64 msg)
{
int i, ep;
u64 reply;
unsigned long bitmap = FIELD_GET(APPLE_RTKIT_MGMT_EPMAP_BITMAP, msg);
u32 base = FIELD_GET(APPLE_RTKIT_MGMT_EPMAP_BASE, msg);
dev_dbg(rtk->dev,
"RTKit: received endpoint bitmap 0x%lx with base 0x%x\n",
bitmap, base);
for_each_set_bit(i, &bitmap, 32) {
ep = 32 * base + i;
dev_dbg(rtk->dev, "RTKit: Discovered endpoint 0x%02x\n", ep);
set_bit(ep, rtk->endpoints);
}
reply = FIELD_PREP(APPLE_RTKIT_MGMT_EPMAP_BASE, base);
if (msg & APPLE_RTKIT_MGMT_EPMAP_LAST)
reply |= APPLE_RTKIT_MGMT_EPMAP_LAST;
else
reply |= APPLE_RTKIT_MGMT_EPMAP_REPLY_MORE;
apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_EPMAP_REPLY, reply);
if (!(msg & APPLE_RTKIT_MGMT_EPMAP_LAST))
return;
for_each_set_bit(ep, rtk->endpoints, APPLE_RTKIT_APP_ENDPOINT_START) {
switch (ep) {
/* the management endpoint is started by default */
case APPLE_RTKIT_EP_MGMT:
break;
/* without starting these RTKit refuses to boot */
case APPLE_RTKIT_EP_SYSLOG:
case APPLE_RTKIT_EP_CRASHLOG:
case APPLE_RTKIT_EP_DEBUG:
case APPLE_RTKIT_EP_IOREPORT:
case APPLE_RTKIT_EP_OSLOG:
dev_dbg(rtk->dev,
"RTKit: Starting system endpoint 0x%02x\n", ep);
apple_rtkit_start_ep(rtk, ep);
break;
default:
dev_warn(rtk->dev,
"RTKit: Unknown system endpoint: 0x%02x\n",
ep);
}
}
rtk->boot_result = 0;
complete_all(&rtk->epmap_completion);
}
static void apple_rtkit_management_rx_iop_pwr_ack(struct apple_rtkit *rtk,
u64 msg)
{
unsigned int new_state = FIELD_GET(APPLE_RTKIT_MGMT_PWR_STATE, msg);
dev_dbg(rtk->dev, "RTKit: IOP power state transition: 0x%x -> 0x%x\n",
rtk->iop_power_state, new_state);
rtk->iop_power_state = new_state;
complete_all(&rtk->iop_pwr_ack_completion);
}
static void apple_rtkit_management_rx_ap_pwr_ack(struct apple_rtkit *rtk,
u64 msg)
{
unsigned int new_state = FIELD_GET(APPLE_RTKIT_MGMT_PWR_STATE, msg);
dev_dbg(rtk->dev, "RTKit: AP power state transition: 0x%x -> 0x%x\n",
rtk->ap_power_state, new_state);
rtk->ap_power_state = new_state;
complete_all(&rtk->ap_pwr_ack_completion);
}
static void apple_rtkit_management_rx(struct apple_rtkit *rtk, u64 msg)
{
u8 type = FIELD_GET(APPLE_RTKIT_MGMT_TYPE, msg);
switch (type) {
case APPLE_RTKIT_MGMT_HELLO:
apple_rtkit_management_rx_hello(rtk, msg);
break;
case APPLE_RTKIT_MGMT_EPMAP:
apple_rtkit_management_rx_epmap(rtk, msg);
break;
case APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE_ACK:
apple_rtkit_management_rx_iop_pwr_ack(rtk, msg);
break;
case APPLE_RTKIT_MGMT_SET_AP_PWR_STATE_ACK:
apple_rtkit_management_rx_ap_pwr_ack(rtk, msg);
break;
default:
dev_warn(
rtk->dev,
"RTKit: unknown management message: 0x%llx (type: 0x%02x)\n",
msg, type);
}
}
static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
struct apple_rtkit_shmem *buffer,
u8 ep, u64 msg)
{
size_t n_4kpages = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg);
u64 reply;
int err;
buffer->buffer = NULL;
buffer->iomem = NULL;
buffer->is_mapped = false;
buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg);
buffer->size = n_4kpages << 12;
dev_dbg(rtk->dev, "RTKit: buffer request for 0x%zx bytes at %pad\n",
buffer->size, &buffer->iova);
if (buffer->iova &&
(!rtk->ops->shmem_setup || !rtk->ops->shmem_destroy)) {
err = -EINVAL;
goto error;
}
if (rtk->ops->shmem_setup) {
err = rtk->ops->shmem_setup(rtk->cookie, buffer);
if (err)
goto error;
} else {
buffer->buffer = dma_alloc_coherent(rtk->dev, buffer->size,
&buffer->iova, GFP_KERNEL);
if (!buffer->buffer) {
err = -ENOMEM;
goto error;
}
}
if (!buffer->is_mapped) {
reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE,
APPLE_RTKIT_BUFFER_REQUEST);
reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE, n_4kpages);
reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA,
buffer->iova);
apple_rtkit_send_message(rtk, ep, reply, NULL, false);
}
return 0;
error:
buffer->buffer = NULL;
buffer->iomem = NULL;
buffer->iova = 0;
buffer->size = 0;
buffer->is_mapped = false;
return err;
}
static void apple_rtkit_free_buffer(struct apple_rtkit *rtk,
struct apple_rtkit_shmem *bfr)
{
if (bfr->size == 0)
return;
if (rtk->ops->shmem_destroy)
rtk->ops->shmem_destroy(rtk->cookie, bfr);
else if (bfr->buffer)
dma_free_coherent(rtk->dev, bfr->size, bfr->buffer, bfr->iova);
bfr->buffer = NULL;
bfr->iomem = NULL;
bfr->iova = 0;
bfr->size = 0;
bfr->is_mapped = false;
}
static void apple_rtkit_memcpy(struct apple_rtkit *rtk, void *dst,
struct apple_rtkit_shmem *bfr, size_t offset,
size_t len)
{
if (bfr->iomem)
memcpy_fromio(dst, bfr->iomem + offset, len);
else
memcpy(dst, bfr->buffer + offset, len);
}
static void apple_rtkit_crashlog_rx(struct apple_rtkit *rtk, u64 msg)
{
u8 type = FIELD_GET(APPLE_RTKIT_SYSLOG_TYPE, msg);
u8 *bfr;
if (type != APPLE_RTKIT_CRASHLOG_CRASH) {
dev_warn(rtk->dev, "RTKit: Unknown crashlog message: %llx\n",
msg);
return;
}
if (!rtk->crashlog_buffer.size) {
apple_rtkit_common_rx_get_buffer(rtk, &rtk->crashlog_buffer,
APPLE_RTKIT_EP_CRASHLOG, msg);
return;
}
dev_err(rtk->dev, "RTKit: co-processor has crashed\n");
/*
* Create a shadow copy here to make sure the co-processor isn't able
* to change the log while we're dumping it. This also ensures the
* buffer is in normal memory and not iomem for e.g. the SMC.
*/
bfr = kzalloc(rtk->crashlog_buffer.size, GFP_KERNEL);
if (bfr) {
apple_rtkit_memcpy(rtk, bfr, &rtk->crashlog_buffer, 0,
rtk->crashlog_buffer.size);
apple_rtkit_crashlog_dump(rtk, bfr, rtk->crashlog_buffer.size);
kfree(bfr);
} else {
dev_err(rtk->dev,
"RTKit: Couldn't allocate crashlog shadow buffer\n");
}
rtk->crashed = true;
if (rtk->ops->crashed)
rtk->ops->crashed(rtk->cookie);
}
static void apple_rtkit_ioreport_rx(struct apple_rtkit *rtk, u64 msg)
{
u8 type = FIELD_GET(APPLE_RTKIT_SYSLOG_TYPE, msg);
switch (type) {
case APPLE_RTKIT_BUFFER_REQUEST:
apple_rtkit_common_rx_get_buffer(rtk, &rtk->ioreport_buffer,
APPLE_RTKIT_EP_IOREPORT, msg);
break;
/* unknown, must be ACKed or the co-processor will hang */
case 0x8:
case 0xc:
apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_IOREPORT, msg,
NULL, false);
break;
default:
dev_warn(rtk->dev, "RTKit: Unknown ioreport message: %llx\n",
msg);
}
}
static void apple_rtkit_syslog_rx_init(struct apple_rtkit *rtk, u64 msg)
{
rtk->syslog_n_entries = FIELD_GET(APPLE_RTKIT_SYSLOG_N_ENTRIES, msg);
rtk->syslog_msg_size = FIELD_GET(APPLE_RTKIT_SYSLOG_MSG_SIZE, msg);
rtk->syslog_msg_buffer = kzalloc(rtk->syslog_msg_size, GFP_KERNEL);
dev_dbg(rtk->dev,
"RTKit: syslog initialized: entries: %zd, msg_size: %zd\n",
rtk->syslog_n_entries, rtk->syslog_msg_size);
}
static bool should_crop_syslog_char(char c)
{
return c == '\n' || c == '\r' || c == ' ' || c == '\0';
}
static void apple_rtkit_syslog_rx_log(struct apple_rtkit *rtk, u64 msg)
{
u8 idx = msg & 0xff;
char log_context[24];
size_t entry_size = 0x20 + rtk->syslog_msg_size;
int msglen;
if (!rtk->syslog_msg_buffer) {
dev_warn(
rtk->dev,
"RTKit: received syslog message but no syslog_msg_buffer\n");
goto done;
}
if (!rtk->syslog_buffer.size) {
dev_warn(
rtk->dev,
"RTKit: received syslog message but syslog_buffer.size is zero\n");
goto done;
}
if (!rtk->syslog_buffer.buffer && !rtk->syslog_buffer.iomem) {
dev_warn(
rtk->dev,
"RTKit: received syslog message but no syslog_buffer.buffer or syslog_buffer.iomem\n");
goto done;
}
if (idx > rtk->syslog_n_entries) {
dev_warn(rtk->dev, "RTKit: syslog index %d out of range\n",
idx);
goto done;
}
apple_rtkit_memcpy(rtk, log_context, &rtk->syslog_buffer,
idx * entry_size + 8, sizeof(log_context));
apple_rtkit_memcpy(rtk, rtk->syslog_msg_buffer, &rtk->syslog_buffer,
idx * entry_size + 8 + sizeof(log_context),
rtk->syslog_msg_size);
log_context[sizeof(log_context) - 1] = 0;
msglen = rtk->syslog_msg_size - 1;
while (msglen > 0 &&
should_crop_syslog_char(rtk->syslog_msg_buffer[msglen - 1]))
msglen--;
rtk->syslog_msg_buffer[msglen] = 0;
dev_info(rtk->dev, "RTKit: syslog message: %s: %s\n", log_context,
rtk->syslog_msg_buffer);
done:
apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_SYSLOG, msg, NULL, false);
}
static void apple_rtkit_syslog_rx(struct apple_rtkit *rtk, u64 msg)
{
u8 type = FIELD_GET(APPLE_RTKIT_SYSLOG_TYPE, msg);
switch (type) {
case APPLE_RTKIT_BUFFER_REQUEST:
apple_rtkit_common_rx_get_buffer(rtk, &rtk->syslog_buffer,
APPLE_RTKIT_EP_SYSLOG, msg);
break;
case APPLE_RTKIT_SYSLOG_INIT:
apple_rtkit_syslog_rx_init(rtk, msg);
break;
case APPLE_RTKIT_SYSLOG_LOG:
apple_rtkit_syslog_rx_log(rtk, msg);
break;
default:
dev_warn(rtk->dev, "RTKit: Unknown syslog message: %llx\n",
msg);
}
}
static void apple_rtkit_oslog_rx_init(struct apple_rtkit *rtk, u64 msg)
{
u64 ack;
dev_dbg(rtk->dev, "RTKit: oslog init: msg: 0x%llx\n", msg);
ack = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE, APPLE_RTKIT_OSLOG_ACK);
apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_OSLOG, ack, NULL, false);
}
static void apple_rtkit_oslog_rx(struct apple_rtkit *rtk, u64 msg)
{
u8 type = FIELD_GET(APPLE_RTKIT_OSLOG_TYPE, msg);
switch (type) {
case APPLE_RTKIT_OSLOG_INIT:
apple_rtkit_oslog_rx_init(rtk, msg);
break;
default:
dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n", msg);
}
}
static void apple_rtkit_rx_work(struct work_struct *work)
{
struct apple_rtkit_rx_work *rtk_work =
container_of(work, struct apple_rtkit_rx_work, work);
struct apple_rtkit *rtk = rtk_work->rtk;
switch (rtk_work->ep) {
case APPLE_RTKIT_EP_MGMT:
apple_rtkit_management_rx(rtk, rtk_work->msg);
break;
case APPLE_RTKIT_EP_CRASHLOG:
apple_rtkit_crashlog_rx(rtk, rtk_work->msg);
break;
case APPLE_RTKIT_EP_SYSLOG:
apple_rtkit_syslog_rx(rtk, rtk_work->msg);
break;
case APPLE_RTKIT_EP_IOREPORT:
apple_rtkit_ioreport_rx(rtk, rtk_work->msg);
break;
case APPLE_RTKIT_EP_OSLOG:
apple_rtkit_oslog_rx(rtk, rtk_work->msg);
break;
case APPLE_RTKIT_APP_ENDPOINT_START ... 0xff:
if (rtk->ops->recv_message)
rtk->ops->recv_message(rtk->cookie, rtk_work->ep,
rtk_work->msg);
else
dev_warn(
rtk->dev,
"Received unexpected message to EP%02d: %llx\n",
rtk_work->ep, rtk_work->msg);
break;
default:
dev_warn(rtk->dev,
"RTKit: message to unknown endpoint %02x: %llx\n",
rtk_work->ep, rtk_work->msg);
}
kfree(rtk_work);
}
static void apple_rtkit_rx(struct mbox_client *cl, void *mssg)
{
struct apple_rtkit *rtk = container_of(cl, struct apple_rtkit, mbox_cl);
struct apple_mbox_msg *msg = mssg;
struct apple_rtkit_rx_work *work;
u8 ep = msg->msg1;
/*
* The message was read from a MMIO FIFO and we have to make
* sure all reads from buffers sent with that message happen
* afterwards.
*/
dma_rmb();
if (!test_bit(ep, rtk->endpoints))
dev_warn(rtk->dev,
"RTKit: Message to undiscovered endpoint 0x%02x\n",
ep);
if (ep >= APPLE_RTKIT_APP_ENDPOINT_START &&
rtk->ops->recv_message_early &&
rtk->ops->recv_message_early(rtk->cookie, ep, msg->msg0))
return;
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
return;
work->rtk = rtk;
work->ep = ep;
work->msg = msg->msg0;
INIT_WORK(&work->work, apple_rtkit_rx_work);
queue_work(rtk->wq, &work->work);
}
static void apple_rtkit_tx_done(struct mbox_client *cl, void *mssg, int r)
{
struct apple_rtkit_msg *msg =
container_of(mssg, struct apple_rtkit_msg, mbox_msg);
if (r == -ETIME)
return;
if (msg->completion)
complete(msg->completion);
kfree(msg);
}
int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
struct completion *completion, bool atomic)
{
struct apple_rtkit_msg *msg;
int ret;
gfp_t flags;
if (rtk->crashed)
return -EINVAL;
if (ep >= APPLE_RTKIT_APP_ENDPOINT_START &&
!apple_rtkit_is_running(rtk))
return -EINVAL;
if (atomic)
flags = GFP_ATOMIC;
else
flags = GFP_KERNEL;
msg = kzalloc(sizeof(*msg), flags);
if (!msg)
return -ENOMEM;
msg->mbox_msg.msg0 = message;
msg->mbox_msg.msg1 = ep;
msg->completion = completion;
/*
* The message will be sent with a MMIO write. We need the barrier
* here to ensure any previous writes to buffers are visible to the
* device before that MMIO write happens.
*/
dma_wmb();
ret = mbox_send_message(rtk->mbox_chan, &msg->mbox_msg);
if (ret < 0) {
kfree(msg);
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(apple_rtkit_send_message);
int apple_rtkit_send_message_wait(struct apple_rtkit *rtk, u8 ep, u64 message,
unsigned long timeout, bool atomic)
{
DECLARE_COMPLETION_ONSTACK(completion);
int ret;
long t;
ret = apple_rtkit_send_message(rtk, ep, message, &completion, atomic);
if (ret < 0)
return ret;
if (atomic) {
ret = mbox_flush(rtk->mbox_chan, timeout);
if (ret < 0)
return ret;
if (try_wait_for_completion(&completion))
return 0;
return -ETIME;
} else {
t = wait_for_completion_interruptible_timeout(
&completion, msecs_to_jiffies(timeout));
if (t < 0)
return t;
else if (t == 0)
return -ETIME;
return 0;
}
}
EXPORT_SYMBOL_GPL(apple_rtkit_send_message_wait);
int apple_rtkit_poll(struct apple_rtkit *rtk)
{
return mbox_client_peek_data(rtk->mbox_chan);
}
EXPORT_SYMBOL_GPL(apple_rtkit_poll);
int apple_rtkit_start_ep(struct apple_rtkit *rtk, u8 endpoint)
{
u64 msg;
if (!test_bit(endpoint, rtk->endpoints))
return -EINVAL;
if (endpoint >= APPLE_RTKIT_APP_ENDPOINT_START &&
!apple_rtkit_is_running(rtk))
return -EINVAL;
msg = FIELD_PREP(APPLE_RTKIT_MGMT_STARTEP_EP, endpoint);
msg |= APPLE_RTKIT_MGMT_STARTEP_FLAG;
apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_STARTEP, msg);
return 0;
}
EXPORT_SYMBOL_GPL(apple_rtkit_start_ep);
static int apple_rtkit_request_mbox_chan(struct apple_rtkit *rtk)
{
if (rtk->mbox_name)
rtk->mbox_chan = mbox_request_channel_byname(&rtk->mbox_cl,
rtk->mbox_name);
else
rtk->mbox_chan =
mbox_request_channel(&rtk->mbox_cl, rtk->mbox_idx);
if (IS_ERR(rtk->mbox_chan))
return PTR_ERR(rtk->mbox_chan);
return 0;
}
struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
const char *mbox_name, int mbox_idx,
const struct apple_rtkit_ops *ops)
{
struct apple_rtkit *rtk;
int ret;
if (!ops)
return ERR_PTR(-EINVAL);
rtk = kzalloc(sizeof(*rtk), GFP_KERNEL);
if (!rtk)
return ERR_PTR(-ENOMEM);
rtk->dev = dev;
rtk->cookie = cookie;
rtk->ops = ops;
init_completion(&rtk->epmap_completion);
init_completion(&rtk->iop_pwr_ack_completion);
init_completion(&rtk->ap_pwr_ack_completion);
bitmap_zero(rtk->endpoints, APPLE_RTKIT_MAX_ENDPOINTS);
set_bit(APPLE_RTKIT_EP_MGMT, rtk->endpoints);
rtk->mbox_name = mbox_name;
rtk->mbox_idx = mbox_idx;
rtk->mbox_cl.dev = dev;
rtk->mbox_cl.tx_block = false;
rtk->mbox_cl.knows_txdone = false;
rtk->mbox_cl.rx_callback = &apple_rtkit_rx;
rtk->mbox_cl.tx_done = &apple_rtkit_tx_done;
rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_MEM_RECLAIM,
dev_name(rtk->dev));
if (!rtk->wq) {
ret = -ENOMEM;
goto free_rtk;
}
ret = apple_rtkit_request_mbox_chan(rtk);
if (ret)
goto destroy_wq;
return rtk;
destroy_wq:
destroy_workqueue(rtk->wq);
free_rtk:
kfree(rtk);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(apple_rtkit_init);
static int apple_rtkit_wait_for_completion(struct completion *c)
{
long t;
t = wait_for_completion_interruptible_timeout(c,
msecs_to_jiffies(1000));
if (t < 0)
return t;
else if (t == 0)
return -ETIME;
else
return 0;
}
int apple_rtkit_reinit(struct apple_rtkit *rtk)
{
/* make sure we don't handle any messages while reinitializing */
mbox_free_channel(rtk->mbox_chan);
flush_workqueue(rtk->wq);
apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
kfree(rtk->syslog_msg_buffer);
rtk->syslog_msg_buffer = NULL;
rtk->syslog_n_entries = 0;
rtk->syslog_msg_size = 0;
bitmap_zero(rtk->endpoints, APPLE_RTKIT_MAX_ENDPOINTS);
set_bit(APPLE_RTKIT_EP_MGMT, rtk->endpoints);
reinit_completion(&rtk->epmap_completion);
reinit_completion(&rtk->iop_pwr_ack_completion);
reinit_completion(&rtk->ap_pwr_ack_completion);
rtk->crashed = false;
rtk->iop_power_state = APPLE_RTKIT_PWR_STATE_OFF;
rtk->ap_power_state = APPLE_RTKIT_PWR_STATE_OFF;
return apple_rtkit_request_mbox_chan(rtk);
}
EXPORT_SYMBOL_GPL(apple_rtkit_reinit);
static int apple_rtkit_set_ap_power_state(struct apple_rtkit *rtk,
unsigned int state)
{
u64 msg;
int ret;
reinit_completion(&rtk->ap_pwr_ack_completion);
msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_AP_PWR_STATE,
msg);
ret = apple_rtkit_wait_for_completion(&rtk->ap_pwr_ack_completion);
if (ret)
return ret;
if (rtk->ap_power_state != state)
return -EINVAL;
return 0;
}
static int apple_rtkit_set_iop_power_state(struct apple_rtkit *rtk,
unsigned int state)
{
u64 msg;
int ret;
reinit_completion(&rtk->iop_pwr_ack_completion);
msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
msg);
ret = apple_rtkit_wait_for_completion(&rtk->iop_pwr_ack_completion);
if (ret)
return ret;
if (rtk->iop_power_state != state)
return -EINVAL;
return 0;
}
int apple_rtkit_boot(struct apple_rtkit *rtk)
{
int ret;
if (apple_rtkit_is_running(rtk))
return 0;
if (rtk->crashed)
return -EINVAL;
dev_dbg(rtk->dev, "RTKit: waiting for boot to finish\n");
ret = apple_rtkit_wait_for_completion(&rtk->epmap_completion);
if (ret)
return ret;
if (rtk->boot_result)
return rtk->boot_result;
dev_dbg(rtk->dev, "RTKit: waiting for IOP power state ACK\n");
ret = apple_rtkit_wait_for_completion(&rtk->iop_pwr_ack_completion);
if (ret)
return ret;
return apple_rtkit_set_ap_power_state(rtk, APPLE_RTKIT_PWR_STATE_ON);
}
EXPORT_SYMBOL_GPL(apple_rtkit_boot);
int apple_rtkit_shutdown(struct apple_rtkit *rtk)
{
int ret;
/* if OFF is used here the co-processor will not wake up again */
ret = apple_rtkit_set_ap_power_state(rtk,
APPLE_RTKIT_PWR_STATE_QUIESCED);
if (ret)
return ret;
ret = apple_rtkit_set_iop_power_state(rtk, APPLE_RTKIT_PWR_STATE_SLEEP);
if (ret)
return ret;
return apple_rtkit_reinit(rtk);
}
EXPORT_SYMBOL_GPL(apple_rtkit_shutdown);
int apple_rtkit_idle(struct apple_rtkit *rtk)
{
int ret;
/* if OFF is used here the co-processor will not wake up again */
ret = apple_rtkit_set_ap_power_state(rtk,
APPLE_RTKIT_PWR_STATE_IDLE);
if (ret)
return ret;
ret = apple_rtkit_set_iop_power_state(rtk, APPLE_RTKIT_PWR_STATE_IDLE);
if (ret)
return ret;
rtk->iop_power_state = APPLE_RTKIT_PWR_STATE_IDLE;
rtk->ap_power_state = APPLE_RTKIT_PWR_STATE_IDLE;
return 0;
}
EXPORT_SYMBOL_GPL(apple_rtkit_idle);
int apple_rtkit_quiesce(struct apple_rtkit *rtk)
{
int ret;
ret = apple_rtkit_set_ap_power_state(rtk,
APPLE_RTKIT_PWR_STATE_QUIESCED);
if (ret)
return ret;
ret = apple_rtkit_set_iop_power_state(rtk,
APPLE_RTKIT_PWR_STATE_QUIESCED);
if (ret)
return ret;
ret = apple_rtkit_reinit(rtk);
if (ret)
return ret;
rtk->iop_power_state = APPLE_RTKIT_PWR_STATE_QUIESCED;
rtk->ap_power_state = APPLE_RTKIT_PWR_STATE_QUIESCED;
return 0;
}
EXPORT_SYMBOL_GPL(apple_rtkit_quiesce);
int apple_rtkit_wake(struct apple_rtkit *rtk)
{
u64 msg;
if (apple_rtkit_is_running(rtk))
return -EINVAL;
reinit_completion(&rtk->iop_pwr_ack_completion);
/*
* Use open-coded apple_rtkit_set_iop_power_state since apple_rtkit_boot
* will wait for the completion anyway.
*/
msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, APPLE_RTKIT_PWR_STATE_ON);
apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
msg);
return apple_rtkit_boot(rtk);
}
EXPORT_SYMBOL_GPL(apple_rtkit_wake);
void apple_rtkit_free(struct apple_rtkit *rtk)
{
mbox_free_channel(rtk->mbox_chan);
destroy_workqueue(rtk->wq);
apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
kfree(rtk->syslog_msg_buffer);
kfree(rtk);
}
EXPORT_SYMBOL_GPL(apple_rtkit_free);
static void apple_rtkit_free_wrapper(void *data)
{
apple_rtkit_free(data);
}
struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie,
const char *mbox_name, int mbox_idx,
const struct apple_rtkit_ops *ops)
{
struct apple_rtkit *rtk;
int ret;
rtk = apple_rtkit_init(dev, cookie, mbox_name, mbox_idx, ops);
if (IS_ERR(rtk))
return rtk;
ret = devm_add_action_or_reset(dev, apple_rtkit_free_wrapper, rtk);
if (ret)
return ERR_PTR(ret);
return rtk;
}
EXPORT_SYMBOL_GPL(devm_apple_rtkit_init);
MODULE_LICENSE("Dual MIT/GPL");
MODULE_AUTHOR("Sven Peter <[email protected]>");
MODULE_DESCRIPTION("Apple RTKit driver");
| linux-master | drivers/soc/apple/rtkit.c |
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
* Apple SART device driver
* Copyright (C) The Asahi Linux Contributors
*
* Apple SART is a simple address filter for some DMA transactions.
* Regions of physical memory must be added to the SART's allow
* list before any DMA can target these. Unlike a proper
* IOMMU no remapping can be done and special support in the
* consumer driver is required since not all DMA transactions of
* a single device are subject to SART filtering.
*/
#include <linux/soc/apple/sart.h>
#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#define APPLE_SART_MAX_ENTRIES 16
/* This is probably a bitfield but the exact meaning of each bit is unknown. */
#define APPLE_SART_FLAGS_ALLOW 0xff
/* SARTv2 registers */
#define APPLE_SART2_CONFIG(idx) (0x00 + 4 * (idx))
#define APPLE_SART2_CONFIG_FLAGS GENMASK(31, 24)
#define APPLE_SART2_CONFIG_SIZE GENMASK(23, 0)
#define APPLE_SART2_CONFIG_SIZE_SHIFT 12
#define APPLE_SART2_CONFIG_SIZE_MAX GENMASK(23, 0)
#define APPLE_SART2_PADDR(idx) (0x40 + 4 * (idx))
#define APPLE_SART2_PADDR_SHIFT 12
/* SARTv3 registers */
#define APPLE_SART3_CONFIG(idx) (0x00 + 4 * (idx))
#define APPLE_SART3_PADDR(idx) (0x40 + 4 * (idx))
#define APPLE_SART3_PADDR_SHIFT 12
#define APPLE_SART3_SIZE(idx) (0x80 + 4 * (idx))
#define APPLE_SART3_SIZE_SHIFT 12
#define APPLE_SART3_SIZE_MAX GENMASK(29, 0)
struct apple_sart_ops {
void (*get_entry)(struct apple_sart *sart, int index, u8 *flags,
phys_addr_t *paddr, size_t *size);
void (*set_entry)(struct apple_sart *sart, int index, u8 flags,
phys_addr_t paddr_shifted, size_t size_shifted);
unsigned int size_shift;
unsigned int paddr_shift;
size_t size_max;
};
struct apple_sart {
struct device *dev;
void __iomem *regs;
const struct apple_sart_ops *ops;
unsigned long protected_entries;
unsigned long used_entries;
};
static void sart2_get_entry(struct apple_sart *sart, int index, u8 *flags,
phys_addr_t *paddr, size_t *size)
{
u32 cfg = readl(sart->regs + APPLE_SART2_CONFIG(index));
phys_addr_t paddr_ = readl(sart->regs + APPLE_SART2_PADDR(index));
size_t size_ = FIELD_GET(APPLE_SART2_CONFIG_SIZE, cfg);
*flags = FIELD_GET(APPLE_SART2_CONFIG_FLAGS, cfg);
*size = size_ << APPLE_SART2_CONFIG_SIZE_SHIFT;
*paddr = paddr_ << APPLE_SART2_PADDR_SHIFT;
}
static void sart2_set_entry(struct apple_sart *sart, int index, u8 flags,
phys_addr_t paddr_shifted, size_t size_shifted)
{
u32 cfg;
cfg = FIELD_PREP(APPLE_SART2_CONFIG_FLAGS, flags);
cfg |= FIELD_PREP(APPLE_SART2_CONFIG_SIZE, size_shifted);
writel(paddr_shifted, sart->regs + APPLE_SART2_PADDR(index));
writel(cfg, sart->regs + APPLE_SART2_CONFIG(index));
}
static struct apple_sart_ops sart_ops_v2 = {
.get_entry = sart2_get_entry,
.set_entry = sart2_set_entry,
.size_shift = APPLE_SART2_CONFIG_SIZE_SHIFT,
.paddr_shift = APPLE_SART2_PADDR_SHIFT,
.size_max = APPLE_SART2_CONFIG_SIZE_MAX,
};
static void sart3_get_entry(struct apple_sart *sart, int index, u8 *flags,
phys_addr_t *paddr, size_t *size)
{
phys_addr_t paddr_ = readl(sart->regs + APPLE_SART3_PADDR(index));
size_t size_ = readl(sart->regs + APPLE_SART3_SIZE(index));
*flags = readl(sart->regs + APPLE_SART3_CONFIG(index));
*size = size_ << APPLE_SART3_SIZE_SHIFT;
*paddr = paddr_ << APPLE_SART3_PADDR_SHIFT;
}
static void sart3_set_entry(struct apple_sart *sart, int index, u8 flags,
phys_addr_t paddr_shifted, size_t size_shifted)
{
writel(paddr_shifted, sart->regs + APPLE_SART3_PADDR(index));
writel(size_shifted, sart->regs + APPLE_SART3_SIZE(index));
writel(flags, sart->regs + APPLE_SART3_CONFIG(index));
}
static struct apple_sart_ops sart_ops_v3 = {
.get_entry = sart3_get_entry,
.set_entry = sart3_set_entry,
.size_shift = APPLE_SART3_SIZE_SHIFT,
.paddr_shift = APPLE_SART3_PADDR_SHIFT,
.size_max = APPLE_SART3_SIZE_MAX,
};
static int apple_sart_probe(struct platform_device *pdev)
{
int i;
struct apple_sart *sart;
struct device *dev = &pdev->dev;
sart = devm_kzalloc(dev, sizeof(*sart), GFP_KERNEL);
if (!sart)
return -ENOMEM;
sart->dev = dev;
sart->ops = of_device_get_match_data(dev);
sart->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sart->regs))
return PTR_ERR(sart->regs);
for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) {
u8 flags;
size_t size;
phys_addr_t paddr;
sart->ops->get_entry(sart, i, &flags, &paddr, &size);
if (!flags)
continue;
dev_dbg(sart->dev,
"SART bootloader entry: index %02d; flags: 0x%02x; paddr: %pa; size: 0x%zx\n",
i, flags, &paddr, size);
set_bit(i, &sart->protected_entries);
}
platform_set_drvdata(pdev, sart);
return 0;
}
static void apple_sart_put_device(void *dev)
{
put_device(dev);
}
struct apple_sart *devm_apple_sart_get(struct device *dev)
{
struct device_node *sart_node;
struct platform_device *sart_pdev;
struct apple_sart *sart;
int ret;
sart_node = of_parse_phandle(dev->of_node, "apple,sart", 0);
if (!sart_node)
return ERR_PTR(-ENODEV);
sart_pdev = of_find_device_by_node(sart_node);
of_node_put(sart_node);
if (!sart_pdev)
return ERR_PTR(-ENODEV);
sart = dev_get_drvdata(&sart_pdev->dev);
if (!sart) {
put_device(&sart_pdev->dev);
return ERR_PTR(-EPROBE_DEFER);
}
ret = devm_add_action_or_reset(dev, apple_sart_put_device,
&sart_pdev->dev);
if (ret)
return ERR_PTR(ret);
device_link_add(dev, &sart_pdev->dev,
DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
return sart;
}
EXPORT_SYMBOL_GPL(devm_apple_sart_get);
static int sart_set_entry(struct apple_sart *sart, int index, u8 flags,
phys_addr_t paddr, size_t size)
{
if (size & ((1 << sart->ops->size_shift) - 1))
return -EINVAL;
if (paddr & ((1 << sart->ops->paddr_shift) - 1))
return -EINVAL;
paddr >>= sart->ops->size_shift;
size >>= sart->ops->paddr_shift;
if (size > sart->ops->size_max)
return -EINVAL;
sart->ops->set_entry(sart, index, flags, paddr, size);
return 0;
}
int apple_sart_add_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
size_t size)
{
int i, ret;
for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) {
if (test_bit(i, &sart->protected_entries))
continue;
if (test_and_set_bit(i, &sart->used_entries))
continue;
ret = sart_set_entry(sart, i, APPLE_SART_FLAGS_ALLOW, paddr,
size);
if (ret) {
dev_dbg(sart->dev,
"unable to set entry %d to [%pa, 0x%zx]\n",
i, &paddr, size);
clear_bit(i, &sart->used_entries);
return ret;
}
dev_dbg(sart->dev, "wrote [%pa, 0x%zx] to %d\n", &paddr, size,
i);
return 0;
}
dev_warn(sart->dev,
"no free entries left to add [paddr: 0x%pa, size: 0x%zx]\n",
&paddr, size);
return -EBUSY;
}
EXPORT_SYMBOL_GPL(apple_sart_add_allowed_region);
int apple_sart_remove_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
size_t size)
{
int i;
dev_dbg(sart->dev,
"will remove [paddr: %pa, size: 0x%zx] from allowed regions\n",
&paddr, size);
for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) {
u8 eflags;
size_t esize;
phys_addr_t epaddr;
if (test_bit(i, &sart->protected_entries))
continue;
sart->ops->get_entry(sart, i, &eflags, &epaddr, &esize);
if (epaddr != paddr || esize != size)
continue;
sart->ops->set_entry(sart, i, 0, 0, 0);
clear_bit(i, &sart->used_entries);
dev_dbg(sart->dev, "cleared entry %d\n", i);
return 0;
}
dev_warn(sart->dev, "entry [paddr: 0x%pa, size: 0x%zx] not found\n",
&paddr, size);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(apple_sart_remove_allowed_region);
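/*
 * Minimal usage sketch (not part of the upstream file): the add/remove
 * pairing a consumer driver would use around a DMA transaction.  The
 * helper name and its arguments are made up for illustration.
 */
static int __maybe_unused example_sart_dma(struct device *dev,
					   phys_addr_t paddr, size_t size)
{
	struct apple_sart *sart = devm_apple_sart_get(dev);
	int ret;

	if (IS_ERR(sart))
		return PTR_ERR(sart);

	/* Allow the co-processor to access [paddr, paddr + size). */
	ret = apple_sart_add_allowed_region(sart, paddr, size);
	if (ret)
		return ret;

	/* ... run the DMA transaction here ... */

	/* Drop the window again once the buffer is no longer needed. */
	return apple_sart_remove_allowed_region(sart, paddr, size);
}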
static void apple_sart_shutdown(struct platform_device *pdev)
{
struct apple_sart *sart = dev_get_drvdata(&pdev->dev);
int i;
for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) {
if (test_bit(i, &sart->protected_entries))
continue;
sart->ops->set_entry(sart, i, 0, 0, 0);
}
}
static const struct of_device_id apple_sart_of_match[] = {
{
.compatible = "apple,t6000-sart",
.data = &sart_ops_v3,
},
{
.compatible = "apple,t8103-sart",
.data = &sart_ops_v2,
},
{}
};
MODULE_DEVICE_TABLE(of, apple_sart_of_match);
static struct platform_driver apple_sart_driver = {
.driver = {
.name = "apple-sart",
.of_match_table = apple_sart_of_match,
},
.probe = apple_sart_probe,
.shutdown = apple_sart_shutdown,
};
module_platform_driver(apple_sart_driver);
MODULE_LICENSE("Dual MIT/GPL");
MODULE_AUTHOR("Sven Peter <[email protected]>");
MODULE_DESCRIPTION("Apple SART driver");
| linux-master | drivers/soc/apple/sart.c |
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
* Apple RTKit IPC library
* Copyright (C) The Asahi Linux Contributors
*/
#include "rtkit-internal.h"
#define FOURCC(a, b, c, d) \
(((u32)(a) << 24) | ((u32)(b) << 16) | ((u32)(c) << 8) | ((u32)(d)))
#define APPLE_RTKIT_CRASHLOG_HEADER FOURCC('C', 'L', 'H', 'E')
#define APPLE_RTKIT_CRASHLOG_STR FOURCC('C', 's', 't', 'r')
#define APPLE_RTKIT_CRASHLOG_VERSION FOURCC('C', 'v', 'e', 'r')
#define APPLE_RTKIT_CRASHLOG_MBOX FOURCC('C', 'm', 'b', 'x')
#define APPLE_RTKIT_CRASHLOG_TIME FOURCC('C', 't', 'i', 'm')
#define APPLE_RTKIT_CRASHLOG_REGS FOURCC('C', 'r', 'g', '8')
/* For COMPILE_TEST on non-ARM64 architectures */
#ifndef PSR_MODE_EL0t
#define PSR_MODE_EL0t 0x00000000
#define PSR_MODE_EL1t 0x00000004
#define PSR_MODE_EL1h 0x00000005
#define PSR_MODE_EL2t 0x00000008
#define PSR_MODE_EL2h 0x00000009
#define PSR_MODE_MASK 0x0000000f
#endif
struct apple_rtkit_crashlog_header {
u32 fourcc;
u32 version;
u32 size;
u32 flags;
u8 _unk[16];
};
static_assert(sizeof(struct apple_rtkit_crashlog_header) == 0x20);
struct apple_rtkit_crashlog_mbox_entry {
u64 msg0;
u64 msg1;
u32 timestamp;
u8 _unk[4];
};
static_assert(sizeof(struct apple_rtkit_crashlog_mbox_entry) == 0x18);
struct apple_rtkit_crashlog_regs {
u32 unk_0;
u32 unk_4;
u64 regs[31];
u64 sp;
u64 pc;
u64 psr;
u64 cpacr;
u64 fpsr;
u64 fpcr;
u64 unk[64];
u64 far;
u64 unk_X;
u64 esr;
u64 unk_Z;
} __packed;
static_assert(sizeof(struct apple_rtkit_crashlog_regs) == 0x350);
static void apple_rtkit_crashlog_dump_str(struct apple_rtkit *rtk, u8 *bfr,
size_t size)
{
u32 idx;
u8 *ptr, *end;
memcpy(&idx, bfr, 4);
ptr = bfr + 4;
end = bfr + size;
while (ptr < end) {
u8 *newline = memchr(ptr, '\n', end - ptr);
if (newline) {
u8 tmp = *newline;
*newline = '\0';
dev_warn(rtk->dev, "RTKit: Message (id=%x): %s\n", idx,
ptr);
*newline = tmp;
ptr = newline + 1;
} else {
dev_warn(rtk->dev, "RTKit: Message (id=%x): %s", idx,
ptr);
break;
}
}
}
static void apple_rtkit_crashlog_dump_version(struct apple_rtkit *rtk, u8 *bfr,
size_t size)
{
dev_warn(rtk->dev, "RTKit: Version: %s", bfr + 16);
}
static void apple_rtkit_crashlog_dump_time(struct apple_rtkit *rtk, u8 *bfr,
size_t size)
{
u64 crash_time;
memcpy(&crash_time, bfr, 8);
dev_warn(rtk->dev, "RTKit: Crash time: %lld", crash_time);
}
static void apple_rtkit_crashlog_dump_mailbox(struct apple_rtkit *rtk, u8 *bfr,
size_t size)
{
u32 type, index, i;
size_t n_messages;
struct apple_rtkit_crashlog_mbox_entry entry;
memcpy(&type, bfr + 16, 4);
memcpy(&index, bfr + 24, 4);
n_messages = (size - 28) / sizeof(entry);
dev_warn(rtk->dev, "RTKit: Mailbox history (type = %d, index = %d)",
type, index);
for (i = 0; i < n_messages; ++i) {
memcpy(&entry, bfr + 28 + i * sizeof(entry), sizeof(entry));
dev_warn(rtk->dev, "RTKit: #%03d@%08x: %016llx %016llx", i,
entry.timestamp, entry.msg0, entry.msg1);
}
}
static void apple_rtkit_crashlog_dump_regs(struct apple_rtkit *rtk, u8 *bfr,
size_t size)
{
struct apple_rtkit_crashlog_regs *regs;
const char *el;
int i;
if (size < sizeof(*regs)) {
dev_warn(rtk->dev, "RTKit: Regs section too small: 0x%zx", size);
return;
}
regs = (struct apple_rtkit_crashlog_regs *)bfr;
switch (regs->psr & PSR_MODE_MASK) {
case PSR_MODE_EL0t:
el = "EL0t";
break;
case PSR_MODE_EL1t:
el = "EL1t";
break;
case PSR_MODE_EL1h:
el = "EL1h";
break;
case PSR_MODE_EL2t:
el = "EL2t";
break;
case PSR_MODE_EL2h:
el = "EL2h";
break;
default:
el = "unknown";
break;
}
dev_warn(rtk->dev, "RTKit: Exception dump:");
dev_warn(rtk->dev, " == Exception taken from %s ==", el);
dev_warn(rtk->dev, " PSR = 0x%llx", regs->psr);
dev_warn(rtk->dev, " PC = 0x%llx\n", regs->pc);
dev_warn(rtk->dev, " ESR = 0x%llx\n", regs->esr);
dev_warn(rtk->dev, " FAR = 0x%llx\n", regs->far);
dev_warn(rtk->dev, " SP = 0x%llx\n", regs->sp);
dev_warn(rtk->dev, "\n");
for (i = 0; i < 31; i += 4) {
if (i < 28)
dev_warn(rtk->dev,
" x%02d-x%02d = %016llx %016llx %016llx %016llx\n",
i, i + 3,
regs->regs[i], regs->regs[i + 1],
regs->regs[i + 2], regs->regs[i + 3]);
else
dev_warn(rtk->dev,
" x%02d-x%02d = %016llx %016llx %016llx\n", i, i + 3,
regs->regs[i], regs->regs[i + 1], regs->regs[i + 2]);
}
dev_warn(rtk->dev, "\n");
}
void apple_rtkit_crashlog_dump(struct apple_rtkit *rtk, u8 *bfr, size_t size)
{
size_t offset;
u32 section_fourcc, section_size;
struct apple_rtkit_crashlog_header header;
memcpy(&header, bfr, sizeof(header));
if (header.fourcc != APPLE_RTKIT_CRASHLOG_HEADER) {
dev_warn(rtk->dev, "RTKit: Expected crashlog header but got %x",
header.fourcc);
return;
}
if (header.size > size) {
dev_warn(rtk->dev, "RTKit: Crashlog size (%x) is too large",
header.size);
return;
}
size = header.size;
offset = sizeof(header);
while (offset < size) {
memcpy(&section_fourcc, bfr + offset, 4);
memcpy(&section_size, bfr + offset + 12, 4);
switch (section_fourcc) {
case APPLE_RTKIT_CRASHLOG_HEADER:
dev_dbg(rtk->dev, "RTKit: End of crashlog reached");
return;
case APPLE_RTKIT_CRASHLOG_STR:
apple_rtkit_crashlog_dump_str(rtk, bfr + offset + 16,
section_size);
break;
case APPLE_RTKIT_CRASHLOG_VERSION:
apple_rtkit_crashlog_dump_version(
rtk, bfr + offset + 16, section_size);
break;
case APPLE_RTKIT_CRASHLOG_MBOX:
apple_rtkit_crashlog_dump_mailbox(
rtk, bfr + offset + 16, section_size);
break;
case APPLE_RTKIT_CRASHLOG_TIME:
apple_rtkit_crashlog_dump_time(rtk, bfr + offset + 16,
section_size);
break;
case APPLE_RTKIT_CRASHLOG_REGS:
apple_rtkit_crashlog_dump_regs(rtk, bfr + offset + 16,
section_size);
break;
default:
dev_warn(rtk->dev,
"RTKit: Unknown crashlog section: %x",
section_fourcc);
}
offset += section_size;
}
dev_warn(rtk->dev,
"RTKit: End of crashlog reached but no footer present");
}
| linux-master | drivers/soc/apple/rtkit-crashlog.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include <linux/platform_device.h>
#include <linux/arm-smccc.h>
#include <linux/of.h>
#include <linux/clk.h>
#define REV_B1 0x21
#define IMX8MQ_SW_INFO_B1 0x40
#define IMX8MQ_SW_MAGIC_B1 0xff0055aa
#define IMX_SIP_GET_SOC_INFO 0xc2000006
#define OCOTP_UID_LOW 0x410
#define OCOTP_UID_HIGH 0x420
#define IMX8MP_OCOTP_UID_OFFSET 0x10
/* Same as ANADIG_DIGPROG_IMX7D */
#define ANADIG_DIGPROG_IMX8MM 0x800
struct imx8_soc_data {
char *name;
u32 (*soc_revision)(void);
};
static u64 soc_uid;
#ifdef CONFIG_HAVE_ARM_SMCCC
static u32 imx8mq_soc_revision_from_atf(void)
{
struct arm_smccc_res res;
arm_smccc_smc(IMX_SIP_GET_SOC_INFO, 0, 0, 0, 0, 0, 0, 0, &res);
if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
return 0;
else
return res.a0 & 0xff;
}
#else
static inline u32 imx8mq_soc_revision_from_atf(void) { return 0; };
#endif
static u32 __init imx8mq_soc_revision(void)
{
struct device_node *np;
void __iomem *ocotp_base;
u32 magic;
u32 rev;
struct clk *clk;
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-ocotp");
if (!np)
return 0;
ocotp_base = of_iomap(np, 0);
WARN_ON(!ocotp_base);
clk = of_clk_get_by_name(np, NULL);
if (IS_ERR(clk)) {
WARN_ON(IS_ERR(clk));
return 0;
}
clk_prepare_enable(clk);
/*
* SOC revision on older imx8mq is not available in fuses so query
* the value from ATF instead.
*/
rev = imx8mq_soc_revision_from_atf();
if (!rev) {
magic = readl_relaxed(ocotp_base + IMX8MQ_SW_INFO_B1);
if (magic == IMX8MQ_SW_MAGIC_B1)
rev = REV_B1;
}
soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
soc_uid <<= 32;
soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
clk_disable_unprepare(clk);
clk_put(clk);
iounmap(ocotp_base);
of_node_put(np);
return rev;
}
static void __init imx8mm_soc_uid(void)
{
void __iomem *ocotp_base;
struct device_node *np;
u32 offset = of_machine_is_compatible("fsl,imx8mp") ?
IMX8MP_OCOTP_UID_OFFSET : 0;
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp");
if (!np)
return;
ocotp_base = of_iomap(np, 0);
WARN_ON(!ocotp_base);
soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset);
soc_uid <<= 32;
soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset);
iounmap(ocotp_base);
of_node_put(np);
}
static u32 __init imx8mm_soc_revision(void)
{
struct device_node *np;
void __iomem *anatop_base;
u32 rev;
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
if (!np)
return 0;
anatop_base = of_iomap(np, 0);
WARN_ON(!anatop_base);
rev = readl_relaxed(anatop_base + ANADIG_DIGPROG_IMX8MM);
iounmap(anatop_base);
of_node_put(np);
imx8mm_soc_uid();
return rev;
}
static const struct imx8_soc_data imx8mq_soc_data = {
.name = "i.MX8MQ",
.soc_revision = imx8mq_soc_revision,
};
static const struct imx8_soc_data imx8mm_soc_data = {
.name = "i.MX8MM",
.soc_revision = imx8mm_soc_revision,
};
static const struct imx8_soc_data imx8mn_soc_data = {
.name = "i.MX8MN",
.soc_revision = imx8mm_soc_revision,
};
static const struct imx8_soc_data imx8mp_soc_data = {
.name = "i.MX8MP",
.soc_revision = imx8mm_soc_revision,
};
static __maybe_unused const struct of_device_id imx8_soc_match[] = {
{ .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, },
{ .compatible = "fsl,imx8mm", .data = &imx8mm_soc_data, },
{ .compatible = "fsl,imx8mn", .data = &imx8mn_soc_data, },
{ .compatible = "fsl,imx8mp", .data = &imx8mp_soc_data, },
{ }
};
#define imx8_revision(soc_rev) \
soc_rev ? \
kasprintf(GFP_KERNEL, "%d.%d", (soc_rev >> 4) & 0xf, soc_rev & 0xf) : \
"unknown"
static int __init imx8_soc_init(void)
{
struct soc_device_attribute *soc_dev_attr;
struct soc_device *soc_dev;
const struct of_device_id *id;
u32 soc_rev = 0;
const struct imx8_soc_data *data;
int ret;
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
soc_dev_attr->family = "Freescale i.MX";
ret = of_property_read_string(of_root, "model", &soc_dev_attr->machine);
if (ret)
goto free_soc;
id = of_match_node(imx8_soc_match, of_root);
if (!id) {
ret = -ENODEV;
goto free_soc;
}
data = id->data;
if (data) {
soc_dev_attr->soc_id = data->name;
if (data->soc_revision)
soc_rev = data->soc_revision();
}
soc_dev_attr->revision = imx8_revision(soc_rev);
if (!soc_dev_attr->revision) {
ret = -ENOMEM;
goto free_soc;
}
soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX", soc_uid);
if (!soc_dev_attr->serial_number) {
ret = -ENOMEM;
goto free_rev;
}
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
ret = PTR_ERR(soc_dev);
goto free_serial_number;
}
pr_info("SoC: %s revision %s\n", soc_dev_attr->soc_id,
soc_dev_attr->revision);
if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT))
platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
return 0;
free_serial_number:
kfree(soc_dev_attr->serial_number);
free_rev:
if (strcmp(soc_dev_attr->revision, "unknown"))
kfree(soc_dev_attr->revision);
free_soc:
kfree(soc_dev_attr);
return ret;
}
device_initcall(imx8_soc_init);
MODULE_LICENSE("GPL");
| linux-master | drivers/soc/imx/soc-imx8m.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020 NXP
*/
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include <soc/imx/cpu.h>
#include <soc/imx/revision.h>
#define IIM_UID 0x820
#define OCOTP_UID_H 0x420
#define OCOTP_UID_L 0x410
#define OCOTP_ULP_UID_1 0x4b0
#define OCOTP_ULP_UID_2 0x4c0
#define OCOTP_ULP_UID_3 0x4d0
#define OCOTP_ULP_UID_4 0x4e0
static int __init imx_soc_device_init(void)
{
struct soc_device_attribute *soc_dev_attr;
const char *ocotp_compat = NULL;
struct soc_device *soc_dev;
struct device_node *root;
struct regmap *ocotp = NULL;
const char *soc_id;
u64 soc_uid = 0;
u32 val;
int ret;
int i;
/* Return early if this is running on devices with different SoCs */
if (!__mxc_cpu_type)
return 0;
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
soc_dev_attr->family = "Freescale i.MX";
root = of_find_node_by_path("/");
ret = of_property_read_string(root, "model", &soc_dev_attr->machine);
of_node_put(root);
if (ret)
goto free_soc;
switch (__mxc_cpu_type) {
case MXC_CPU_MX1:
soc_id = "i.MX1";
break;
case MXC_CPU_MX21:
soc_id = "i.MX21";
break;
case MXC_CPU_MX25:
soc_id = "i.MX25";
break;
case MXC_CPU_MX27:
soc_id = "i.MX27";
break;
case MXC_CPU_MX31:
soc_id = "i.MX31";
break;
case MXC_CPU_MX35:
soc_id = "i.MX35";
break;
case MXC_CPU_MX50:
soc_id = "i.MX50";
break;
case MXC_CPU_MX51:
ocotp_compat = "fsl,imx51-iim";
soc_id = "i.MX51";
break;
case MXC_CPU_MX53:
ocotp_compat = "fsl,imx53-iim";
soc_id = "i.MX53";
break;
case MXC_CPU_IMX6SL:
ocotp_compat = "fsl,imx6sl-ocotp";
soc_id = "i.MX6SL";
break;
case MXC_CPU_IMX6DL:
ocotp_compat = "fsl,imx6q-ocotp";
soc_id = "i.MX6DL";
break;
case MXC_CPU_IMX6SX:
ocotp_compat = "fsl,imx6sx-ocotp";
soc_id = "i.MX6SX";
break;
case MXC_CPU_IMX6Q:
ocotp_compat = "fsl,imx6q-ocotp";
soc_id = "i.MX6Q";
break;
case MXC_CPU_IMX6UL:
ocotp_compat = "fsl,imx6ul-ocotp";
soc_id = "i.MX6UL";
break;
case MXC_CPU_IMX6ULL:
ocotp_compat = "fsl,imx6ull-ocotp";
soc_id = "i.MX6ULL";
break;
case MXC_CPU_IMX6ULZ:
ocotp_compat = "fsl,imx6ull-ocotp";
soc_id = "i.MX6ULZ";
break;
case MXC_CPU_IMX6SLL:
ocotp_compat = "fsl,imx6sll-ocotp";
soc_id = "i.MX6SLL";
break;
case MXC_CPU_IMX7D:
ocotp_compat = "fsl,imx7d-ocotp";
soc_id = "i.MX7D";
break;
case MXC_CPU_IMX7ULP:
ocotp_compat = "fsl,imx7ulp-ocotp";
soc_id = "i.MX7ULP";
break;
case MXC_CPU_VF500:
ocotp_compat = "fsl,vf610-ocotp";
soc_id = "VF500";
break;
case MXC_CPU_VF510:
ocotp_compat = "fsl,vf610-ocotp";
soc_id = "VF510";
break;
case MXC_CPU_VF600:
ocotp_compat = "fsl,vf610-ocotp";
soc_id = "VF600";
break;
case MXC_CPU_VF610:
ocotp_compat = "fsl,vf610-ocotp";
soc_id = "VF610";
break;
default:
soc_id = "Unknown";
}
soc_dev_attr->soc_id = soc_id;
if (ocotp_compat) {
ocotp = syscon_regmap_lookup_by_compatible(ocotp_compat);
if (IS_ERR(ocotp))
pr_err("%s: failed to find %s regmap!\n", __func__, ocotp_compat);
}
if (!IS_ERR_OR_NULL(ocotp)) {
if (__mxc_cpu_type == MXC_CPU_IMX7ULP) {
regmap_read(ocotp, OCOTP_ULP_UID_4, &val);
soc_uid = val & 0xffff;
regmap_read(ocotp, OCOTP_ULP_UID_3, &val);
soc_uid <<= 16;
soc_uid |= val & 0xffff;
regmap_read(ocotp, OCOTP_ULP_UID_2, &val);
soc_uid <<= 16;
soc_uid |= val & 0xffff;
regmap_read(ocotp, OCOTP_ULP_UID_1, &val);
soc_uid <<= 16;
soc_uid |= val & 0xffff;
} else if (__mxc_cpu_type == MXC_CPU_MX51 ||
__mxc_cpu_type == MXC_CPU_MX53) {
for (i = 0; i < 8; i++) {
regmap_read(ocotp, IIM_UID + i * 4, &val);
soc_uid <<= 8;
soc_uid |= (val & 0xff);
}
} else {
regmap_read(ocotp, OCOTP_UID_H, &val);
soc_uid = val;
regmap_read(ocotp, OCOTP_UID_L, &val);
soc_uid <<= 32;
soc_uid |= val;
}
}
soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%d.%d",
(imx_get_soc_revision() >> 4) & 0xf,
imx_get_soc_revision() & 0xf);
if (!soc_dev_attr->revision) {
ret = -ENOMEM;
goto free_soc;
}
soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX", soc_uid);
if (!soc_dev_attr->serial_number) {
ret = -ENOMEM;
goto free_rev;
}
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
ret = PTR_ERR(soc_dev);
goto free_serial_number;
}
return 0;
free_serial_number:
kfree(soc_dev_attr->serial_number);
free_rev:
kfree(soc_dev_attr->revision);
free_soc:
kfree(soc_dev_attr);
return ret;
}
device_initcall(imx_soc_device_init);
| linux-master | drivers/soc/imx/soc-imx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2022 NXP
*/
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
static int imx93_src_probe(struct platform_device *pdev)
{
return devm_of_platform_populate(&pdev->dev);
}
static const struct of_device_id imx93_src_ids[] = {
{ .compatible = "fsl,imx93-src" },
{ }
};
MODULE_DEVICE_TABLE(of, imx93_src_ids);
static struct platform_driver imx93_src_driver = {
.driver = {
.name = "imx93_src",
.of_match_table = imx93_src_ids,
},
.probe = imx93_src_probe,
};
module_platform_driver(imx93_src_driver);
MODULE_AUTHOR("Peng Fan <[email protected]>");
MODULE_DESCRIPTION("NXP i.MX93 src driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/soc/imx/imx93-src.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) ST-Ericsson SA 2010
*
* Author: Rabin Vincent <[email protected]> for ST-Ericsson
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/sys_soc.h>
#include <asm/cputype.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mach/map.h>
/**
* struct dbx500_asic_id - fields of the ASIC ID
* @process: the manufacturing process, 0x40 is 40 nm, 0x00 is "standard"
* @partnumber: hitherto 0x8500 for DB8500
* @revision: version code in the series
*/
struct dbx500_asic_id {
u16 partnumber;
u8 revision;
u8 process;
};
static struct dbx500_asic_id dbx500_id;
static unsigned int __init ux500_read_asicid(phys_addr_t addr)
{
void __iomem *virt = ioremap(addr, 4);
unsigned int asicid;
if (!virt)
return 0;
asicid = readl(virt);
iounmap(virt);
return asicid;
}
static void ux500_print_soc_info(unsigned int asicid)
{
unsigned int rev = dbx500_id.revision;
pr_info("DB%4x ", dbx500_id.partnumber);
if (rev == 0x01)
pr_cont("Early Drop");
else if (rev >= 0xA0)
pr_cont("v%d.%d" , (rev >> 4) - 0xA + 1, rev & 0xf);
else
pr_cont("Unknown");
pr_cont(" [%#010x]\n", asicid);
}
static unsigned int partnumber(unsigned int asicid)
{
return (asicid >> 8) & 0xffff;
}
/*
* SOC MIDR ASICID ADDRESS ASICID VALUE
* DB8500ed 0x410fc090 0x9001FFF4 0x00850001
* DB8500v1 0x411fc091 0x9001FFF4 0x008500A0
* DB8500v1.1 0x411fc091 0x9001FFF4 0x008500A1
* DB8500v2 0x412fc091 0x9001DBF4 0x008500B0
* DB8520v2.2 0x412fc091 0x9001DBF4 0x008500B2
* DB5500v1 0x412fc091 0x9001FFF4 0x005500A0
* DB9540 0x413fc090 0xFFFFDBF4 0x009540xx
*/
static void __init ux500_setup_id(void)
{
unsigned int cpuid = read_cpuid_id();
unsigned int asicid = 0;
phys_addr_t addr = 0;
switch (cpuid) {
case 0x410fc090: /* DB8500ed */
case 0x411fc091: /* DB8500v1 */
addr = 0x9001FFF4;
break;
case 0x412fc091: /* DB8520 / DB8500v2 / DB5500v1 */
asicid = ux500_read_asicid(0x9001DBF4);
if (partnumber(asicid) == 0x8500 ||
partnumber(asicid) == 0x8520)
/* DB8500v2 */
break;
/* DB5500v1 */
addr = 0x9001FFF4;
break;
case 0x413fc090: /* DB9540 */
addr = 0xFFFFDBF4;
break;
}
if (addr)
asicid = ux500_read_asicid(addr);
if (!asicid) {
pr_err("Unable to identify SoC\n");
BUG();
}
dbx500_id.process = asicid >> 24;
dbx500_id.partnumber = partnumber(asicid);
dbx500_id.revision = asicid & 0xff;
ux500_print_soc_info(asicid);
}
static const char * __init ux500_get_machine(void)
{
return kasprintf(GFP_KERNEL, "DB%4x", dbx500_id.partnumber);
}
static const char * __init ux500_get_family(void)
{
return kasprintf(GFP_KERNEL, "ux500");
}
static const char * __init ux500_get_revision(void)
{
unsigned int rev = dbx500_id.revision;
if (rev == 0x01)
return kasprintf(GFP_KERNEL, "%s", "ED");
else if (rev >= 0xA0)
return kasprintf(GFP_KERNEL, "%d.%d",
(rev >> 4) - 0xA + 1, rev & 0xf);
return kasprintf(GFP_KERNEL, "%s", "Unknown");
}
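/*
 * For example, the DB8520v2.2 ASIC ID 0x008500B2 from the table above has
 * revision byte 0xB2, which the formula above reports as "2.2".
 */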
static ssize_t
process_show(struct device *dev, struct device_attribute *attr, char *buf)
{
if (dbx500_id.process == 0x00)
return sprintf(buf, "Standard\n");
return sprintf(buf, "%02xnm\n", dbx500_id.process);
}
static DEVICE_ATTR_RO(process);
static struct attribute *ux500_soc_attrs[] = {
&dev_attr_process.attr,
NULL
};
ATTRIBUTE_GROUPS(ux500_soc);
static const char *db8500_read_soc_id(struct device_node *backupram)
{
void __iomem *base;
const char *retstr;
u32 uid[5];
base = of_iomap(backupram, 0);
if (!base)
return NULL;
memcpy_fromio(uid, base + 0x1fc0, sizeof(uid));
/* Throw these device-specific numbers into the entropy pool */
add_device_randomness(uid, sizeof(uid));
retstr = kasprintf(GFP_KERNEL, "%08x%08x%08x%08x%08x",
uid[0], uid[1], uid[2], uid[3], uid[4]);
iounmap(base);
return retstr;
}
static void __init soc_info_populate(struct soc_device_attribute *soc_dev_attr,
struct device_node *backupram)
{
soc_dev_attr->soc_id = db8500_read_soc_id(backupram);
soc_dev_attr->machine = ux500_get_machine();
soc_dev_attr->family = ux500_get_family();
soc_dev_attr->revision = ux500_get_revision();
soc_dev_attr->custom_attr_group = ux500_soc_groups[0];
}
static int __init ux500_soc_device_init(void)
{
struct soc_device *soc_dev;
struct soc_device_attribute *soc_dev_attr;
struct device_node *backupram;
backupram = of_find_compatible_node(NULL, NULL, "ste,dbx500-backupram");
if (!backupram)
return 0;
ux500_setup_id();
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr) {
of_node_put(backupram);
return -ENOMEM;
}
soc_info_populate(soc_dev_attr, backupram);
of_node_put(backupram);
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr);
return PTR_ERR(soc_dev);
}
return 0;
}
subsys_initcall(ux500_soc_device_init);
| linux-master | drivers/soc/ux500/ux500-soc-id.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mach-pxa/ssp.c
*
* based on linux/arch/arm/mach-sa1100/ssp.c by Russell King
*
* Copyright (C) 2003 Russell King.
* Copyright (C) 2003 Wolfson Microelectronics PLC
*
* PXA2xx SSP driver. This provides the generic core for simple
* IO-based SSP applications and allows easy port setup for DMA access.
*
* Author: Liam Girdwood <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/irq.h>
static DEFINE_MUTEX(ssp_lock);
static LIST_HEAD(ssp_list);
struct ssp_device *pxa_ssp_request(int port, const char *label)
{
struct ssp_device *ssp = NULL;
mutex_lock(&ssp_lock);
list_for_each_entry(ssp, &ssp_list, node) {
if (ssp->port_id == port && ssp->use_count == 0) {
ssp->use_count++;
ssp->label = label;
break;
}
}
mutex_unlock(&ssp_lock);
if (&ssp->node == &ssp_list)
return NULL;
return ssp;
}
EXPORT_SYMBOL(pxa_ssp_request);
struct ssp_device *pxa_ssp_request_of(const struct device_node *of_node,
const char *label)
{
struct ssp_device *ssp = NULL;
mutex_lock(&ssp_lock);
list_for_each_entry(ssp, &ssp_list, node) {
if (ssp->of_node == of_node && ssp->use_count == 0) {
ssp->use_count++;
ssp->label = label;
break;
}
}
mutex_unlock(&ssp_lock);
if (&ssp->node == &ssp_list)
return NULL;
return ssp;
}
EXPORT_SYMBOL(pxa_ssp_request_of);
void pxa_ssp_free(struct ssp_device *ssp)
{
mutex_lock(&ssp_lock);
if (ssp->use_count) {
ssp->use_count--;
ssp->label = NULL;
} else
dev_err(ssp->dev, "device already free\n");
mutex_unlock(&ssp_lock);
}
EXPORT_SYMBOL(pxa_ssp_free);
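/*
 * Illustrative sketch (not part of this driver): a minimal consumer of the
 * pxa_ssp_request()/pxa_ssp_free() API exported above. The port number,
 * label and the PXA_SSP_USAGE_EXAMPLE guard are hypothetical; the block is
 * compiled out by default.
 */
#ifdef PXA_SSP_USAGE_EXAMPLE
static int example_claim_ssp_port(void)
{
	struct ssp_device *ssp;

	/* Claim SSP port 1 exclusively and tag it with a client label */
	ssp = pxa_ssp_request(1, "example-client");
	if (!ssp)
		return -EBUSY;	/* port missing or already in use */

	/* ... program registers via ssp->mmio_base, use ssp->irq ... */

	pxa_ssp_free(ssp);
	return 0;
}
#endif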
#ifdef CONFIG_OF
static const struct of_device_id pxa_ssp_of_ids[] = {
{ .compatible = "mrvl,pxa25x-ssp", .data = (void *) PXA25x_SSP },
{ .compatible = "mvrl,pxa25x-nssp", .data = (void *) PXA25x_NSSP },
{ .compatible = "mrvl,pxa27x-ssp", .data = (void *) PXA27x_SSP },
{ .compatible = "mrvl,pxa3xx-ssp", .data = (void *) PXA3xx_SSP },
{ .compatible = "mvrl,pxa168-ssp", .data = (void *) PXA168_SSP },
{ .compatible = "mrvl,pxa910-ssp", .data = (void *) PXA910_SSP },
{ .compatible = "mrvl,ce4100-ssp", .data = (void *) CE4100_SSP },
{ },
};
MODULE_DEVICE_TABLE(of, pxa_ssp_of_ids);
#endif
static int pxa_ssp_probe(struct platform_device *pdev)
{
struct resource *res;
struct ssp_device *ssp;
struct device *dev = &pdev->dev;
ssp = devm_kzalloc(dev, sizeof(struct ssp_device), GFP_KERNEL);
if (ssp == NULL)
return -ENOMEM;
ssp->dev = dev;
ssp->clk = devm_clk_get(dev, NULL);
if (IS_ERR(ssp->clk))
return PTR_ERR(ssp->clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(dev, "no memory resource defined\n");
return -ENODEV;
}
res = devm_request_mem_region(dev, res->start, resource_size(res),
pdev->name);
if (res == NULL) {
dev_err(dev, "failed to request memory resource\n");
return -EBUSY;
}
ssp->phys_base = res->start;
ssp->mmio_base = devm_ioremap(dev, res->start, resource_size(res));
if (ssp->mmio_base == NULL) {
dev_err(dev, "failed to ioremap() registers\n");
return -ENODEV;
}
ssp->irq = platform_get_irq(pdev, 0);
if (ssp->irq < 0)
return -ENODEV;
if (dev->of_node) {
const struct of_device_id *id =
of_match_device(of_match_ptr(pxa_ssp_of_ids), dev);
ssp->type = (int) id->data;
} else {
const struct platform_device_id *id =
platform_get_device_id(pdev);
ssp->type = (int) id->driver_data;
/* PXA2xx/3xx SSP ports start from 1 while the internal pdev->id
 * starts from 0, so do the translation here
 */
ssp->port_id = pdev->id + 1;
}
ssp->use_count = 0;
ssp->of_node = dev->of_node;
mutex_lock(&ssp_lock);
list_add(&ssp->node, &ssp_list);
mutex_unlock(&ssp_lock);
platform_set_drvdata(pdev, ssp);
return 0;
}
static int pxa_ssp_remove(struct platform_device *pdev)
{
struct ssp_device *ssp = platform_get_drvdata(pdev);
mutex_lock(&ssp_lock);
list_del(&ssp->node);
mutex_unlock(&ssp_lock);
return 0;
}
static const struct platform_device_id ssp_id_table[] = {
{ "pxa25x-ssp", PXA25x_SSP },
{ "pxa25x-nssp", PXA25x_NSSP },
{ "pxa27x-ssp", PXA27x_SSP },
{ "pxa3xx-ssp", PXA3xx_SSP },
{ "pxa168-ssp", PXA168_SSP },
{ "pxa910-ssp", PXA910_SSP },
{ },
};
static struct platform_driver pxa_ssp_driver = {
.probe = pxa_ssp_probe,
.remove = pxa_ssp_remove,
.driver = {
.name = "pxa2xx-ssp",
.of_match_table = of_match_ptr(pxa_ssp_of_ids),
},
.id_table = ssp_id_table,
};
static int __init pxa_ssp_init(void)
{
return platform_driver_register(&pxa_ssp_driver);
}
static void __exit pxa_ssp_exit(void)
{
platform_driver_unregister(&pxa_ssp_driver);
}
arch_initcall(pxa_ssp_init);
module_exit(pxa_ssp_exit);
MODULE_DESCRIPTION("PXA SSP driver");
MODULE_AUTHOR("Liam Girdwood");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/soc/pxa/ssp.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/plat-pxa/mfp.c
*
* Multi-Function Pin Support
*
* Copyright (C) 2007 Marvell International Ltd.
*
* 2007-08-21: eric miao <[email protected]>
* initial version
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/soc/pxa/mfp.h>
#define MFPR_SIZE (PAGE_SIZE)
/* MFPR register bit definitions */
#define MFPR_PULL_SEL (0x1 << 15)
#define MFPR_PULLUP_EN (0x1 << 14)
#define MFPR_PULLDOWN_EN (0x1 << 13)
#define MFPR_SLEEP_SEL (0x1 << 9)
#define MFPR_SLEEP_OE_N (0x1 << 7)
#define MFPR_EDGE_CLEAR (0x1 << 6)
#define MFPR_EDGE_FALL_EN (0x1 << 5)
#define MFPR_EDGE_RISE_EN (0x1 << 4)
#define MFPR_SLEEP_DATA(x) ((x) << 8)
#define MFPR_DRIVE(x) (((x) & 0x7) << 10)
#define MFPR_AF_SEL(x) (((x) & 0x7) << 0)
#define MFPR_EDGE_NONE (0)
#define MFPR_EDGE_RISE (MFPR_EDGE_RISE_EN)
#define MFPR_EDGE_FALL (MFPR_EDGE_FALL_EN)
#define MFPR_EDGE_BOTH (MFPR_EDGE_RISE | MFPR_EDGE_FALL)
/*
 * Table that determines the low power mode outputs, with actual settings
 * used in parentheses for don't-care values. Except for the float output,
 * the configured driven and pulled levels match, so if there is a need for
 * non-LPM pulled output, the same configuration could probably be used.
 *
 * Output value    sleep_oe_n  sleep_data  pullup_en  pulldown_en  pull_sel
 *                  (bit 7)     (bit 8)     (bit 14)    (bit 13)   (bit 15)
 *
 * Input               0          X(0)        X(0)        X(0)        0
 * Drive 0             0           0           0          X(1)        0
 * Drive 1             0           1          X(1)         0          0
 * Pull hi (1)         1          X(1)         1           0          0
 * Pull lo (0)         1          X(0)         0           1          0
 * Z (float)           1          X(0)         0           0          0
 */
#define MFPR_LPM_INPUT (0)
#define MFPR_LPM_DRIVE_LOW (MFPR_SLEEP_DATA(0) | MFPR_PULLDOWN_EN)
#define MFPR_LPM_DRIVE_HIGH (MFPR_SLEEP_DATA(1) | MFPR_PULLUP_EN)
#define MFPR_LPM_PULL_LOW (MFPR_LPM_DRIVE_LOW | MFPR_SLEEP_OE_N)
#define MFPR_LPM_PULL_HIGH (MFPR_LPM_DRIVE_HIGH | MFPR_SLEEP_OE_N)
#define MFPR_LPM_FLOAT (MFPR_SLEEP_OE_N)
#define MFPR_LPM_MASK (0xe080)
/*
 * The pullup and pulldown state of the MFP pin at run mode is by default
 * determined by the selected alternate function. In case some buggy
 * devices need to override this default behavior, the definitions below
 * indicate the settings of the corresponding MFPR bits.
 *
 * Definition       pull_sel  pullup_en  pulldown_en
 * MFPR_PULL_NONE       0         0          0
 * MFPR_PULL_LOW        1         0          1
 * MFPR_PULL_HIGH       1         1          0
 * MFPR_PULL_BOTH       1         1          1
 * MFPR_PULL_FLOAT      1         0          0
 */
#define MFPR_PULL_NONE (0)
#define MFPR_PULL_LOW (MFPR_PULL_SEL | MFPR_PULLDOWN_EN)
#define MFPR_PULL_BOTH (MFPR_PULL_LOW | MFPR_PULLUP_EN)
#define MFPR_PULL_HIGH (MFPR_PULL_SEL | MFPR_PULLUP_EN)
#define MFPR_PULL_FLOAT (MFPR_PULL_SEL)
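/*
 * For reference, the resulting register values of the definitions above
 * (computed here from the bit positions, shown only for readability):
 * MFPR_PULL_NONE = 0x0000, MFPR_PULL_LOW = 0xa000, MFPR_PULL_HIGH = 0xc000,
 * MFPR_PULL_BOTH = 0xe000, MFPR_PULL_FLOAT = 0x8000.
 */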
/* mfp_spin_lock is used to ensure that MFP register configuration
* (most likely a read-modify-write operation) is atomic, and that
* mfp_table[] is consistent
*/
static DEFINE_SPINLOCK(mfp_spin_lock);
static void __iomem *mfpr_mmio_base;
struct mfp_pin {
unsigned long config; /* -1 for not configured */
unsigned long mfpr_off; /* MFPRxx Register offset */
unsigned long mfpr_run; /* Run-Mode Register Value */
unsigned long mfpr_lpm; /* Low Power Mode Register Value */
};
static struct mfp_pin mfp_table[MFP_PIN_MAX];
/* mapping of MFP_LPM_* definitions to MFPR_LPM_* register bits */
static const unsigned long mfpr_lpm[] = {
MFPR_LPM_INPUT,
MFPR_LPM_DRIVE_LOW,
MFPR_LPM_DRIVE_HIGH,
MFPR_LPM_PULL_LOW,
MFPR_LPM_PULL_HIGH,
MFPR_LPM_FLOAT,
MFPR_LPM_INPUT,
};
/* mapping of MFP_PULL_* definitions to MFPR_PULL_* register bits */
static const unsigned long mfpr_pull[] = {
MFPR_PULL_NONE,
MFPR_PULL_LOW,
MFPR_PULL_HIGH,
MFPR_PULL_BOTH,
MFPR_PULL_FLOAT,
};
/* mapping of MFP_LPM_EDGE_* definitions to MFPR_EDGE_* register bits */
static const unsigned long mfpr_edge[] = {
MFPR_EDGE_NONE,
MFPR_EDGE_RISE,
MFPR_EDGE_FALL,
MFPR_EDGE_BOTH,
};
#define mfpr_readl(off) \
__raw_readl(mfpr_mmio_base + (off))
#define mfpr_writel(off, val) \
__raw_writel(val, mfpr_mmio_base + (off))
#define mfp_configured(p) ((p)->config != -1)
/*
* perform a read-back of any valid MFPR register to make sure the
* previous writings are finished
*/
static unsigned long mfpr_off_readback;
#define mfpr_sync() (void)__raw_readl(mfpr_mmio_base + mfpr_off_readback)
static inline void __mfp_config_run(struct mfp_pin *p)
{
if (mfp_configured(p))
mfpr_writel(p->mfpr_off, p->mfpr_run);
}
static inline void __mfp_config_lpm(struct mfp_pin *p)
{
if (mfp_configured(p)) {
unsigned long mfpr_clr = (p->mfpr_run & ~MFPR_EDGE_BOTH) | MFPR_EDGE_CLEAR;
if (mfpr_clr != p->mfpr_run)
mfpr_writel(p->mfpr_off, mfpr_clr);
if (p->mfpr_lpm != mfpr_clr)
mfpr_writel(p->mfpr_off, p->mfpr_lpm);
}
}
void mfp_config(unsigned long *mfp_cfgs, int num)
{
unsigned long flags;
int i;
spin_lock_irqsave(&mfp_spin_lock, flags);
for (i = 0; i < num; i++, mfp_cfgs++) {
unsigned long tmp, c = *mfp_cfgs;
struct mfp_pin *p;
int pin, af, drv, lpm, edge, pull;
pin = MFP_PIN(c);
BUG_ON(pin >= MFP_PIN_MAX);
p = &mfp_table[pin];
af = MFP_AF(c);
drv = MFP_DS(c);
lpm = MFP_LPM_STATE(c);
edge = MFP_LPM_EDGE(c);
pull = MFP_PULL(c);
/* run-mode pull settings will conflict with MFPR bits of
* low power mode state, calculate mfpr_run and mfpr_lpm
* individually if pull != MFP_PULL_NONE
*/
tmp = MFPR_AF_SEL(af) | MFPR_DRIVE(drv);
if (likely(pull == MFP_PULL_NONE)) {
p->mfpr_run = tmp | mfpr_lpm[lpm] | mfpr_edge[edge];
p->mfpr_lpm = p->mfpr_run;
} else {
p->mfpr_lpm = tmp | mfpr_lpm[lpm] | mfpr_edge[edge];
p->mfpr_run = tmp | mfpr_pull[pull];
}
p->config = c; __mfp_config_run(p);
}
mfpr_sync();
spin_unlock_irqrestore(&mfp_spin_lock, flags);
}
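/*
 * Illustrative sketch (not part of this file): how a board file would
 * typically hand its pin configuration table to mfp_config(). The array
 * contents, the example functions and the MFP_USAGE_EXAMPLE guard are all
 * hypothetical; real tables are built from the platform's MFP_CFG-style
 * macros.
 */
#ifdef MFP_USAGE_EXAMPLE
static unsigned long example_board_pin_config[] __initdata = {
	/* replace with MFP_CFG()-style entries for the board's pins */
	0,
};

static void __init example_board_init_pins(void)
{
	mfp_config(example_board_pin_config,
		   ARRAY_SIZE(example_board_pin_config));
}
#endif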
unsigned long mfp_read(int mfp)
{
unsigned long val, flags;
BUG_ON(mfp < 0 || mfp >= MFP_PIN_MAX);
spin_lock_irqsave(&mfp_spin_lock, flags);
val = mfpr_readl(mfp_table[mfp].mfpr_off);
spin_unlock_irqrestore(&mfp_spin_lock, flags);
return val;
}
void mfp_write(int mfp, unsigned long val)
{
unsigned long flags;
BUG_ON(mfp < 0 || mfp >= MFP_PIN_MAX);
spin_lock_irqsave(&mfp_spin_lock, flags);
mfpr_writel(mfp_table[mfp].mfpr_off, val);
mfpr_sync();
spin_unlock_irqrestore(&mfp_spin_lock, flags);
}
void __init mfp_init_base(void __iomem *mfpr_base)
{
int i;
/* initialize the table with default - unconfigured */
for (i = 0; i < ARRAY_SIZE(mfp_table); i++)
mfp_table[i].config = -1;
mfpr_mmio_base = mfpr_base;
}
void __init mfp_init_addr(struct mfp_addr_map *map)
{
struct mfp_addr_map *p;
unsigned long offset, flags;
int i;
spin_lock_irqsave(&mfp_spin_lock, flags);
/* mfp offset for readback */
mfpr_off_readback = map[0].offset;
for (p = map; p->start != MFP_PIN_INVALID; p++) {
offset = p->offset;
i = p->start;
do {
mfp_table[i].mfpr_off = offset;
mfp_table[i].mfpr_run = 0;
mfp_table[i].mfpr_lpm = 0;
offset += 4; i++;
} while ((i <= p->end) && (p->end != -1));
}
spin_unlock_irqrestore(&mfp_spin_lock, flags);
}
void mfp_config_lpm(void)
{
struct mfp_pin *p = &mfp_table[0];
int pin;
for (pin = 0; pin < ARRAY_SIZE(mfp_table); pin++, p++)
__mfp_config_lpm(p);
}
void mfp_config_run(void)
{
struct mfp_pin *p = &mfp_table[0];
int pin;
for (pin = 0; pin < ARRAY_SIZE(mfp_table); pin++, p++)
__mfp_config_run(p);
}
|
linux-master
|
drivers/soc/pxa/mfp.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx Zynq MPSoC Power Management
*
* Copyright (C) 2014-2019 Xilinx, Inc.
*
* Davorin Mista <[email protected]>
* Jolly Shah <[email protected]>
* Rajan Vaja <[email protected]>
*/
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/firmware/xlnx-event-manager.h>
#include <linux/mailbox/zynqmp-ipi-message.h>
/**
* struct zynqmp_pm_work_struct - Wrapper for struct work_struct
* @callback_work: Work structure
* @args: Callback arguments
*/
struct zynqmp_pm_work_struct {
struct work_struct callback_work;
u32 args[CB_ARG_CNT];
};
static struct zynqmp_pm_work_struct *zynqmp_pm_init_suspend_work;
static struct mbox_chan *rx_chan;
static bool event_registered;
enum pm_suspend_mode {
PM_SUSPEND_MODE_FIRST = 0,
PM_SUSPEND_MODE_STD = PM_SUSPEND_MODE_FIRST,
PM_SUSPEND_MODE_POWER_OFF,
};
#define PM_SUSPEND_MODE_FIRST PM_SUSPEND_MODE_STD
static const char *const suspend_modes[] = {
[PM_SUSPEND_MODE_STD] = "standard",
[PM_SUSPEND_MODE_POWER_OFF] = "power-off",
};
static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD;
static void zynqmp_pm_get_callback_data(u32 *buf)
{
zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf);
}
static void suspend_event_callback(const u32 *payload, void *data)
{
/* First element is callback API ID, others are callback arguments */
if (work_pending(&zynqmp_pm_init_suspend_work->callback_work))
return;
/* Copy callback arguments into work's structure */
memcpy(zynqmp_pm_init_suspend_work->args, &payload[1],
sizeof(zynqmp_pm_init_suspend_work->args));
queue_work(system_unbound_wq, &zynqmp_pm_init_suspend_work->callback_work);
}
static irqreturn_t zynqmp_pm_isr(int irq, void *data)
{
u32 payload[CB_PAYLOAD_SIZE];
zynqmp_pm_get_callback_data(payload);
/* First element is callback API ID, others are callback arguments */
if (payload[0] == PM_INIT_SUSPEND_CB) {
switch (payload[1]) {
case SUSPEND_SYSTEM_SHUTDOWN:
orderly_poweroff(true);
break;
case SUSPEND_POWER_REQUEST:
pm_suspend(PM_SUSPEND_MEM);
break;
default:
pr_err("%s Unsupported InitSuspendCb reason "
"code %d\n", __func__, payload[1]);
}
}
return IRQ_HANDLED;
}
static void ipi_receive_callback(struct mbox_client *cl, void *data)
{
struct zynqmp_ipi_message *msg = (struct zynqmp_ipi_message *)data;
u32 payload[CB_PAYLOAD_SIZE];
int ret;
memcpy(payload, msg->data, sizeof(msg->len));
/* First element is callback API ID, others are callback arguments */
if (payload[0] == PM_INIT_SUSPEND_CB) {
if (work_pending(&zynqmp_pm_init_suspend_work->callback_work))
return;
/* Copy callback arguments into work's structure */
memcpy(zynqmp_pm_init_suspend_work->args, &payload[1],
sizeof(zynqmp_pm_init_suspend_work->args));
queue_work(system_unbound_wq,
&zynqmp_pm_init_suspend_work->callback_work);
/* Send NULL message to mbox controller to ack the message */
ret = mbox_send_message(rx_chan, NULL);
if (ret)
pr_err("IPI ack failed. Error %d\n", ret);
}
}
/**
* zynqmp_pm_init_suspend_work_fn - Initialize suspend
* @work: Pointer to work_struct
*
* Bottom-half of PM callback IRQ handler.
*/
static void zynqmp_pm_init_suspend_work_fn(struct work_struct *work)
{
struct zynqmp_pm_work_struct *pm_work =
container_of(work, struct zynqmp_pm_work_struct, callback_work);
if (pm_work->args[0] == SUSPEND_SYSTEM_SHUTDOWN) {
orderly_poweroff(true);
} else if (pm_work->args[0] == SUSPEND_POWER_REQUEST) {
pm_suspend(PM_SUSPEND_MEM);
} else {
pr_err("%s Unsupported InitSuspendCb reason code %d.\n",
__func__, pm_work->args[0]);
}
}
static ssize_t suspend_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
char *s = buf;
int md;
for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++)
if (suspend_modes[md]) {
if (md == suspend_mode)
s += sprintf(s, "[%s] ", suspend_modes[md]);
else
s += sprintf(s, "%s ", suspend_modes[md]);
}
/* Convert last space to newline */
if (s != buf)
*(s - 1) = '\n';
return (s - buf);
}
static ssize_t suspend_mode_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int md, ret = -EINVAL;
for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++)
if (suspend_modes[md] &&
sysfs_streq(suspend_modes[md], buf)) {
ret = 0;
break;
}
if (!ret && md != suspend_mode) {
ret = zynqmp_pm_set_suspend_mode(md);
if (likely(!ret))
suspend_mode = md;
}
return ret ? ret : count;
}
static DEVICE_ATTR_RW(suspend_mode);
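/*
 * Usage note (illustrative, not from the original sources): the attribute
 * above is expected to appear as a "suspend_mode" file under the platform
 * device's sysfs directory. Reading it lists the modes with the active one
 * in brackets, e.g. "[standard] power-off"; writing "power-off" switches
 * the mode via zynqmp_pm_set_suspend_mode(). The exact sysfs path depends
 * on how the device is instantiated, so it is not spelled out here.
 */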
static int zynqmp_pm_probe(struct platform_device *pdev)
{
int ret, irq;
u32 pm_api_version;
struct mbox_client *client;
zynqmp_pm_get_api_version(&pm_api_version);
/* Check PM API version number */
if (pm_api_version < ZYNQMP_PM_VERSION)
return -ENODEV;
/*
 * First try to use the Xilinx Event Manager by registering
 * suspend_event_callback for the suspend/shutdown event.
 * If xlnx_register_event() returns -EACCES (Xilinx Event Manager is not
 * available to use) or -ENODEV (Xilinx Event Manager not compiled in),
 * fall back to the IPI mailbox or interrupt method.
 */
ret = xlnx_register_event(PM_INIT_SUSPEND_CB, 0, 0, false,
suspend_event_callback, NULL);
if (!ret) {
zynqmp_pm_init_suspend_work = devm_kzalloc(&pdev->dev,
sizeof(struct zynqmp_pm_work_struct),
GFP_KERNEL);
if (!zynqmp_pm_init_suspend_work) {
xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0,
suspend_event_callback, NULL);
return -ENOMEM;
}
event_registered = true;
INIT_WORK(&zynqmp_pm_init_suspend_work->callback_work,
zynqmp_pm_init_suspend_work_fn);
} else if (ret != -EACCES && ret != -ENODEV) {
dev_err(&pdev->dev, "Failed to Register with Xilinx Event manager %d\n", ret);
return ret;
} else if (of_property_present(pdev->dev.of_node, "mboxes")) {
zynqmp_pm_init_suspend_work =
devm_kzalloc(&pdev->dev,
sizeof(struct zynqmp_pm_work_struct),
GFP_KERNEL);
if (!zynqmp_pm_init_suspend_work)
return -ENOMEM;
INIT_WORK(&zynqmp_pm_init_suspend_work->callback_work,
zynqmp_pm_init_suspend_work_fn);
client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
if (!client)
return -ENOMEM;
client->dev = &pdev->dev;
client->rx_callback = ipi_receive_callback;
rx_chan = mbox_request_channel_byname(client, "rx");
if (IS_ERR(rx_chan)) {
dev_err(&pdev->dev, "Failed to request rx channel\n");
return PTR_ERR(rx_chan);
}
} else if (of_property_present(pdev->dev.of_node, "interrupts")) {
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
zynqmp_pm_isr,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
dev_name(&pdev->dev),
&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "devm_request_threaded_irq '%d' "
"failed with %d\n", irq, ret);
return ret;
}
} else {
dev_err(&pdev->dev, "Required property not found in DT node\n");
return -ENOENT;
}
ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
if (ret) {
if (event_registered) {
xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback,
NULL);
event_registered = false;
}
dev_err(&pdev->dev, "unable to create sysfs interface\n");
return ret;
}
return 0;
}
static int zynqmp_pm_remove(struct platform_device *pdev)
{
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
if (event_registered)
xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback, NULL);
if (rx_chan)
mbox_free_channel(rx_chan);
return 0;
}
static const struct of_device_id pm_of_match[] = {
{ .compatible = "xlnx,zynqmp-power", },
{ /* end of table */ },
};
MODULE_DEVICE_TABLE(of, pm_of_match);
static struct platform_driver zynqmp_pm_platform_driver = {
.probe = zynqmp_pm_probe,
.remove = zynqmp_pm_remove,
.driver = {
.name = "zynqmp_power",
.of_match_table = pm_of_match,
},
};
module_platform_driver(zynqmp_pm_platform_driver);
|
linux-master
|
drivers/soc/xilinx/zynqmp_power.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx Event Management Driver
*
* Copyright (C) 2021 Xilinx, Inc.
*
* Abhyuday Godhasara <[email protected]>
*/
#include <linux/cpuhotplug.h>
#include <linux/firmware/xlnx-event-manager.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/hashtable.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
static DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number1);
static int virq_sgi;
static int event_manager_availability = -EACCES;
/* SGI number used for Event management driver */
#define XLNX_EVENT_SGI_NUM (15)
/* Maximum number of drivers that can register for the same event */
#define MAX_DRIVER_PER_EVENT (10U)
/* Max HashMap Order for PM API feature check (1<<7 = 128) */
#define REGISTERED_DRIVER_MAX_ORDER (7)
#define MAX_BITS (32U) /* Number of bits available for error mask */
#define FIRMWARE_VERSION_MASK (0xFFFFU)
#define REGISTER_NOTIFIER_FIRMWARE_VERSION (2U)
static DEFINE_HASHTABLE(reg_driver_map, REGISTERED_DRIVER_MAX_ORDER);
static int sgi_num = XLNX_EVENT_SGI_NUM;
static bool is_need_to_unregister;
/**
 * struct agent_cb - Registered callback function and private data.
 * @agent_data: Data passed back to the handler function.
 * @eve_cb: Function pointer to store the callback function.
 * @list: list_head used to link this entry into the callback list.
 */
struct agent_cb {
void *agent_data;
event_cb_func_t eve_cb;
struct list_head list;
};
/**
 * struct registered_event_data - Registered Event Data.
 * @key: Combined id (Node-Id | Event-Id) of type u64, with the upper u32
 *       holding the Node-Id and the lower u32 holding the Event-Id. It is
 *       used as the key to index into the hashmap.
 * @cb_type: Type of API callback, like PM_NOTIFY_CB, etc.
 * @wake: If this flag is set, the firmware will wake up the processor if
 *        it is in sleep or power-down state.
 * @cb_list_head: Head of the callback data list, which contains the
 *                information about registered handlers and private data.
 * @hentry: hlist_node that hooks this entry into the hashtable.
 */
struct registered_event_data {
u64 key;
enum pm_api_cb_id cb_type;
bool wake;
struct list_head cb_list_head;
struct hlist_node hentry;
};
static bool xlnx_is_error_event(const u32 node_id)
{
if (node_id == EVENT_ERROR_PMC_ERR1 ||
node_id == EVENT_ERROR_PMC_ERR2 ||
node_id == EVENT_ERROR_PSM_ERR1 ||
node_id == EVENT_ERROR_PSM_ERR2)
return true;
return false;
}
static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, const bool wake,
event_cb_func_t cb_fun, void *data)
{
u64 key = 0;
bool present_in_hash = false;
struct registered_event_data *eve_data;
struct agent_cb *cb_data;
struct agent_cb *cb_pos;
struct agent_cb *cb_next;
key = ((u64)node_id << 32U) | (u64)event;
/* Check for existing entry in hash table for given key id */
hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
if (eve_data->key == key) {
present_in_hash = true;
break;
}
}
if (!present_in_hash) {
/* Add new entry if not present in HASH table */
eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
if (!eve_data)
return -ENOMEM;
eve_data->key = key;
eve_data->cb_type = PM_NOTIFY_CB;
eve_data->wake = wake;
INIT_LIST_HEAD(&eve_data->cb_list_head);
cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
if (!cb_data) {
kfree(eve_data);
return -ENOMEM;
}
cb_data->eve_cb = cb_fun;
cb_data->agent_data = data;
/* Add into callback list */
list_add(&cb_data->list, &eve_data->cb_list_head);
/* Add into HASH table */
hash_add(reg_driver_map, &eve_data->hentry, key);
} else {
/* Search for callback function and private data in list */
list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
if (cb_pos->eve_cb == cb_fun &&
cb_pos->agent_data == data) {
return 0;
}
}
/* Add the additional handler and its private data to the list */
cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
if (!cb_data)
return -ENOMEM;
cb_data->eve_cb = cb_fun;
cb_data->agent_data = data;
list_add(&cb_data->list, &eve_data->cb_list_head);
}
return 0;
}
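/*
 * Illustrative note on the hash key layout used above: the 64-bit key packs
 * the node id into the upper 32 bits and the event mask into the lower
 * 32 bits. For a hypothetical node_id of 0x28100000 and event bit 0x1 the
 * key becomes 0x2810000000000001, so distinct node/event pairs land in
 * distinct hash entries.
 */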
static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data)
{
struct registered_event_data *eve_data;
struct agent_cb *cb_data;
/* Check for existing entry in hash table for given cb_type */
hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) {
if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
pr_err("Found as already registered\n");
return -EINVAL;
}
}
/* Add new entry if not present */
eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
if (!eve_data)
return -ENOMEM;
eve_data->key = 0;
eve_data->cb_type = PM_INIT_SUSPEND_CB;
INIT_LIST_HEAD(&eve_data->cb_list_head);
cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
if (!cb_data)
return -ENOMEM;
cb_data->eve_cb = cb_fun;
cb_data->agent_data = data;
/* Add into callback list */
list_add(&cb_data->list, &eve_data->cb_list_head);
hash_add(reg_driver_map, &eve_data->hentry, PM_INIT_SUSPEND_CB);
return 0;
}
static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun)
{
bool is_callback_found = false;
struct registered_event_data *eve_data;
struct agent_cb *cb_pos;
struct agent_cb *cb_next;
struct hlist_node *tmp;
is_need_to_unregister = false;
/* Check for existing entry in hash table for given cb_type */
hash_for_each_possible_safe(reg_driver_map, eve_data, tmp, hentry, PM_INIT_SUSPEND_CB) {
if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
/* Delete the list of callback */
list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
if (cb_pos->eve_cb == cb_fun) {
is_callback_found = true;
list_del_init(&cb_pos->list);
kfree(cb_pos);
}
}
/* remove an object from a hashtable */
hash_del(&eve_data->hentry);
kfree(eve_data);
is_need_to_unregister = true;
}
}
if (!is_callback_found) {
pr_warn("Didn't find any registered callback for suspend event\n");
return -EINVAL;
}
return 0;
}
static int xlnx_remove_cb_for_notify_event(const u32 node_id, const u32 event,
event_cb_func_t cb_fun, void *data)
{
bool is_callback_found = false;
struct registered_event_data *eve_data;
u64 key = ((u64)node_id << 32U) | (u64)event;
struct agent_cb *cb_pos;
struct agent_cb *cb_next;
struct hlist_node *tmp;
is_need_to_unregister = false;
/* Check for existing entry in hash table for given key id */
hash_for_each_possible_safe(reg_driver_map, eve_data, tmp, hentry, key) {
if (eve_data->key == key) {
/* Delete the list of callback */
list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
if (cb_pos->eve_cb == cb_fun &&
cb_pos->agent_data == data) {
is_callback_found = true;
list_del_init(&cb_pos->list);
kfree(cb_pos);
}
}
/* Remove HASH table if callback list is empty */
if (list_empty(&eve_data->cb_list_head)) {
/* remove an object from a HASH table */
hash_del(&eve_data->hentry);
kfree(eve_data);
is_need_to_unregister = true;
}
}
}
if (!is_callback_found) {
pr_warn("Didn't find any registered callback for 0x%x 0x%x\n",
node_id, event);
return -EINVAL;
}
return 0;
}
/**
* xlnx_register_event() - Register for the event.
* @cb_type: Type of callback from pm_api_cb_id,
* PM_NOTIFY_CB - for Error Events,
* PM_INIT_SUSPEND_CB - for suspend callback.
* @node_id: Node-Id related to event.
* @event: Event Mask for the Error Event.
* @wake: Flag specifying whether the subsystem should be woken upon
* event notification.
* @cb_fun: Function pointer to store the callback function.
* @data: Pointer for the driver instance.
*
* Return: Returns 0 on successful registration else error code.
*/
int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
const bool wake, event_cb_func_t cb_fun, void *data)
{
int ret = 0;
u32 eve;
int pos;
if (event_manager_availability)
return event_manager_availability;
if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) {
pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type);
return -EINVAL;
}
if (!cb_fun)
return -EFAULT;
if (cb_type == PM_INIT_SUSPEND_CB) {
ret = xlnx_add_cb_for_suspend(cb_fun, data);
} else {
if (!xlnx_is_error_event(node_id)) {
/* Add entry for Node-Id/Event in hash table */
ret = xlnx_add_cb_for_notify_event(node_id, event, wake, cb_fun, data);
} else {
/* Add into Hash table */
for (pos = 0; pos < MAX_BITS; pos++) {
eve = event & (1 << pos);
if (!eve)
continue;
/* Add entry for Node-Id/Eve in hash table */
ret = xlnx_add_cb_for_notify_event(node_id, eve, wake, cb_fun,
data);
/* Break the loop if got error */
if (ret)
break;
}
if (ret) {
/* Skip the event that returned the error */
pos--;
/* Remove events registered during this call from the hash table */
for ( ; pos >= 0; pos--) {
eve = event & (1 << pos);
if (!eve)
continue;
xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
}
}
}
if (ret) {
pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id,
event, ret);
return ret;
}
/* Register for Node-Id/Event combination in firmware */
ret = zynqmp_pm_register_notifier(node_id, event, wake, true);
if (ret) {
pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id,
event, ret);
/* Remove already registered event from hash table */
if (xlnx_is_error_event(node_id)) {
for (pos = 0; pos < MAX_BITS; pos++) {
eve = event & (1 << pos);
if (!eve)
continue;
xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
}
} else {
xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data);
}
return ret;
}
}
return ret;
}
EXPORT_SYMBOL_GPL(xlnx_register_event);
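/*
 * Illustrative sketch (not part of this driver): how a client driver might
 * register for an error event with the API exported above. The event mask,
 * callback body and the XLNX_EVENT_USAGE_EXAMPLE guard are hypothetical;
 * the block is compiled out by default.
 */
#ifdef XLNX_EVENT_USAGE_EXAMPLE
static void example_err_cb(const u32 *payload, void *data)
{
	/* payload[0] is the callback API id, payload[1]/[2] node id/event */
	pr_info("error event 0x%x on node 0x%x\n", payload[2], payload[1]);
}

static int example_register(void)
{
	/* Register for bit 0 of the PMC ERR1 error node, no wake-up */
	return xlnx_register_event(PM_NOTIFY_CB, EVENT_ERROR_PMC_ERR1, 0x1,
				   false, example_err_cb, NULL);
}
#endif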
/**
* xlnx_unregister_event() - Unregister for the event.
* @cb_type: Type of callback from pm_api_cb_id,
* PM_NOTIFY_CB - for Error Events,
* PM_INIT_SUSPEND_CB - for suspend callback.
* @node_id: Node-Id related to event.
* @event: Event Mask for the Error Event.
* @cb_fun: Function pointer of callback function.
* @data: Pointer of agent's private data.
*
* Return: Returns 0 on successful unregistration else error code.
*/
int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
event_cb_func_t cb_fun, void *data)
{
int ret = 0;
u32 eve, pos;
is_need_to_unregister = false;
if (event_manager_availability)
return event_manager_availability;
if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) {
pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type);
return -EINVAL;
}
if (!cb_fun)
return -EFAULT;
if (cb_type == PM_INIT_SUSPEND_CB) {
ret = xlnx_remove_cb_for_suspend(cb_fun);
} else {
/* Remove Node-Id/Event from hash table */
if (!xlnx_is_error_event(node_id)) {
xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data);
} else {
for (pos = 0; pos < MAX_BITS; pos++) {
eve = event & (1 << pos);
if (!eve)
continue;
xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
}
}
/* Un-register if list is empty */
if (is_need_to_unregister) {
/* Un-register for Node-Id/Event combination */
ret = zynqmp_pm_register_notifier(node_id, event, false, false);
if (ret) {
pr_err("%s() failed for 0x%x and 0x%x: %d\n",
__func__, node_id, event, ret);
return ret;
}
}
}
return ret;
}
EXPORT_SYMBOL_GPL(xlnx_unregister_event);
static void xlnx_call_suspend_cb_handler(const u32 *payload)
{
bool is_callback_found = false;
struct registered_event_data *eve_data;
u32 cb_type = payload[0];
struct agent_cb *cb_pos;
struct agent_cb *cb_next;
/* Check for existing entry in hash table for given cb_type */
hash_for_each_possible(reg_driver_map, eve_data, hentry, cb_type) {
if (eve_data->cb_type == cb_type) {
list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
cb_pos->eve_cb(&payload[0], cb_pos->agent_data);
is_callback_found = true;
}
}
}
if (!is_callback_found)
pr_warn("Didn't find any registered callback for suspend event\n");
}
static void xlnx_call_notify_cb_handler(const u32 *payload)
{
bool is_callback_found = false;
struct registered_event_data *eve_data;
u64 key = ((u64)payload[1] << 32U) | (u64)payload[2];
int ret;
struct agent_cb *cb_pos;
struct agent_cb *cb_next;
/* Check for existing entry in hash table for given key id */
hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
if (eve_data->key == key) {
list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
cb_pos->eve_cb(&payload[0], cb_pos->agent_data);
is_callback_found = true;
}
/* re-register with firmware to get future events */
ret = zynqmp_pm_register_notifier(payload[1], payload[2],
eve_data->wake, true);
if (ret) {
pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__,
payload[1], payload[2], ret);
list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head,
list) {
/* Remove already registered event from hash table */
xlnx_remove_cb_for_notify_event(payload[1], payload[2],
cb_pos->eve_cb,
cb_pos->agent_data);
}
}
}
}
if (!is_callback_found)
pr_warn("Didn't find any registered callback for 0x%x 0x%x\n",
payload[1], payload[2]);
}
static void xlnx_get_event_callback_data(u32 *buf)
{
zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf);
}
static irqreturn_t xlnx_event_handler(int irq, void *dev_id)
{
u32 cb_type, node_id, event, pos;
u32 payload[CB_MAX_PAYLOAD_SIZE] = {0};
u32 event_data[CB_MAX_PAYLOAD_SIZE] = {0};
/* Get event data */
xlnx_get_event_callback_data(payload);
/* First element is callback type, others are callback arguments */
cb_type = payload[0];
if (cb_type == PM_NOTIFY_CB) {
node_id = payload[1];
event = payload[2];
if (!xlnx_is_error_event(node_id)) {
xlnx_call_notify_cb_handler(payload);
} else {
/*
 * Each callback function expects the payload as its input argument.
 * A single callback can report multiple error events through the
 * error mask, so payload[2] may contain more than one error bit.
 * The reg_driver_map database stores entries per single
 * node_id/error-bit combination, so copy the payload message into
 * event_data, update event_data[2] with the mask of a single error
 * event, and use event_data as the input argument for the registered
 * callback function.
 */
memcpy(event_data, payload, (4 * CB_MAX_PAYLOAD_SIZE));
/* Support Multiple Error Event */
for (pos = 0; pos < MAX_BITS; pos++) {
if (!(event & (1 << pos)))
continue;
event_data[2] = (event & (1 << pos));
xlnx_call_notify_cb_handler(event_data);
}
}
} else if (cb_type == PM_INIT_SUSPEND_CB) {
xlnx_call_suspend_cb_handler(payload);
} else {
pr_err("%s() Unsupported Callback %d\n", __func__, cb_type);
}
return IRQ_HANDLED;
}
static int xlnx_event_cpuhp_start(unsigned int cpu)
{
enable_percpu_irq(virq_sgi, IRQ_TYPE_NONE);
return 0;
}
static int xlnx_event_cpuhp_down(unsigned int cpu)
{
disable_percpu_irq(virq_sgi);
return 0;
}
static void xlnx_disable_percpu_irq(void *data)
{
disable_percpu_irq(virq_sgi);
}
static int xlnx_event_init_sgi(struct platform_device *pdev)
{
int ret = 0;
int cpu = smp_processor_id();
/*
 * IRQ related structures are used for the following:
 * for each SGI interrupt, ensure it is mapped by the GIC IRQ domain
 * and that each corresponding Linux IRQ for the HW IRQ has a handler
 * for interrupts received from the remote processor.
 */
struct irq_domain *domain;
struct irq_fwspec sgi_fwspec;
struct device_node *interrupt_parent = NULL;
struct device *parent = pdev->dev.parent;
/* Find GIC controller to map SGIs. */
interrupt_parent = of_irq_find_parent(parent->of_node);
if (!interrupt_parent) {
dev_err(&pdev->dev, "Failed to find property for Interrupt parent\n");
return -EINVAL;
}
/* Each SGI needs to be associated with GIC's IRQ domain. */
domain = irq_find_host(interrupt_parent);
of_node_put(interrupt_parent);
/* Each mapping needs GIC domain when finding IRQ mapping. */
sgi_fwspec.fwnode = domain->fwnode;
/*
* When irq domain looks at mapping each arg is as follows:
* 3 args for: interrupt type (SGI), interrupt # (set later), type
*/
sgi_fwspec.param_count = 1;
/* Set SGI's hwirq */
sgi_fwspec.param[0] = sgi_num;
virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec);
per_cpu(cpu_number1, cpu) = cpu;
ret = request_percpu_irq(virq_sgi, xlnx_event_handler, "xlnx_event_mgmt",
&cpu_number1);
WARN_ON(ret);
if (ret) {
irq_dispose_mapping(virq_sgi);
return ret;
}
irq_to_desc(virq_sgi);
irq_set_status_flags(virq_sgi, IRQ_PER_CPU);
return ret;
}
static void xlnx_event_cleanup_sgi(struct platform_device *pdev)
{
int cpu = smp_processor_id();
per_cpu(cpu_number1, cpu) = cpu;
cpuhp_remove_state(CPUHP_AP_ONLINE_DYN);
on_each_cpu(xlnx_disable_percpu_irq, NULL, 1);
irq_clear_status_flags(virq_sgi, IRQ_PER_CPU);
free_percpu_irq(virq_sgi, &cpu_number1);
irq_dispose_mapping(virq_sgi);
}
static int xlnx_event_manager_probe(struct platform_device *pdev)
{
int ret;
ret = zynqmp_pm_feature(PM_REGISTER_NOTIFIER);
if (ret < 0) {
dev_err(&pdev->dev, "Feature check failed with %d\n", ret);
return ret;
}
if ((ret & FIRMWARE_VERSION_MASK) <
REGISTER_NOTIFIER_FIRMWARE_VERSION) {
dev_err(&pdev->dev, "Register notifier version error. Expected Firmware: v%d - Found: v%d\n",
REGISTER_NOTIFIER_FIRMWARE_VERSION,
ret & FIRMWARE_VERSION_MASK);
return -EOPNOTSUPP;
}
/* Initialize the SGI */
ret = xlnx_event_init_sgi(pdev);
if (ret) {
dev_err(&pdev->dev, "SGI Init has been failed with %d\n", ret);
return ret;
}
/* Setup function for the CPU hot-plug cases */
cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/event:starting",
xlnx_event_cpuhp_start, xlnx_event_cpuhp_down);
ret = zynqmp_pm_register_sgi(sgi_num, 0);
if (ret) {
dev_err(&pdev->dev, "SGI %d Registration over TF-A failed with %d\n", sgi_num, ret);
xlnx_event_cleanup_sgi(pdev);
return ret;
}
event_manager_availability = 0;
dev_info(&pdev->dev, "SGI %d Registered over TF-A\n", sgi_num);
dev_info(&pdev->dev, "Xilinx Event Management driver probed\n");
return ret;
}
static void xlnx_event_manager_remove(struct platform_device *pdev)
{
int i;
struct registered_event_data *eve_data;
struct hlist_node *tmp;
int ret;
struct agent_cb *cb_pos;
struct agent_cb *cb_next;
hash_for_each_safe(reg_driver_map, i, tmp, eve_data, hentry) {
list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
list_del_init(&cb_pos->list);
kfree(cb_pos);
}
hash_del(&eve_data->hentry);
kfree(eve_data);
}
ret = zynqmp_pm_register_sgi(0, 1);
if (ret)
dev_err(&pdev->dev, "SGI unregistration over TF-A failed with %d\n", ret);
xlnx_event_cleanup_sgi(pdev);
event_manager_availability = -EACCES;
}
static struct platform_driver xlnx_event_manager_driver = {
.probe = xlnx_event_manager_probe,
.remove_new = xlnx_event_manager_remove,
.driver = {
.name = "xlnx_event_manager",
},
};
module_param(sgi_num, uint, 0);
module_platform_driver(xlnx_event_manager_driver);
|
linux-master
|
drivers/soc/xilinx/xlnx_event_manager.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Linaro Ltd.
*
* Author: Linus Walleij <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2, as
* published by the Free Software Foundation.
*
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/of.h>
#define GLOBAL_WORD_ID 0x00
#define GEMINI_GLOBAL_ARB1_CTRL 0x2c
#define GEMINI_ARB1_BURST_MASK GENMASK(21, 16)
#define GEMINI_ARB1_BURST_SHIFT 16
/* These all define the priority on the BUS2 backplane */
#define GEMINI_ARB1_PRIO_MASK GENMASK(9, 0)
#define GEMINI_ARB1_DMAC_HIGH_PRIO BIT(0)
#define GEMINI_ARB1_IDE_HIGH_PRIO BIT(1)
#define GEMINI_ARB1_RAID_HIGH_PRIO BIT(2)
#define GEMINI_ARB1_SECURITY_HIGH_PRIO BIT(3)
#define GEMINI_ARB1_GMAC0_HIGH_PRIO BIT(4)
#define GEMINI_ARB1_GMAC1_HIGH_PRIO BIT(5)
#define GEMINI_ARB1_USB0_HIGH_PRIO BIT(6)
#define GEMINI_ARB1_USB1_HIGH_PRIO BIT(7)
#define GEMINI_ARB1_PCI_HIGH_PRIO BIT(8)
#define GEMINI_ARB1_TVE_HIGH_PRIO BIT(9)
#define GEMINI_DEFAULT_BURST_SIZE 0x20
#define GEMINI_DEFAULT_PRIO (GEMINI_ARB1_GMAC0_HIGH_PRIO | \
GEMINI_ARB1_GMAC1_HIGH_PRIO)
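/*
 * For reference, the default arbitration word programmed below works out
 * to (0x20 << 16) | BIT(4) | BIT(5) = 0x00200030: a burst size of 0x20
 * with the two GMAC ports given high priority on the BUS2 backplane.
 */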
static int __init gemini_soc_init(void)
{
struct regmap *map;
u32 rev;
u32 val;
int ret;
/* Multiplatform guard, only proceed on Gemini */
if (!of_machine_is_compatible("cortina,gemini"))
return 0;
map = syscon_regmap_lookup_by_compatible("cortina,gemini-syscon");
if (IS_ERR(map))
return PTR_ERR(map);
ret = regmap_read(map, GLOBAL_WORD_ID, &rev);
if (ret)
return ret;
val = (GEMINI_DEFAULT_BURST_SIZE << GEMINI_ARB1_BURST_SHIFT) |
GEMINI_DEFAULT_PRIO;
/* Set up system arbitration */
regmap_update_bits(map,
GEMINI_GLOBAL_ARB1_CTRL,
GEMINI_ARB1_BURST_MASK | GEMINI_ARB1_PRIO_MASK,
val);
pr_info("Gemini SoC %04x revision %02x, set arbitration %08x\n",
rev >> 8, rev & 0xff, val);
return 0;
}
subsys_initcall(gemini_soc_init);
|
linux-master
|
drivers/soc/gemini/soc-gemini.c
|
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2018 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
*/
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/bitfield.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/regmap.h>
#include <linux/module.h>
static DEFINE_MUTEX(measure_lock);
#define MSR_CLK_DUTY 0x0
#define MSR_CLK_REG0 0x4
#define MSR_CLK_REG1 0x8
#define MSR_CLK_REG2 0xc
#define MSR_DURATION GENMASK(15, 0)
#define MSR_ENABLE BIT(16)
#define MSR_CONT BIT(17) /* continuous measurement */
#define MSR_INTR BIT(18) /* interrupts */
#define MSR_RUN BIT(19)
#define MSR_CLK_SRC GENMASK(26, 20)
#define MSR_BUSY BIT(31)
#define MSR_VAL_MASK GENMASK(15, 0)
#define DIV_MIN 32
#define DIV_STEP 32
#define DIV_MAX 640
#define CLK_MSR_MAX 128
struct meson_msr_id {
struct meson_msr *priv;
unsigned int id;
const char *name;
};
struct meson_msr {
struct regmap *regmap;
struct meson_msr_id msr_table[CLK_MSR_MAX];
};
#define CLK_MSR_ID(__id, __name) \
[__id] = {.id = __id, .name = __name,}
static struct meson_msr_id clk_msr_m8[CLK_MSR_MAX] = {
CLK_MSR_ID(0, "ring_osc_out_ee0"),
CLK_MSR_ID(1, "ring_osc_out_ee1"),
CLK_MSR_ID(2, "ring_osc_out_ee2"),
CLK_MSR_ID(3, "a9_ring_osck"),
CLK_MSR_ID(6, "vid_pll"),
CLK_MSR_ID(7, "clk81"),
CLK_MSR_ID(8, "encp"),
CLK_MSR_ID(9, "encl"),
CLK_MSR_ID(11, "eth_rmii"),
CLK_MSR_ID(13, "amclk"),
CLK_MSR_ID(14, "fec_clk_0"),
CLK_MSR_ID(15, "fec_clk_1"),
CLK_MSR_ID(16, "fec_clk_2"),
CLK_MSR_ID(18, "a9_clk_div16"),
CLK_MSR_ID(19, "hdmi_sys"),
CLK_MSR_ID(20, "rtc_osc_clk_out"),
CLK_MSR_ID(21, "i2s_clk_in_src0"),
CLK_MSR_ID(22, "clk_rmii_from_pad"),
CLK_MSR_ID(23, "hdmi_ch0_tmds"),
CLK_MSR_ID(24, "lvds_fifo"),
CLK_MSR_ID(26, "sc_clk_int"),
CLK_MSR_ID(28, "sar_adc"),
CLK_MSR_ID(30, "mpll_clk_test_out"),
CLK_MSR_ID(31, "audac_clkpi"),
CLK_MSR_ID(32, "vdac"),
CLK_MSR_ID(33, "sdhc_rx"),
CLK_MSR_ID(34, "sdhc_sd"),
CLK_MSR_ID(35, "mali"),
CLK_MSR_ID(36, "hdmi_tx_pixel"),
CLK_MSR_ID(38, "vdin_meas"),
CLK_MSR_ID(39, "pcm_sclk"),
CLK_MSR_ID(40, "pcm_mclk"),
CLK_MSR_ID(41, "eth_rx_tx"),
CLK_MSR_ID(42, "pwm_d"),
CLK_MSR_ID(43, "pwm_c"),
CLK_MSR_ID(44, "pwm_b"),
CLK_MSR_ID(45, "pwm_a"),
CLK_MSR_ID(46, "pcm2_sclk"),
CLK_MSR_ID(47, "ddr_dpll_pt"),
CLK_MSR_ID(48, "pwm_f"),
CLK_MSR_ID(49, "pwm_e"),
CLK_MSR_ID(59, "hcodec"),
CLK_MSR_ID(60, "usb_32k_alt"),
CLK_MSR_ID(61, "gpio"),
CLK_MSR_ID(62, "vid2_pll"),
CLK_MSR_ID(63, "mipi_csi_cfg"),
};
static struct meson_msr_id clk_msr_gx[CLK_MSR_MAX] = {
CLK_MSR_ID(0, "ring_osc_out_ee_0"),
CLK_MSR_ID(1, "ring_osc_out_ee_1"),
CLK_MSR_ID(2, "ring_osc_out_ee_2"),
CLK_MSR_ID(3, "a53_ring_osc"),
CLK_MSR_ID(4, "gp0_pll"),
CLK_MSR_ID(6, "enci"),
CLK_MSR_ID(7, "clk81"),
CLK_MSR_ID(8, "encp"),
CLK_MSR_ID(9, "encl"),
CLK_MSR_ID(10, "vdac"),
CLK_MSR_ID(11, "rgmii_tx"),
CLK_MSR_ID(12, "pdm"),
CLK_MSR_ID(13, "amclk"),
CLK_MSR_ID(14, "fec_0"),
CLK_MSR_ID(15, "fec_1"),
CLK_MSR_ID(16, "fec_2"),
CLK_MSR_ID(17, "sys_pll_div16"),
CLK_MSR_ID(18, "sys_cpu_div16"),
CLK_MSR_ID(19, "hdmitx_sys"),
CLK_MSR_ID(20, "rtc_osc_out"),
CLK_MSR_ID(21, "i2s_in_src0"),
CLK_MSR_ID(22, "eth_phy_ref"),
CLK_MSR_ID(23, "hdmi_todig"),
CLK_MSR_ID(26, "sc_int"),
CLK_MSR_ID(28, "sar_adc"),
CLK_MSR_ID(31, "mpll_test_out"),
CLK_MSR_ID(32, "vdec"),
CLK_MSR_ID(35, "mali"),
CLK_MSR_ID(36, "hdmi_tx_pixel"),
CLK_MSR_ID(37, "i958"),
CLK_MSR_ID(38, "vdin_meas"),
CLK_MSR_ID(39, "pcm_sclk"),
CLK_MSR_ID(40, "pcm_mclk"),
CLK_MSR_ID(41, "eth_rx_or_rmii"),
CLK_MSR_ID(42, "mp0_out"),
CLK_MSR_ID(43, "fclk_div5"),
CLK_MSR_ID(44, "pwm_b"),
CLK_MSR_ID(45, "pwm_a"),
CLK_MSR_ID(46, "vpu"),
CLK_MSR_ID(47, "ddr_dpll_pt"),
CLK_MSR_ID(48, "mp1_out"),
CLK_MSR_ID(49, "mp2_out"),
CLK_MSR_ID(50, "mp3_out"),
CLK_MSR_ID(51, "nand_core"),
CLK_MSR_ID(52, "sd_emmc_b"),
CLK_MSR_ID(53, "sd_emmc_a"),
CLK_MSR_ID(55, "vid_pll_div_out"),
CLK_MSR_ID(56, "cci"),
CLK_MSR_ID(57, "wave420l_c"),
CLK_MSR_ID(58, "wave420l_b"),
CLK_MSR_ID(59, "hcodec"),
CLK_MSR_ID(60, "alt_32k"),
CLK_MSR_ID(61, "gpio_msr"),
CLK_MSR_ID(62, "hevc"),
CLK_MSR_ID(66, "vid_lock"),
CLK_MSR_ID(70, "pwm_f"),
CLK_MSR_ID(71, "pwm_e"),
CLK_MSR_ID(72, "pwm_d"),
CLK_MSR_ID(73, "pwm_c"),
CLK_MSR_ID(75, "aoclkx2_int"),
CLK_MSR_ID(76, "aoclk_int"),
CLK_MSR_ID(77, "rng_ring_osc_0"),
CLK_MSR_ID(78, "rng_ring_osc_1"),
CLK_MSR_ID(79, "rng_ring_osc_2"),
CLK_MSR_ID(80, "rng_ring_osc_3"),
CLK_MSR_ID(81, "vapb"),
CLK_MSR_ID(82, "ge2d"),
};
static struct meson_msr_id clk_msr_axg[CLK_MSR_MAX] = {
CLK_MSR_ID(0, "ring_osc_out_ee_0"),
CLK_MSR_ID(1, "ring_osc_out_ee_1"),
CLK_MSR_ID(2, "ring_osc_out_ee_2"),
CLK_MSR_ID(3, "a53_ring_osc"),
CLK_MSR_ID(4, "gp0_pll"),
CLK_MSR_ID(5, "gp1_pll"),
CLK_MSR_ID(7, "clk81"),
CLK_MSR_ID(9, "encl"),
CLK_MSR_ID(17, "sys_pll_div16"),
CLK_MSR_ID(18, "sys_cpu_div16"),
CLK_MSR_ID(20, "rtc_osc_out"),
CLK_MSR_ID(23, "mmc_clk"),
CLK_MSR_ID(28, "sar_adc"),
CLK_MSR_ID(31, "mpll_test_out"),
CLK_MSR_ID(40, "mod_eth_tx_clk"),
CLK_MSR_ID(41, "mod_eth_rx_clk_rmii"),
CLK_MSR_ID(42, "mp0_out"),
CLK_MSR_ID(43, "fclk_div5"),
CLK_MSR_ID(44, "pwm_b"),
CLK_MSR_ID(45, "pwm_a"),
CLK_MSR_ID(46, "vpu"),
CLK_MSR_ID(47, "ddr_dpll_pt"),
CLK_MSR_ID(48, "mp1_out"),
CLK_MSR_ID(49, "mp2_out"),
CLK_MSR_ID(50, "mp3_out"),
CLK_MSR_ID(51, "sd_emmm_c"),
CLK_MSR_ID(52, "sd_emmc_b"),
CLK_MSR_ID(61, "gpio_msr"),
CLK_MSR_ID(66, "audio_slv_lrclk_c"),
CLK_MSR_ID(67, "audio_slv_lrclk_b"),
CLK_MSR_ID(68, "audio_slv_lrclk_a"),
CLK_MSR_ID(69, "audio_slv_sclk_c"),
CLK_MSR_ID(70, "audio_slv_sclk_b"),
CLK_MSR_ID(71, "audio_slv_sclk_a"),
CLK_MSR_ID(72, "pwm_d"),
CLK_MSR_ID(73, "pwm_c"),
CLK_MSR_ID(74, "wifi_beacon"),
CLK_MSR_ID(75, "tdmin_lb_lrcl"),
CLK_MSR_ID(76, "tdmin_lb_sclk"),
CLK_MSR_ID(77, "rng_ring_osc_0"),
CLK_MSR_ID(78, "rng_ring_osc_1"),
CLK_MSR_ID(79, "rng_ring_osc_2"),
CLK_MSR_ID(80, "rng_ring_osc_3"),
CLK_MSR_ID(81, "vapb"),
CLK_MSR_ID(82, "ge2d"),
CLK_MSR_ID(84, "audio_resample"),
CLK_MSR_ID(85, "audio_pdm_sys"),
CLK_MSR_ID(86, "audio_spdifout"),
CLK_MSR_ID(87, "audio_spdifin"),
CLK_MSR_ID(88, "audio_lrclk_f"),
CLK_MSR_ID(89, "audio_lrclk_e"),
CLK_MSR_ID(90, "audio_lrclk_d"),
CLK_MSR_ID(91, "audio_lrclk_c"),
CLK_MSR_ID(92, "audio_lrclk_b"),
CLK_MSR_ID(93, "audio_lrclk_a"),
CLK_MSR_ID(94, "audio_sclk_f"),
CLK_MSR_ID(95, "audio_sclk_e"),
CLK_MSR_ID(96, "audio_sclk_d"),
CLK_MSR_ID(97, "audio_sclk_c"),
CLK_MSR_ID(98, "audio_sclk_b"),
CLK_MSR_ID(99, "audio_sclk_a"),
CLK_MSR_ID(100, "audio_mclk_f"),
CLK_MSR_ID(101, "audio_mclk_e"),
CLK_MSR_ID(102, "audio_mclk_d"),
CLK_MSR_ID(103, "audio_mclk_c"),
CLK_MSR_ID(104, "audio_mclk_b"),
CLK_MSR_ID(105, "audio_mclk_a"),
CLK_MSR_ID(106, "pcie_refclk_n"),
CLK_MSR_ID(107, "pcie_refclk_p"),
CLK_MSR_ID(108, "audio_locker_out"),
CLK_MSR_ID(109, "audio_locker_in"),
};
static struct meson_msr_id clk_msr_g12a[CLK_MSR_MAX] = {
CLK_MSR_ID(0, "ring_osc_out_ee_0"),
CLK_MSR_ID(1, "ring_osc_out_ee_1"),
CLK_MSR_ID(2, "ring_osc_out_ee_2"),
CLK_MSR_ID(3, "sys_cpu_ring_osc"),
CLK_MSR_ID(4, "gp0_pll"),
CLK_MSR_ID(6, "enci"),
CLK_MSR_ID(7, "clk81"),
CLK_MSR_ID(8, "encp"),
CLK_MSR_ID(9, "encl"),
CLK_MSR_ID(10, "vdac"),
CLK_MSR_ID(11, "eth_tx"),
CLK_MSR_ID(12, "hifi_pll"),
CLK_MSR_ID(13, "mod_tcon"),
CLK_MSR_ID(14, "fec_0"),
CLK_MSR_ID(15, "fec_1"),
CLK_MSR_ID(16, "fec_2"),
CLK_MSR_ID(17, "sys_pll_div16"),
CLK_MSR_ID(18, "sys_cpu_div16"),
CLK_MSR_ID(19, "lcd_an_ph2"),
CLK_MSR_ID(20, "rtc_osc_out"),
CLK_MSR_ID(21, "lcd_an_ph3"),
CLK_MSR_ID(22, "eth_phy_ref"),
CLK_MSR_ID(23, "mpll_50m"),
CLK_MSR_ID(24, "eth_125m"),
CLK_MSR_ID(25, "eth_rmii"),
CLK_MSR_ID(26, "sc_int"),
CLK_MSR_ID(27, "in_mac"),
CLK_MSR_ID(28, "sar_adc"),
CLK_MSR_ID(29, "pcie_inp"),
CLK_MSR_ID(30, "pcie_inn"),
CLK_MSR_ID(31, "mpll_test_out"),
CLK_MSR_ID(32, "vdec"),
CLK_MSR_ID(33, "sys_cpu_ring_osc_1"),
CLK_MSR_ID(34, "eth_mpll_50m"),
CLK_MSR_ID(35, "mali"),
CLK_MSR_ID(36, "hdmi_tx_pixel"),
CLK_MSR_ID(37, "cdac"),
CLK_MSR_ID(38, "vdin_meas"),
CLK_MSR_ID(39, "bt656"),
CLK_MSR_ID(41, "eth_rx_or_rmii"),
CLK_MSR_ID(42, "mp0_out"),
CLK_MSR_ID(43, "fclk_div5"),
CLK_MSR_ID(44, "pwm_b"),
CLK_MSR_ID(45, "pwm_a"),
CLK_MSR_ID(46, "vpu"),
CLK_MSR_ID(47, "ddr_dpll_pt"),
CLK_MSR_ID(48, "mp1_out"),
CLK_MSR_ID(49, "mp2_out"),
CLK_MSR_ID(50, "mp3_out"),
CLK_MSR_ID(51, "sd_emmc_c"),
CLK_MSR_ID(52, "sd_emmc_b"),
CLK_MSR_ID(53, "sd_emmc_a"),
CLK_MSR_ID(54, "vpu_clkc"),
CLK_MSR_ID(55, "vid_pll_div_out"),
CLK_MSR_ID(56, "wave420l_a"),
CLK_MSR_ID(57, "wave420l_c"),
CLK_MSR_ID(58, "wave420l_b"),
CLK_MSR_ID(59, "hcodec"),
CLK_MSR_ID(61, "gpio_msr"),
CLK_MSR_ID(62, "hevcb"),
CLK_MSR_ID(63, "dsi_meas"),
CLK_MSR_ID(64, "spicc_1"),
CLK_MSR_ID(65, "spicc_0"),
CLK_MSR_ID(66, "vid_lock"),
CLK_MSR_ID(67, "dsi_phy"),
CLK_MSR_ID(68, "hdcp22_esm"),
CLK_MSR_ID(69, "hdcp22_skp"),
CLK_MSR_ID(70, "pwm_f"),
CLK_MSR_ID(71, "pwm_e"),
CLK_MSR_ID(72, "pwm_d"),
CLK_MSR_ID(73, "pwm_c"),
CLK_MSR_ID(75, "hevcf"),
CLK_MSR_ID(77, "rng_ring_osc_0"),
CLK_MSR_ID(78, "rng_ring_osc_1"),
CLK_MSR_ID(79, "rng_ring_osc_2"),
CLK_MSR_ID(80, "rng_ring_osc_3"),
CLK_MSR_ID(81, "vapb"),
CLK_MSR_ID(82, "ge2d"),
CLK_MSR_ID(83, "co_rx"),
CLK_MSR_ID(84, "co_tx"),
CLK_MSR_ID(89, "hdmi_todig"),
CLK_MSR_ID(90, "hdmitx_sys"),
CLK_MSR_ID(91, "sys_cpub_div16"),
CLK_MSR_ID(92, "sys_pll_cpub_div16"),
CLK_MSR_ID(94, "eth_phy_rx"),
CLK_MSR_ID(95, "eth_phy_pll"),
CLK_MSR_ID(96, "vpu_b"),
CLK_MSR_ID(97, "cpu_b_tmp"),
CLK_MSR_ID(98, "ts"),
CLK_MSR_ID(99, "ring_osc_out_ee_3"),
CLK_MSR_ID(100, "ring_osc_out_ee_4"),
CLK_MSR_ID(101, "ring_osc_out_ee_5"),
CLK_MSR_ID(102, "ring_osc_out_ee_6"),
CLK_MSR_ID(103, "ring_osc_out_ee_7"),
CLK_MSR_ID(104, "ring_osc_out_ee_8"),
CLK_MSR_ID(105, "ring_osc_out_ee_9"),
CLK_MSR_ID(106, "ephy_test"),
CLK_MSR_ID(107, "au_dac_g128x"),
CLK_MSR_ID(108, "audio_locker_out"),
CLK_MSR_ID(109, "audio_locker_in"),
CLK_MSR_ID(110, "audio_tdmout_c_sclk"),
CLK_MSR_ID(111, "audio_tdmout_b_sclk"),
CLK_MSR_ID(112, "audio_tdmout_a_sclk"),
CLK_MSR_ID(113, "audio_tdmin_lb_sclk"),
CLK_MSR_ID(114, "audio_tdmin_c_sclk"),
CLK_MSR_ID(115, "audio_tdmin_b_sclk"),
CLK_MSR_ID(116, "audio_tdmin_a_sclk"),
CLK_MSR_ID(117, "audio_resample"),
CLK_MSR_ID(118, "audio_pdm_sys"),
CLK_MSR_ID(119, "audio_spdifout_b"),
CLK_MSR_ID(120, "audio_spdifout"),
CLK_MSR_ID(121, "audio_spdifin"),
CLK_MSR_ID(122, "audio_pdm_dclk"),
};
static struct meson_msr_id clk_msr_sm1[CLK_MSR_MAX] = {
CLK_MSR_ID(0, "ring_osc_out_ee_0"),
CLK_MSR_ID(1, "ring_osc_out_ee_1"),
CLK_MSR_ID(2, "ring_osc_out_ee_2"),
CLK_MSR_ID(3, "ring_osc_out_ee_3"),
CLK_MSR_ID(4, "gp0_pll"),
CLK_MSR_ID(5, "gp1_pll"),
CLK_MSR_ID(6, "enci"),
CLK_MSR_ID(7, "clk81"),
CLK_MSR_ID(8, "encp"),
CLK_MSR_ID(9, "encl"),
CLK_MSR_ID(10, "vdac"),
CLK_MSR_ID(11, "eth_tx"),
CLK_MSR_ID(12, "hifi_pll"),
CLK_MSR_ID(13, "mod_tcon"),
CLK_MSR_ID(14, "fec_0"),
CLK_MSR_ID(15, "fec_1"),
CLK_MSR_ID(16, "fec_2"),
CLK_MSR_ID(17, "sys_pll_div16"),
CLK_MSR_ID(18, "sys_cpu_div16"),
CLK_MSR_ID(19, "lcd_an_ph2"),
CLK_MSR_ID(20, "rtc_osc_out"),
CLK_MSR_ID(21, "lcd_an_ph3"),
CLK_MSR_ID(22, "eth_phy_ref"),
CLK_MSR_ID(23, "mpll_50m"),
CLK_MSR_ID(24, "eth_125m"),
CLK_MSR_ID(25, "eth_rmii"),
CLK_MSR_ID(26, "sc_int"),
CLK_MSR_ID(27, "in_mac"),
CLK_MSR_ID(28, "sar_adc"),
CLK_MSR_ID(29, "pcie_inp"),
CLK_MSR_ID(30, "pcie_inn"),
CLK_MSR_ID(31, "mpll_test_out"),
CLK_MSR_ID(32, "vdec"),
CLK_MSR_ID(34, "eth_mpll_50m"),
CLK_MSR_ID(35, "mali"),
CLK_MSR_ID(36, "hdmi_tx_pixel"),
CLK_MSR_ID(37, "cdac"),
CLK_MSR_ID(38, "vdin_meas"),
CLK_MSR_ID(39, "bt656"),
CLK_MSR_ID(40, "arm_ring_osc_out_4"),
CLK_MSR_ID(41, "eth_rx_or_rmii"),
CLK_MSR_ID(42, "mp0_out"),
CLK_MSR_ID(43, "fclk_div5"),
CLK_MSR_ID(44, "pwm_b"),
CLK_MSR_ID(45, "pwm_a"),
CLK_MSR_ID(46, "vpu"),
CLK_MSR_ID(47, "ddr_dpll_pt"),
CLK_MSR_ID(48, "mp1_out"),
CLK_MSR_ID(49, "mp2_out"),
CLK_MSR_ID(50, "mp3_out"),
CLK_MSR_ID(51, "sd_emmc_c"),
CLK_MSR_ID(52, "sd_emmc_b"),
CLK_MSR_ID(53, "sd_emmc_a"),
CLK_MSR_ID(54, "vpu_clkc"),
CLK_MSR_ID(55, "vid_pll_div_out"),
CLK_MSR_ID(56, "wave420l_a"),
CLK_MSR_ID(57, "wave420l_c"),
CLK_MSR_ID(58, "wave420l_b"),
CLK_MSR_ID(59, "hcodec"),
CLK_MSR_ID(60, "arm_ring_osc_out_5"),
CLK_MSR_ID(61, "gpio_msr"),
CLK_MSR_ID(62, "hevcb"),
CLK_MSR_ID(63, "dsi_meas"),
CLK_MSR_ID(64, "spicc_1"),
CLK_MSR_ID(65, "spicc_0"),
CLK_MSR_ID(66, "vid_lock"),
CLK_MSR_ID(67, "dsi_phy"),
CLK_MSR_ID(68, "hdcp22_esm"),
CLK_MSR_ID(69, "hdcp22_skp"),
CLK_MSR_ID(70, "pwm_f"),
CLK_MSR_ID(71, "pwm_e"),
CLK_MSR_ID(72, "pwm_d"),
CLK_MSR_ID(73, "pwm_c"),
CLK_MSR_ID(74, "arm_ring_osc_out_6"),
CLK_MSR_ID(75, "hevcf"),
CLK_MSR_ID(76, "arm_ring_osc_out_7"),
CLK_MSR_ID(77, "rng_ring_osc_0"),
CLK_MSR_ID(78, "rng_ring_osc_1"),
CLK_MSR_ID(79, "rng_ring_osc_2"),
CLK_MSR_ID(80, "rng_ring_osc_3"),
CLK_MSR_ID(81, "vapb"),
CLK_MSR_ID(82, "ge2d"),
CLK_MSR_ID(83, "co_rx"),
CLK_MSR_ID(84, "co_tx"),
CLK_MSR_ID(85, "arm_ring_osc_out_8"),
CLK_MSR_ID(86, "arm_ring_osc_out_9"),
CLK_MSR_ID(87, "mipi_dsi_phy"),
CLK_MSR_ID(88, "cis2_adapt"),
CLK_MSR_ID(89, "hdmi_todig"),
CLK_MSR_ID(90, "hdmitx_sys"),
CLK_MSR_ID(91, "nna_core"),
CLK_MSR_ID(92, "nna_axi"),
CLK_MSR_ID(93, "vad"),
CLK_MSR_ID(94, "eth_phy_rx"),
CLK_MSR_ID(95, "eth_phy_pll"),
CLK_MSR_ID(96, "vpu_b"),
CLK_MSR_ID(97, "cpu_b_tmp"),
CLK_MSR_ID(98, "ts"),
CLK_MSR_ID(99, "arm_ring_osc_out_10"),
CLK_MSR_ID(100, "arm_ring_osc_out_11"),
CLK_MSR_ID(101, "arm_ring_osc_out_12"),
CLK_MSR_ID(102, "arm_ring_osc_out_13"),
CLK_MSR_ID(103, "arm_ring_osc_out_14"),
CLK_MSR_ID(104, "arm_ring_osc_out_15"),
CLK_MSR_ID(105, "arm_ring_osc_out_16"),
CLK_MSR_ID(106, "ephy_test"),
CLK_MSR_ID(107, "au_dac_g128x"),
CLK_MSR_ID(108, "audio_locker_out"),
CLK_MSR_ID(109, "audio_locker_in"),
CLK_MSR_ID(110, "audio_tdmout_c_sclk"),
CLK_MSR_ID(111, "audio_tdmout_b_sclk"),
CLK_MSR_ID(112, "audio_tdmout_a_sclk"),
CLK_MSR_ID(113, "audio_tdmin_lb_sclk"),
CLK_MSR_ID(114, "audio_tdmin_c_sclk"),
CLK_MSR_ID(115, "audio_tdmin_b_sclk"),
CLK_MSR_ID(116, "audio_tdmin_a_sclk"),
CLK_MSR_ID(117, "audio_resample"),
CLK_MSR_ID(118, "audio_pdm_sys"),
CLK_MSR_ID(119, "audio_spdifout_b"),
CLK_MSR_ID(120, "audio_spdifout"),
CLK_MSR_ID(121, "audio_spdifin"),
CLK_MSR_ID(122, "audio_pdm_dclk"),
CLK_MSR_ID(123, "audio_resampled"),
CLK_MSR_ID(124, "earcrx_pll"),
CLK_MSR_ID(125, "earcrx_pll_test"),
CLK_MSR_ID(126, "csi_phy0"),
CLK_MSR_ID(127, "csi2_data"),
};
static int meson_measure_id(struct meson_msr_id *clk_msr_id,
unsigned int duration)
{
struct meson_msr *priv = clk_msr_id->priv;
unsigned int val;
int ret;
ret = mutex_lock_interruptible(&measure_lock);
if (ret)
return ret;
regmap_write(priv->regmap, MSR_CLK_REG0, 0);
/* Set measurement duration */
regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_DURATION,
FIELD_PREP(MSR_DURATION, duration - 1));
/* Set ID */
regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_CLK_SRC,
FIELD_PREP(MSR_CLK_SRC, clk_msr_id->id));
/* Enable & Start */
regmap_update_bits(priv->regmap, MSR_CLK_REG0,
MSR_RUN | MSR_ENABLE,
MSR_RUN | MSR_ENABLE);
ret = regmap_read_poll_timeout(priv->regmap, MSR_CLK_REG0,
val, !(val & MSR_BUSY), 10, 10000);
if (ret) {
mutex_unlock(&measure_lock);
return ret;
}
/* Disable */
regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_ENABLE, 0);
/* Get the value in multiple of gate time counts */
regmap_read(priv->regmap, MSR_CLK_REG2, &val);
mutex_unlock(&measure_lock);
if (val >= MSR_VAL_MASK)
return -EINVAL;
return DIV_ROUND_CLOSEST_ULL((val & MSR_VAL_MASK) * 1000000ULL,
duration);
}
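/*
 * Worked example of the arithmetic above (illustrative, assuming the gate
 * time programmed in MSR_DURATION is expressed in microseconds, which is
 * what the final scaling implies): with duration = 640 a 24 MHz clock
 * accumulates val = 24 * 640 = 15360 counts, and
 * 15360 * 1000000 / 640 = 24000000 Hz is returned. Counts at or above
 * MSR_VAL_MASK (0xffff) are treated as an overflow and rejected so that
 * the caller can retry with a shorter gate time.
 */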
static int meson_measure_best_id(struct meson_msr_id *clk_msr_id,
unsigned int *precision)
{
unsigned int duration = DIV_MAX;
int ret;
/* Start from max duration and down to min duration */
do {
ret = meson_measure_id(clk_msr_id, duration);
if (ret >= 0)
*precision = (2 * 1000000) / duration;
else
duration -= DIV_STEP;
} while (duration >= DIV_MIN && ret == -EINVAL);
return ret;
}
static int clk_msr_show(struct seq_file *s, void *data)
{
struct meson_msr_id *clk_msr_id = s->private;
unsigned int precision = 0;
int val;
val = meson_measure_best_id(clk_msr_id, &precision);
if (val < 0)
return val;
seq_printf(s, "%d\t+/-%dHz\n", val, precision);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_msr);
static int clk_msr_summary_show(struct seq_file *s, void *data)
{
struct meson_msr_id *msr_table = s->private;
unsigned int precision = 0;
int val, i;
seq_puts(s, " clock rate precision\n");
seq_puts(s, "---------------------------------------------\n");
for (i = 0 ; i < CLK_MSR_MAX ; ++i) {
if (!msr_table[i].name)
continue;
val = meson_measure_best_id(&msr_table[i], &precision);
if (val < 0)
return val;
seq_printf(s, " %-20s %10d +/-%dHz\n",
msr_table[i].name, val, precision);
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_msr_summary);
static const struct regmap_config meson_clk_msr_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = MSR_CLK_REG2,
};
static int meson_msr_probe(struct platform_device *pdev)
{
const struct meson_msr_id *match_data;
struct meson_msr *priv;
struct dentry *root, *clks;
void __iomem *base;
int i;
priv = devm_kzalloc(&pdev->dev, sizeof(struct meson_msr),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
match_data = device_get_match_data(&pdev->dev);
if (!match_data) {
dev_err(&pdev->dev, "failed to get match data\n");
return -ENODEV;
}
memcpy(priv->msr_table, match_data, sizeof(priv->msr_table));
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&meson_clk_msr_regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
root = debugfs_create_dir("meson-clk-msr", NULL);
clks = debugfs_create_dir("clks", root);
debugfs_create_file("measure_summary", 0444, root,
priv->msr_table, &clk_msr_summary_fops);
for (i = 0 ; i < CLK_MSR_MAX ; ++i) {
if (!priv->msr_table[i].name)
continue;
priv->msr_table[i].priv = priv;
debugfs_create_file(priv->msr_table[i].name, 0444, clks,
&priv->msr_table[i], &clk_msr_fops);
}
return 0;
}
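/* Meson8 and Meson8b share the same measurement table (clk_msr_m8) */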
static const struct of_device_id meson_msr_match_table[] = {
{
.compatible = "amlogic,meson-gx-clk-measure",
.data = (void *)clk_msr_gx,
},
{
.compatible = "amlogic,meson8-clk-measure",
.data = (void *)clk_msr_m8,
},
{
.compatible = "amlogic,meson8b-clk-measure",
.data = (void *)clk_msr_m8,
},
{
.compatible = "amlogic,meson-axg-clk-measure",
.data = (void *)clk_msr_axg,
},
{
.compatible = "amlogic,meson-g12a-clk-measure",
.data = (void *)clk_msr_g12a,
},
{
.compatible = "amlogic,meson-sm1-clk-measure",
.data = (void *)clk_msr_sm1,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, meson_msr_match_table);
static struct platform_driver meson_msr_driver = {
.probe = meson_msr_probe,
.driver = {
.name = "meson_msr",
.of_match_table = meson_msr_match_table,
},
};
module_platform_driver(meson_msr_driver);
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/soc/amlogic/meson-clk-measure.c
|
/*
* Copyright (c) 2017 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include <linux/bitfield.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#define AO_SEC_SD_CFG8 0xe0
#define AO_SEC_SOCINFO_OFFSET AO_SEC_SD_CFG8
#define SOCINFO_MAJOR GENMASK(31, 24)
#define SOCINFO_PACK GENMASK(23, 16)
#define SOCINFO_MINOR GENMASK(15, 8)
#define SOCINFO_MISC GENMASK(7, 0)
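/*
 * The chip id word packs four byte-wide fields:
 * [31:24] major (SoC family), [23:16] package, [15:8] minor, [7:0] misc.
 */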
static const struct meson_gx_soc_id {
const char *name;
unsigned int id;
} soc_ids[] = {
{ "GXBB", 0x1f },
{ "GXTVBB", 0x20 },
{ "GXL", 0x21 },
{ "GXM", 0x22 },
{ "TXL", 0x23 },
{ "TXLX", 0x24 },
{ "AXG", 0x25 },
{ "GXLX", 0x26 },
{ "TXHD", 0x27 },
{ "G12A", 0x28 },
{ "G12B", 0x29 },
{ "SM1", 0x2b },
{ "A1", 0x2c },
};
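/*
 * Package lookup: an entry matches when its major_id equals the SoC
 * major field and (package & pack_mask) == pack_id, which lets several
 * package names share a single SoC family.
 */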
static const struct meson_gx_package_id {
const char *name;
unsigned int major_id;
unsigned int pack_id;
unsigned int pack_mask;
} soc_packages[] = {
{ "S905", 0x1f, 0, 0x20 }, /* pack_id != 0x20 */
{ "S905H", 0x1f, 0x3, 0xf }, /* pack_id & 0xf == 0x3 */
{ "S905M", 0x1f, 0x20, 0xf0 }, /* pack_id == 0x20 */
{ "S905D", 0x21, 0, 0xf0 },
{ "S905X", 0x21, 0x80, 0xf0 },
{ "S905W", 0x21, 0xa0, 0xf0 },
{ "S905L", 0x21, 0xc0, 0xf0 },
{ "S905M2", 0x21, 0xe0, 0xf0 },
{ "S805X", 0x21, 0x30, 0xf0 },
{ "S805Y", 0x21, 0xb0, 0xf0 },
{ "S912", 0x22, 0, 0x0 }, /* Only S912 is known for GXM */
{ "962X", 0x24, 0x10, 0xf0 },
{ "962E", 0x24, 0x20, 0xf0 },
{ "A113X", 0x25, 0x37, 0xff },
{ "A113D", 0x25, 0x22, 0xff },
{ "S905D2", 0x28, 0x10, 0xf0 },
{ "S905Y2", 0x28, 0x30, 0xf0 },
{ "S905X2", 0x28, 0x40, 0xf0 },
{ "A311D", 0x29, 0x10, 0xf0 },
{ "S922X", 0x29, 0x40, 0xf0 },
{ "S905D3", 0x2b, 0x4, 0xf5 },
{ "S905X3", 0x2b, 0x5, 0xf5 },
{ "S905X3", 0x2b, 0x10, 0x3f },
{ "S905D3", 0x2b, 0x30, 0x3f },
{ "A113L", 0x2c, 0x0, 0xf8 },
};
static inline unsigned int socinfo_to_major(u32 socinfo)
{
return FIELD_GET(SOCINFO_MAJOR, socinfo);
}
static inline unsigned int socinfo_to_minor(u32 socinfo)
{
return FIELD_GET(SOCINFO_MINOR, socinfo);
}
static inline unsigned int socinfo_to_pack(u32 socinfo)
{
return FIELD_GET(SOCINFO_PACK, socinfo);
}
static inline unsigned int socinfo_to_misc(u32 socinfo)
{
return FIELD_GET(SOCINFO_MISC, socinfo);
}
static const char *socinfo_to_package_id(u32 socinfo)
{
unsigned int pack = socinfo_to_pack(socinfo);
unsigned int major = socinfo_to_major(socinfo);
int i;
for (i = 0 ; i < ARRAY_SIZE(soc_packages) ; ++i) {
if (soc_packages[i].major_id == major &&
soc_packages[i].pack_id ==
(pack & soc_packages[i].pack_mask))
return soc_packages[i].name;
}
return "Unknown";
}
static const char *socinfo_to_soc_id(u32 socinfo)
{
unsigned int id = socinfo_to_major(socinfo);
int i;
for (i = 0 ; i < ARRAY_SIZE(soc_ids) ; ++i) {
if (soc_ids[i].id == id)
return soc_ids[i].name;
}
return "Unknown";
}
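/*
 * Initcall flow: locate the "amlogic,meson-gx-ao-secure" syscon node,
 * check that it is enabled and advertises "amlogic,has-chip-id", read
 * the chip id word at AO_SEC_SOCINFO_OFFSET, then decode it and
 * register a soc_device carrying the family, soc_id and revision
 * strings.
 */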
static int __init meson_gx_socinfo_init(void)
{
struct soc_device_attribute *soc_dev_attr;
struct soc_device *soc_dev;
struct device_node *np;
struct regmap *regmap;
unsigned int socinfo;
struct device *dev;
int ret;
/* look up for chipid node */
np = of_find_compatible_node(NULL, NULL, "amlogic,meson-gx-ao-secure");
if (!np)
return -ENODEV;
/* check if interface is enabled */
if (!of_device_is_available(np)) {
of_node_put(np);
return -ENODEV;
}
/* check if chip-id is available */
if (!of_property_read_bool(np, "amlogic,has-chip-id")) {
of_node_put(np);
return -ENODEV;
}
/* node should be a syscon */
regmap = syscon_node_to_regmap(np);
of_node_put(np);
if (IS_ERR(regmap)) {
pr_err("%s: failed to get regmap\n", __func__);
return -ENODEV;
}
ret = regmap_read(regmap, AO_SEC_SOCINFO_OFFSET, &socinfo);
if (ret < 0)
return ret;
if (!socinfo) {
pr_err("%s: invalid chipid value\n", __func__);
return -EINVAL;
}
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENODEV;
soc_dev_attr->family = "Amlogic Meson";
soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%x:%x - %x:%x",
socinfo_to_major(socinfo),
socinfo_to_minor(socinfo),
socinfo_to_pack(socinfo),
socinfo_to_misc(socinfo));
soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "%s (%s)",
socinfo_to_soc_id(socinfo),
socinfo_to_package_id(socinfo));
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr->revision);
kfree_const(soc_dev_attr->soc_id);
kfree(soc_dev_attr);
return PTR_ERR(soc_dev);
}
dev = soc_device_to_device(soc_dev);
dev_info(dev, "Amlogic Meson %s Revision %x:%x (%x:%x) Detected\n",
soc_dev_attr->soc_id,
socinfo_to_major(socinfo),
socinfo_to_minor(socinfo),
socinfo_to_pack(socinfo),
socinfo_to_misc(socinfo));
return 0;
}
device_initcall(meson_gx_socinfo_init);
|
linux-master
|
drivers/soc/amlogic/meson-gx-socinfo.c
|